repo_name
string
path
string
copies
string
size
string
content
string
license
string
kyupltd/linux
arch/sparc/prom/tree_32.c
12136
7216
/* * tree.c: Basic device tree traversal/scanning for the Linux * prom library. * * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu) */ #include <linux/string.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/ctype.h> #include <linux/module.h> #include <asm/openprom.h> #include <asm/oplib.h> extern void restore_current(void); static char promlib_buf[128]; /* Internal version of prom_getchild that does not alter return values. */ static phandle __prom_getchild(phandle node) { unsigned long flags; phandle cnode; spin_lock_irqsave(&prom_lock, flags); cnode = prom_nodeops->no_child(node); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); return cnode; } /* Return the child of node 'node' or zero if no this node has no * direct descendent. */ phandle prom_getchild(phandle node) { phandle cnode; if ((s32)node == -1) return 0; cnode = __prom_getchild(node); if (cnode == 0 || (s32)cnode == -1) return 0; return cnode; } EXPORT_SYMBOL(prom_getchild); /* Internal version of prom_getsibling that does not alter return values. */ static phandle __prom_getsibling(phandle node) { unsigned long flags; phandle cnode; spin_lock_irqsave(&prom_lock, flags); cnode = prom_nodeops->no_nextnode(node); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); return cnode; } /* Return the next sibling of node 'node' or zero if no more siblings * at this level of depth in the tree. */ phandle prom_getsibling(phandle node) { phandle sibnode; if ((s32)node == -1) return 0; sibnode = __prom_getsibling(node); if (sibnode == 0 || (s32)sibnode == -1) return 0; return sibnode; } EXPORT_SYMBOL(prom_getsibling); /* Return the length in bytes of property 'prop' at node 'node'. * Return -1 on error. 
*/ int prom_getproplen(phandle node, const char *prop) { int ret; unsigned long flags; if((!node) || (!prop)) return -1; spin_lock_irqsave(&prom_lock, flags); ret = prom_nodeops->no_proplen(node, prop); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); return ret; } EXPORT_SYMBOL(prom_getproplen); /* Acquire a property 'prop' at node 'node' and place it in * 'buffer' which has a size of 'bufsize'. If the acquisition * was successful the length will be returned, else -1 is returned. */ int prom_getproperty(phandle node, const char *prop, char *buffer, int bufsize) { int plen, ret; unsigned long flags; plen = prom_getproplen(node, prop); if((plen > bufsize) || (plen == 0) || (plen == -1)) return -1; /* Ok, things seem all right. */ spin_lock_irqsave(&prom_lock, flags); ret = prom_nodeops->no_getprop(node, prop, buffer); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); return ret; } EXPORT_SYMBOL(prom_getproperty); /* Acquire an integer property and return its value. Returns -1 * on failure. */ int prom_getint(phandle node, char *prop) { static int intprop; if(prom_getproperty(node, prop, (char *) &intprop, sizeof(int)) != -1) return intprop; return -1; } EXPORT_SYMBOL(prom_getint); /* Acquire an integer property, upon error return the passed default * integer. */ int prom_getintdefault(phandle node, char *property, int deflt) { int retval; retval = prom_getint(node, property); if(retval == -1) return deflt; return retval; } EXPORT_SYMBOL(prom_getintdefault); /* Acquire a boolean property, 1=TRUE 0=FALSE. */ int prom_getbool(phandle node, char *prop) { int retval; retval = prom_getproplen(node, prop); if(retval == -1) return 0; return 1; } EXPORT_SYMBOL(prom_getbool); /* Acquire a property whose value is a string, returns a null * string on error. The char pointer is the user supplied string * buffer. 
*/ void prom_getstring(phandle node, char *prop, char *user_buf, int ubuf_size) { int len; len = prom_getproperty(node, prop, user_buf, ubuf_size); if(len != -1) return; user_buf[0] = 0; } EXPORT_SYMBOL(prom_getstring); /* Search siblings at 'node_start' for a node with name * 'nodename'. Return node if successful, zero if not. */ phandle prom_searchsiblings(phandle node_start, char *nodename) { phandle thisnode; int error; for(thisnode = node_start; thisnode; thisnode=prom_getsibling(thisnode)) { error = prom_getproperty(thisnode, "name", promlib_buf, sizeof(promlib_buf)); /* Should this ever happen? */ if(error == -1) continue; if(strcmp(nodename, promlib_buf)==0) return thisnode; } return 0; } EXPORT_SYMBOL(prom_searchsiblings); /* Interal version of nextprop that does not alter return values. */ static char *__prom_nextprop(phandle node, char * oprop) { unsigned long flags; char *prop; spin_lock_irqsave(&prom_lock, flags); prop = prom_nodeops->no_nextprop(node, oprop); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); return prop; } /* Return the property type string after property type 'oprop' * at node 'node' . Returns empty string if no more * property types for this node. 
*/ char *prom_nextprop(phandle node, char *oprop, char *buffer) { if (node == 0 || (s32)node == -1) return ""; return __prom_nextprop(node, oprop); } EXPORT_SYMBOL(prom_nextprop); phandle prom_finddevice(char *name) { char nbuf[128]; char *s = name, *d; phandle node = prom_root_node, node2; unsigned int which_io, phys_addr; struct linux_prom_registers reg[PROMREG_MAX]; while (*s++) { if (!*s) return node; /* path '.../' is legal */ node = prom_getchild(node); for (d = nbuf; *s != 0 && *s != '@' && *s != '/';) *d++ = *s++; *d = 0; node = prom_searchsiblings(node, nbuf); if (!node) return 0; if (*s == '@') { if (isxdigit(s[1]) && s[2] == ',') { which_io = simple_strtoul(s+1, NULL, 16); phys_addr = simple_strtoul(s+3, &d, 16); if (d != s + 3 && (!*d || *d == '/') && d <= s + 3 + 8) { node2 = node; while (node2 && (s32)node2 != -1) { if (prom_getproperty (node2, "reg", (char *)reg, sizeof (reg)) > 0) { if (which_io == reg[0].which_io && phys_addr == reg[0].phys_addr) { node = node2; break; } } node2 = prom_getsibling(node2); if (!node2 || (s32)node2 == -1) break; node2 = prom_searchsiblings(prom_getsibling(node2), nbuf); } } } while (*s != 0 && *s != '/') s++; } } return node; } EXPORT_SYMBOL(prom_finddevice); /* Set property 'pname' at node 'node' to value 'value' which has a length * of 'size' bytes. Return the number of bytes the prom accepted. 
*/ int prom_setprop(phandle node, const char *pname, char *value, int size) { unsigned long flags; int ret; if (size == 0) return 0; if ((pname == NULL) || (value == NULL)) return 0; spin_lock_irqsave(&prom_lock, flags); ret = prom_nodeops->no_setprop(node, pname, value, size); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); return ret; } EXPORT_SYMBOL(prom_setprop); phandle prom_inst2pkg(int inst) { phandle node; unsigned long flags; spin_lock_irqsave(&prom_lock, flags); node = (*romvec->pv_v2devops.v2_inst2pkg)(inst); restore_current(); spin_unlock_irqrestore(&prom_lock, flags); if ((s32)node == -1) return 0; return node; }
gpl-2.0
winxuser/android_kernel_samsung_i9305
arch/arm/mach-exynos/asv-4x12.c
105
8831
/* linux/arch/arm/mach-exynos/asv-4x12.c * * Copyright (c) 2011 Samsung Electronics Co., Ltd. * http://www.samsung.com/ * * EXYNOS4X12 - ASV(Adaptive Supply Voltage) driver * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <mach/asv.h> #include <mach/map.h> #include <plat/cpu.h> /* ASV function for Fused Chip */ #define IDS_ARM_OFFSET 24 #define IDS_ARM_MASK 0xFF #define HPM_OFFSET 12 #define HPM_MASK 0x1F #define FUSED_SG_OFFSET 3 #define ORIG_SG_OFFSET 17 #define ORIG_SG_MASK 0xF #define MOD_SG_OFFSET 21 #define MOD_SG_MASK 0x7 #define LOCKING_OFFSET 7 #define LOCKING_MASK 0x1F #define EMA_OFFSET 6 #define EMA_MASK 0x1 #define DEFAULT_ASV_GROUP 1 #define CHIP_ID_REG (S5P_VA_CHIPID + 0x4) unsigned int exynos_armclk_max; struct asv_judge_table exynos4x12_limit[] = { /* HPM, IDS */ { 0, 0}, /* Reserved Group */ { 0, 0}, /* Reserved Group */ { 14, 9}, { 16, 14}, { 18, 17}, { 20, 20}, { 21, 24}, { 22, 30}, { 23, 34}, { 24, 39}, {100, 100}, {999, 999}, /* Reserved Group */ }; struct asv_judge_table exynos4x12_limit_rev2[] = { /* HPM, IDS */ { 0, 3}, /* Reserved Group */ { 15, 8}, /* ASV1 Group */ { 16, 11}, { 18, 14}, { 19, 18}, { 20, 22}, { 21, 26}, { 22, 29}, { 23, 36}, { 24, 40}, { 25, 45}, { 26, 50}, {999, 999}, /* ASV11 Group */ }; struct asv_judge_table exynos4212_limit[] = { /* HPM, IDS */ { 15, 6}, /* Reserved Group */ { 15, 9}, { 17, 12}, { 20, 14}, { 22, 18}, { 24, 22}, { 26, 31}, { 27, 35}, { 28, 39}, {100, 44}, {999, 49}, /* Reserved Group */ {999, 999}, /* Reserved Group */ }; static int exynos4x12_get_hpm(struct samsung_asv *asv_info) { asv_info->hpm_result = (asv_info->pkg_id >> HPM_OFFSET) & HPM_MASK; return 0; } static int exynos4x12_get_ids(struct samsung_asv 
*asv_info) { asv_info->ids_result = (asv_info->pkg_id >> IDS_ARM_OFFSET) & IDS_ARM_MASK; return 0; } static void exynos4x12_pre_set_abb(void) { if (soc_is_exynos4212()) { switch (exynos_result_of_asv) { case 0: case 1: exynos4x12_set_abb_member(ABB_ARM, ABB_MODE_080V); exynos4x12_set_abb_member(ABB_INT, ABB_MODE_100V); exynos4x12_set_abb_member(ABB_MIF, ABB_MODE_100V); exynos4x12_set_abb_member(ABB_G3D, ABB_MODE_100V); break; case 2: case 3: exynos4x12_set_abb_member(ABB_ARM, ABB_MODE_100V); exynos4x12_set_abb_member(ABB_INT, ABB_MODE_100V); exynos4x12_set_abb_member(ABB_MIF, ABB_MODE_140V); exynos4x12_set_abb_member(ABB_G3D, ABB_MODE_100V); break; case 4: case 5: case 6: case 7: case 8: case 9: case 10: case 11: case 12: exynos4x12_set_abb_member(ABB_ARM, ABB_MODE_130V); exynos4x12_set_abb_member(ABB_INT, ABB_MODE_130V); exynos4x12_set_abb_member(ABB_MIF, ABB_MODE_140V); exynos4x12_set_abb_member(ABB_G3D, ABB_MODE_130V); break; default: exynos4x12_set_abb(ABB_MODE_130V); break; } } else if (soc_is_exynos4412()) { if (samsung_rev() >= EXYNOS4412_REV_2_0) { switch (exynos_result_of_asv) { case 0: case 1: exynos4x12_set_abb_member(ABB_ARM, ABB_MODE_075V); exynos4x12_set_abb_member(ABB_INT, ABB_MODE_100V); exynos4x12_set_abb_member(ABB_MIF, ABB_MODE_100V); exynos4x12_set_abb_member(ABB_G3D, ABB_MODE_100V); break; case 2: exynos4x12_set_abb_member(ABB_ARM, ABB_MODE_100V); exynos4x12_set_abb_member(ABB_INT, ABB_MODE_100V); exynos4x12_set_abb_member(ABB_MIF, ABB_MODE_140V); exynos4x12_set_abb_member(ABB_G3D, ABB_MODE_100V); break; case 3: case 4: case 5: case 6: case 7: exynos4x12_set_abb_member(ABB_ARM, ABB_MODE_130V); exynos4x12_set_abb_member(ABB_INT, ABB_MODE_130V); exynos4x12_set_abb_member(ABB_MIF, ABB_MODE_140V); exynos4x12_set_abb_member(ABB_G3D, ABB_MODE_100V); break; case 8: case 9: case 10: case 11: case 12: exynos4x12_set_abb_member(ABB_ARM, ABB_MODE_130V); exynos4x12_set_abb_member(ABB_INT, ABB_MODE_130V); exynos4x12_set_abb_member(ABB_MIF, ABB_MODE_140V); 
exynos4x12_set_abb_member(ABB_G3D, ABB_MODE_130V); break; default: exynos4x12_set_abb(ABB_MODE_130V); break; } } else { switch (exynos_result_of_asv) { case 0: case 1: case 2: case 3: exynos4x12_set_abb(ABB_MODE_100V); break; case 4: case 5: case 6: case 7: exynos4x12_set_abb(ABB_MODE_130V); break; default: exynos4x12_set_abb(ABB_MODE_130V); break; } } } else { pr_err("%s: Can't find SoC type \n", __func__); } } static int exynos4x12_asv_store_result(struct samsung_asv *asv_info) { unsigned int i; if (soc_is_exynos4412()) { if (samsung_rev() >= EXYNOS4412_REV_2_0) { for (i = 0; i < ARRAY_SIZE(exynos4x12_limit_rev2); i++) { if ((asv_info->ids_result <= exynos4x12_limit_rev2[i].ids_limit) || (asv_info->hpm_result <= exynos4x12_limit_rev2[i].hpm_limit)) { exynos_result_of_asv = i; break; } } } else { for (i = 0; i < ARRAY_SIZE(exynos4x12_limit); i++) { if ((asv_info->ids_result <= exynos4x12_limit[i].ids_limit) || (asv_info->hpm_result <= exynos4x12_limit[i].hpm_limit)) { exynos_result_of_asv = i; break; } } } } else { for (i = 0; i < ARRAY_SIZE(exynos4212_limit); i++) { if ((asv_info->ids_result <= exynos4212_limit[i].ids_limit) || (asv_info->hpm_result <= exynos4212_limit[i].hpm_limit)) { exynos_result_of_asv = i; break; } } } /* * If ASV result value is lower than default value * Fix with default value. 
*/ if (samsung_rev() < EXYNOS4412_REV_2_0) { if (exynos_result_of_asv < DEFAULT_ASV_GROUP) exynos_result_of_asv = DEFAULT_ASV_GROUP; } #ifndef CONFIG_SAMSUNG_PRODUCT_SHIP pr_info("EXYNOS4X12(NO SG): IDS : %d HPM : %d RESULT : %d\n", asv_info->ids_result, asv_info->hpm_result, exynos_result_of_asv); #endif exynos4x12_pre_set_abb(); return 0; } int exynos4x12_asv_init(struct samsung_asv *asv_info) { unsigned int tmp; unsigned int exynos_orig_sp; unsigned int exynos_mod_sp; int exynos_cal_asv; exynos_result_of_asv = 0; exynos_special_flag = 0; exynos_dynamic_ema = false; pr_info("EXYNOS4X12: Adaptive Support Voltage init\n"); tmp = __raw_readl(CHIP_ID_REG); /* Store PKG_ID */ asv_info->pkg_id = tmp; #ifdef CONFIG_EXYNOS4X12_1000MHZ_SUPPORT exynos_armclk_max = 1000000; #else /* If maximum armclock is fused, set its value */ if (samsung_rev() < EXYNOS4412_REV_2_0) { switch (tmp & MOD_SG_MASK) { case 0: case 3: exynos_armclk_max = 1400000; break; case 2: exynos_armclk_max = 1000000; break; default: exynos_armclk_max = 1400000; break; } } #endif if ((tmp >> EMA_OFFSET) & EMA_MASK) exynos_dynamic_ema = true; else exynos_dynamic_ema = false; /* If Speed group is fused, get speed group from */ if ((tmp >> FUSED_SG_OFFSET) & 0x1) { exynos_orig_sp = (tmp >> ORIG_SG_OFFSET) & ORIG_SG_MASK; exynos_mod_sp = (tmp >> MOD_SG_OFFSET) & MOD_SG_MASK; exynos_cal_asv = exynos_orig_sp - exynos_mod_sp; if (soc_is_exynos4212()) { if (exynos_cal_asv < 0) exynos_result_of_asv = DEFAULT_ASV_GROUP; else exynos_result_of_asv = exynos_cal_asv; } else { /* * If There is no origin speed group, * store 1 asv group into exynos_result_of_asv. 
*/ if (!exynos_orig_sp) { pr_info("EXYNOS4X12: No Origin speed Group\n"); exynos_result_of_asv = DEFAULT_ASV_GROUP; } else { if (exynos_cal_asv < DEFAULT_ASV_GROUP) exynos_result_of_asv = DEFAULT_ASV_GROUP; else exynos_result_of_asv = exynos_cal_asv; } } pr_info("EXYNOS4X12(SG): ORIG : %d MOD : %d RESULT : %d\n", exynos_orig_sp, exynos_mod_sp, exynos_result_of_asv); /* * If fused speed group is 1 and ids value is lower than 3, * voltage value should be set to asv 0 group. */ if (samsung_rev() >= EXYNOS4412_REV_2_0) { if (exynos_result_of_asv == 1) { exynos4x12_get_ids(asv_info); if ((asv_info->ids_result <= exynos4x12_limit_rev2[0].ids_limit)) exynos_result_of_asv = 0; } } /* set Special flag into exynos_special_flag */ exynos_special_flag = (tmp >> LOCKING_OFFSET) & LOCKING_MASK; exynos4x12_pre_set_abb(); return -EEXIST; } /* set Special flag into exynos_special_flag */ exynos_special_flag = (tmp >> LOCKING_OFFSET) & LOCKING_MASK; asv_info->get_ids = exynos4x12_get_ids; asv_info->get_hpm = exynos4x12_get_hpm; asv_info->store_result = exynos4x12_asv_store_result; return 0; }
gpl-2.0
bugralevent/linux
sound/pci/hda/patch_si3054.c
617
9753
/* * Universal Interface for Intel High Definition Audio Codec * * HD audio interface patch for Silicon Labs 3054/5 modem codec * * Copyright (c) 2005 Sasha Khapyorsky <sashak@alsa-project.org> * Takashi Iwai <tiwai@suse.de> * * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/module.h> #include <sound/core.h> #include "hda_codec.h" #include "hda_local.h" /* si3054 verbs */ #define SI3054_VERB_READ_NODE 0x900 #define SI3054_VERB_WRITE_NODE 0x100 /* si3054 nodes (registers) */ #define SI3054_EXTENDED_MID 2 #define SI3054_LINE_RATE 3 #define SI3054_LINE_LEVEL 4 #define SI3054_GPIO_CFG 5 #define SI3054_GPIO_POLARITY 6 #define SI3054_GPIO_STICKY 7 #define SI3054_GPIO_WAKEUP 8 #define SI3054_GPIO_STATUS 9 #define SI3054_GPIO_CONTROL 10 #define SI3054_MISC_AFE 11 #define SI3054_CHIPID 12 #define SI3054_LINE_CFG1 13 #define SI3054_LINE_STATUS 14 #define SI3054_DC_TERMINATION 15 #define SI3054_LINE_CONFIG 16 #define SI3054_CALLPROG_ATT 17 #define SI3054_SQ_CONTROL 18 #define SI3054_MISC_CONTROL 19 #define SI3054_RING_CTRL1 20 #define SI3054_RING_CTRL2 21 /* extended MID */ #define SI3054_MEI_READY 0xf /* line level */ #define SI3054_ATAG_MASK 0x00f0 #define SI3054_DTAG_MASK 0xf000 /* GPIO bits */ #define SI3054_GPIO_OH 0x0001 #define SI3054_GPIO_CID 
0x0002 /* chipid and revisions */ #define SI3054_CHIPID_CODEC_REV_MASK 0x000f #define SI3054_CHIPID_DAA_REV_MASK 0x00f0 #define SI3054_CHIPID_INTERNATIONAL 0x0100 #define SI3054_CHIPID_DAA_ID 0x0f00 #define SI3054_CHIPID_CODEC_ID (1<<12) /* si3054 codec registers (nodes) access macros */ #define GET_REG(codec,reg) (snd_hda_codec_read(codec,reg,0,SI3054_VERB_READ_NODE,0)) #define SET_REG(codec,reg,val) (snd_hda_codec_write(codec,reg,0,SI3054_VERB_WRITE_NODE,val)) #define SET_REG_CACHE(codec,reg,val) \ snd_hda_codec_write_cache(codec,reg,0,SI3054_VERB_WRITE_NODE,val) struct si3054_spec { unsigned international; }; /* * Modem mixer */ #define PRIVATE_VALUE(reg,mask) ((reg<<16)|(mask&0xffff)) #define PRIVATE_REG(val) ((val>>16)&0xffff) #define PRIVATE_MASK(val) (val&0xffff) #define si3054_switch_info snd_ctl_boolean_mono_info static int si3054_switch_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *uvalue) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); u16 reg = PRIVATE_REG(kcontrol->private_value); u16 mask = PRIVATE_MASK(kcontrol->private_value); uvalue->value.integer.value[0] = (GET_REG(codec, reg)) & mask ? 
1 : 0 ; return 0; } static int si3054_switch_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *uvalue) { struct hda_codec *codec = snd_kcontrol_chip(kcontrol); u16 reg = PRIVATE_REG(kcontrol->private_value); u16 mask = PRIVATE_MASK(kcontrol->private_value); if (uvalue->value.integer.value[0]) SET_REG_CACHE(codec, reg, (GET_REG(codec, reg)) | mask); else SET_REG_CACHE(codec, reg, (GET_REG(codec, reg)) & ~mask); return 0; } #define SI3054_KCONTROL(kname,reg,mask) { \ .iface = SNDRV_CTL_ELEM_IFACE_MIXER, \ .name = kname, \ .subdevice = HDA_SUBDEV_NID_FLAG | reg, \ .info = si3054_switch_info, \ .get = si3054_switch_get, \ .put = si3054_switch_put, \ .private_value = PRIVATE_VALUE(reg,mask), \ } static const struct snd_kcontrol_new si3054_modem_mixer[] = { SI3054_KCONTROL("Off-hook Switch", SI3054_GPIO_CONTROL, SI3054_GPIO_OH), SI3054_KCONTROL("Caller ID Switch", SI3054_GPIO_CONTROL, SI3054_GPIO_CID), {} }; static int si3054_build_controls(struct hda_codec *codec) { return snd_hda_add_new_ctls(codec, si3054_modem_mixer); } /* * PCM callbacks */ static int si3054_pcm_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, unsigned int format, struct snd_pcm_substream *substream) { u16 val; SET_REG(codec, SI3054_LINE_RATE, substream->runtime->rate); val = GET_REG(codec, SI3054_LINE_LEVEL); val &= 0xff << (8 * (substream->stream != SNDRV_PCM_STREAM_PLAYBACK)); val |= ((stream_tag & 0xf) << 4) << (8 * (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)); SET_REG(codec, SI3054_LINE_LEVEL, val); snd_hda_codec_setup_stream(codec, hinfo->nid, stream_tag, 0, format); return 0; } static int si3054_pcm_open(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { static unsigned int rates[] = { 8000, 9600, 16000 }; static struct snd_pcm_hw_constraint_list hw_constraints_rates = { .count = ARRAY_SIZE(rates), .list = rates, .mask = 0, }; substream->runtime->hw.period_bytes_min = 80; return 
snd_pcm_hw_constraint_list(substream->runtime, 0, SNDRV_PCM_HW_PARAM_RATE, &hw_constraints_rates); } static const struct hda_pcm_stream si3054_pcm = { .substreams = 1, .channels_min = 1, .channels_max = 1, .nid = 0x1, .rates = SNDRV_PCM_RATE_8000|SNDRV_PCM_RATE_16000|SNDRV_PCM_RATE_KNOT, .formats = SNDRV_PCM_FMTBIT_S16_LE, .maxbps = 16, .ops = { .open = si3054_pcm_open, .prepare = si3054_pcm_prepare, }, }; static int si3054_build_pcms(struct hda_codec *codec) { struct hda_pcm *info; info = snd_hda_codec_pcm_new(codec, "Si3054 Modem"); if (!info) return -ENOMEM; info->stream[SNDRV_PCM_STREAM_PLAYBACK] = si3054_pcm; info->stream[SNDRV_PCM_STREAM_CAPTURE] = si3054_pcm; info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = codec->core.mfg; info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = codec->core.mfg; info->pcm_type = HDA_PCM_TYPE_MODEM; return 0; } /* * Init part */ static int si3054_init(struct hda_codec *codec) { struct si3054_spec *spec = codec->spec; unsigned wait_count; u16 val; if (snd_hdac_regmap_add_vendor_verb(&codec->core, SI3054_VERB_WRITE_NODE)) return -ENOMEM; snd_hda_codec_write(codec, AC_NODE_ROOT, 0, AC_VERB_SET_CODEC_RESET, 0); snd_hda_codec_write(codec, codec->core.mfg, 0, AC_VERB_SET_STREAM_FORMAT, 0); SET_REG(codec, SI3054_LINE_RATE, 9600); SET_REG(codec, SI3054_LINE_LEVEL, SI3054_DTAG_MASK|SI3054_ATAG_MASK); SET_REG(codec, SI3054_EXTENDED_MID, 0); wait_count = 10; do { msleep(2); val = GET_REG(codec, SI3054_EXTENDED_MID); } while ((val & SI3054_MEI_READY) != SI3054_MEI_READY && wait_count--); if((val&SI3054_MEI_READY) != SI3054_MEI_READY) { codec_err(codec, "si3054: cannot initialize. 
EXT MID = %04x\n", val); /* let's pray that this is no fatal error */ /* return -EACCES; */ } SET_REG(codec, SI3054_GPIO_POLARITY, 0xffff); SET_REG(codec, SI3054_GPIO_CFG, 0x0); SET_REG(codec, SI3054_MISC_AFE, 0); SET_REG(codec, SI3054_LINE_CFG1,0x200); if((GET_REG(codec,SI3054_LINE_STATUS) & (1<<6)) == 0) { codec_dbg(codec, "Link Frame Detect(FDT) is not ready (line status: %04x)\n", GET_REG(codec,SI3054_LINE_STATUS)); } spec->international = GET_REG(codec, SI3054_CHIPID) & SI3054_CHIPID_INTERNATIONAL; return 0; } static void si3054_free(struct hda_codec *codec) { kfree(codec->spec); } /* */ static const struct hda_codec_ops si3054_patch_ops = { .build_controls = si3054_build_controls, .build_pcms = si3054_build_pcms, .init = si3054_init, .free = si3054_free, }; static int patch_si3054(struct hda_codec *codec) { struct si3054_spec *spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (spec == NULL) return -ENOMEM; codec->spec = spec; codec->patch_ops = si3054_patch_ops; return 0; } /* * patch entries */ static const struct hda_codec_preset snd_hda_preset_si3054[] = { { .id = 0x163c3055, .name = "Si3054", .patch = patch_si3054 }, { .id = 0x163c3155, .name = "Si3054", .patch = patch_si3054 }, { .id = 0x11c13026, .name = "Si3054", .patch = patch_si3054 }, { .id = 0x11c13055, .name = "Si3054", .patch = patch_si3054 }, { .id = 0x11c13155, .name = "Si3054", .patch = patch_si3054 }, { .id = 0x10573055, .name = "Si3054", .patch = patch_si3054 }, { .id = 0x10573057, .name = "Si3054", .patch = patch_si3054 }, { .id = 0x10573155, .name = "Si3054", .patch = patch_si3054 }, /* VIA HDA on Clevo m540 */ { .id = 0x11063288, .name = "Si3054", .patch = patch_si3054 }, /* Asus A8J Modem (SM56) */ { .id = 0x15433155, .name = "Si3054", .patch = patch_si3054 }, /* LG LW20 modem */ { .id = 0x18540018, .name = "Si3054", .patch = patch_si3054 }, {} }; MODULE_ALIAS("snd-hda-codec-id:163c3055"); MODULE_ALIAS("snd-hda-codec-id:163c3155"); MODULE_ALIAS("snd-hda-codec-id:11c13026"); 
MODULE_ALIAS("snd-hda-codec-id:11c13055"); MODULE_ALIAS("snd-hda-codec-id:11c13155"); MODULE_ALIAS("snd-hda-codec-id:10573055"); MODULE_ALIAS("snd-hda-codec-id:10573057"); MODULE_ALIAS("snd-hda-codec-id:10573155"); MODULE_ALIAS("snd-hda-codec-id:11063288"); MODULE_ALIAS("snd-hda-codec-id:15433155"); MODULE_ALIAS("snd-hda-codec-id:18540018"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Si3054 HD-audio modem codec"); static struct hda_codec_driver si3054_driver = { .preset = snd_hda_preset_si3054, }; module_hda_codec_driver(si3054_driver);
gpl-2.0
gianmarcorev/rpi_linux
arch/x86/kernel/kgdb.c
617
21349
/* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2, or (at your option) any * later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * */ /* * Copyright (C) 2004 Amit S. Kale <amitkale@linsyssoft.com> * Copyright (C) 2000-2001 VERITAS Software Corporation. * Copyright (C) 2002 Andi Kleen, SuSE Labs * Copyright (C) 2004 LinSysSoft Technologies Pvt. Ltd. * Copyright (C) 2007 MontaVista Software, Inc. * Copyright (C) 2007-2008 Jason Wessel, Wind River Systems, Inc. */ /**************************************************************************** * Contributor: Lake Stevens Instrument Division$ * Written by: Glenn Engel $ * Updated by: Amit Kale<akale@veritas.com> * Updated by: Tom Rini <trini@kernel.crashing.org> * Updated by: Jason Wessel <jason.wessel@windriver.com> * Modified for 386 by Jim Kingdon, Cygnus Support. 
* Origianl kgdb, compatibility with 2.1.xx kernel by * David Grothe <dave@gcom.com> * Integrated into 2.2.5 kernel by Tigran Aivazian <tigran@sco.com> * X86_64 changes from Andi Kleen's patch merged by Jim Houston */ #include <linux/spinlock.h> #include <linux/kdebug.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/ptrace.h> #include <linux/sched.h> #include <linux/delay.h> #include <linux/kgdb.h> #include <linux/smp.h> #include <linux/nmi.h> #include <linux/hw_breakpoint.h> #include <linux/uaccess.h> #include <linux/memory.h> #include <asm/debugreg.h> #include <asm/apicdef.h> #include <asm/apic.h> #include <asm/nmi.h> struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = { #ifdef CONFIG_X86_32 { "ax", 4, offsetof(struct pt_regs, ax) }, { "cx", 4, offsetof(struct pt_regs, cx) }, { "dx", 4, offsetof(struct pt_regs, dx) }, { "bx", 4, offsetof(struct pt_regs, bx) }, { "sp", 4, offsetof(struct pt_regs, sp) }, { "bp", 4, offsetof(struct pt_regs, bp) }, { "si", 4, offsetof(struct pt_regs, si) }, { "di", 4, offsetof(struct pt_regs, di) }, { "ip", 4, offsetof(struct pt_regs, ip) }, { "flags", 4, offsetof(struct pt_regs, flags) }, { "cs", 4, offsetof(struct pt_regs, cs) }, { "ss", 4, offsetof(struct pt_regs, ss) }, { "ds", 4, offsetof(struct pt_regs, ds) }, { "es", 4, offsetof(struct pt_regs, es) }, #else { "ax", 8, offsetof(struct pt_regs, ax) }, { "bx", 8, offsetof(struct pt_regs, bx) }, { "cx", 8, offsetof(struct pt_regs, cx) }, { "dx", 8, offsetof(struct pt_regs, dx) }, { "si", 8, offsetof(struct pt_regs, si) }, { "di", 8, offsetof(struct pt_regs, di) }, { "bp", 8, offsetof(struct pt_regs, bp) }, { "sp", 8, offsetof(struct pt_regs, sp) }, { "r8", 8, offsetof(struct pt_regs, r8) }, { "r9", 8, offsetof(struct pt_regs, r9) }, { "r10", 8, offsetof(struct pt_regs, r10) }, { "r11", 8, offsetof(struct pt_regs, r11) }, { "r12", 8, offsetof(struct pt_regs, r12) }, { "r13", 8, offsetof(struct pt_regs, r13) }, { "r14", 8, offsetof(struct pt_regs, r14) }, { 
"r15", 8, offsetof(struct pt_regs, r15) }, { "ip", 8, offsetof(struct pt_regs, ip) }, { "flags", 4, offsetof(struct pt_regs, flags) }, { "cs", 4, offsetof(struct pt_regs, cs) }, { "ss", 4, offsetof(struct pt_regs, ss) }, { "ds", 4, -1 }, { "es", 4, -1 }, #endif { "fs", 4, -1 }, { "gs", 4, -1 }, }; int dbg_set_reg(int regno, void *mem, struct pt_regs *regs) { if ( #ifdef CONFIG_X86_32 regno == GDB_SS || regno == GDB_FS || regno == GDB_GS || #endif regno == GDB_SP || regno == GDB_ORIG_AX) return 0; if (dbg_reg_def[regno].offset != -1) memcpy((void *)regs + dbg_reg_def[regno].offset, mem, dbg_reg_def[regno].size); return 0; } char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs) { if (regno == GDB_ORIG_AX) { memcpy(mem, &regs->orig_ax, sizeof(regs->orig_ax)); return "orig_ax"; } if (regno >= DBG_MAX_REG_NUM || regno < 0) return NULL; if (dbg_reg_def[regno].offset != -1) memcpy(mem, (void *)regs + dbg_reg_def[regno].offset, dbg_reg_def[regno].size); #ifdef CONFIG_X86_32 switch (regno) { case GDB_SS: if (!user_mode(regs)) *(unsigned long *)mem = __KERNEL_DS; break; case GDB_SP: if (!user_mode(regs)) *(unsigned long *)mem = kernel_stack_pointer(regs); break; case GDB_GS: case GDB_FS: *(unsigned long *)mem = 0xFFFF; break; } #endif return dbg_reg_def[regno].name; } /** * sleeping_thread_to_gdb_regs - Convert ptrace regs to GDB regs * @gdb_regs: A pointer to hold the registers in the order GDB wants. * @p: The &struct task_struct of the desired process. * * Convert the register values of the sleeping process in @p to * the format that GDB expects. * This function is called when kgdb does not have access to the * &struct pt_regs and therefore it should fill the gdb registers * @gdb_regs with what has been saved in &struct thread_struct * thread field during switch_to. 
*/ void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p) { #ifndef CONFIG_X86_32 u32 *gdb_regs32 = (u32 *)gdb_regs; #endif gdb_regs[GDB_AX] = 0; gdb_regs[GDB_BX] = 0; gdb_regs[GDB_CX] = 0; gdb_regs[GDB_DX] = 0; gdb_regs[GDB_SI] = 0; gdb_regs[GDB_DI] = 0; gdb_regs[GDB_BP] = *(unsigned long *)p->thread.sp; #ifdef CONFIG_X86_32 gdb_regs[GDB_DS] = __KERNEL_DS; gdb_regs[GDB_ES] = __KERNEL_DS; gdb_regs[GDB_PS] = 0; gdb_regs[GDB_CS] = __KERNEL_CS; gdb_regs[GDB_PC] = p->thread.ip; gdb_regs[GDB_SS] = __KERNEL_DS; gdb_regs[GDB_FS] = 0xFFFF; gdb_regs[GDB_GS] = 0xFFFF; #else gdb_regs32[GDB_PS] = *(unsigned long *)(p->thread.sp + 8); gdb_regs32[GDB_CS] = __KERNEL_CS; gdb_regs32[GDB_SS] = __KERNEL_DS; gdb_regs[GDB_PC] = 0; gdb_regs[GDB_R8] = 0; gdb_regs[GDB_R9] = 0; gdb_regs[GDB_R10] = 0; gdb_regs[GDB_R11] = 0; gdb_regs[GDB_R12] = 0; gdb_regs[GDB_R13] = 0; gdb_regs[GDB_R14] = 0; gdb_regs[GDB_R15] = 0; #endif gdb_regs[GDB_SP] = p->thread.sp; } static struct hw_breakpoint { unsigned enabled; unsigned long addr; int len; int type; struct perf_event * __percpu *pev; } breakinfo[HBP_NUM]; static unsigned long early_dr7; static void kgdb_correct_hw_break(void) { int breakno; for (breakno = 0; breakno < HBP_NUM; breakno++) { struct perf_event *bp; struct arch_hw_breakpoint *info; int val; int cpu = raw_smp_processor_id(); if (!breakinfo[breakno].enabled) continue; if (dbg_is_early) { set_debugreg(breakinfo[breakno].addr, breakno); early_dr7 |= encode_dr7(breakno, breakinfo[breakno].len, breakinfo[breakno].type); set_debugreg(early_dr7, 7); continue; } bp = *per_cpu_ptr(breakinfo[breakno].pev, cpu); info = counter_arch_bp(bp); if (bp->attr.disabled != 1) continue; bp->attr.bp_addr = breakinfo[breakno].addr; bp->attr.bp_len = breakinfo[breakno].len; bp->attr.bp_type = breakinfo[breakno].type; info->address = breakinfo[breakno].addr; info->len = breakinfo[breakno].len; info->type = breakinfo[breakno].type; val = arch_install_hw_breakpoint(bp); if (!val) 
bp->attr.disabled = 0; } if (!dbg_is_early) hw_breakpoint_restore(); } static int hw_break_reserve_slot(int breakno) { int cpu; int cnt = 0; struct perf_event **pevent; if (dbg_is_early) return 0; for_each_online_cpu(cpu) { cnt++; pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); if (dbg_reserve_bp_slot(*pevent)) goto fail; } return 0; fail: for_each_online_cpu(cpu) { cnt--; if (!cnt) break; pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); dbg_release_bp_slot(*pevent); } return -1; } static int hw_break_release_slot(int breakno) { struct perf_event **pevent; int cpu; if (dbg_is_early) return 0; for_each_online_cpu(cpu) { pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu); if (dbg_release_bp_slot(*pevent)) /* * The debugger is responsible for handing the retry on * remove failure. */ return -1; } return 0; } static int kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype) { int i; for (i = 0; i < HBP_NUM; i++) if (breakinfo[i].addr == addr && breakinfo[i].enabled) break; if (i == HBP_NUM) return -1; if (hw_break_release_slot(i)) { printk(KERN_ERR "Cannot remove hw breakpoint at %lx\n", addr); return -1; } breakinfo[i].enabled = 0; return 0; } static void kgdb_remove_all_hw_break(void) { int i; int cpu = raw_smp_processor_id(); struct perf_event *bp; for (i = 0; i < HBP_NUM; i++) { if (!breakinfo[i].enabled) continue; bp = *per_cpu_ptr(breakinfo[i].pev, cpu); if (!bp->attr.disabled) { arch_uninstall_hw_breakpoint(bp); bp->attr.disabled = 1; continue; } if (dbg_is_early) early_dr7 &= ~encode_dr7(i, breakinfo[i].len, breakinfo[i].type); else if (hw_break_release_slot(i)) printk(KERN_ERR "KGDB: hw bpt remove failed %lx\n", breakinfo[i].addr); breakinfo[i].enabled = 0; } } static int kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype) { int i; for (i = 0; i < HBP_NUM; i++) if (!breakinfo[i].enabled) break; if (i == HBP_NUM) return -1; switch (bptype) { case BP_HARDWARE_BREAKPOINT: len = 1; breakinfo[i].type = 
X86_BREAKPOINT_EXECUTE; break; case BP_WRITE_WATCHPOINT: breakinfo[i].type = X86_BREAKPOINT_WRITE; break; case BP_ACCESS_WATCHPOINT: breakinfo[i].type = X86_BREAKPOINT_RW; break; default: return -1; } switch (len) { case 1: breakinfo[i].len = X86_BREAKPOINT_LEN_1; break; case 2: breakinfo[i].len = X86_BREAKPOINT_LEN_2; break; case 4: breakinfo[i].len = X86_BREAKPOINT_LEN_4; break; #ifdef CONFIG_X86_64 case 8: breakinfo[i].len = X86_BREAKPOINT_LEN_8; break; #endif default: return -1; } breakinfo[i].addr = addr; if (hw_break_reserve_slot(i)) { breakinfo[i].addr = 0; return -1; } breakinfo[i].enabled = 1; return 0; } /** * kgdb_disable_hw_debug - Disable hardware debugging while we in kgdb. * @regs: Current &struct pt_regs. * * This function will be called if the particular architecture must * disable hardware debugging while it is processing gdb packets or * handling exception. */ static void kgdb_disable_hw_debug(struct pt_regs *regs) { int i; int cpu = raw_smp_processor_id(); struct perf_event *bp; /* Disable hardware debugging while we are in kgdb: */ set_debugreg(0UL, 7); for (i = 0; i < HBP_NUM; i++) { if (!breakinfo[i].enabled) continue; if (dbg_is_early) { early_dr7 &= ~encode_dr7(i, breakinfo[i].len, breakinfo[i].type); continue; } bp = *per_cpu_ptr(breakinfo[i].pev, cpu); if (bp->attr.disabled == 1) continue; arch_uninstall_hw_breakpoint(bp); bp->attr.disabled = 1; } } #ifdef CONFIG_SMP /** * kgdb_roundup_cpus - Get other CPUs into a holding pattern * @flags: Current IRQ state * * On SMP systems, we need to get the attention of the other CPUs * and get them be in a known state. This should do what is needed * to get the other CPUs to call kgdb_wait(). Note that on some arches, * the NMI approach is not used for rounding up all the CPUs. For example, * in case of MIPS, smp_call_function() is used to roundup CPUs. In * this case, we have to make sure that interrupts are enabled before * calling smp_call_function(). 
The argument to this function is * the flags that will be used when restoring the interrupts. There is * local_irq_save() call before kgdb_roundup_cpus(). * * On non-SMP systems, this is not called. */ void kgdb_roundup_cpus(unsigned long flags) { apic->send_IPI_allbutself(APIC_DM_NMI); } #endif /** * kgdb_arch_handle_exception - Handle architecture specific GDB packets. * @e_vector: The error vector of the exception that happened. * @signo: The signal number of the exception that happened. * @err_code: The error code of the exception that happened. * @remcomInBuffer: The buffer of the packet we have read. * @remcomOutBuffer: The buffer of %BUFMAX bytes to write a packet into. * @linux_regs: The &struct pt_regs of the current process. * * This function MUST handle the 'c' and 's' command packets, * as well packets to set / remove a hardware breakpoint, if used. * If there are additional packets which the hardware needs to handle, * they are handled here. The code should return -1 if it wants to * process more packets, and a %0 or %1 if it wants to exit from the * kgdb callback. 
*/ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code, char *remcomInBuffer, char *remcomOutBuffer, struct pt_regs *linux_regs) { unsigned long addr; char *ptr; switch (remcomInBuffer[0]) { case 'c': case 's': /* try to read optional parameter, pc unchanged if no parm */ ptr = &remcomInBuffer[1]; if (kgdb_hex2long(&ptr, &addr)) linux_regs->ip = addr; case 'D': case 'k': /* clear the trace bit */ linux_regs->flags &= ~X86_EFLAGS_TF; atomic_set(&kgdb_cpu_doing_single_step, -1); /* set the trace bit if we're stepping */ if (remcomInBuffer[0] == 's') { linux_regs->flags |= X86_EFLAGS_TF; atomic_set(&kgdb_cpu_doing_single_step, raw_smp_processor_id()); } return 0; } /* this means that we do not want to exit from the handler: */ return -1; } static inline int single_step_cont(struct pt_regs *regs, struct die_args *args) { /* * Single step exception from kernel space to user space so * eat the exception and continue the process: */ printk(KERN_ERR "KGDB: trap/step from kernel to user space, " "resuming...\n"); kgdb_arch_handle_exception(args->trapnr, args->signr, args->err, "c", "", regs); /* * Reset the BS bit in dr6 (pointed by args->err) to * denote completion of processing */ (*(unsigned long *)ERR_PTR(args->err)) &= ~DR_STEP; return NOTIFY_STOP; } static int was_in_debug_nmi[NR_CPUS]; static int kgdb_nmi_handler(unsigned int cmd, struct pt_regs *regs) { switch (cmd) { case NMI_LOCAL: if (atomic_read(&kgdb_active) != -1) { /* KGDB CPU roundup */ kgdb_nmicallback(raw_smp_processor_id(), regs); was_in_debug_nmi[raw_smp_processor_id()] = 1; touch_nmi_watchdog(); return NMI_HANDLED; } break; case NMI_UNKNOWN: if (was_in_debug_nmi[raw_smp_processor_id()]) { was_in_debug_nmi[raw_smp_processor_id()] = 0; return NMI_HANDLED; } break; default: /* do nothing */ break; } return NMI_DONE; } static int __kgdb_notify(struct die_args *args, unsigned long cmd) { struct pt_regs *regs = args->regs; switch (cmd) { case DIE_DEBUG: if 
(atomic_read(&kgdb_cpu_doing_single_step) != -1) { if (user_mode(regs)) return single_step_cont(regs, args); break; } else if (test_thread_flag(TIF_SINGLESTEP)) /* This means a user thread is single stepping * a system call which should be ignored */ return NOTIFY_DONE; /* fall through */ default: if (user_mode(regs)) return NOTIFY_DONE; } if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs)) return NOTIFY_DONE; /* Must touch watchdog before return to normal operation */ touch_nmi_watchdog(); return NOTIFY_STOP; } int kgdb_ll_trap(int cmd, const char *str, struct pt_regs *regs, long err, int trap, int sig) { struct die_args args = { .regs = regs, .str = str, .err = err, .trapnr = trap, .signr = sig, }; if (!kgdb_io_module_registered) return NOTIFY_DONE; return __kgdb_notify(&args, cmd); } static int kgdb_notify(struct notifier_block *self, unsigned long cmd, void *ptr) { unsigned long flags; int ret; local_irq_save(flags); ret = __kgdb_notify(ptr, cmd); local_irq_restore(flags); return ret; } static struct notifier_block kgdb_notifier = { .notifier_call = kgdb_notify, }; /** * kgdb_arch_init - Perform any architecture specific initalization. * * This function will handle the initalization of any architecture * specific callbacks. 
*/ int kgdb_arch_init(void) { int retval; retval = register_die_notifier(&kgdb_notifier); if (retval) goto out; retval = register_nmi_handler(NMI_LOCAL, kgdb_nmi_handler, 0, "kgdb"); if (retval) goto out1; retval = register_nmi_handler(NMI_UNKNOWN, kgdb_nmi_handler, 0, "kgdb"); if (retval) goto out2; return retval; out2: unregister_nmi_handler(NMI_LOCAL, "kgdb"); out1: unregister_die_notifier(&kgdb_notifier); out: return retval; } static void kgdb_hw_overflow_handler(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs) { struct task_struct *tsk = current; int i; for (i = 0; i < 4; i++) if (breakinfo[i].enabled) tsk->thread.debugreg6 |= (DR_TRAP0 << i); } void kgdb_arch_late(void) { int i, cpu; struct perf_event_attr attr; struct perf_event **pevent; /* * Pre-allocate the hw breakpoint structions in the non-atomic * portion of kgdb because this operation requires mutexs to * complete. */ hw_breakpoint_init(&attr); attr.bp_addr = (unsigned long)kgdb_arch_init; attr.bp_len = HW_BREAKPOINT_LEN_1; attr.bp_type = HW_BREAKPOINT_W; attr.disabled = 1; for (i = 0; i < HBP_NUM; i++) { if (breakinfo[i].pev) continue; breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL); if (IS_ERR((void * __force)breakinfo[i].pev)) { printk(KERN_ERR "kgdb: Could not allocate hw" "breakpoints\nDisabling the kernel debugger\n"); breakinfo[i].pev = NULL; kgdb_arch_exit(); return; } for_each_online_cpu(cpu) { pevent = per_cpu_ptr(breakinfo[i].pev, cpu); pevent[0]->hw.sample_period = 1; pevent[0]->overflow_handler = kgdb_hw_overflow_handler; if (pevent[0]->destroy != NULL) { pevent[0]->destroy = NULL; release_bp_slot(*pevent); } } } } /** * kgdb_arch_exit - Perform any architecture specific uninitalization. * * This function will handle the uninitalization of any architecture * specific callbacks, for dynamic registration and unregistration. 
*/ void kgdb_arch_exit(void) { int i; for (i = 0; i < 4; i++) { if (breakinfo[i].pev) { unregister_wide_hw_breakpoint(breakinfo[i].pev); breakinfo[i].pev = NULL; } } unregister_nmi_handler(NMI_UNKNOWN, "kgdb"); unregister_nmi_handler(NMI_LOCAL, "kgdb"); unregister_die_notifier(&kgdb_notifier); } /** * * kgdb_skipexception - Bail out of KGDB when we've been triggered. * @exception: Exception vector number * @regs: Current &struct pt_regs. * * On some architectures we need to skip a breakpoint exception when * it occurs after a breakpoint has been removed. * * Skip an int3 exception when it occurs after a breakpoint has been * removed. Backtrack eip by 1 since the int3 would have caused it to * increment by 1. */ int kgdb_skipexception(int exception, struct pt_regs *regs) { if (exception == 3 && kgdb_isremovedbreak(regs->ip - 1)) { regs->ip -= 1; return 1; } return 0; } unsigned long kgdb_arch_pc(int exception, struct pt_regs *regs) { if (exception == 3) return instruction_pointer(regs) - 1; return instruction_pointer(regs); } void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip) { regs->ip = ip; } int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) { int err; #ifdef CONFIG_DEBUG_RODATA char opc[BREAK_INSTR_SIZE]; #endif /* CONFIG_DEBUG_RODATA */ bpt->type = BP_BREAKPOINT; err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); if (err) return err; err = probe_kernel_write((char *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE); #ifdef CONFIG_DEBUG_RODATA if (!err) return err; /* * It is safe to call text_poke() because normal kernel execution * is stopped on all cores, so long as the text_mutex is not locked. 
*/ if (mutex_is_locked(&text_mutex)) return -EBUSY; text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE); err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); if (err) return err; if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE)) return -EINVAL; bpt->type = BP_POKE_BREAKPOINT; #endif /* CONFIG_DEBUG_RODATA */ return err; } int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) { #ifdef CONFIG_DEBUG_RODATA int err; char opc[BREAK_INSTR_SIZE]; if (bpt->type != BP_POKE_BREAKPOINT) goto knl_write; /* * It is safe to call text_poke() because normal kernel execution * is stopped on all cores, so long as the text_mutex is not locked. */ if (mutex_is_locked(&text_mutex)) goto knl_write; text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE); err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE); if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE)) goto knl_write; return err; knl_write: #endif /* CONFIG_DEBUG_RODATA */ return probe_kernel_write((char *)bpt->bpt_addr, (char *)bpt->saved_instr, BREAK_INSTR_SIZE); } struct kgdb_arch arch_kgdb_ops = { /* Breakpoint instruction: */ .gdb_bpt_instr = { 0xcc }, .flags = KGDB_HW_BREAKPOINT, .set_hw_breakpoint = kgdb_set_hw_break, .remove_hw_breakpoint = kgdb_remove_hw_break, .disable_hw_break = kgdb_disable_hw_debug, .remove_all_hw_break = kgdb_remove_all_hw_break, .correct_hw_break = kgdb_correct_hw_break, };
gpl-2.0
omonar/linux
drivers/net/ethernet/octeon/octeon_mgmt.c
617
42429
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2009-2012 Cavium, Inc */ #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/etherdevice.h> #include <linux/capability.h> #include <linux/net_tstamp.h> #include <linux/interrupt.h> #include <linux/netdevice.h> #include <linux/spinlock.h> #include <linux/if_vlan.h> #include <linux/of_mdio.h> #include <linux/module.h> #include <linux/of_net.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/phy.h> #include <linux/io.h> #include <asm/octeon/octeon.h> #include <asm/octeon/cvmx-mixx-defs.h> #include <asm/octeon/cvmx-agl-defs.h> #define DRV_NAME "octeon_mgmt" #define DRV_VERSION "2.0" #define DRV_DESCRIPTION \ "Cavium Networks Octeon MII (management) port Network Driver" #define OCTEON_MGMT_NAPI_WEIGHT 16 /* Ring sizes that are powers of two allow for more efficient modulo * opertions. */ #define OCTEON_MGMT_RX_RING_SIZE 512 #define OCTEON_MGMT_TX_RING_SIZE 128 /* Allow 8 bytes for vlan and FCS. 
*/ #define OCTEON_MGMT_RX_HEADROOM (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN) union mgmt_port_ring_entry { u64 d64; struct { #define RING_ENTRY_CODE_DONE 0xf #define RING_ENTRY_CODE_MORE 0x10 #ifdef __BIG_ENDIAN_BITFIELD u64 reserved_62_63:2; /* Length of the buffer/packet in bytes */ u64 len:14; /* For TX, signals that the packet should be timestamped */ u64 tstamp:1; /* The RX error code */ u64 code:7; /* Physical address of the buffer */ u64 addr:40; #else u64 addr:40; u64 code:7; u64 tstamp:1; u64 len:14; u64 reserved_62_63:2; #endif } s; }; #define MIX_ORING1 0x0 #define MIX_ORING2 0x8 #define MIX_IRING1 0x10 #define MIX_IRING2 0x18 #define MIX_CTL 0x20 #define MIX_IRHWM 0x28 #define MIX_IRCNT 0x30 #define MIX_ORHWM 0x38 #define MIX_ORCNT 0x40 #define MIX_ISR 0x48 #define MIX_INTENA 0x50 #define MIX_REMCNT 0x58 #define MIX_BIST 0x78 #define AGL_GMX_PRT_CFG 0x10 #define AGL_GMX_RX_FRM_CTL 0x18 #define AGL_GMX_RX_FRM_MAX 0x30 #define AGL_GMX_RX_JABBER 0x38 #define AGL_GMX_RX_STATS_CTL 0x50 #define AGL_GMX_RX_STATS_PKTS_DRP 0xb0 #define AGL_GMX_RX_STATS_OCTS_DRP 0xb8 #define AGL_GMX_RX_STATS_PKTS_BAD 0xc0 #define AGL_GMX_RX_ADR_CTL 0x100 #define AGL_GMX_RX_ADR_CAM_EN 0x108 #define AGL_GMX_RX_ADR_CAM0 0x180 #define AGL_GMX_RX_ADR_CAM1 0x188 #define AGL_GMX_RX_ADR_CAM2 0x190 #define AGL_GMX_RX_ADR_CAM3 0x198 #define AGL_GMX_RX_ADR_CAM4 0x1a0 #define AGL_GMX_RX_ADR_CAM5 0x1a8 #define AGL_GMX_TX_CLK 0x208 #define AGL_GMX_TX_STATS_CTL 0x268 #define AGL_GMX_TX_CTL 0x270 #define AGL_GMX_TX_STAT0 0x280 #define AGL_GMX_TX_STAT1 0x288 #define AGL_GMX_TX_STAT2 0x290 #define AGL_GMX_TX_STAT3 0x298 #define AGL_GMX_TX_STAT4 0x2a0 #define AGL_GMX_TX_STAT5 0x2a8 #define AGL_GMX_TX_STAT6 0x2b0 #define AGL_GMX_TX_STAT7 0x2b8 #define AGL_GMX_TX_STAT8 0x2c0 #define AGL_GMX_TX_STAT9 0x2c8 struct octeon_mgmt { struct net_device *netdev; u64 mix; u64 agl; u64 agl_prt_ctl; int port; int irq; bool has_rx_tstamp; u64 *tx_ring; dma_addr_t tx_ring_handle; unsigned int tx_next; unsigned int 
tx_next_clean; unsigned int tx_current_fill; /* The tx_list lock also protects the ring related variables */ struct sk_buff_head tx_list; /* RX variables only touched in napi_poll. No locking necessary. */ u64 *rx_ring; dma_addr_t rx_ring_handle; unsigned int rx_next; unsigned int rx_next_fill; unsigned int rx_current_fill; struct sk_buff_head rx_list; spinlock_t lock; unsigned int last_duplex; unsigned int last_link; unsigned int last_speed; struct device *dev; struct napi_struct napi; struct tasklet_struct tx_clean_tasklet; struct phy_device *phydev; struct device_node *phy_np; resource_size_t mix_phys; resource_size_t mix_size; resource_size_t agl_phys; resource_size_t agl_size; resource_size_t agl_prt_ctl_phys; resource_size_t agl_prt_ctl_size; }; static void octeon_mgmt_set_rx_irq(struct octeon_mgmt *p, int enable) { union cvmx_mixx_intena mix_intena; unsigned long flags; spin_lock_irqsave(&p->lock, flags); mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA); mix_intena.s.ithena = enable ? 1 : 0; cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); spin_unlock_irqrestore(&p->lock, flags); } static void octeon_mgmt_set_tx_irq(struct octeon_mgmt *p, int enable) { union cvmx_mixx_intena mix_intena; unsigned long flags; spin_lock_irqsave(&p->lock, flags); mix_intena.u64 = cvmx_read_csr(p->mix + MIX_INTENA); mix_intena.s.othena = enable ? 
1 : 0; cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); spin_unlock_irqrestore(&p->lock, flags); } static void octeon_mgmt_enable_rx_irq(struct octeon_mgmt *p) { octeon_mgmt_set_rx_irq(p, 1); } static void octeon_mgmt_disable_rx_irq(struct octeon_mgmt *p) { octeon_mgmt_set_rx_irq(p, 0); } static void octeon_mgmt_enable_tx_irq(struct octeon_mgmt *p) { octeon_mgmt_set_tx_irq(p, 1); } static void octeon_mgmt_disable_tx_irq(struct octeon_mgmt *p) { octeon_mgmt_set_tx_irq(p, 0); } static unsigned int ring_max_fill(unsigned int ring_size) { return ring_size - 8; } static unsigned int ring_size_to_bytes(unsigned int ring_size) { return ring_size * sizeof(union mgmt_port_ring_entry); } static void octeon_mgmt_rx_fill_ring(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); while (p->rx_current_fill < ring_max_fill(OCTEON_MGMT_RX_RING_SIZE)) { unsigned int size; union mgmt_port_ring_entry re; struct sk_buff *skb; /* CN56XX pass 1 needs 8 bytes of padding. */ size = netdev->mtu + OCTEON_MGMT_RX_HEADROOM + 8 + NET_IP_ALIGN; skb = netdev_alloc_skb(netdev, size); if (!skb) break; skb_reserve(skb, NET_IP_ALIGN); __skb_queue_tail(&p->rx_list, skb); re.d64 = 0; re.s.len = size; re.s.addr = dma_map_single(p->dev, skb->data, size, DMA_FROM_DEVICE); /* Put it in the ring. */ p->rx_ring[p->rx_next_fill] = re.d64; dma_sync_single_for_device(p->dev, p->rx_ring_handle, ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), DMA_BIDIRECTIONAL); p->rx_next_fill = (p->rx_next_fill + 1) % OCTEON_MGMT_RX_RING_SIZE; p->rx_current_fill++; /* Ring the bell. 
*/ cvmx_write_csr(p->mix + MIX_IRING2, 1); } } static void octeon_mgmt_clean_tx_buffers(struct octeon_mgmt *p) { union cvmx_mixx_orcnt mix_orcnt; union mgmt_port_ring_entry re; struct sk_buff *skb; int cleaned = 0; unsigned long flags; mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); while (mix_orcnt.s.orcnt) { spin_lock_irqsave(&p->tx_list.lock, flags); mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); if (mix_orcnt.s.orcnt == 0) { spin_unlock_irqrestore(&p->tx_list.lock, flags); break; } dma_sync_single_for_cpu(p->dev, p->tx_ring_handle, ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), DMA_BIDIRECTIONAL); re.d64 = p->tx_ring[p->tx_next_clean]; p->tx_next_clean = (p->tx_next_clean + 1) % OCTEON_MGMT_TX_RING_SIZE; skb = __skb_dequeue(&p->tx_list); mix_orcnt.u64 = 0; mix_orcnt.s.orcnt = 1; /* Acknowledge to hardware that we have the buffer. */ cvmx_write_csr(p->mix + MIX_ORCNT, mix_orcnt.u64); p->tx_current_fill--; spin_unlock_irqrestore(&p->tx_list.lock, flags); dma_unmap_single(p->dev, re.s.addr, re.s.len, DMA_TO_DEVICE); /* Read the hardware TX timestamp if one was recorded */ if (unlikely(re.s.tstamp)) { struct skb_shared_hwtstamps ts; u64 ns; memset(&ts, 0, sizeof(ts)); /* Read the timestamp */ ns = cvmx_read_csr(CVMX_MIXX_TSTAMP(p->port)); /* Remove the timestamp from the FIFO */ cvmx_write_csr(CVMX_MIXX_TSCTL(p->port), 0); /* Tell the kernel about the timestamp */ ts.hwtstamp = ns_to_ktime(ns); skb_tstamp_tx(skb, &ts); } dev_kfree_skb_any(skb); cleaned++; mix_orcnt.u64 = cvmx_read_csr(p->mix + MIX_ORCNT); } if (cleaned && netif_queue_stopped(p->netdev)) netif_wake_queue(p->netdev); } static void octeon_mgmt_clean_tx_tasklet(unsigned long arg) { struct octeon_mgmt *p = (struct octeon_mgmt *)arg; octeon_mgmt_clean_tx_buffers(p); octeon_mgmt_enable_tx_irq(p); } static void octeon_mgmt_update_rx_stats(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); unsigned long flags; u64 drop, bad; /* These reads also clear the count registers. 
*/ drop = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP); bad = cvmx_read_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD); if (drop || bad) { /* Do an atomic update. */ spin_lock_irqsave(&p->lock, flags); netdev->stats.rx_errors += bad; netdev->stats.rx_dropped += drop; spin_unlock_irqrestore(&p->lock, flags); } } static void octeon_mgmt_update_tx_stats(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); unsigned long flags; union cvmx_agl_gmx_txx_stat0 s0; union cvmx_agl_gmx_txx_stat1 s1; /* These reads also clear the count registers. */ s0.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT0); s1.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_STAT1); if (s0.s.xsdef || s0.s.xscol || s1.s.scol || s1.s.mcol) { /* Do an atomic update. */ spin_lock_irqsave(&p->lock, flags); netdev->stats.tx_errors += s0.s.xsdef + s0.s.xscol; netdev->stats.collisions += s1.s.scol + s1.s.mcol; spin_unlock_irqrestore(&p->lock, flags); } } /* * Dequeue a receive skb and its corresponding ring entry. The ring * entry is returned, *pskb is updated to point to the skb. */ static u64 octeon_mgmt_dequeue_rx_buffer(struct octeon_mgmt *p, struct sk_buff **pskb) { union mgmt_port_ring_entry re; dma_sync_single_for_cpu(p->dev, p->rx_ring_handle, ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), DMA_BIDIRECTIONAL); re.d64 = p->rx_ring[p->rx_next]; p->rx_next = (p->rx_next + 1) % OCTEON_MGMT_RX_RING_SIZE; p->rx_current_fill--; *pskb = __skb_dequeue(&p->rx_list); dma_unmap_single(p->dev, re.s.addr, ETH_FRAME_LEN + OCTEON_MGMT_RX_HEADROOM, DMA_FROM_DEVICE); return re.d64; } static int octeon_mgmt_receive_one(struct octeon_mgmt *p) { struct net_device *netdev = p->netdev; union cvmx_mixx_ircnt mix_ircnt; union mgmt_port_ring_entry re; struct sk_buff *skb; struct sk_buff *skb2; struct sk_buff *skb_new; union mgmt_port_ring_entry re2; int rc = 1; re.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb); if (likely(re.s.code == RING_ENTRY_CODE_DONE)) { /* A good packet, send it up. 
*/ skb_put(skb, re.s.len); good: /* Process the RX timestamp if it was recorded */ if (p->has_rx_tstamp) { /* The first 8 bytes are the timestamp */ u64 ns = *(u64 *)skb->data; struct skb_shared_hwtstamps *ts; ts = skb_hwtstamps(skb); ts->hwtstamp = ns_to_ktime(ns); __skb_pull(skb, 8); } skb->protocol = eth_type_trans(skb, netdev); netdev->stats.rx_packets++; netdev->stats.rx_bytes += skb->len; netif_receive_skb(skb); rc = 0; } else if (re.s.code == RING_ENTRY_CODE_MORE) { /* Packet split across skbs. This can happen if we * increase the MTU. Buffers that are already in the * rx ring can then end up being too small. As the rx * ring is refilled, buffers sized for the new MTU * will be used and we should go back to the normal * non-split case. */ skb_put(skb, re.s.len); do { re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2); if (re2.s.code != RING_ENTRY_CODE_MORE && re2.s.code != RING_ENTRY_CODE_DONE) goto split_error; skb_put(skb2, re2.s.len); skb_new = skb_copy_expand(skb, 0, skb2->len, GFP_ATOMIC); if (!skb_new) goto split_error; if (skb_copy_bits(skb2, 0, skb_tail_pointer(skb_new), skb2->len)) goto split_error; skb_put(skb_new, skb2->len); dev_kfree_skb_any(skb); dev_kfree_skb_any(skb2); skb = skb_new; } while (re2.s.code == RING_ENTRY_CODE_MORE); goto good; } else { /* Some other error, discard it. */ dev_kfree_skb_any(skb); /* Error statistics are accumulated in * octeon_mgmt_update_rx_stats. */ } goto done; split_error: /* Discard the whole mess. */ dev_kfree_skb_any(skb); dev_kfree_skb_any(skb2); while (re2.s.code == RING_ENTRY_CODE_MORE) { re2.d64 = octeon_mgmt_dequeue_rx_buffer(p, &skb2); dev_kfree_skb_any(skb2); } netdev->stats.rx_errors++; done: /* Tell the hardware we processed a packet. 
*/ mix_ircnt.u64 = 0; mix_ircnt.s.ircnt = 1; cvmx_write_csr(p->mix + MIX_IRCNT, mix_ircnt.u64); return rc; } static int octeon_mgmt_receive_packets(struct octeon_mgmt *p, int budget) { unsigned int work_done = 0; union cvmx_mixx_ircnt mix_ircnt; int rc; mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT); while (work_done < budget && mix_ircnt.s.ircnt) { rc = octeon_mgmt_receive_one(p); if (!rc) work_done++; /* Check for more packets. */ mix_ircnt.u64 = cvmx_read_csr(p->mix + MIX_IRCNT); } octeon_mgmt_rx_fill_ring(p->netdev); return work_done; } static int octeon_mgmt_napi_poll(struct napi_struct *napi, int budget) { struct octeon_mgmt *p = container_of(napi, struct octeon_mgmt, napi); struct net_device *netdev = p->netdev; unsigned int work_done = 0; work_done = octeon_mgmt_receive_packets(p, budget); if (work_done < budget) { /* We stopped because no more packets were available. */ napi_complete(napi); octeon_mgmt_enable_rx_irq(p); } octeon_mgmt_update_rx_stats(netdev); return work_done; } /* Reset the hardware to clean state. 
*/ static void octeon_mgmt_reset_hw(struct octeon_mgmt *p) { union cvmx_mixx_ctl mix_ctl; union cvmx_mixx_bist mix_bist; union cvmx_agl_gmx_bist agl_gmx_bist; mix_ctl.u64 = 0; cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); do { mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); } while (mix_ctl.s.busy); mix_ctl.s.reset = 1; cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); cvmx_read_csr(p->mix + MIX_CTL); octeon_io_clk_delay(64); mix_bist.u64 = cvmx_read_csr(p->mix + MIX_BIST); if (mix_bist.u64) dev_warn(p->dev, "MIX failed BIST (0x%016llx)\n", (unsigned long long)mix_bist.u64); agl_gmx_bist.u64 = cvmx_read_csr(CVMX_AGL_GMX_BIST); if (agl_gmx_bist.u64) dev_warn(p->dev, "AGL failed BIST (0x%016llx)\n", (unsigned long long)agl_gmx_bist.u64); } struct octeon_mgmt_cam_state { u64 cam[6]; u64 cam_mask; int cam_index; }; static void octeon_mgmt_cam_state_add(struct octeon_mgmt_cam_state *cs, unsigned char *addr) { int i; for (i = 0; i < 6; i++) cs->cam[i] |= (u64)addr[i] << (8 * (cs->cam_index)); cs->cam_mask |= (1ULL << cs->cam_index); cs->cam_index++; } static void octeon_mgmt_set_rx_filtering(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); union cvmx_agl_gmx_rxx_adr_ctl adr_ctl; union cvmx_agl_gmx_prtx_cfg agl_gmx_prtx; unsigned long flags; unsigned int prev_packet_enable; unsigned int cam_mode = 1; /* 1 - Accept on CAM match */ unsigned int multicast_mode = 1; /* 1 - Reject all multicast. */ struct octeon_mgmt_cam_state cam_state; struct netdev_hw_addr *ha; int available_cam_entries; memset(&cam_state, 0, sizeof(cam_state)); if ((netdev->flags & IFF_PROMISC) || netdev->uc.count > 7) { cam_mode = 0; available_cam_entries = 8; } else { /* One CAM entry for the primary address, leaves seven * for the secondary addresses. 
*/ available_cam_entries = 7 - netdev->uc.count; } if (netdev->flags & IFF_MULTICAST) { if (cam_mode == 0 || (netdev->flags & IFF_ALLMULTI) || netdev_mc_count(netdev) > available_cam_entries) multicast_mode = 2; /* 2 - Accept all multicast. */ else multicast_mode = 0; /* 0 - Use CAM. */ } if (cam_mode == 1) { /* Add primary address. */ octeon_mgmt_cam_state_add(&cam_state, netdev->dev_addr); netdev_for_each_uc_addr(ha, netdev) octeon_mgmt_cam_state_add(&cam_state, ha->addr); } if (multicast_mode == 0) { netdev_for_each_mc_addr(ha, netdev) octeon_mgmt_cam_state_add(&cam_state, ha->addr); } spin_lock_irqsave(&p->lock, flags); /* Disable packet I/O. */ agl_gmx_prtx.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); prev_packet_enable = agl_gmx_prtx.s.en; agl_gmx_prtx.s.en = 0; cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64); adr_ctl.u64 = 0; adr_ctl.s.cam_mode = cam_mode; adr_ctl.s.mcst = multicast_mode; adr_ctl.s.bcst = 1; /* Allow broadcast */ cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CTL, adr_ctl.u64); cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM0, cam_state.cam[0]); cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM1, cam_state.cam[1]); cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM2, cam_state.cam[2]); cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM3, cam_state.cam[3]); cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM4, cam_state.cam[4]); cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM5, cam_state.cam[5]); cvmx_write_csr(p->agl + AGL_GMX_RX_ADR_CAM_EN, cam_state.cam_mask); /* Restore packet I/O. 
*/ agl_gmx_prtx.s.en = prev_packet_enable; cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, agl_gmx_prtx.u64); spin_unlock_irqrestore(&p->lock, flags); } static int octeon_mgmt_set_mac_address(struct net_device *netdev, void *addr) { int r = eth_mac_addr(netdev, addr); if (r) return r; octeon_mgmt_set_rx_filtering(netdev); return 0; } static int octeon_mgmt_change_mtu(struct net_device *netdev, int new_mtu) { struct octeon_mgmt *p = netdev_priv(netdev); int size_without_fcs = new_mtu + OCTEON_MGMT_RX_HEADROOM; /* Limit the MTU to make sure the ethernet packets are between * 64 bytes and 16383 bytes. */ if (size_without_fcs < 64 || size_without_fcs > 16383) { dev_warn(p->dev, "MTU must be between %d and %d.\n", 64 - OCTEON_MGMT_RX_HEADROOM, 16383 - OCTEON_MGMT_RX_HEADROOM); return -EINVAL; } netdev->mtu = new_mtu; cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_MAX, size_without_fcs); cvmx_write_csr(p->agl + AGL_GMX_RX_JABBER, (size_without_fcs + 7) & 0xfff8); return 0; } static irqreturn_t octeon_mgmt_interrupt(int cpl, void *dev_id) { struct net_device *netdev = dev_id; struct octeon_mgmt *p = netdev_priv(netdev); union cvmx_mixx_isr mixx_isr; mixx_isr.u64 = cvmx_read_csr(p->mix + MIX_ISR); /* Clear any pending interrupts */ cvmx_write_csr(p->mix + MIX_ISR, mixx_isr.u64); cvmx_read_csr(p->mix + MIX_ISR); if (mixx_isr.s.irthresh) { octeon_mgmt_disable_rx_irq(p); napi_schedule(&p->napi); } if (mixx_isr.s.orthresh) { octeon_mgmt_disable_tx_irq(p); tasklet_schedule(&p->tx_clean_tasklet); } return IRQ_HANDLED; } static int octeon_mgmt_ioctl_hwtstamp(struct net_device *netdev, struct ifreq *rq, int cmd) { struct octeon_mgmt *p = netdev_priv(netdev); struct hwtstamp_config config; union cvmx_mio_ptp_clock_cfg ptp; union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl; bool have_hw_timestamps = false; if (copy_from_user(&config, rq->ifr_data, sizeof(config))) return -EFAULT; if (config.flags) /* reserved for future extensions */ return -EINVAL; /* Check the status of hardware for tiemstamps */ if 
(OCTEON_IS_MODEL(OCTEON_CN6XXX)) { /* Get the current state of the PTP clock */ ptp.u64 = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_CFG); if (!ptp.s.ext_clk_en) { /* The clock has not been configured to use an * external source. Program it to use the main clock * reference. */ u64 clock_comp = (NSEC_PER_SEC << 32) / octeon_get_io_clock_rate(); if (!ptp.s.ptp_en) cvmx_write_csr(CVMX_MIO_PTP_CLOCK_COMP, clock_comp); pr_info("PTP Clock: Using sclk reference at %lld Hz\n", (NSEC_PER_SEC << 32) / clock_comp); } else { /* The clock is already programmed to use a GPIO */ u64 clock_comp = cvmx_read_csr(CVMX_MIO_PTP_CLOCK_COMP); pr_info("PTP Clock: Using GPIO %d at %lld Hz\n", ptp.s.ext_clk_in, (NSEC_PER_SEC << 32) / clock_comp); } /* Enable the clock if it wasn't done already */ if (!ptp.s.ptp_en) { ptp.s.ptp_en = 1; cvmx_write_csr(CVMX_MIO_PTP_CLOCK_CFG, ptp.u64); } have_hw_timestamps = true; } if (!have_hw_timestamps) return -EINVAL; switch (config.tx_type) { case HWTSTAMP_TX_OFF: case HWTSTAMP_TX_ON: break; default: return -ERANGE; } switch (config.rx_filter) { case HWTSTAMP_FILTER_NONE: p->has_rx_tstamp = false; rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); rxx_frm_ctl.s.ptp_mode = 0; cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); break; case HWTSTAMP_FILTER_ALL: case HWTSTAMP_FILTER_SOME: case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: case HWTSTAMP_FILTER_PTP_V2_EVENT: case HWTSTAMP_FILTER_PTP_V2_SYNC: case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: p->has_rx_tstamp = have_hw_timestamps; config.rx_filter = HWTSTAMP_FILTER_ALL; if (p->has_rx_tstamp) { rxx_frm_ctl.u64 = cvmx_read_csr(p->agl + AGL_GMX_RX_FRM_CTL); rxx_frm_ctl.s.ptp_mode = 1; 
cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); } break; default: return -ERANGE; } if (copy_to_user(rq->ifr_data, &config, sizeof(config))) return -EFAULT; return 0; } static int octeon_mgmt_ioctl(struct net_device *netdev, struct ifreq *rq, int cmd) { struct octeon_mgmt *p = netdev_priv(netdev); switch (cmd) { case SIOCSHWTSTAMP: return octeon_mgmt_ioctl_hwtstamp(netdev, rq, cmd); default: if (p->phydev) return phy_mii_ioctl(p->phydev, rq, cmd); return -EINVAL; } } static void octeon_mgmt_disable_link(struct octeon_mgmt *p) { union cvmx_agl_gmx_prtx_cfg prtx_cfg; /* Disable GMX before we make any changes. */ prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); prtx_cfg.s.en = 0; prtx_cfg.s.tx_en = 0; prtx_cfg.s.rx_en = 0; cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { int i; for (i = 0; i < 10; i++) { prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); if (prtx_cfg.s.tx_idle == 1 || prtx_cfg.s.rx_idle == 1) break; mdelay(1); i++; } } } static void octeon_mgmt_enable_link(struct octeon_mgmt *p) { union cvmx_agl_gmx_prtx_cfg prtx_cfg; /* Restore the GMX enable state only if link is set */ prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); prtx_cfg.s.tx_en = 1; prtx_cfg.s.rx_en = 1; prtx_cfg.s.en = 1; cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); } static void octeon_mgmt_update_link(struct octeon_mgmt *p) { union cvmx_agl_gmx_prtx_cfg prtx_cfg; prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); if (!p->phydev->link) prtx_cfg.s.duplex = 1; else prtx_cfg.s.duplex = p->phydev->duplex; switch (p->phydev->speed) { case 10: prtx_cfg.s.speed = 0; prtx_cfg.s.slottime = 0; if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { prtx_cfg.s.burst = 1; prtx_cfg.s.speed_msb = 1; } break; case 100: prtx_cfg.s.speed = 0; prtx_cfg.s.slottime = 0; if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { prtx_cfg.s.burst = 1; prtx_cfg.s.speed_msb = 0; } break; case 1000: /* 1000 MBits is only supported on 6XXX chips */ if 
(OCTEON_IS_MODEL(OCTEON_CN6XXX)) { prtx_cfg.s.speed = 1; prtx_cfg.s.speed_msb = 0; /* Only matters for half-duplex */ prtx_cfg.s.slottime = 1; prtx_cfg.s.burst = p->phydev->duplex; } break; case 0: /* No link */ default: break; } /* Write the new GMX setting with the port still disabled. */ cvmx_write_csr(p->agl + AGL_GMX_PRT_CFG, prtx_cfg.u64); /* Read GMX CFG again to make sure the config is completed. */ prtx_cfg.u64 = cvmx_read_csr(p->agl + AGL_GMX_PRT_CFG); if (OCTEON_IS_MODEL(OCTEON_CN6XXX)) { union cvmx_agl_gmx_txx_clk agl_clk; union cvmx_agl_prtx_ctl prtx_ctl; prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); agl_clk.u64 = cvmx_read_csr(p->agl + AGL_GMX_TX_CLK); /* MII (both speeds) and RGMII 1000 speed. */ agl_clk.s.clk_cnt = 1; if (prtx_ctl.s.mode == 0) { /* RGMII mode */ if (p->phydev->speed == 10) agl_clk.s.clk_cnt = 50; else if (p->phydev->speed == 100) agl_clk.s.clk_cnt = 5; } cvmx_write_csr(p->agl + AGL_GMX_TX_CLK, agl_clk.u64); } } static void octeon_mgmt_adjust_link(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); unsigned long flags; int link_changed = 0; if (!p->phydev) return; spin_lock_irqsave(&p->lock, flags); if (!p->phydev->link && p->last_link) link_changed = -1; if (p->phydev->link && (p->last_duplex != p->phydev->duplex || p->last_link != p->phydev->link || p->last_speed != p->phydev->speed)) { octeon_mgmt_disable_link(p); link_changed = 1; octeon_mgmt_update_link(p); octeon_mgmt_enable_link(p); } p->last_link = p->phydev->link; p->last_speed = p->phydev->speed; p->last_duplex = p->phydev->duplex; spin_unlock_irqrestore(&p->lock, flags); if (link_changed != 0) { if (link_changed > 0) { pr_info("%s: Link is up - %d/%s\n", netdev->name, p->phydev->speed, DUPLEX_FULL == p->phydev->duplex ? 
"Full" : "Half"); } else { pr_info("%s: Link is down\n", netdev->name); } } } static int octeon_mgmt_init_phy(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); if (octeon_is_simulation() || p->phy_np == NULL) { /* No PHYs in the simulator. */ netif_carrier_on(netdev); return 0; } p->phydev = of_phy_connect(netdev, p->phy_np, octeon_mgmt_adjust_link, 0, PHY_INTERFACE_MODE_MII); if (!p->phydev) return -ENODEV; return 0; } static int octeon_mgmt_open(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); union cvmx_mixx_ctl mix_ctl; union cvmx_agl_gmx_inf_mode agl_gmx_inf_mode; union cvmx_mixx_oring1 oring1; union cvmx_mixx_iring1 iring1; union cvmx_agl_gmx_rxx_frm_ctl rxx_frm_ctl; union cvmx_mixx_irhwm mix_irhwm; union cvmx_mixx_orhwm mix_orhwm; union cvmx_mixx_intena mix_intena; struct sockaddr sa; /* Allocate ring buffers. */ p->tx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), GFP_KERNEL); if (!p->tx_ring) return -ENOMEM; p->tx_ring_handle = dma_map_single(p->dev, p->tx_ring, ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), DMA_BIDIRECTIONAL); p->tx_next = 0; p->tx_next_clean = 0; p->tx_current_fill = 0; p->rx_ring = kzalloc(ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), GFP_KERNEL); if (!p->rx_ring) goto err_nomem; p->rx_ring_handle = dma_map_single(p->dev, p->rx_ring, ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), DMA_BIDIRECTIONAL); p->rx_next = 0; p->rx_next_fill = 0; p->rx_current_fill = 0; octeon_mgmt_reset_hw(p); mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); /* Bring it out of reset if needed. 
*/ if (mix_ctl.s.reset) { mix_ctl.s.reset = 0; cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); do { mix_ctl.u64 = cvmx_read_csr(p->mix + MIX_CTL); } while (mix_ctl.s.reset); } if (OCTEON_IS_MODEL(OCTEON_CN5XXX)) { agl_gmx_inf_mode.u64 = 0; agl_gmx_inf_mode.s.en = 1; cvmx_write_csr(CVMX_AGL_GMX_INF_MODE, agl_gmx_inf_mode.u64); } if (OCTEON_IS_MODEL(OCTEON_CN56XX_PASS1_X) || OCTEON_IS_MODEL(OCTEON_CN52XX_PASS1_X)) { /* Force compensation values, as they are not * determined properly by HW */ union cvmx_agl_gmx_drv_ctl drv_ctl; drv_ctl.u64 = cvmx_read_csr(CVMX_AGL_GMX_DRV_CTL); if (p->port) { drv_ctl.s.byp_en1 = 1; drv_ctl.s.nctl1 = 6; drv_ctl.s.pctl1 = 6; } else { drv_ctl.s.byp_en = 1; drv_ctl.s.nctl = 6; drv_ctl.s.pctl = 6; } cvmx_write_csr(CVMX_AGL_GMX_DRV_CTL, drv_ctl.u64); } oring1.u64 = 0; oring1.s.obase = p->tx_ring_handle >> 3; oring1.s.osize = OCTEON_MGMT_TX_RING_SIZE; cvmx_write_csr(p->mix + MIX_ORING1, oring1.u64); iring1.u64 = 0; iring1.s.ibase = p->rx_ring_handle >> 3; iring1.s.isize = OCTEON_MGMT_RX_RING_SIZE; cvmx_write_csr(p->mix + MIX_IRING1, iring1.u64); memcpy(sa.sa_data, netdev->dev_addr, ETH_ALEN); octeon_mgmt_set_mac_address(netdev, &sa); octeon_mgmt_change_mtu(netdev, netdev->mtu); /* Enable the port HW. Packets are not allowed until * cvmx_mgmt_port_enable() is called. */ mix_ctl.u64 = 0; mix_ctl.s.crc_strip = 1; /* Strip the ending CRC */ mix_ctl.s.en = 1; /* Enable the port */ mix_ctl.s.nbtarb = 0; /* Arbitration mode */ /* MII CB-request FIFO programmable high watermark */ mix_ctl.s.mrq_hwm = 1; #ifdef __LITTLE_ENDIAN mix_ctl.s.lendian = 1; #endif cvmx_write_csr(p->mix + MIX_CTL, mix_ctl.u64); /* Read the PHY to find the mode of the interface. */ if (octeon_mgmt_init_phy(netdev)) { dev_err(p->dev, "Cannot initialize PHY on MIX%d.\n", p->port); goto err_noirq; } /* Set the mode of the interface, RGMII/MII. 
*/ if (OCTEON_IS_MODEL(OCTEON_CN6XXX) && p->phydev) { union cvmx_agl_prtx_ctl agl_prtx_ctl; int rgmii_mode = (p->phydev->supported & (SUPPORTED_1000baseT_Half | SUPPORTED_1000baseT_Full)) != 0; agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); agl_prtx_ctl.s.mode = rgmii_mode ? 0 : 1; cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); /* MII clocks counts are based on the 125Mhz * reference, which has an 8nS period. So our delays * need to be multiplied by this factor. */ #define NS_PER_PHY_CLK 8 /* Take the DLL and clock tree out of reset */ agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); agl_prtx_ctl.s.clkrst = 0; if (rgmii_mode) { agl_prtx_ctl.s.dllrst = 0; agl_prtx_ctl.s.clktx_byp = 0; } cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); cvmx_read_csr(p->agl_prt_ctl); /* Force write out before wait */ /* Wait for the DLL to lock. External 125 MHz * reference clock must be stable at this point. */ ndelay(256 * NS_PER_PHY_CLK); /* Enable the interface */ agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); agl_prtx_ctl.s.enable = 1; cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); /* Read the value back to force the previous write */ agl_prtx_ctl.u64 = cvmx_read_csr(p->agl_prt_ctl); /* Enable the compensation controller */ agl_prtx_ctl.s.comp = 1; agl_prtx_ctl.s.drv_byp = 0; cvmx_write_csr(p->agl_prt_ctl, agl_prtx_ctl.u64); /* Force write out before wait. */ cvmx_read_csr(p->agl_prt_ctl); /* For compensation state to lock. */ ndelay(1040 * NS_PER_PHY_CLK); /* Default Interframe Gaps are too small. Recommended * workaround is. * * AGL_GMX_TX_IFG[IFG1]=14 * AGL_GMX_TX_IFG[IFG2]=10 */ cvmx_write_csr(CVMX_AGL_GMX_TX_IFG, 0xae); } octeon_mgmt_rx_fill_ring(netdev); /* Clear statistics. */ /* Clear on read. 
*/ cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_CTL, 1); cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_DRP, 0); cvmx_write_csr(p->agl + AGL_GMX_RX_STATS_PKTS_BAD, 0); cvmx_write_csr(p->agl + AGL_GMX_TX_STATS_CTL, 1); cvmx_write_csr(p->agl + AGL_GMX_TX_STAT0, 0); cvmx_write_csr(p->agl + AGL_GMX_TX_STAT1, 0); /* Clear any pending interrupts */ cvmx_write_csr(p->mix + MIX_ISR, cvmx_read_csr(p->mix + MIX_ISR)); if (request_irq(p->irq, octeon_mgmt_interrupt, 0, netdev->name, netdev)) { dev_err(p->dev, "request_irq(%d) failed.\n", p->irq); goto err_noirq; } /* Interrupt every single RX packet */ mix_irhwm.u64 = 0; mix_irhwm.s.irhwm = 0; cvmx_write_csr(p->mix + MIX_IRHWM, mix_irhwm.u64); /* Interrupt when we have 1 or more packets to clean. */ mix_orhwm.u64 = 0; mix_orhwm.s.orhwm = 0; cvmx_write_csr(p->mix + MIX_ORHWM, mix_orhwm.u64); /* Enable receive and transmit interrupts */ mix_intena.u64 = 0; mix_intena.s.ithena = 1; mix_intena.s.othena = 1; cvmx_write_csr(p->mix + MIX_INTENA, mix_intena.u64); /* Enable packet I/O. */ rxx_frm_ctl.u64 = 0; rxx_frm_ctl.s.ptp_mode = p->has_rx_tstamp ? 1 : 0; rxx_frm_ctl.s.pre_align = 1; /* When set, disables the length check for non-min sized pkts * with padding in the client data. */ rxx_frm_ctl.s.pad_len = 1; /* When set, disables the length check for VLAN pkts */ rxx_frm_ctl.s.vlan_len = 1; /* When set, PREAMBLE checking is less strict */ rxx_frm_ctl.s.pre_free = 1; /* Control Pause Frames can match station SMAC */ rxx_frm_ctl.s.ctl_smac = 0; /* Control Pause Frames can match globally assign Multicast address */ rxx_frm_ctl.s.ctl_mcst = 1; /* Forward pause information to TX block */ rxx_frm_ctl.s.ctl_bck = 1; /* Drop Control Pause Frames */ rxx_frm_ctl.s.ctl_drp = 1; /* Strip off the preamble */ rxx_frm_ctl.s.pre_strp = 1; /* This port is configured to send PREAMBLE+SFD to begin every * frame. GMX checks that the PREAMBLE is sent correctly. 
*/ rxx_frm_ctl.s.pre_chk = 1; cvmx_write_csr(p->agl + AGL_GMX_RX_FRM_CTL, rxx_frm_ctl.u64); /* Configure the port duplex, speed and enables */ octeon_mgmt_disable_link(p); if (p->phydev) octeon_mgmt_update_link(p); octeon_mgmt_enable_link(p); p->last_link = 0; p->last_speed = 0; /* PHY is not present in simulator. The carrier is enabled * while initializing the phy for simulator, leave it enabled. */ if (p->phydev) { netif_carrier_off(netdev); phy_start_aneg(p->phydev); } netif_wake_queue(netdev); napi_enable(&p->napi); return 0; err_noirq: octeon_mgmt_reset_hw(p); dma_unmap_single(p->dev, p->rx_ring_handle, ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), DMA_BIDIRECTIONAL); kfree(p->rx_ring); err_nomem: dma_unmap_single(p->dev, p->tx_ring_handle, ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), DMA_BIDIRECTIONAL); kfree(p->tx_ring); return -ENOMEM; } static int octeon_mgmt_stop(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); napi_disable(&p->napi); netif_stop_queue(netdev); if (p->phydev) phy_disconnect(p->phydev); p->phydev = NULL; netif_carrier_off(netdev); octeon_mgmt_reset_hw(p); free_irq(p->irq, netdev); /* dma_unmap is a nop on Octeon, so just free everything. 
*/ skb_queue_purge(&p->tx_list); skb_queue_purge(&p->rx_list); dma_unmap_single(p->dev, p->rx_ring_handle, ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE), DMA_BIDIRECTIONAL); kfree(p->rx_ring); dma_unmap_single(p->dev, p->tx_ring_handle, ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), DMA_BIDIRECTIONAL); kfree(p->tx_ring); return 0; } static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); union mgmt_port_ring_entry re; unsigned long flags; int rv = NETDEV_TX_BUSY; re.d64 = 0; re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0); re.s.len = skb->len; re.s.addr = dma_map_single(p->dev, skb->data, skb->len, DMA_TO_DEVICE); spin_lock_irqsave(&p->tx_list.lock, flags); if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) { spin_unlock_irqrestore(&p->tx_list.lock, flags); netif_stop_queue(netdev); spin_lock_irqsave(&p->tx_list.lock, flags); } if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) { spin_unlock_irqrestore(&p->tx_list.lock, flags); dma_unmap_single(p->dev, re.s.addr, re.s.len, DMA_TO_DEVICE); goto out; } __skb_queue_tail(&p->tx_list, skb); /* Put it in the ring. */ p->tx_ring[p->tx_next] = re.d64; p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE; p->tx_current_fill++; spin_unlock_irqrestore(&p->tx_list.lock, flags); dma_sync_single_for_device(p->dev, p->tx_ring_handle, ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE), DMA_BIDIRECTIONAL); netdev->stats.tx_packets++; netdev->stats.tx_bytes += skb->len; /* Ring the bell. 
*/ cvmx_write_csr(p->mix + MIX_ORING2, 1); netdev->trans_start = jiffies; rv = NETDEV_TX_OK; out: octeon_mgmt_update_tx_stats(netdev); return rv; } #ifdef CONFIG_NET_POLL_CONTROLLER static void octeon_mgmt_poll_controller(struct net_device *netdev) { struct octeon_mgmt *p = netdev_priv(netdev); octeon_mgmt_receive_packets(p, 16); octeon_mgmt_update_rx_stats(netdev); } #endif static void octeon_mgmt_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *info) { strlcpy(info->driver, DRV_NAME, sizeof(info->driver)); strlcpy(info->version, DRV_VERSION, sizeof(info->version)); strlcpy(info->fw_version, "N/A", sizeof(info->fw_version)); strlcpy(info->bus_info, "N/A", sizeof(info->bus_info)); info->n_stats = 0; info->testinfo_len = 0; info->regdump_len = 0; info->eedump_len = 0; } static int octeon_mgmt_get_settings(struct net_device *netdev, struct ethtool_cmd *cmd) { struct octeon_mgmt *p = netdev_priv(netdev); if (p->phydev) return phy_ethtool_gset(p->phydev, cmd); return -EOPNOTSUPP; } static int octeon_mgmt_set_settings(struct net_device *netdev, struct ethtool_cmd *cmd) { struct octeon_mgmt *p = netdev_priv(netdev); if (!capable(CAP_NET_ADMIN)) return -EPERM; if (p->phydev) return phy_ethtool_sset(p->phydev, cmd); return -EOPNOTSUPP; } static int octeon_mgmt_nway_reset(struct net_device *dev) { struct octeon_mgmt *p = netdev_priv(dev); if (!capable(CAP_NET_ADMIN)) return -EPERM; if (p->phydev) return phy_start_aneg(p->phydev); return -EOPNOTSUPP; } static const struct ethtool_ops octeon_mgmt_ethtool_ops = { .get_drvinfo = octeon_mgmt_get_drvinfo, .get_settings = octeon_mgmt_get_settings, .set_settings = octeon_mgmt_set_settings, .nway_reset = octeon_mgmt_nway_reset, .get_link = ethtool_op_get_link, }; static const struct net_device_ops octeon_mgmt_ops = { .ndo_open = octeon_mgmt_open, .ndo_stop = octeon_mgmt_stop, .ndo_start_xmit = octeon_mgmt_xmit, .ndo_set_rx_mode = octeon_mgmt_set_rx_filtering, .ndo_set_mac_address = octeon_mgmt_set_mac_address, 
.ndo_do_ioctl = octeon_mgmt_ioctl, .ndo_change_mtu = octeon_mgmt_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = octeon_mgmt_poll_controller, #endif }; static int octeon_mgmt_probe(struct platform_device *pdev) { struct net_device *netdev; struct octeon_mgmt *p; const __be32 *data; const u8 *mac; struct resource *res_mix; struct resource *res_agl; struct resource *res_agl_prt_ctl; int len; int result; netdev = alloc_etherdev(sizeof(struct octeon_mgmt)); if (netdev == NULL) return -ENOMEM; SET_NETDEV_DEV(netdev, &pdev->dev); platform_set_drvdata(pdev, netdev); p = netdev_priv(netdev); netif_napi_add(netdev, &p->napi, octeon_mgmt_napi_poll, OCTEON_MGMT_NAPI_WEIGHT); p->netdev = netdev; p->dev = &pdev->dev; p->has_rx_tstamp = false; data = of_get_property(pdev->dev.of_node, "cell-index", &len); if (data && len == sizeof(*data)) { p->port = be32_to_cpup(data); } else { dev_err(&pdev->dev, "no 'cell-index' property\n"); result = -ENXIO; goto err; } snprintf(netdev->name, IFNAMSIZ, "mgmt%d", p->port); result = platform_get_irq(pdev, 0); if (result < 0) goto err; p->irq = result; res_mix = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res_mix == NULL) { dev_err(&pdev->dev, "no 'reg' resource\n"); result = -ENXIO; goto err; } res_agl = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (res_agl == NULL) { dev_err(&pdev->dev, "no 'reg' resource\n"); result = -ENXIO; goto err; } res_agl_prt_ctl = platform_get_resource(pdev, IORESOURCE_MEM, 3); if (res_agl_prt_ctl == NULL) { dev_err(&pdev->dev, "no 'reg' resource\n"); result = -ENXIO; goto err; } p->mix_phys = res_mix->start; p->mix_size = resource_size(res_mix); p->agl_phys = res_agl->start; p->agl_size = resource_size(res_agl); p->agl_prt_ctl_phys = res_agl_prt_ctl->start; p->agl_prt_ctl_size = resource_size(res_agl_prt_ctl); if (!devm_request_mem_region(&pdev->dev, p->mix_phys, p->mix_size, res_mix->name)) { dev_err(&pdev->dev, "request_mem_region (%s) failed\n", res_mix->name); result = -ENXIO; 
goto err; } if (!devm_request_mem_region(&pdev->dev, p->agl_phys, p->agl_size, res_agl->name)) { result = -ENXIO; dev_err(&pdev->dev, "request_mem_region (%s) failed\n", res_agl->name); goto err; } if (!devm_request_mem_region(&pdev->dev, p->agl_prt_ctl_phys, p->agl_prt_ctl_size, res_agl_prt_ctl->name)) { result = -ENXIO; dev_err(&pdev->dev, "request_mem_region (%s) failed\n", res_agl_prt_ctl->name); goto err; } p->mix = (u64)devm_ioremap(&pdev->dev, p->mix_phys, p->mix_size); p->agl = (u64)devm_ioremap(&pdev->dev, p->agl_phys, p->agl_size); p->agl_prt_ctl = (u64)devm_ioremap(&pdev->dev, p->agl_prt_ctl_phys, p->agl_prt_ctl_size); spin_lock_init(&p->lock); skb_queue_head_init(&p->tx_list); skb_queue_head_init(&p->rx_list); tasklet_init(&p->tx_clean_tasklet, octeon_mgmt_clean_tx_tasklet, (unsigned long)p); netdev->priv_flags |= IFF_UNICAST_FLT; netdev->netdev_ops = &octeon_mgmt_ops; netdev->ethtool_ops = &octeon_mgmt_ethtool_ops; mac = of_get_mac_address(pdev->dev.of_node); if (mac) memcpy(netdev->dev_addr, mac, ETH_ALEN); else eth_hw_addr_random(netdev); p->phy_np = of_parse_phandle(pdev->dev.of_node, "phy-handle", 0); result = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); if (result) goto err; netif_carrier_off(netdev); result = register_netdev(netdev); if (result) goto err; dev_info(&pdev->dev, "Version " DRV_VERSION "\n"); return 0; err: free_netdev(netdev); return result; } static int octeon_mgmt_remove(struct platform_device *pdev) { struct net_device *netdev = platform_get_drvdata(pdev); unregister_netdev(netdev); free_netdev(netdev); return 0; } static const struct of_device_id octeon_mgmt_match[] = { { .compatible = "cavium,octeon-5750-mix", }, {}, }; MODULE_DEVICE_TABLE(of, octeon_mgmt_match); static struct platform_driver octeon_mgmt_driver = { .driver = { .name = "octeon_mgmt", .of_match_table = octeon_mgmt_match, }, .probe = octeon_mgmt_probe, .remove = octeon_mgmt_remove, }; extern void octeon_mdiobus_force_mod_depencency(void); static int 
__init octeon_mgmt_mod_init(void) { /* Force our mdiobus driver module to be loaded first. */ octeon_mdiobus_force_mod_depencency(); return platform_driver_register(&octeon_mgmt_driver); } static void __exit octeon_mgmt_mod_exit(void) { platform_driver_unregister(&octeon_mgmt_driver); } module_init(octeon_mgmt_mod_init); module_exit(octeon_mgmt_mod_exit); MODULE_DESCRIPTION(DRV_DESCRIPTION); MODULE_AUTHOR("David Daney"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION);
gpl-2.0
mgenereu/linux
drivers/net/wireless/iwlwifi/iwl-1000.c
873
4689
/******************************************************************************
 *
 * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 * Intel Linux Wireless <ilw@linux.intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/

/*
 * Static device-configuration tables for the Intel Wireless-N 1000 and
 * Wireless-N 100 adapters.  This file contains no runtime logic: it only
 * defines the firmware-name/API-version constants and the iwl_cfg structures
 * that the core iwlwifi driver selects by PCI ID.
 */

#include <linux/module.h>
#include <linux/stringify.h>
#include "iwl-config.h"
#include "iwl-csr.h"
#include "iwl-agn-hw.h"

/* Highest firmware API version supported */
#define IWL1000_UCODE_API_MAX 5
#define IWL100_UCODE_API_MAX 5

/* Oldest version we won't warn about */
#define IWL1000_UCODE_API_OK 5
#define IWL100_UCODE_API_OK 5

/* Lowest firmware API version supported */
#define IWL1000_UCODE_API_MIN 1
#define IWL100_UCODE_API_MIN 5

/* EEPROM version */
#define EEPROM_1000_TX_POWER_VERSION	(4)
#define EEPROM_1000_EEPROM_VERSION	(0x15C)

/* Firmware file names are built as "<prefix><api-version>.ucode" */
#define IWL1000_FW_PRE "iwlwifi-1000-"
#define IWL1000_MODULE_FIRMWARE(api) IWL1000_FW_PRE __stringify(api) ".ucode"

#define IWL100_FW_PRE "iwlwifi-100-"
#define IWL100_MODULE_FIRMWARE(api) IWL100_FW_PRE __stringify(api) ".ucode"

/* Hardware parameters shared by both the 1000 and 100 series. */
static const struct iwl_base_params iwl1000_base_params = {
	.num_of_queues = IWLAGN_NUM_QUEUES,
	.eeprom_size = OTP_LOW_IMAGE_SIZE,
	.pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
	.max_ll_items = OTP_MAX_LL_ITEMS_1000,
	.shadow_ram_support = false,
	.led_compensation = 51,
	.wd_timeout = IWL_WATCHDOG_DISABLED,
	.max_event_log_size = 128,
	.scd_chain_ext_wa = true,
};

/* HT capabilities: greenfield, RTS/CTS aggregation protection, 2.4 GHz HT40. */
static const struct iwl_ht_params iwl1000_ht_params = {
	.ht_greenfield_support = true,
	.use_rts_for_aggregation = true, /* use rts/cts protection */
	.ht40_bands = BIT(IEEE80211_BAND_2GHZ),
};

/* EEPROM layout: which regulatory-band channel tables the NVM carries.
 * The 5 GHz HT40 slot is explicitly marked as absent (2.4 GHz-only parts). */
static const struct iwl_eeprom_params iwl1000_eeprom_params = {
	.regulatory_bands = {
		EEPROM_REG_BAND_1_CHANNELS,
		EEPROM_REG_BAND_2_CHANNELS,
		EEPROM_REG_BAND_3_CHANNELS,
		EEPROM_REG_BAND_4_CHANNELS,
		EEPROM_REG_BAND_5_CHANNELS,
		EEPROM_REG_BAND_24_HT40_CHANNELS,
		EEPROM_REGULATORY_BAND_NO_HT40,
	}
};

/* Common initializers for every Wireless-N 1000 configuration. */
#define IWL_DEVICE_1000						\
	.fw_name_pre = IWL1000_FW_PRE,				\
	.ucode_api_max = IWL1000_UCODE_API_MAX,			\
	.ucode_api_ok = IWL1000_UCODE_API_OK,			\
	.ucode_api_min = IWL1000_UCODE_API_MIN,			\
	.device_family = IWL_DEVICE_FAMILY_1000,		\
	.max_inst_size = IWLAGN_RTC_INST_SIZE,			\
	.max_data_size = IWLAGN_RTC_DATA_SIZE,			\
	.nvm_ver = EEPROM_1000_EEPROM_VERSION,			\
	.nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION,		\
	.base_params = &iwl1000_base_params,			\
	.eeprom_params = &iwl1000_eeprom_params,		\
	.led_mode = IWL_LED_BLINK,				\
	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K

const struct iwl_cfg iwl1000_bgn_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 1000 BGN",
	IWL_DEVICE_1000,
	.ht_params = &iwl1000_ht_params,
};

/* BG variant: identical except no .ht_params, i.e. no HT (11n) support. */
const struct iwl_cfg iwl1000_bg_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 1000 BG",
	IWL_DEVICE_1000,
};

/* Common initializers for every Wireless-N 100 configuration.  Differs from
 * IWL_DEVICE_1000 in firmware prefix, device family, LED mode and the
 * single-antenna RX diversity flag; NVM versions are shared with the 1000. */
#define IWL_DEVICE_100						\
	.fw_name_pre = IWL100_FW_PRE,				\
	.ucode_api_max = IWL100_UCODE_API_MAX,			\
	.ucode_api_ok = IWL100_UCODE_API_OK,			\
	.ucode_api_min = IWL100_UCODE_API_MIN,			\
	.device_family = IWL_DEVICE_FAMILY_100,			\
	.max_inst_size = IWLAGN_RTC_INST_SIZE,			\
	.max_data_size = IWLAGN_RTC_DATA_SIZE,			\
	.nvm_ver = EEPROM_1000_EEPROM_VERSION,			\
	.nvm_calib_ver = EEPROM_1000_TX_POWER_VERSION,		\
	.base_params = &iwl1000_base_params,			\
	.eeprom_params = &iwl1000_eeprom_params,		\
	.led_mode = IWL_LED_RF_STATE,				\
	.rx_with_siso_diversity = true,				\
	.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K

const struct iwl_cfg iwl100_bgn_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 100 BGN",
	IWL_DEVICE_100,
	.ht_params = &iwl1000_ht_params,
};

const struct iwl_cfg iwl100_bg_cfg = {
	.name = "Intel(R) Centrino(R) Wireless-N 100 BG",
	IWL_DEVICE_100,
};

/* Tell userspace tooling which firmware images this module may request. */
MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_OK));
MODULE_FIRMWARE(IWL100_MODULE_FIRMWARE(IWL100_UCODE_API_OK));
gpl-2.0
TimWSpence/linux
drivers/gpio/gpio-wm831x.c
1385
7522
/*
 * gpiolib support for Wolfson WM831x PMICs
 *
 * Copyright 2009 Wolfson Microelectronics PLC.
 *
 * Author: Mark Brown <broonie@opensource.wolfsonmicro.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 */

#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/gpio.h>
#include <linux/mfd/core.h>
#include <linux/platform_device.h>
#include <linux/seq_file.h>

#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/pdata.h>
#include <linux/mfd/wm831x/gpio.h>
#include <linux/mfd/wm831x/irq.h>

/* Per-device driver state: the parent PMIC handle plus the gpio_chip that
 * is registered with gpiolib (embedded so we can container_of() back). */
struct wm831x_gpio {
	struct wm831x *wm831x;
	struct gpio_chip gpio_chip;
};

/* Recover our driver state from the gpio_chip gpiolib hands back to us. */
static inline struct wm831x_gpio *to_wm831x_gpio(struct gpio_chip *chip)
{
	return container_of(chip, struct wm831x_gpio, gpio_chip);
}

/*
 * Put GPIO 'offset' into input mode by setting the direction bit in its
 * control register.  On parts with a GPIO enable (has_gpio_ena) the
 * tristate bit is set as well.  Clearing WM831X_GPN_FN_MASK selects the
 * plain GPIO function.  Returns the wm831x_set_bits() result (<0 on error).
 */
static int wm831x_gpio_direction_in(struct gpio_chip *chip, unsigned offset)
{
	struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
	struct wm831x *wm831x = wm831x_gpio->wm831x;
	int val = WM831X_GPN_DIR;

	if (wm831x->has_gpio_ena)
		val |= WM831X_GPN_TRI;

	return wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + offset,
			       WM831X_GPN_DIR | WM831X_GPN_TRI |
			       WM831X_GPN_FN_MASK, val);
}

/*
 * Read the level of GPIO 'offset' from the shared level register.
 * Returns 1/0 for the pin state, or a negative error from the register
 * read.
 */
static int wm831x_gpio_get(struct gpio_chip *chip, unsigned offset)
{
	struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
	struct wm831x *wm831x = wm831x_gpio->wm831x;
	int ret;

	ret = wm831x_reg_read(wm831x, WM831X_GPIO_LEVEL);
	if (ret < 0)
		return ret;

	if (ret & 1 << offset)
		return 1;
	else
		return 0;
}

/* Drive GPIO 'offset' to 'value' by updating only its bit in the shared
 * level register.  Errors from the register write are ignored (void
 * gpiolib callback). */
static void wm831x_gpio_set(struct gpio_chip *chip, unsigned offset, int value)
{
	struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
	struct wm831x *wm831x = wm831x_gpio->wm831x;

	wm831x_set_bits(wm831x, WM831X_GPIO_LEVEL, 1 << offset,
			value << offset);
}

/*
 * Put GPIO 'offset' into output mode (DIR cleared, GPIO function selected,
 * tristate asserted when the part has a GPIO enable) and then set its
 * initial level.  The level can only be written once the pin is in output
 * mode, hence the ordering.  Returns 0 or a negative error.
 */
static int wm831x_gpio_direction_out(struct gpio_chip *chip,
				     unsigned offset, int value)
{
	struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
	struct wm831x *wm831x = wm831x_gpio->wm831x;
	int val = 0;
	int ret;

	if (wm831x->has_gpio_ena)
		val |= WM831X_GPN_TRI;

	ret = wm831x_set_bits(wm831x, WM831X_GPIO1_CONTROL + offset,
			      WM831X_GPN_DIR | WM831X_GPN_TRI |
			      WM831X_GPN_FN_MASK, val);
	if (ret < 0)
		return ret;

	/* Can only set GPIO state once it's in output mode */
	wm831x_gpio_set(chip, offset, value);

	return 0;
}

/* Map GPIO 'offset' to its virtual IRQ; GPIO n uses WM831X_IRQ_GPIO_1 + n
 * in the PMIC's IRQ domain. */
static int wm831x_gpio_to_irq(struct gpio_chip *chip, unsigned offset)
{
	struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
	struct wm831x *wm831x = wm831x_gpio->wm831x;

	return irq_create_mapping(wm831x->irq_domain,
				  WM831X_IRQ_GPIO_1 + offset);
}

/*
 * Configure the debounce time for GPIO 'offset' via its function field.
 * Only two hardware settings exist, selected by the requested time in
 * microseconds: fn=0 for 32-64us and fn=1 for 4000-8000us.  Returns
 * -EBUSY if the pin is not currently in GPIO mode (fn > 1), -EINVAL for
 * an unsupported debounce range, or the register-update result.
 */
static int wm831x_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
				    unsigned debounce)
{
	struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
	struct wm831x *wm831x = wm831x_gpio->wm831x;
	int reg = WM831X_GPIO1_CONTROL + offset;
	int ret, fn;

	ret = wm831x_reg_read(wm831x, reg);
	if (ret < 0)
		return ret;

	switch (ret & WM831X_GPN_FN_MASK) {
	case 0:
	case 1:
		break;
	default:
		/* Not in GPIO mode */
		return -EBUSY;
	}

	if (debounce >= 32 && debounce <= 64)
		fn = 0;
	else if (debounce >= 4000 && debounce <= 8000)
		fn = 1;
	else
		return -EINVAL;

	return wm831x_set_bits(wm831x, reg, WM831X_GPN_FN_MASK, fn);
}

#ifdef CONFIG_DEBUG_FS
/*
 * debugfs dump: one line per GPIO showing label, direction, level, pull
 * configuration, power domain and output mode decoded from the pin's
 * control register.  The power-domain names are keyed off the GPIO number
 * ranges (1-3/7-9, 4-6/10-12, 13-16); BUG() fires for numbers outside
 * 1..16, which would indicate a bad ngpio.
 */
static void wm831x_gpio_dbg_show(struct seq_file *s, struct gpio_chip *chip)
{
	struct wm831x_gpio *wm831x_gpio = to_wm831x_gpio(chip);
	struct wm831x *wm831x = wm831x_gpio->wm831x;
	int i, tristated;

	for (i = 0; i < chip->ngpio; i++) {
		int gpio = i + chip->base;
		int reg;
		const char *label, *pull, *powerdomain;

		/* We report the GPIO even if it's not requested since
		 * we're also reporting things like alternate
		 * functions which apply even when the GPIO is not in
		 * use as a GPIO.
		 */
		label = gpiochip_is_requested(chip, i);
		if (!label)
			label = "Unrequested";

		seq_printf(s, " gpio-%-3d (%-20.20s) ", gpio, label);

		reg = wm831x_reg_read(wm831x, WM831X_GPIO1_CONTROL + i);
		if (reg < 0) {
			dev_err(wm831x->dev,
				"GPIO control %d read failed: %d\n",
				gpio, reg);
			seq_printf(s, "\n");
			continue;
		}

		switch (reg & WM831X_GPN_PULL_MASK) {
		case WM831X_GPIO_PULL_NONE:
			pull = "nopull";
			break;
		case WM831X_GPIO_PULL_DOWN:
			pull = "pulldown";
			break;
		case WM831X_GPIO_PULL_UP:
			pull = "pullup";
			break;
		default:
			pull = "INVALID PULL";
			break;
		}

		/* Decode the power domain from the 1-based GPIO number. */
		switch (i + 1) {
		case 1 ... 3:
		case 7 ... 9:
			if (reg & WM831X_GPN_PWR_DOM)
				powerdomain = "VPMIC";
			else
				powerdomain = "DBVDD";
			break;

		case 4 ... 6:
		case 10 ... 12:
			if (reg & WM831X_GPN_PWR_DOM)
				powerdomain = "SYSVDD";
			else
				powerdomain = "DBVDD";
			break;

		case 13 ... 16:
			powerdomain = "TPVDD";
			break;

		default:
			BUG();
			break;
		}

		/* On parts with a GPIO enable, the TRI bit's sense is
		 * inverted. */
		tristated = reg & WM831X_GPN_TRI;
		if (wm831x->has_gpio_ena)
			tristated = !tristated;

		seq_printf(s, " %s %s %s %s%s\n"
			   " %s%s (0x%4x)\n",
			   reg & WM831X_GPN_DIR ? "in" : "out",
			   wm831x_gpio_get(chip, i) ? "high" : "low",
			   pull,
			   powerdomain,
			   reg & WM831X_GPN_POL ? "" : " inverted",
			   reg & WM831X_GPN_OD ? "open-drain" : "CMOS",
			   tristated ? " tristated" : "",
			   reg);
	}
}
#else
#define wm831x_gpio_dbg_show NULL
#endif

/* gpio_chip template copied into each device instance; can_sleep is set
 * because register access goes through the (sleeping) PMIC bus. */
static struct gpio_chip template_chip = {
	.label			= "wm831x",
	.owner			= THIS_MODULE,
	.direction_input	= wm831x_gpio_direction_in,
	.get			= wm831x_gpio_get,
	.direction_output	= wm831x_gpio_direction_out,
	.set			= wm831x_gpio_set,
	.to_irq			= wm831x_gpio_to_irq,
	.set_debounce		= wm831x_gpio_set_debounce,
	.dbg_show		= wm831x_gpio_dbg_show,
	.can_sleep		= true,
};

/*
 * Probe: allocate per-device state, fill in the chip template with the
 * parent PMIC's GPIO count and the platform-data GPIO base (dynamic base
 * -1 when none is given), then register with gpiolib.
 */
static int wm831x_gpio_probe(struct platform_device *pdev)
{
	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
	struct wm831x_pdata *pdata = dev_get_platdata(wm831x->dev);
	struct wm831x_gpio *wm831x_gpio;
	int ret;

	wm831x_gpio = devm_kzalloc(&pdev->dev, sizeof(*wm831x_gpio),
				   GFP_KERNEL);
	if (wm831x_gpio == NULL)
		return -ENOMEM;

	wm831x_gpio->wm831x = wm831x;
	wm831x_gpio->gpio_chip = template_chip;
	wm831x_gpio->gpio_chip.ngpio = wm831x->num_gpio;
	wm831x_gpio->gpio_chip.dev = &pdev->dev;
	if (pdata && pdata->gpio_base)
		wm831x_gpio->gpio_chip.base = pdata->gpio_base;
	else
		wm831x_gpio->gpio_chip.base = -1;

	ret = gpiochip_add(&wm831x_gpio->gpio_chip);
	if (ret < 0) {
		dev_err(&pdev->dev, "Could not register gpiochip, %d\n",
			ret);
		return ret;
	}

	platform_set_drvdata(pdev, wm831x_gpio);

	/* ret is 0 here (the error path returned above) */
	return ret;
}

/* Remove: unregister the chip; the state itself is devm-managed. */
static int wm831x_gpio_remove(struct platform_device *pdev)
{
	struct wm831x_gpio *wm831x_gpio = platform_get_drvdata(pdev);

	gpiochip_remove(&wm831x_gpio->gpio_chip);

	return 0;
}

static struct platform_driver wm831x_gpio_driver = {
	.driver.name	= "wm831x-gpio",
	.driver.owner	= THIS_MODULE,
	.probe		= wm831x_gpio_probe,
	.remove		= wm831x_gpio_remove,
};

/* Registered at subsys_initcall time so GPIOs exist before ordinary
 * drivers that consume them are probed. */
static int __init wm831x_gpio_init(void)
{
	return platform_driver_register(&wm831x_gpio_driver);
}
subsys_initcall(wm831x_gpio_init);

static void __exit wm831x_gpio_exit(void)
{
	platform_driver_unregister(&wm831x_gpio_driver);
}
module_exit(wm831x_gpio_exit);

MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
MODULE_DESCRIPTION("GPIO interface for WM831x PMICs");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:wm831x-gpio");
gpl-2.0
kongzizaixian/linux
drivers/usb/c67x00/c67x00-ll-hpi.c
2153
12492
/* * c67x00-ll-hpi.c: Cypress C67X00 USB Low level interface using HPI * * Copyright (C) 2006-2008 Barco N.V. * Derived from the Cypress cy7c67200/300 ezusb linux driver and * based on multiple host controller drivers inside the linux kernel. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301 USA. */ #include <asm/byteorder.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/jiffies.h> #include <linux/usb/c67x00.h> #include "c67x00.h" #define COMM_REGS 14 struct c67x00_lcp_int_data { u16 regs[COMM_REGS]; }; /* -------------------------------------------------------------------------- */ /* Interface definitions */ #define COMM_ACK 0x0FED #define COMM_NAK 0xDEAD #define COMM_RESET 0xFA50 #define COMM_EXEC_INT 0xCE01 #define COMM_INT_NUM 0x01C2 /* Registers 0 to COMM_REGS-1 */ #define COMM_R(x) (0x01C4 + 2 * (x)) #define HUSB_SIE_pCurrentTDPtr(x) ((x) ? 0x01B2 : 0x01B0) #define HUSB_SIE_pTDListDone_Sem(x) ((x) ? 0x01B8 : 0x01B6) #define HUSB_pEOT 0x01B4 /* Software interrupts */ /* 114, 115: */ #define HUSB_SIE_INIT_INT(x) ((x) ? 
0x0073 : 0x0072) #define HUSB_RESET_INT 0x0074 #define SUSB_INIT_INT 0x0071 #define SUSB_INIT_INT_LOC (SUSB_INIT_INT * 2) /* ----------------------------------------------------------------------- * HPI implementation * * The c67x00 chip also support control via SPI or HSS serial * interfaces. However, this driver assumes that register access can * be performed from IRQ context. While this is a safe assumption with * the HPI interface, it is not true for the serial interfaces. */ /* HPI registers */ #define HPI_DATA 0 #define HPI_MAILBOX 1 #define HPI_ADDR 2 #define HPI_STATUS 3 /* * According to CY7C67300 specification (tables 140 and 141) HPI read and * write cycle duration Tcyc must be at least 6T long, where T is 1/48MHz, * which is 125ns. */ #define HPI_T_CYC_NS 125 static inline u16 hpi_read_reg(struct c67x00_device *dev, int reg) { ndelay(HPI_T_CYC_NS); return __raw_readw(dev->hpi.base + reg * dev->hpi.regstep); } static inline void hpi_write_reg(struct c67x00_device *dev, int reg, u16 value) { ndelay(HPI_T_CYC_NS); __raw_writew(value, dev->hpi.base + reg * dev->hpi.regstep); } static inline u16 hpi_read_word_nolock(struct c67x00_device *dev, u16 reg) { hpi_write_reg(dev, HPI_ADDR, reg); return hpi_read_reg(dev, HPI_DATA); } static u16 hpi_read_word(struct c67x00_device *dev, u16 reg) { u16 value; unsigned long flags; spin_lock_irqsave(&dev->hpi.lock, flags); value = hpi_read_word_nolock(dev, reg); spin_unlock_irqrestore(&dev->hpi.lock, flags); return value; } static void hpi_write_word_nolock(struct c67x00_device *dev, u16 reg, u16 value) { hpi_write_reg(dev, HPI_ADDR, reg); hpi_write_reg(dev, HPI_DATA, value); } static void hpi_write_word(struct c67x00_device *dev, u16 reg, u16 value) { unsigned long flags; spin_lock_irqsave(&dev->hpi.lock, flags); hpi_write_word_nolock(dev, reg, value); spin_unlock_irqrestore(&dev->hpi.lock, flags); } /* * Only data is little endian, addr has cpu endianess */ static void hpi_write_words_le16(struct c67x00_device *dev, u16 
addr, __le16 *data, u16 count) { unsigned long flags; int i; spin_lock_irqsave(&dev->hpi.lock, flags); hpi_write_reg(dev, HPI_ADDR, addr); for (i = 0; i < count; i++) hpi_write_reg(dev, HPI_DATA, le16_to_cpu(*data++)); spin_unlock_irqrestore(&dev->hpi.lock, flags); } /* * Only data is little endian, addr has cpu endianess */ static void hpi_read_words_le16(struct c67x00_device *dev, u16 addr, __le16 *data, u16 count) { unsigned long flags; int i; spin_lock_irqsave(&dev->hpi.lock, flags); hpi_write_reg(dev, HPI_ADDR, addr); for (i = 0; i < count; i++) *data++ = cpu_to_le16(hpi_read_reg(dev, HPI_DATA)); spin_unlock_irqrestore(&dev->hpi.lock, flags); } static void hpi_set_bits(struct c67x00_device *dev, u16 reg, u16 mask) { u16 value; unsigned long flags; spin_lock_irqsave(&dev->hpi.lock, flags); value = hpi_read_word_nolock(dev, reg); hpi_write_word_nolock(dev, reg, value | mask); spin_unlock_irqrestore(&dev->hpi.lock, flags); } static void hpi_clear_bits(struct c67x00_device *dev, u16 reg, u16 mask) { u16 value; unsigned long flags; spin_lock_irqsave(&dev->hpi.lock, flags); value = hpi_read_word_nolock(dev, reg); hpi_write_word_nolock(dev, reg, value & ~mask); spin_unlock_irqrestore(&dev->hpi.lock, flags); } static u16 hpi_recv_mbox(struct c67x00_device *dev) { u16 value; unsigned long flags; spin_lock_irqsave(&dev->hpi.lock, flags); value = hpi_read_reg(dev, HPI_MAILBOX); spin_unlock_irqrestore(&dev->hpi.lock, flags); return value; } static u16 hpi_send_mbox(struct c67x00_device *dev, u16 value) { unsigned long flags; spin_lock_irqsave(&dev->hpi.lock, flags); hpi_write_reg(dev, HPI_MAILBOX, value); spin_unlock_irqrestore(&dev->hpi.lock, flags); return value; } u16 c67x00_ll_hpi_status(struct c67x00_device *dev) { u16 value; unsigned long flags; spin_lock_irqsave(&dev->hpi.lock, flags); value = hpi_read_reg(dev, HPI_STATUS); spin_unlock_irqrestore(&dev->hpi.lock, flags); return value; } void c67x00_ll_hpi_reg_init(struct c67x00_device *dev) { int i; 
hpi_recv_mbox(dev); c67x00_ll_hpi_status(dev); hpi_write_word(dev, HPI_IRQ_ROUTING_REG, 0); for (i = 0; i < C67X00_SIES; i++) { hpi_write_word(dev, SIEMSG_REG(i), 0); hpi_read_word(dev, SIEMSG_REG(i)); } } void c67x00_ll_hpi_enable_sofeop(struct c67x00_sie *sie) { hpi_set_bits(sie->dev, HPI_IRQ_ROUTING_REG, SOFEOP_TO_HPI_EN(sie->sie_num)); } void c67x00_ll_hpi_disable_sofeop(struct c67x00_sie *sie) { hpi_clear_bits(sie->dev, HPI_IRQ_ROUTING_REG, SOFEOP_TO_HPI_EN(sie->sie_num)); } /* -------------------------------------------------------------------------- */ /* Transactions */ static inline int ll_recv_msg(struct c67x00_device *dev) { u16 res; res = wait_for_completion_timeout(&dev->hpi.lcp.msg_received, 5 * HZ); WARN_ON(!res); return (res == 0) ? -EIO : 0; } /* -------------------------------------------------------------------------- */ /* General functions */ u16 c67x00_ll_fetch_siemsg(struct c67x00_device *dev, int sie_num) { u16 val; val = hpi_read_word(dev, SIEMSG_REG(sie_num)); /* clear register to allow next message */ hpi_write_word(dev, SIEMSG_REG(sie_num), 0); return val; } u16 c67x00_ll_get_usb_ctl(struct c67x00_sie *sie) { return hpi_read_word(sie->dev, USB_CTL_REG(sie->sie_num)); } /** * c67x00_ll_usb_clear_status - clear the USB status bits */ void c67x00_ll_usb_clear_status(struct c67x00_sie *sie, u16 bits) { hpi_write_word(sie->dev, USB_STAT_REG(sie->sie_num), bits); } u16 c67x00_ll_usb_get_status(struct c67x00_sie *sie) { return hpi_read_word(sie->dev, USB_STAT_REG(sie->sie_num)); } /* -------------------------------------------------------------------------- */ static int c67x00_comm_exec_int(struct c67x00_device *dev, u16 nr, struct c67x00_lcp_int_data *data) { int i, rc; mutex_lock(&dev->hpi.lcp.mutex); hpi_write_word(dev, COMM_INT_NUM, nr); for (i = 0; i < COMM_REGS; i++) hpi_write_word(dev, COMM_R(i), data->regs[i]); hpi_send_mbox(dev, COMM_EXEC_INT); rc = ll_recv_msg(dev); mutex_unlock(&dev->hpi.lcp.mutex); return rc; } /* 
-------------------------------------------------------------------------- */ /* Host specific functions */ void c67x00_ll_set_husb_eot(struct c67x00_device *dev, u16 value) { mutex_lock(&dev->hpi.lcp.mutex); hpi_write_word(dev, HUSB_pEOT, value); mutex_unlock(&dev->hpi.lcp.mutex); } static inline void c67x00_ll_husb_sie_init(struct c67x00_sie *sie) { struct c67x00_device *dev = sie->dev; struct c67x00_lcp_int_data data; int rc; rc = c67x00_comm_exec_int(dev, HUSB_SIE_INIT_INT(sie->sie_num), &data); BUG_ON(rc); /* No return path for error code; crash spectacularly */ } void c67x00_ll_husb_reset(struct c67x00_sie *sie, int port) { struct c67x00_device *dev = sie->dev; struct c67x00_lcp_int_data data; int rc; data.regs[0] = 50; /* Reset USB port for 50ms */ data.regs[1] = port | (sie->sie_num << 1); rc = c67x00_comm_exec_int(dev, HUSB_RESET_INT, &data); BUG_ON(rc); /* No return path for error code; crash spectacularly */ } void c67x00_ll_husb_set_current_td(struct c67x00_sie *sie, u16 addr) { hpi_write_word(sie->dev, HUSB_SIE_pCurrentTDPtr(sie->sie_num), addr); } u16 c67x00_ll_husb_get_current_td(struct c67x00_sie *sie) { return hpi_read_word(sie->dev, HUSB_SIE_pCurrentTDPtr(sie->sie_num)); } u16 c67x00_ll_husb_get_frame(struct c67x00_sie *sie) { return hpi_read_word(sie->dev, HOST_FRAME_REG(sie->sie_num)); } void c67x00_ll_husb_init_host_port(struct c67x00_sie *sie) { /* Set port into host mode */ hpi_set_bits(sie->dev, USB_CTL_REG(sie->sie_num), HOST_MODE); c67x00_ll_husb_sie_init(sie); /* Clear interrupts */ c67x00_ll_usb_clear_status(sie, HOST_STAT_MASK); /* Check */ if (!(hpi_read_word(sie->dev, USB_CTL_REG(sie->sie_num)) & HOST_MODE)) dev_warn(sie_dev(sie), "SIE %d not set to host mode\n", sie->sie_num); } void c67x00_ll_husb_reset_port(struct c67x00_sie *sie, int port) { /* Clear connect change */ c67x00_ll_usb_clear_status(sie, PORT_CONNECT_CHANGE(port)); /* Enable interrupts */ hpi_set_bits(sie->dev, HPI_IRQ_ROUTING_REG, SOFEOP_TO_CPU_EN(sie->sie_num)); 
hpi_set_bits(sie->dev, HOST_IRQ_EN_REG(sie->sie_num), SOF_EOP_IRQ_EN | DONE_IRQ_EN); /* Enable pull down transistors */ hpi_set_bits(sie->dev, USB_CTL_REG(sie->sie_num), PORT_RES_EN(port)); } /* -------------------------------------------------------------------------- */ void c67x00_ll_irq(struct c67x00_device *dev, u16 int_status) { if ((int_status & MBX_OUT_FLG) == 0) return; dev->hpi.lcp.last_msg = hpi_recv_mbox(dev); complete(&dev->hpi.lcp.msg_received); } /* -------------------------------------------------------------------------- */ int c67x00_ll_reset(struct c67x00_device *dev) { int rc; mutex_lock(&dev->hpi.lcp.mutex); hpi_send_mbox(dev, COMM_RESET); rc = ll_recv_msg(dev); mutex_unlock(&dev->hpi.lcp.mutex); return rc; } /* -------------------------------------------------------------------------- */ /** * c67x00_ll_write_mem_le16 - write into c67x00 memory * Only data is little endian, addr has cpu endianess. */ void c67x00_ll_write_mem_le16(struct c67x00_device *dev, u16 addr, void *data, int len) { u8 *buf = data; /* Sanity check */ if (addr + len > 0xffff) { dev_err(&dev->pdev->dev, "Trying to write beyond writable region!\n"); return; } if (addr & 0x01) { /* unaligned access */ u16 tmp; tmp = hpi_read_word(dev, addr - 1); tmp = (tmp & 0x00ff) | (*buf++ << 8); hpi_write_word(dev, addr - 1, tmp); addr++; len--; } hpi_write_words_le16(dev, addr, (__le16 *)buf, len / 2); buf += len & ~0x01; addr += len & ~0x01; len &= 0x01; if (len) { u16 tmp; tmp = hpi_read_word(dev, addr); tmp = (tmp & 0xff00) | *buf; hpi_write_word(dev, addr, tmp); } } /** * c67x00_ll_read_mem_le16 - read from c67x00 memory * Only data is little endian, addr has cpu endianess. 
*/ void c67x00_ll_read_mem_le16(struct c67x00_device *dev, u16 addr, void *data, int len) { u8 *buf = data; if (addr & 0x01) { /* unaligned access */ u16 tmp; tmp = hpi_read_word(dev, addr - 1); *buf++ = (tmp >> 8) & 0x00ff; addr++; len--; } hpi_read_words_le16(dev, addr, (__le16 *)buf, len / 2); buf += len & ~0x01; addr += len & ~0x01; len &= 0x01; if (len) { u16 tmp; tmp = hpi_read_word(dev, addr); *buf = tmp & 0x00ff; } } /* -------------------------------------------------------------------------- */ void c67x00_ll_init(struct c67x00_device *dev) { mutex_init(&dev->hpi.lcp.mutex); init_completion(&dev->hpi.lcp.msg_received); } void c67x00_ll_release(struct c67x00_device *dev) { }
gpl-2.0
TeamRegular/android_kernel_alcatel_msm8916
fs/reiserfs/journal.c
2153
124107
/* ** Write ahead logging implementation copyright Chris Mason 2000 ** ** The background commits make this code very interrelated, and ** overly complex. I need to rethink things a bit....The major players: ** ** journal_begin -- call with the number of blocks you expect to log. ** If the current transaction is too ** old, it will block until the current transaction is ** finished, and then start a new one. ** Usually, your transaction will get joined in with ** previous ones for speed. ** ** journal_join -- same as journal_begin, but won't block on the current ** transaction regardless of age. Don't ever call ** this. Ever. There are only two places it should be ** called from, and they are both inside this file. ** ** journal_mark_dirty -- adds blocks into this transaction. clears any flags ** that might make them get sent to disk ** and then marks them BH_JDirty. Puts the buffer head ** into the current transaction hash. ** ** journal_end -- if the current transaction is batchable, it does nothing ** otherwise, it could do an async/synchronous commit, or ** a full flush of all log and real blocks in the ** transaction. ** ** flush_old_commits -- if the current transaction is too old, it is ended and ** commit blocks are sent to disk. Forces commit blocks ** to disk for all backgrounded commits that have been ** around too long. 
** -- Note, if you call this as an immediate flush from ** from within kupdate, it will ignore the immediate flag */ #include <linux/time.h> #include <linux/semaphore.h> #include <linux/vmalloc.h> #include "reiserfs.h" #include <linux/kernel.h> #include <linux/errno.h> #include <linux/fcntl.h> #include <linux/stat.h> #include <linux/string.h> #include <linux/buffer_head.h> #include <linux/workqueue.h> #include <linux/writeback.h> #include <linux/blkdev.h> #include <linux/backing-dev.h> #include <linux/uaccess.h> #include <linux/slab.h> /* gets a struct reiserfs_journal_list * from a list head */ #define JOURNAL_LIST_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \ j_list)) #define JOURNAL_WORK_ENTRY(h) (list_entry((h), struct reiserfs_journal_list, \ j_working_list)) /* the number of mounted filesystems. This is used to decide when to ** start and kill the commit workqueue */ static int reiserfs_mounted_fs_count; static struct workqueue_struct *commit_wq; #define JOURNAL_TRANS_HALF 1018 /* must be correct to keep the desc and commit structs at 4k */ #define BUFNR 64 /*read ahead */ /* cnode stat bits. Move these into reiserfs_fs.h */ #define BLOCK_FREED 2 /* this block was freed, and can't be written. 
*/ #define BLOCK_FREED_HOLDER 3 /* this block was freed during this transaction, and can't be written */ #define BLOCK_NEEDS_FLUSH 4 /* used in flush_journal_list */ #define BLOCK_DIRTIED 5 /* journal list state bits */ #define LIST_TOUCHED 1 #define LIST_DIRTY 2 #define LIST_COMMIT_PENDING 4 /* someone will commit this list */ /* flags for do_journal_end */ #define FLUSH_ALL 1 /* flush commit and real blocks */ #define COMMIT_NOW 2 /* end and commit this transaction */ #define WAIT 4 /* wait for the log blocks to hit the disk */ static int do_journal_end(struct reiserfs_transaction_handle *, struct super_block *, unsigned long nblocks, int flags); static int flush_journal_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall); static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall); static int can_dirty(struct reiserfs_journal_cnode *cn); static int journal_join(struct reiserfs_transaction_handle *th, struct super_block *sb, unsigned long nblocks); static void release_journal_dev(struct super_block *super, struct reiserfs_journal *journal); static int dirty_one_transaction(struct super_block *s, struct reiserfs_journal_list *jl); static void flush_async_commits(struct work_struct *work); static void queue_log_writer(struct super_block *s); /* values for join in do_journal_begin_r */ enum { JBEGIN_REG = 0, /* regular journal begin */ JBEGIN_JOIN = 1, /* join the running transaction if at all possible */ JBEGIN_ABORT = 2, /* called from cleanup code, ignores aborted flag */ }; static int do_journal_begin_r(struct reiserfs_transaction_handle *th, struct super_block *sb, unsigned long nblocks, int join); static void init_journal_hash(struct super_block *sb) { struct reiserfs_journal *journal = SB_JOURNAL(sb); memset(journal->j_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct reiserfs_journal_cnode *)); } /* ** clears BH_Dirty and sticks the buffer on the clean list. 
Called because I can't allow refile_buffer to ** make schedule happen after I've freed a block. Look at remove_from_transaction and journal_mark_freed for ** more details. */ static int reiserfs_clean_and_file_buffer(struct buffer_head *bh) { if (bh) { clear_buffer_dirty(bh); clear_buffer_journal_test(bh); } return 0; } static struct reiserfs_bitmap_node *allocate_bitmap_node(struct super_block *sb) { struct reiserfs_bitmap_node *bn; static int id; bn = kmalloc(sizeof(struct reiserfs_bitmap_node), GFP_NOFS); if (!bn) { return NULL; } bn->data = kzalloc(sb->s_blocksize, GFP_NOFS); if (!bn->data) { kfree(bn); return NULL; } bn->id = id++; INIT_LIST_HEAD(&bn->list); return bn; } static struct reiserfs_bitmap_node *get_bitmap_node(struct super_block *sb) { struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_bitmap_node *bn = NULL; struct list_head *entry = journal->j_bitmap_nodes.next; journal->j_used_bitmap_nodes++; repeat: if (entry != &journal->j_bitmap_nodes) { bn = list_entry(entry, struct reiserfs_bitmap_node, list); list_del(entry); memset(bn->data, 0, sb->s_blocksize); journal->j_free_bitmap_nodes--; return bn; } bn = allocate_bitmap_node(sb); if (!bn) { yield(); goto repeat; } return bn; } static inline void free_bitmap_node(struct super_block *sb, struct reiserfs_bitmap_node *bn) { struct reiserfs_journal *journal = SB_JOURNAL(sb); journal->j_used_bitmap_nodes--; if (journal->j_free_bitmap_nodes > REISERFS_MAX_BITMAP_NODES) { kfree(bn->data); kfree(bn); } else { list_add(&bn->list, &journal->j_bitmap_nodes); journal->j_free_bitmap_nodes++; } } static void allocate_bitmap_nodes(struct super_block *sb) { int i; struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_bitmap_node *bn = NULL; for (i = 0; i < REISERFS_MIN_BITMAP_NODES; i++) { bn = allocate_bitmap_node(sb); if (bn) { list_add(&bn->list, &journal->j_bitmap_nodes); journal->j_free_bitmap_nodes++; } else { break; /* this is ok, we'll try again when more are needed */ } } } 
static int set_bit_in_list_bitmap(struct super_block *sb, b_blocknr_t block, struct reiserfs_list_bitmap *jb) { unsigned int bmap_nr = block / (sb->s_blocksize << 3); unsigned int bit_nr = block % (sb->s_blocksize << 3); if (!jb->bitmaps[bmap_nr]) { jb->bitmaps[bmap_nr] = get_bitmap_node(sb); } set_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]->data); return 0; } static void cleanup_bitmap_list(struct super_block *sb, struct reiserfs_list_bitmap *jb) { int i; if (jb->bitmaps == NULL) return; for (i = 0; i < reiserfs_bmap_count(sb); i++) { if (jb->bitmaps[i]) { free_bitmap_node(sb, jb->bitmaps[i]); jb->bitmaps[i] = NULL; } } } /* ** only call this on FS unmount. */ static int free_list_bitmaps(struct super_block *sb, struct reiserfs_list_bitmap *jb_array) { int i; struct reiserfs_list_bitmap *jb; for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) { jb = jb_array + i; jb->journal_list = NULL; cleanup_bitmap_list(sb, jb); vfree(jb->bitmaps); jb->bitmaps = NULL; } return 0; } static int free_bitmap_nodes(struct super_block *sb) { struct reiserfs_journal *journal = SB_JOURNAL(sb); struct list_head *next = journal->j_bitmap_nodes.next; struct reiserfs_bitmap_node *bn; while (next != &journal->j_bitmap_nodes) { bn = list_entry(next, struct reiserfs_bitmap_node, list); list_del(next); kfree(bn->data); kfree(bn); next = journal->j_bitmap_nodes.next; journal->j_free_bitmap_nodes--; } return 0; } /* ** get memory for JOURNAL_NUM_BITMAPS worth of bitmaps. ** jb_array is the array to be filled in. 
*/ int reiserfs_allocate_list_bitmaps(struct super_block *sb, struct reiserfs_list_bitmap *jb_array, unsigned int bmap_nr) { int i; int failed = 0; struct reiserfs_list_bitmap *jb; int mem = bmap_nr * sizeof(struct reiserfs_bitmap_node *); for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) { jb = jb_array + i; jb->journal_list = NULL; jb->bitmaps = vzalloc(mem); if (!jb->bitmaps) { reiserfs_warning(sb, "clm-2000", "unable to " "allocate bitmaps for journal lists"); failed = 1; break; } } if (failed) { free_list_bitmaps(sb, jb_array); return -1; } return 0; } /* ** find an available list bitmap. If you can't find one, flush a commit list ** and try again */ static struct reiserfs_list_bitmap *get_list_bitmap(struct super_block *sb, struct reiserfs_journal_list *jl) { int i, j; struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_list_bitmap *jb = NULL; for (j = 0; j < (JOURNAL_NUM_BITMAPS * 3); j++) { i = journal->j_list_bitmap_index; journal->j_list_bitmap_index = (i + 1) % JOURNAL_NUM_BITMAPS; jb = journal->j_list_bitmap + i; if (journal->j_list_bitmap[i].journal_list) { flush_commit_list(sb, journal->j_list_bitmap[i]. journal_list, 1); if (!journal->j_list_bitmap[i].journal_list) { break; } } else { break; } } if (jb->journal_list) { /* double check to make sure if flushed correctly */ return NULL; } jb->journal_list = jl; return jb; } /* ** allocates a new chunk of X nodes, and links them all together as a list. 
** Uses the cnode->next and cnode->prev pointers ** returns NULL on failure */ static struct reiserfs_journal_cnode *allocate_cnodes(int num_cnodes) { struct reiserfs_journal_cnode *head; int i; if (num_cnodes <= 0) { return NULL; } head = vzalloc(num_cnodes * sizeof(struct reiserfs_journal_cnode)); if (!head) { return NULL; } head[0].prev = NULL; head[0].next = head + 1; for (i = 1; i < num_cnodes; i++) { head[i].prev = head + (i - 1); head[i].next = head + (i + 1); /* if last one, overwrite it after the if */ } head[num_cnodes - 1].next = NULL; return head; } /* ** pulls a cnode off the free list, or returns NULL on failure */ static struct reiserfs_journal_cnode *get_cnode(struct super_block *sb) { struct reiserfs_journal_cnode *cn; struct reiserfs_journal *journal = SB_JOURNAL(sb); reiserfs_check_lock_depth(sb, "get_cnode"); if (journal->j_cnode_free <= 0) { return NULL; } journal->j_cnode_used++; journal->j_cnode_free--; cn = journal->j_cnode_free_list; if (!cn) { return cn; } if (cn->next) { cn->next->prev = NULL; } journal->j_cnode_free_list = cn->next; memset(cn, 0, sizeof(struct reiserfs_journal_cnode)); return cn; } /* ** returns a cnode to the free list */ static void free_cnode(struct super_block *sb, struct reiserfs_journal_cnode *cn) { struct reiserfs_journal *journal = SB_JOURNAL(sb); reiserfs_check_lock_depth(sb, "free_cnode"); journal->j_cnode_used--; journal->j_cnode_free++; /* memset(cn, 0, sizeof(struct reiserfs_journal_cnode)) ; */ cn->next = journal->j_cnode_free_list; if (journal->j_cnode_free_list) { journal->j_cnode_free_list->prev = cn; } cn->prev = NULL; /* not needed with the memset, but I might kill the memset, and forget to do this */ journal->j_cnode_free_list = cn; } static void clear_prepared_bits(struct buffer_head *bh) { clear_buffer_journal_prepared(bh); clear_buffer_journal_restore_dirty(bh); } /* return a cnode with same dev, block number and size in table, or null if not found */ static inline struct reiserfs_journal_cnode 
*get_journal_hash_dev(struct super_block *sb, struct reiserfs_journal_cnode **table, long bl) { struct reiserfs_journal_cnode *cn; cn = journal_hash(table, sb, bl); while (cn) { if (cn->blocknr == bl && cn->sb == sb) return cn; cn = cn->hnext; } return (struct reiserfs_journal_cnode *)0; } /* ** this actually means 'can this block be reallocated yet?'. If you set search_all, a block can only be allocated ** if it is not in the current transaction, was not freed by the current transaction, and has no chance of ever ** being overwritten by a replay after crashing. ** ** If you don't set search_all, a block can only be allocated if it is not in the current transaction. Since deleting ** a block removes it from the current transaction, this case should never happen. If you don't set search_all, make ** sure you never write the block without logging it. ** ** next_zero_bit is a suggestion about the next block to try for find_forward. ** when bl is rejected because it is set in a journal list bitmap, we search ** for the next zero bit in the bitmap that rejected bl. Then, we return that ** through next_zero_bit for find_forward to try. ** ** Just because we return something in next_zero_bit does not mean we won't ** reject it on the next call to reiserfs_in_journal ** */ int reiserfs_in_journal(struct super_block *sb, unsigned int bmap_nr, int bit_nr, int search_all, b_blocknr_t * next_zero_bit) { struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_journal_cnode *cn; struct reiserfs_list_bitmap *jb; int i; unsigned long bl; *next_zero_bit = 0; /* always start this at zero. */ PROC_INFO_INC(sb, journal.in_journal); /* If we aren't doing a search_all, this is a metablock, and it will be logged before use. 
** if we crash before the transaction that freed it commits, this transaction won't ** have committed either, and the block will never be written */ if (search_all) { for (i = 0; i < JOURNAL_NUM_BITMAPS; i++) { PROC_INFO_INC(sb, journal.in_journal_bitmap); jb = journal->j_list_bitmap + i; if (jb->journal_list && jb->bitmaps[bmap_nr] && test_bit(bit_nr, (unsigned long *)jb->bitmaps[bmap_nr]-> data)) { *next_zero_bit = find_next_zero_bit((unsigned long *) (jb->bitmaps[bmap_nr]-> data), sb->s_blocksize << 3, bit_nr + 1); return 1; } } } bl = bmap_nr * (sb->s_blocksize << 3) + bit_nr; /* is it in any old transactions? */ if (search_all && (cn = get_journal_hash_dev(sb, journal->j_list_hash_table, bl))) { return 1; } /* is it in the current transaction. This should never happen */ if ((cn = get_journal_hash_dev(sb, journal->j_hash_table, bl))) { BUG(); return 1; } PROC_INFO_INC(sb, journal.in_journal_reusable); /* safe for reuse */ return 0; } /* insert cn into table */ static inline void insert_journal_hash(struct reiserfs_journal_cnode **table, struct reiserfs_journal_cnode *cn) { struct reiserfs_journal_cnode *cn_orig; cn_orig = journal_hash(table, cn->sb, cn->blocknr); cn->hnext = cn_orig; cn->hprev = NULL; if (cn_orig) { cn_orig->hprev = cn; } journal_hash(table, cn->sb, cn->blocknr) = cn; } /* lock the current transaction */ static inline void lock_journal(struct super_block *sb) { PROC_INFO_INC(sb, journal.lock_journal); reiserfs_mutex_lock_safe(&SB_JOURNAL(sb)->j_mutex, sb); } /* unlock the current transaction */ static inline void unlock_journal(struct super_block *sb) { mutex_unlock(&SB_JOURNAL(sb)->j_mutex); } static inline void get_journal_list(struct reiserfs_journal_list *jl) { jl->j_refcount++; } static inline void put_journal_list(struct super_block *s, struct reiserfs_journal_list *jl) { if (jl->j_refcount < 1) { reiserfs_panic(s, "journal-2", "trans id %u, refcount at %d", jl->j_trans_id, jl->j_refcount); } if (--jl->j_refcount == 0) kfree(jl); } /* ** 
this used to be much more involved, and I'm keeping it just in case things get ugly again. ** it gets called by flush_commit_list, and cleans up any data stored about blocks freed during a ** transaction. */ static void cleanup_freed_for_journal_list(struct super_block *sb, struct reiserfs_journal_list *jl) { struct reiserfs_list_bitmap *jb = jl->j_list_bitmap; if (jb) { cleanup_bitmap_list(sb, jb); } jl->j_list_bitmap->journal_list = NULL; jl->j_list_bitmap = NULL; } static int journal_list_still_alive(struct super_block *s, unsigned int trans_id) { struct reiserfs_journal *journal = SB_JOURNAL(s); struct list_head *entry = &journal->j_journal_list; struct reiserfs_journal_list *jl; if (!list_empty(entry)) { jl = JOURNAL_LIST_ENTRY(entry->next); if (jl->j_trans_id <= trans_id) { return 1; } } return 0; } /* * If page->mapping was null, we failed to truncate this page for * some reason. Most likely because it was truncated after being * logged via data=journal. * * This does a check to see if the buffer belongs to one of these * lost pages before doing the final put_bh. If page->mapping was * null, it tries to free buffers on the page, which should make the * final page_cache_release drop the page from the lru. 
*/ static void release_buffer_page(struct buffer_head *bh) { struct page *page = bh->b_page; if (!page->mapping && trylock_page(page)) { page_cache_get(page); put_bh(bh); if (!page->mapping) try_to_free_buffers(page); unlock_page(page); page_cache_release(page); } else { put_bh(bh); } } static void reiserfs_end_buffer_io_sync(struct buffer_head *bh, int uptodate) { char b[BDEVNAME_SIZE]; if (buffer_journaled(bh)) { reiserfs_warning(NULL, "clm-2084", "pinned buffer %lu:%s sent to disk", bh->b_blocknr, bdevname(bh->b_bdev, b)); } if (uptodate) set_buffer_uptodate(bh); else clear_buffer_uptodate(bh); unlock_buffer(bh); release_buffer_page(bh); } static void reiserfs_end_ordered_io(struct buffer_head *bh, int uptodate) { if (uptodate) set_buffer_uptodate(bh); else clear_buffer_uptodate(bh); unlock_buffer(bh); put_bh(bh); } static void submit_logged_buffer(struct buffer_head *bh) { get_bh(bh); bh->b_end_io = reiserfs_end_buffer_io_sync; clear_buffer_journal_new(bh); clear_buffer_dirty(bh); if (!test_clear_buffer_journal_test(bh)) BUG(); if (!buffer_uptodate(bh)) BUG(); submit_bh(WRITE, bh); } static void submit_ordered_buffer(struct buffer_head *bh) { get_bh(bh); bh->b_end_io = reiserfs_end_ordered_io; clear_buffer_dirty(bh); if (!buffer_uptodate(bh)) BUG(); submit_bh(WRITE, bh); } #define CHUNK_SIZE 32 struct buffer_chunk { struct buffer_head *bh[CHUNK_SIZE]; int nr; }; static void write_chunk(struct buffer_chunk *chunk) { int i; for (i = 0; i < chunk->nr; i++) { submit_logged_buffer(chunk->bh[i]); } chunk->nr = 0; } static void write_ordered_chunk(struct buffer_chunk *chunk) { int i; for (i = 0; i < chunk->nr; i++) { submit_ordered_buffer(chunk->bh[i]); } chunk->nr = 0; } static int add_to_chunk(struct buffer_chunk *chunk, struct buffer_head *bh, spinlock_t * lock, void (fn) (struct buffer_chunk *)) { int ret = 0; BUG_ON(chunk->nr >= CHUNK_SIZE); chunk->bh[chunk->nr++] = bh; if (chunk->nr >= CHUNK_SIZE) { ret = 1; if (lock) spin_unlock(lock); fn(chunk); if (lock) 
spin_lock(lock); } return ret; } static atomic_t nr_reiserfs_jh = ATOMIC_INIT(0); static struct reiserfs_jh *alloc_jh(void) { struct reiserfs_jh *jh; while (1) { jh = kmalloc(sizeof(*jh), GFP_NOFS); if (jh) { atomic_inc(&nr_reiserfs_jh); return jh; } yield(); } } /* * we want to free the jh when the buffer has been written * and waited on */ void reiserfs_free_jh(struct buffer_head *bh) { struct reiserfs_jh *jh; jh = bh->b_private; if (jh) { bh->b_private = NULL; jh->bh = NULL; list_del_init(&jh->list); kfree(jh); if (atomic_read(&nr_reiserfs_jh) <= 0) BUG(); atomic_dec(&nr_reiserfs_jh); put_bh(bh); } } static inline int __add_jh(struct reiserfs_journal *j, struct buffer_head *bh, int tail) { struct reiserfs_jh *jh; if (bh->b_private) { spin_lock(&j->j_dirty_buffers_lock); if (!bh->b_private) { spin_unlock(&j->j_dirty_buffers_lock); goto no_jh; } jh = bh->b_private; list_del_init(&jh->list); } else { no_jh: get_bh(bh); jh = alloc_jh(); spin_lock(&j->j_dirty_buffers_lock); /* buffer must be locked for __add_jh, should be able to have * two adds at the same time */ BUG_ON(bh->b_private); jh->bh = bh; bh->b_private = jh; } jh->jl = j->j_current_jl; if (tail) list_add_tail(&jh->list, &jh->jl->j_tail_bh_list); else { list_add_tail(&jh->list, &jh->jl->j_bh_list); } spin_unlock(&j->j_dirty_buffers_lock); return 0; } int reiserfs_add_tail_list(struct inode *inode, struct buffer_head *bh) { return __add_jh(SB_JOURNAL(inode->i_sb), bh, 1); } int reiserfs_add_ordered_list(struct inode *inode, struct buffer_head *bh) { return __add_jh(SB_JOURNAL(inode->i_sb), bh, 0); } #define JH_ENTRY(l) list_entry((l), struct reiserfs_jh, list) static int write_ordered_buffers(spinlock_t * lock, struct reiserfs_journal *j, struct reiserfs_journal_list *jl, struct list_head *list) { struct buffer_head *bh; struct reiserfs_jh *jh; int ret = j->j_errno; struct buffer_chunk chunk; struct list_head tmp; INIT_LIST_HEAD(&tmp); chunk.nr = 0; spin_lock(lock); while (!list_empty(list)) { jh = 
JH_ENTRY(list->next); bh = jh->bh; get_bh(bh); if (!trylock_buffer(bh)) { if (!buffer_dirty(bh)) { list_move(&jh->list, &tmp); goto loop_next; } spin_unlock(lock); if (chunk.nr) write_ordered_chunk(&chunk); wait_on_buffer(bh); cond_resched(); spin_lock(lock); goto loop_next; } /* in theory, dirty non-uptodate buffers should never get here, * but the upper layer io error paths still have a few quirks. * Handle them here as gracefully as we can */ if (!buffer_uptodate(bh) && buffer_dirty(bh)) { clear_buffer_dirty(bh); ret = -EIO; } if (buffer_dirty(bh)) { list_move(&jh->list, &tmp); add_to_chunk(&chunk, bh, lock, write_ordered_chunk); } else { reiserfs_free_jh(bh); unlock_buffer(bh); } loop_next: put_bh(bh); cond_resched_lock(lock); } if (chunk.nr) { spin_unlock(lock); write_ordered_chunk(&chunk); spin_lock(lock); } while (!list_empty(&tmp)) { jh = JH_ENTRY(tmp.prev); bh = jh->bh; get_bh(bh); reiserfs_free_jh(bh); if (buffer_locked(bh)) { spin_unlock(lock); wait_on_buffer(bh); spin_lock(lock); } if (!buffer_uptodate(bh)) { ret = -EIO; } /* ugly interaction with invalidatepage here. * reiserfs_invalidate_page will pin any buffer that has a valid * journal head from an older transaction. If someone else sets * our buffer dirty after we write it in the first loop, and * then someone truncates the page away, nobody will ever write * the buffer. We're safe if we write the page one last time * after freeing the journal header. 
*/ if (buffer_dirty(bh) && unlikely(bh->b_page->mapping == NULL)) { spin_unlock(lock); ll_rw_block(WRITE, 1, &bh); spin_lock(lock); } put_bh(bh); cond_resched_lock(lock); } spin_unlock(lock); return ret; } static int flush_older_commits(struct super_block *s, struct reiserfs_journal_list *jl) { struct reiserfs_journal *journal = SB_JOURNAL(s); struct reiserfs_journal_list *other_jl; struct reiserfs_journal_list *first_jl; struct list_head *entry; unsigned int trans_id = jl->j_trans_id; unsigned int other_trans_id; unsigned int first_trans_id; find_first: /* * first we walk backwards to find the oldest uncommitted transation */ first_jl = jl; entry = jl->j_list.prev; while (1) { other_jl = JOURNAL_LIST_ENTRY(entry); if (entry == &journal->j_journal_list || atomic_read(&other_jl->j_older_commits_done)) break; first_jl = other_jl; entry = other_jl->j_list.prev; } /* if we didn't find any older uncommitted transactions, return now */ if (first_jl == jl) { return 0; } first_trans_id = first_jl->j_trans_id; entry = &first_jl->j_list; while (1) { other_jl = JOURNAL_LIST_ENTRY(entry); other_trans_id = other_jl->j_trans_id; if (other_trans_id < trans_id) { if (atomic_read(&other_jl->j_commit_left) != 0) { flush_commit_list(s, other_jl, 0); /* list we were called with is gone, return */ if (!journal_list_still_alive(s, trans_id)) return 1; /* the one we just flushed is gone, this means all * older lists are also gone, so first_jl is no longer * valid either. Go back to the beginning. 
*/ if (!journal_list_still_alive (s, other_trans_id)) { goto find_first; } } entry = entry->next; if (entry == &journal->j_journal_list) return 0; } else { return 0; } } return 0; } static int reiserfs_async_progress_wait(struct super_block *s) { struct reiserfs_journal *j = SB_JOURNAL(s); if (atomic_read(&j->j_async_throttle)) { reiserfs_write_unlock(s); congestion_wait(BLK_RW_ASYNC, HZ / 10); reiserfs_write_lock(s); } return 0; } /* ** if this journal list still has commit blocks unflushed, send them to disk. ** ** log areas must be flushed in order (transaction 2 can't commit before transaction 1) ** Before the commit block can by written, every other log block must be safely on disk ** */ static int flush_commit_list(struct super_block *s, struct reiserfs_journal_list *jl, int flushall) { int i; b_blocknr_t bn; struct buffer_head *tbh = NULL; unsigned int trans_id = jl->j_trans_id; struct reiserfs_journal *journal = SB_JOURNAL(s); int retval = 0; int write_len; reiserfs_check_lock_depth(s, "flush_commit_list"); if (atomic_read(&jl->j_older_commits_done)) { return 0; } /* before we can put our commit blocks on disk, we have to make sure everyone older than ** us is on disk too */ BUG_ON(jl->j_len <= 0); BUG_ON(trans_id == journal->j_trans_id); get_journal_list(jl); if (flushall) { if (flush_older_commits(s, jl) == 1) { /* list disappeared during flush_older_commits. return */ goto put_jl; } } /* make sure nobody is trying to flush this one at the same time */ reiserfs_mutex_lock_safe(&jl->j_commit_mutex, s); if (!journal_list_still_alive(s, trans_id)) { mutex_unlock(&jl->j_commit_mutex); goto put_jl; } BUG_ON(jl->j_trans_id == 0); /* this commit is done, exit */ if (atomic_read(&(jl->j_commit_left)) <= 0) { if (flushall) { atomic_set(&(jl->j_older_commits_done), 1); } mutex_unlock(&jl->j_commit_mutex); goto put_jl; } if (!list_empty(&jl->j_bh_list)) { int ret; /* * We might sleep in numerous places inside * write_ordered_buffers. Relax the write lock. 
		 */
		reiserfs_write_unlock(s);
		ret = write_ordered_buffers(&journal->j_dirty_buffers_lock,
					    journal, jl, &jl->j_bh_list);
		if (ret < 0 && retval == 0)
			retval = ret;
		reiserfs_write_lock(s);
	}
	BUG_ON(!list_empty(&jl->j_bh_list));
	/*
	 * for the description block and all the log blocks, submit any buffers
	 * that haven't already reached the disk. Try to write at least 256
	 * log blocks. later on, we will only wait on blocks that correspond
	 * to this transaction, but while we're unplugging we might as well
	 * get a chunk of data on there.
	 */
	atomic_inc(&journal->j_async_throttle);
	write_len = jl->j_len + 1;
	if (write_len < 256)
		write_len = 256;
	for (i = 0; i < write_len; i++) {
		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) + (jl->j_start + i) %
		    SB_ONDISK_JOURNAL_SIZE(s);
		tbh = journal_find_get_block(s, bn);
		if (tbh) {
			if (buffer_dirty(tbh)) {
				/* submission may sleep; drop the write lock */
				reiserfs_write_unlock(s);
				ll_rw_block(WRITE, 1, &tbh);
				reiserfs_write_lock(s);
			}
			put_bh(tbh);
		}
	}
	atomic_dec(&journal->j_async_throttle);

	/* wait on the log blocks of this transaction before the commit
	 * block is written */
	for (i = 0; i < (jl->j_len + 1); i++) {
		bn = SB_ONDISK_JOURNAL_1st_BLOCK(s) +
		    (jl->j_start + i) % SB_ONDISK_JOURNAL_SIZE(s);
		tbh = journal_find_get_block(s, bn);

		reiserfs_write_unlock(s);
		wait_on_buffer(tbh);
		reiserfs_write_lock(s);
		/*
		 * since we're using ll_rw_blk above, it might have skipped
		 * over a locked buffer.  Double check here.
		 */
		/* redundant, sync_dirty_buffer() checks */
		if (buffer_dirty(tbh)) {
			reiserfs_write_unlock(s);
			sync_dirty_buffer(tbh);
			reiserfs_write_lock(s);
		}
		if (unlikely(!buffer_uptodate(tbh))) {
#ifdef CONFIG_REISERFS_CHECK
			reiserfs_warning(s, "journal-601",
					 "buffer write failed");
#endif
			retval = -EIO;
		}
		put_bh(tbh);	/* once for journal_find_get_block */
		put_bh(tbh);	/* once due to original getblk in do_journal_end */
		atomic_dec(&(jl->j_commit_left));
	}

	/* only the commit block itself should be left outstanding */
	BUG_ON(atomic_read(&(jl->j_commit_left)) != 1);

	/* If there was a write error in the journal - we can't commit
	 * this transaction - it will be invalid and, if successful,
	 * will just end up propagating the write error out to
	 * the file system. */
	if (likely(!retval && !reiserfs_is_journal_aborted(journal))) {
		if (buffer_dirty(jl->j_commit_bh))
			BUG();
		mark_buffer_dirty(jl->j_commit_bh);
		reiserfs_write_unlock(s);
		if (reiserfs_barrier_flush(s))
			__sync_dirty_buffer(jl->j_commit_bh, WRITE_FLUSH_FUA);
		else
			sync_dirty_buffer(jl->j_commit_bh);
		reiserfs_write_lock(s);
	}

	/* If there was a write error in the journal - we can't commit this
	 * transaction - it will be invalid and, if successful, will just end
	 * up propagating the write error out to the filesystem. */
	if (unlikely(!buffer_uptodate(jl->j_commit_bh))) {
#ifdef CONFIG_REISERFS_CHECK
		reiserfs_warning(s, "journal-615", "buffer write failed");
#endif
		retval = -EIO;
	}
	bforget(jl->j_commit_bh);
	if (journal->j_last_commit_id != 0 &&
	    (jl->j_trans_id - journal->j_last_commit_id) != 1) {
		/* commits must go out in trans_id order; warn on a gap */
		reiserfs_warning(s, "clm-2200", "last commit %lu, current %lu",
				 journal->j_last_commit_id, jl->j_trans_id);
	}
	journal->j_last_commit_id = jl->j_trans_id;

	/* now, every commit block is on the disk.  It is safe to allow
	 * blocks freed during this transaction to be reallocated */
	cleanup_freed_for_journal_list(s, jl);

	/* prefer the I/O error, otherwise propagate any journal error */
	retval = retval ?
	    retval : journal->j_errno;

	/* mark the metadata dirty */
	if (!retval)
		dirty_one_transaction(s, jl);
	atomic_dec(&(jl->j_commit_left));

	if (flushall) {
		atomic_set(&(jl->j_older_commits_done), 1);
	}
	mutex_unlock(&jl->j_commit_mutex);
put_jl:
	put_journal_list(s, jl);

	if (retval)
		reiserfs_abort(s, retval, "Journal write error in %s",
			       __func__);
	return retval;
}

/*
** flush_journal_list frequently needs to find a newer transaction for a
** given block.  This does that, or returns NULL if it can't find anything.
*/
static struct reiserfs_journal_list *find_newer_jl_for_cn(struct
							  reiserfs_journal_cnode
							  *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;

	/* hprev links walk toward newer transactions for the same block */
	cn = cn->hprev;
	while (cn) {
		if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist) {
			return cn->jlist;
		}
		cn = cn->hprev;
	}
	return NULL;
}

/*
 * Returns 0 if some newer transaction that logged this same block has not
 * finished committing (j_commit_left != 0), 1 otherwise.
 */
static int newer_jl_done(struct reiserfs_journal_cnode *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;

	cn = cn->hprev;
	while (cn) {
		if (cn->sb == sb && cn->blocknr == blocknr && cn->jlist &&
		    atomic_read(&cn->jlist->j_commit_left) != 0)
			return 0;
		cn = cn->hprev;
	}
	return 1;
}

static void remove_journal_hash(struct super_block *,
				struct reiserfs_journal_cnode **,
				struct reiserfs_journal_list *, unsigned long,
				int);

/*
** once all the real blocks have been flushed, it is safe to remove them
** from the journal list for this transaction.  Aside from freeing the
** cnode, this also allows the block to be reallocated for data blocks if
** it had been deleted.
*/
static void remove_all_from_journal_list(struct super_block *sb,
					 struct reiserfs_journal_list *jl,
					 int debug)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_cnode *cn, *last;
	cn = jl->j_realblock;

	/* which is better, to lock once around the whole loop, or
	 ** to lock for each call to remove_journal_hash?
	 */
	while (cn) {
		if (cn->blocknr != 0) {
			if (debug) {
				reiserfs_warning(sb, "reiserfs-2201",
						 "block %u, bh is %d, state %ld",
						 cn->blocknr, cn->bh ?
						 1 : 0, cn->state);
			}
			cn->state = 0;
			remove_journal_hash(sb, journal->j_list_hash_table,
					    jl, cn->blocknr, 1);
		}
		last = cn;
		cn = cn->next;
		free_cnode(sb, last);
	}
	jl->j_realblock = NULL;
}

/*
** if this timestamp is greater than the timestamp we wrote last to the
** header block, write it to the header block.  once this is done, I can
** safely say the log area for this transaction won't ever be replayed, and
** I can start releasing blocks in this transaction for reuse as data
** blocks.  called by flush_journal_list, before it calls
** remove_all_from_journal_list
*/
static int _update_journal_header_block(struct super_block *sb,
					unsigned long offset,
					unsigned int trans_id)
{
	struct reiserfs_journal_header *jh;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	if (reiserfs_is_journal_aborted(journal))
		return -EIO;

	if (trans_id >= journal->j_last_flush_trans_id) {
		if (buffer_locked((journal->j_header_bh))) {
			/* wait for any in-flight header write first */
			reiserfs_write_unlock(sb);
			wait_on_buffer((journal->j_header_bh));
			reiserfs_write_lock(sb);
			if (unlikely(!buffer_uptodate(journal->j_header_bh))) {
#ifdef CONFIG_REISERFS_CHECK
				reiserfs_warning(sb, "journal-699",
						 "buffer write failed");
#endif
				return -EIO;
			}
		}
		journal->j_last_flush_trans_id = trans_id;
		journal->j_first_unflushed_offset = offset;
		jh = (struct reiserfs_journal_header *)(journal->j_header_bh->
							b_data);
		/* on-disk header fields are little-endian */
		jh->j_last_flush_trans_id = cpu_to_le32(trans_id);
		jh->j_first_unflushed_offset = cpu_to_le32(offset);
		jh->j_mount_id = cpu_to_le32(journal->j_mount_id);

		set_buffer_dirty(journal->j_header_bh);
		reiserfs_write_unlock(sb);

		/* use a FUA barrier write when the device supports it */
		if (reiserfs_barrier_flush(sb))
			__sync_dirty_buffer(journal->j_header_bh,
					    WRITE_FLUSH_FUA);
		else
			sync_dirty_buffer(journal->j_header_bh);

		reiserfs_write_lock(sb);
		if (!buffer_uptodate(journal->j_header_bh)) {
			reiserfs_warning(sb, "journal-837",
					 "IO error during journal replay");
			return -EIO;
		}
	}
	return 0;
}

static int update_journal_header_block(struct super_block *sb,
				       unsigned long offset,
				       unsigned int trans_id)
{
	return _update_journal_header_block(sb, offset, trans_id);
}

/*
** flush any and all journal lists older than you are
** can only be called from flush_journal_list
*/
static int flush_older_journal_lists(struct super_block *sb,
				     struct reiserfs_journal_list *jl)
{
	struct list_head *entry;
	struct reiserfs_journal_list *other_jl;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	unsigned int trans_id = jl->j_trans_id;

	/* we know we are the only ones flushing things, no extra race
	 * protection is required.
	 */
restart:
	entry = journal->j_journal_list.next;
	/* Did we wrap? */
	if (entry == &journal->j_journal_list)
		return 0;
	other_jl = JOURNAL_LIST_ENTRY(entry);
	if (other_jl->j_trans_id < trans_id) {
		BUG_ON(other_jl->j_refcount <= 0);
		/* do not flush all */
		flush_journal_list(sb, other_jl, 0);

		/* other_jl is now deleted from the list */
		goto restart;
	}
	return 0;
}

/* take jl off the kupdate work list, keeping the bookkeeping count in sync */
static void del_from_work_list(struct super_block *s,
			       struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	if (!list_empty(&jl->j_working_list)) {
		list_del_init(&jl->j_working_list);
		journal->j_num_work_lists--;
	}
}

/* flush a journal list, both commit and real blocks
**
** always set flushall to 1, unless you are calling from inside
** flush_journal_list
**
** IMPORTANT.  This can only be called while there are no journal writers,
** and the journal is locked.
** That means it can only be called from
** do_journal_end, or by journal_release
*/
static int flush_journal_list(struct super_block *s,
			      struct reiserfs_journal_list *jl, int flushall)
{
	struct reiserfs_journal_list *pjl;
	struct reiserfs_journal_cnode *cn, *last;
	int count;
	int was_jwait = 0;
	int was_dirty = 0;
	struct buffer_head *saved_bh;
	unsigned long j_len_saved = jl->j_len;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	int err = 0;

	BUG_ON(j_len_saved <= 0);

	if (atomic_read(&journal->j_wcount) != 0) {
		reiserfs_warning(s, "clm-2048", "called with wcount %d",
				 atomic_read(&journal->j_wcount));
	}
	BUG_ON(jl->j_trans_id == 0);

	/* if flushall == 0, the lock is already held */
	if (flushall) {
		reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
	} else if (mutex_trylock(&journal->j_flush_mutex)) {
		/* trylock succeeding means the caller lied about holding it */
		BUG();
	}

	count = 0;
	if (j_len_saved > journal->j_trans_max) {
		reiserfs_panic(s, "journal-715", "length is %lu, trans id %lu",
			       j_len_saved, jl->j_trans_id);
		return 0;
	}

	/* if all the work is already done, get out of here */
	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
	    atomic_read(&(jl->j_commit_left)) <= 0) {
		goto flush_older_and_return;
	}

	/* start by putting the commit list on disk.  This will also flush
	 ** the commit lists of any older transactions
	 */
	flush_commit_list(s, jl, 1);

	if (!(jl->j_state & LIST_DIRTY)
	    && !reiserfs_is_journal_aborted(journal))
		BUG();

	/* are we done now? */
	if (atomic_read(&(jl->j_nonzerolen)) <= 0 &&
	    atomic_read(&(jl->j_commit_left)) <= 0) {
		goto flush_older_and_return;
	}

	/* loop through each cnode, see if we need to write it,
	 ** or wait on a more recent transaction, or just ignore it
	 */
	if (atomic_read(&(journal->j_wcount)) != 0) {
		reiserfs_panic(s, "journal-844", "journal list is flushing, "
			       "wcount is not 0");
	}
	cn = jl->j_realblock;
	while (cn) {
		was_jwait = 0;
		was_dirty = 0;
		saved_bh = NULL;
		/* blocknr of 0 is no longer in the hash, ignore it */
		if (cn->blocknr == 0) {
			goto free_cnode;
		}

		/* This transaction failed commit.
		 * Don't write out to the disk */
		if (!(jl->j_state & LIST_DIRTY))
			goto free_cnode;

		pjl = find_newer_jl_for_cn(cn);
		/* the order is important here.  We check pjl to make sure we
		 ** don't clear BH_JDirty_wait if we aren't the one writing
		 ** this block to disk
		 */
		if (!pjl && cn->bh) {
			saved_bh = cn->bh;

			/* we do this to make sure nobody releases the buffer
			 ** while we are working with it
			 */
			get_bh(saved_bh);

			if (buffer_journal_dirty(saved_bh)) {
				BUG_ON(!can_dirty(cn));
				was_jwait = 1;
				was_dirty = 1;
			} else if (can_dirty(cn)) {
				/* everything with !pjl && jwait
				 * should be writable */
				BUG();
			}
		}

		/* if someone has this block in a newer transaction, just make
		 ** sure they are committed, and don't try writing it to disk
		 */
		if (pjl) {
			if (atomic_read(&pjl->j_commit_left))
				flush_commit_list(s, pjl, 1);
			goto free_cnode;
		}

		/* bh == NULL when the block got to disk on its own, OR,
		 ** the block got freed in a future transaction
		 */
		if (saved_bh == NULL) {
			goto free_cnode;
		}

		/* this should never happen.  kupdate_one_transaction has this
		 ** list locked while it works, so we should never see a
		 ** buffer here that is not marked JDirty_wait
		 */
		if ((!was_jwait) && !buffer_locked(saved_bh)) {
			reiserfs_warning(s, "journal-813",
					 "BAD! buffer %llu %cdirty %cjwait, "
					 "not in a newer tranasction",
					 (unsigned long long)saved_bh->
					 b_blocknr, was_dirty ? ' ' : '!',
					 was_jwait ? ' ' : '!');
		}
		if (was_dirty) {
			/* we inc again because saved_bh gets decremented at
			 * free_cnode */
			get_bh(saved_bh);
			set_bit(BLOCK_NEEDS_FLUSH, &cn->state);
			lock_buffer(saved_bh);
			BUG_ON(cn->blocknr != saved_bh->b_blocknr);
			if (buffer_dirty(saved_bh))
				submit_logged_buffer(saved_bh);
			else
				unlock_buffer(saved_bh);
			count++;
		} else {
			reiserfs_warning(s, "clm-2082",
					 "Unable to flush buffer %llu in %s",
					 (unsigned long long)saved_bh->
					 b_blocknr, __func__);
		}
free_cnode:
		last = cn;
		cn = cn->next;
		if (saved_bh) {
			/* we incremented this to keep others from taking the
			 * buffer head away */
			put_bh(saved_bh);
			if (atomic_read(&(saved_bh->b_count)) < 0) {
				reiserfs_warning(s, "journal-945",
						 "saved_bh->b_count < 0");
			}
		}
	}
	/* second pass: wait for every buffer submitted above */
	if (count > 0) {
		cn = jl->j_realblock;
		while (cn) {
			if (test_bit(BLOCK_NEEDS_FLUSH, &cn->state)) {
				if (!cn->bh) {
					reiserfs_panic(s, "journal-1011",
						       "cn->bh is NULL");
				}

				reiserfs_write_unlock(s);
				wait_on_buffer(cn->bh);
				reiserfs_write_lock(s);

				if (!cn->bh) {
					reiserfs_panic(s, "journal-1012",
						       "cn->bh is NULL");
				}
				if (unlikely(!buffer_uptodate(cn->bh))) {
#ifdef CONFIG_REISERFS_CHECK
					reiserfs_warning(s, "journal-949",
							 "buffer write failed");
#endif
					err = -EIO;
				}
				/* note, we must clear the JDirty_wait bit
				 ** after the up to date check, otherwise we
				 ** race against our flushpage routine
				 */
				BUG_ON(!test_clear_buffer_journal_dirty
				       (cn->bh));

				/* drop one ref for us */
				put_bh(cn->bh);
				/* drop one ref for journal_mark_dirty */
				release_buffer_page(cn->bh);
			}
			cn = cn->next;
		}
	}

	if (err)
		reiserfs_abort(s, -EIO,
			       "Write error while pushing transaction to disk in %s",
			       __func__);
flush_older_and_return:

	/* before we can update the journal header block, we _must_ flush all
	 ** real blocks from all older transactions to disk.  This is because
	 ** once the header block is updated, this transaction will not be
	 ** replayed after a crash
	 */
	if (flushall) {
		flush_older_journal_lists(s, jl);
	}

	err = journal->j_errno;
	/* before we can remove everything from the hash tables for this
	 ** transaction, we must make sure it can never be replayed
	 **
	 ** since we are only called from do_journal_end, we know for sure
	 ** there are no allocations going on while we are flushing journal
	 ** lists.  So, we only need to update the journal header block for
	 ** the last list being flushed
	 */
	if (!err && flushall) {
		err =
		    update_journal_header_block(s,
						(jl->j_start + jl->j_len +
						 2) % SB_ONDISK_JOURNAL_SIZE(s),
						jl->j_trans_id);
		if (err)
			reiserfs_abort(s, -EIO,
				       "Write error while updating journal header in %s",
				       __func__);
	}
	remove_all_from_journal_list(s, jl, 0);
	list_del_init(&jl->j_list);
	journal->j_num_lists--;
	del_from_work_list(s, jl);

	if (journal->j_last_flush_id != 0 &&
	    (jl->j_trans_id - journal->j_last_flush_id) != 1) {
		/* flushes should also proceed in trans_id order */
		reiserfs_warning(s, "clm-2201", "last flush %lu, current %lu",
				 journal->j_last_flush_id, jl->j_trans_id);
	}
	journal->j_last_flush_id = jl->j_trans_id;

	/* not strictly required since we are freeing the list, but it should
	 * help find code using dead lists later on
	 */
	jl->j_len = 0;
	atomic_set(&(jl->j_nonzerolen), 0);
	jl->j_start = 0;
	jl->j_realblock = NULL;
	jl->j_commit_bh = NULL;
	jl->j_trans_id = 0;
	jl->j_state = 0;
	put_journal_list(s, jl);
	if (flushall)
		mutex_unlock(&journal->j_flush_mutex);
	return err;
}

/*
 * Returns 1 when the list is trivially done (empty / nothing logged),
 * 0 when some cnode is still held by a newer uncommitted transaction.
 *
 * NOTE(review): the fall-through at the bottom returns 0 even when every
 * cnode passed the newer_jl_done() check — confirm whether that final
 * return was intended to be 1 before relying on this as a "done" test.
 */
static int test_transaction(struct super_block *s,
			    struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal_cnode *cn;

	if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0)
		return 1;

	cn = jl->j_realblock;
	while (cn) {
		/* if the blocknr == 0, this has been cleared from the hash,
		 ** skip it
		 */
		if (cn->blocknr == 0) {
			goto next;
		}
		if (cn->bh && !newer_jl_done(cn))
			return 0;
next:
		cn = cn->next;
		cond_resched();
	}
	return 0;
}

static int write_one_transaction(struct super_block *s, struct
				 reiserfs_journal_list *jl,
				 struct buffer_chunk *chunk)
{
	struct reiserfs_journal_cnode *cn;
	int ret = 0;		/* number of buffers queued into chunk */

	jl->j_state |= LIST_TOUCHED;
	del_from_work_list(s, jl);
	if (jl->j_len == 0 || atomic_read(&jl->j_nonzerolen) == 0) {
		return 0;
	}

	cn = jl->j_realblock;
	while (cn) {
		/* if the blocknr == 0, this has been cleared from the hash,
		 ** skip it
		 */
		if (cn->blocknr == 0) {
			goto next;
		}
		if (cn->bh && can_dirty(cn) && buffer_dirty(cn->bh)) {
			struct buffer_head *tmp_bh;
			/* we can race against journal_mark_freed when we try
			 * to lock_buffer(cn->bh), so we have to inc the buffer
			 * count, and recheck things after locking
			 */
			tmp_bh = cn->bh;
			get_bh(tmp_bh);
			lock_buffer(tmp_bh);
			if (cn->bh && can_dirty(cn) && buffer_dirty(tmp_bh)) {
				if (!buffer_journal_dirty(tmp_bh) ||
				    buffer_journal_prepared(tmp_bh))
					BUG();
				add_to_chunk(chunk, tmp_bh, NULL, write_chunk);
				ret++;
			} else {
				/* note, cn->bh might be null now */
				unlock_buffer(tmp_bh);
			}
			put_bh(tmp_bh);
		}
next:
		cn = cn->next;
		cond_resched();
	}
	return ret;
}

/* used by flush_commit_list */
static int dirty_one_transaction(struct super_block *s,
				 struct reiserfs_journal_list *jl)
{
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal_list *pjl;
	int ret = 0;

	jl->j_state |= LIST_DIRTY;
	cn = jl->j_realblock;
	while (cn) {
		/* look for a more recent transaction that logged this
		 ** buffer.  Only the most recent transaction with a buffer in
		 ** it is allowed to send that buffer to disk
		 */
		pjl = find_newer_jl_for_cn(cn);
		if (!pjl && cn->blocknr && cn->bh
		    && buffer_journal_dirty(cn->bh)) {
			BUG_ON(!can_dirty(cn));
			/* if the buffer is prepared, it will either be logged
			 * or restored.  If restored, we need to make sure
			 * it actually gets marked dirty
			 */
			clear_buffer_journal_new(cn->bh);
			if (buffer_journal_prepared(cn->bh)) {
				set_buffer_journal_restore_dirty(cn->bh);
			} else {
				set_buffer_journal_test(cn->bh);
				mark_buffer_dirty(cn->bh);
			}
		}
		cn = cn->next;
	}
	return ret;
}

/*
 * Write back dirty real blocks from jl and following lists, stopping after
 * num_trans transactions (when num_trans != 0) or after num_blocks buffers
 * have been queued.  Holds j_flush_mutex for the duration.  Returns the
 * last write_one_transaction() result (< 0 on error).
 */
static int kupdate_transactions(struct super_block *s,
				struct reiserfs_journal_list *jl,
				struct reiserfs_journal_list **next_jl,
				unsigned int *next_trans_id,
				int num_blocks, int num_trans)
{
	int ret = 0;
	int written = 0;
	int transactions_flushed = 0;
	unsigned int orig_trans_id = jl->j_trans_id;
	struct buffer_chunk chunk;
	struct list_head *entry;
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	chunk.nr = 0;

	reiserfs_mutex_lock_safe(&journal->j_flush_mutex, s);
	if (!journal_list_still_alive(s, orig_trans_id)) {
		goto done;
	}

	/* we've got j_flush_mutex held, nobody is going to delete any
	 * of these lists out from underneath us
	 */
	while ((num_trans && transactions_flushed < num_trans) ||
	       (!num_trans && written < num_blocks)) {

		if (jl->j_len == 0 || (jl->j_state & LIST_TOUCHED) ||
		    atomic_read(&jl->j_commit_left) ||
		    !(jl->j_state & LIST_DIRTY)) {
			del_from_work_list(s, jl);
			break;
		}
		ret = write_one_transaction(s, jl, &chunk);

		if (ret < 0)
			goto done;
		transactions_flushed++;
		written += ret;
		entry = jl->j_list.next;

		/* did we wrap? */
		if (entry == &journal->j_journal_list) {
			break;
		}
		jl = JOURNAL_LIST_ENTRY(entry);

		/* don't bother with older transactions */
		if (jl->j_trans_id <= orig_trans_id)
			break;
	}
	/* submit whatever is still batched in the chunk */
	if (chunk.nr) {
		write_chunk(&chunk);
	}

done:
	mutex_unlock(&journal->j_flush_mutex);
	return ret;
}

/* for o_sync and fsync heavy applications, they tend to use
** all the journal list slots with tiny transactions.  These
** trigger lots and lots of calls to update the header block, which
** adds seeks and slows things down.
**
** This function tries to clear out a large chunk of the journal lists
** at once, which makes everything faster since only the newest journal
** list updates the header block
*/
static int flush_used_journal_lists(struct super_block *s,
				    struct reiserfs_journal_list *jl)
{
	unsigned long len = 0;
	unsigned long cur_len;
	int ret;
	int i;
	int limit = 256;
	struct reiserfs_journal_list *tjl;
	struct reiserfs_journal_list *flush_jl;
	unsigned int trans_id;
	struct reiserfs_journal *journal = SB_JOURNAL(s);

	flush_jl = tjl = jl;

	/* in data logging mode, try harder to flush a lot of blocks */
	if (reiserfs_data_log(s))
		limit = 1024;
	/* flush for 256 transactions or limit blocks, whichever comes first */
	for (i = 0; i < 256 && len < limit; i++) {
		/* give up on any list that is mid-commit or older than jl */
		if (atomic_read(&tjl->j_commit_left) ||
		    tjl->j_trans_id < jl->j_trans_id) {
			break;
		}
		cur_len = atomic_read(&tjl->j_nonzerolen);
		if (cur_len > 0) {
			tjl->j_state &= ~LIST_TOUCHED;
		}
		len += cur_len;
		flush_jl = tjl;
		if (tjl->j_list.next == &journal->j_journal_list)
			break;
		tjl = JOURNAL_LIST_ENTRY(tjl->j_list.next);
	}
	/* try to find a group of blocks we can flush across all the
	 ** transactions, but only bother if we've actually spanned
	 ** across multiple lists
	 */
	if (flush_jl != jl) {
		/* NOTE(review): ret is assigned but never examined;
		 * kupdate_transactions() failures are silently ignored
		 * here — confirm whether that is intentional.
		 */
		ret = kupdate_transactions(s, jl, &tjl, &trans_id, len, i);
	}
	flush_journal_list(s, flush_jl, 1);
	return 0;
}

/*
** removes any nodes in table with name block and dev as bh.
** only touches the hnext and hprev pointers.
*/
void remove_journal_hash(struct super_block *sb,
			 struct reiserfs_journal_cnode **table,
			 struct reiserfs_journal_list *jl,
			 unsigned long block, int remove_freed)
{
	struct reiserfs_journal_cnode *cur;
	struct reiserfs_journal_cnode **head;

	head = &(journal_hash(table, sb, block));
	if (!head) {
		return;
	}
	cur = *head;
	while (cur) {
		/* jl == NULL matches any list; BLOCK_FREED entries are only
		 * removed when remove_freed is set */
		if (cur->blocknr == block && cur->sb == sb
		    && (jl == NULL || jl == cur->jlist)
		    && (!test_bit(BLOCK_FREED, &cur->state) || remove_freed)) {
			if (cur->hnext) {
				cur->hnext->hprev = cur->hprev;
			}
			if (cur->hprev) {
				cur->hprev->hnext = cur->hnext;
			} else {
				*head = cur->hnext;
			}
			cur->blocknr = 0;
			cur->sb = NULL;
			cur->state = 0;
			if (cur->bh && cur->jlist)
				/* anybody who clears the cur->bh will also
				 * dec the nonzerolen */
				atomic_dec(&(cur->jlist->j_nonzerolen));
			cur->bh = NULL;
			cur->jlist = NULL;
		}
		cur = cur->hnext;
	}
}

/* free all journal memory at unmount time */
static void free_journal_ram(struct super_block *sb)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	kfree(journal->j_current_jl);
	journal->j_num_lists--;

	vfree(journal->j_cnode_free_orig);
	free_list_bitmaps(sb, journal->j_list_bitmap);
	free_bitmap_nodes(sb);	/* must be after free_list_bitmaps */
	if (journal->j_header_bh) {
		brelse(journal->j_header_bh);
	}
	/* j_header_bh is on the journal dev, make sure not to release the
	 * journal dev until we brelse j_header_bh
	 */
	release_journal_dev(sb, journal);
	vfree(journal);
}

/*
** call on unmount.  Only set error to 1 if you haven't made your way out
** of read_super() yet.  Any other caller must keep error at 0.
*/
static int do_journal_release(struct reiserfs_transaction_handle *th,
			      struct super_block *sb, int error)
{
	struct reiserfs_transaction_handle myth;
	int flushed = 0;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	/* we only want to flush out transactions if we were called with
	 * error == 0 */
	if (!error && !(sb->s_flags & MS_RDONLY)) {
		/* end the current trans */
		BUG_ON(!th->t_trans_id);
		do_journal_end(th, sb, 10, FLUSH_ALL);

		/* make sure something gets logged to force our way into the
		 * flush code */
		if (!journal_join(&myth, sb, 1)) {
			reiserfs_prepare_for_journal(sb,
						     SB_BUFFER_WITH_SB(sb),
						     1);
			journal_mark_dirty(&myth, sb, SB_BUFFER_WITH_SB(sb));
			do_journal_end(&myth, sb, 1, FLUSH_ALL);
			flushed = 1;
		}
	}

	/* this also catches errors during the do_journal_end above */
	if (!error && reiserfs_is_journal_aborted(journal)) {
		memset(&myth, 0, sizeof(myth));
		if (!journal_join_abort(&myth, sb, 1)) {
			reiserfs_prepare_for_journal(sb,
						     SB_BUFFER_WITH_SB(sb),
						     1);
			journal_mark_dirty(&myth, sb, SB_BUFFER_WITH_SB(sb));
			do_journal_end(&myth, sb, 1, FLUSH_ALL);
		}
	}

	reiserfs_mounted_fs_count--;
	/* wait for all commits to finish */
	cancel_delayed_work(&SB_JOURNAL(sb)->j_work);

	/*
	 * We must release the write lock here because
	 * the workqueue job (flush_async_commit) needs this lock
	 */
	reiserfs_write_unlock(sb);

	cancel_delayed_work_sync(&REISERFS_SB(sb)->old_work);
	flush_workqueue(commit_wq);

	/* last filesystem using the shared commit workqueue tears it down */
	if (!reiserfs_mounted_fs_count) {
		destroy_workqueue(commit_wq);
		commit_wq = NULL;
	}

	free_journal_ram(sb);

	reiserfs_write_lock(sb);

	return 0;
}

/*
** call on unmount.  flush all journal trans, release all alloc'd ram
*/
int journal_release(struct reiserfs_transaction_handle *th,
		    struct super_block *sb)
{
	return do_journal_release(th, sb, 0);
}

/*
** only call from an error condition inside reiserfs_read_super!
*/
int journal_release_error(struct reiserfs_transaction_handle *th,
			  struct super_block *sb)
{
	return do_journal_release(th, sb, 1);
}

/* compares description block with commit block.
 * returns 1 if they differ, 0 if they are the same */
static int journal_compare_desc_commit(struct super_block *sb,
				       struct reiserfs_journal_desc *desc,
				       struct reiserfs_journal_commit *commit)
{
	if (get_commit_trans_id(commit) != get_desc_trans_id(desc) ||
	    get_commit_trans_len(commit) != get_desc_trans_len(desc) ||
	    get_commit_trans_len(commit) > SB_JOURNAL(sb)->j_trans_max ||
	    get_commit_trans_len(commit) <= 0) {
		return 1;
	}
	return 0;
}

/* returns 0 if it did not find a description block
** returns -1 if it found a corrupt commit block
** returns 1 if both desc and commit were valid
*/
static int journal_transaction_is_valid(struct super_block *sb,
					struct buffer_head *d_bh,
					unsigned int *oldest_invalid_trans_id,
					unsigned long *newest_mount_id)
{
	struct reiserfs_journal_desc *desc;
	struct reiserfs_journal_commit *commit;
	struct buffer_head *c_bh;
	unsigned long offset;

	if (!d_bh)
		return 0;

	desc = (struct reiserfs_journal_desc *)d_bh->b_data;
	if (get_desc_trans_len(desc) > 0
	    && !memcmp(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8)) {
		if (oldest_invalid_trans_id && *oldest_invalid_trans_id
		    && get_desc_trans_id(desc) > *oldest_invalid_trans_id) {
			reiserfs_debug(sb, REISERFS_DEBUG_CODE,
				       "journal-986: transaction "
				       "is valid returning because trans_id %d is greater than "
				       "oldest_invalid %lu",
				       get_desc_trans_id(desc),
				       *oldest_invalid_trans_id);
			return 0;
		}
		if (newest_mount_id
		    && *newest_mount_id > get_desc_mount_id(desc)) {
			reiserfs_debug(sb, REISERFS_DEBUG_CODE,
				       "journal-1087: transaction "
				       "is valid returning because mount_id %d is less than "
				       "newest_mount_id %lu",
				       get_desc_mount_id(desc),
				       *newest_mount_id);
			return -1;
		}
		if (get_desc_trans_len(desc) > SB_JOURNAL(sb)->j_trans_max) {
			reiserfs_warning(sb, "journal-2018",
					 "Bad transaction length %d "
					 "encountered, ignoring transaction",
					 get_desc_trans_len(desc));
			return -1;
		}
		offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb);

		/* ok, we have a journal description block, lets see if the
		 * transaction was valid */
		c_bh =
		    journal_bread(sb,
				  SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
				  ((offset + get_desc_trans_len(desc) +
				    1) % SB_ONDISK_JOURNAL_SIZE(sb)));
		if (!c_bh)
			return 0;
		commit = (struct reiserfs_journal_commit *)c_bh->b_data;
		if (journal_compare_desc_commit(sb, desc, commit)) {
			reiserfs_debug(sb, REISERFS_DEBUG_CODE,
				       "journal_transaction_is_valid, commit offset %ld had bad "
				       "time %d or length %d",
				       c_bh->b_blocknr -
				       SB_ONDISK_JOURNAL_1st_BLOCK(sb),
				       get_commit_trans_id(commit),
				       get_commit_trans_len(commit));
			brelse(c_bh);
			if (oldest_invalid_trans_id) {
				*oldest_invalid_trans_id =
				    get_desc_trans_id(desc);
				reiserfs_debug(sb, REISERFS_DEBUG_CODE,
					       "journal-1004: "
					       "transaction_is_valid setting oldest invalid trans_id "
					       "to %d",
					       get_desc_trans_id(desc));
			}
			return -1;
		}
		brelse(c_bh);
		reiserfs_debug(sb, REISERFS_DEBUG_CODE,
			       "journal-1006: found valid "
			       "transaction start offset %llu, len %d id %d",
			       d_bh->b_blocknr -
			       SB_ONDISK_JOURNAL_1st_BLOCK(sb),
			       get_desc_trans_len(desc),
			       get_desc_trans_id(desc));
		return 1;
	} else {
		return 0;
	}
}

/* release num buffer heads; tolerates the array holding NULLs is NOT
 * needed — brelse(NULL) is a no-op in the buffer cache */
static void brelse_array(struct buffer_head **heads, int num)
{
	int i;
	for (i = 0; i < num; i++) {
		brelse(heads[i]);
	}
}

/*
** given the start, and values for the oldest acceptable transactions,
** this either reads in and replays a transaction, or returns because the
** transaction is invalid, or too old.
*/ static int journal_read_transaction(struct super_block *sb, unsigned long cur_dblock, unsigned long oldest_start, unsigned int oldest_trans_id, unsigned long newest_mount_id) { struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_journal_desc *desc; struct reiserfs_journal_commit *commit; unsigned int trans_id = 0; struct buffer_head *c_bh; struct buffer_head *d_bh; struct buffer_head **log_blocks = NULL; struct buffer_head **real_blocks = NULL; unsigned int trans_offset; int i; int trans_half; d_bh = journal_bread(sb, cur_dblock); if (!d_bh) return 1; desc = (struct reiserfs_journal_desc *)d_bh->b_data; trans_offset = d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb); reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1037: " "journal_read_transaction, offset %llu, len %d mount_id %d", d_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb), get_desc_trans_len(desc), get_desc_mount_id(desc)); if (get_desc_trans_id(desc) < oldest_trans_id) { reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1039: " "journal_read_trans skipping because %lu is too old", cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb)); brelse(d_bh); return 1; } if (get_desc_mount_id(desc) != newest_mount_id) { reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1146: " "journal_read_trans skipping because %d is != " "newest_mount_id %lu", get_desc_mount_id(desc), newest_mount_id); brelse(d_bh); return 1; } c_bh = journal_bread(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + ((trans_offset + get_desc_trans_len(desc) + 1) % SB_ONDISK_JOURNAL_SIZE(sb))); if (!c_bh) { brelse(d_bh); return 1; } commit = (struct reiserfs_journal_commit *)c_bh->b_data; if (journal_compare_desc_commit(sb, desc, commit)) { reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal_read_transaction, " "commit offset %llu had bad time %d or length %d", c_bh->b_blocknr - SB_ONDISK_JOURNAL_1st_BLOCK(sb), get_commit_trans_id(commit), get_commit_trans_len(commit)); brelse(c_bh); brelse(d_bh); return 1; } if (bdev_read_only(sb->s_bdev)) { 
reiserfs_warning(sb, "clm-2076", "device is readonly, unable to replay log"); brelse(c_bh); brelse(d_bh); return -EROFS; } trans_id = get_desc_trans_id(desc); /* now we know we've got a good transaction, and it was inside the valid time ranges */ log_blocks = kmalloc(get_desc_trans_len(desc) * sizeof(struct buffer_head *), GFP_NOFS); real_blocks = kmalloc(get_desc_trans_len(desc) * sizeof(struct buffer_head *), GFP_NOFS); if (!log_blocks || !real_blocks) { brelse(c_bh); brelse(d_bh); kfree(log_blocks); kfree(real_blocks); reiserfs_warning(sb, "journal-1169", "kmalloc failed, unable to mount FS"); return -1; } /* get all the buffer heads */ trans_half = journal_trans_half(sb->s_blocksize); for (i = 0; i < get_desc_trans_len(desc); i++) { log_blocks[i] = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + (trans_offset + 1 + i) % SB_ONDISK_JOURNAL_SIZE(sb)); if (i < trans_half) { real_blocks[i] = sb_getblk(sb, le32_to_cpu(desc->j_realblock[i])); } else { real_blocks[i] = sb_getblk(sb, le32_to_cpu(commit-> j_realblock[i - trans_half])); } if (real_blocks[i]->b_blocknr > SB_BLOCK_COUNT(sb)) { reiserfs_warning(sb, "journal-1207", "REPLAY FAILURE fsck required! " "Block to replay is outside of " "filesystem"); goto abort_replay; } /* make sure we don't try to replay onto log or reserved area */ if (is_block_in_log_or_reserved_area (sb, real_blocks[i]->b_blocknr)) { reiserfs_warning(sb, "journal-1204", "REPLAY FAILURE fsck required! 
" "Trying to replay onto a log block"); abort_replay: brelse_array(log_blocks, i); brelse_array(real_blocks, i); brelse(c_bh); brelse(d_bh); kfree(log_blocks); kfree(real_blocks); return -1; } } /* read in the log blocks, memcpy to the corresponding real block */ ll_rw_block(READ, get_desc_trans_len(desc), log_blocks); for (i = 0; i < get_desc_trans_len(desc); i++) { reiserfs_write_unlock(sb); wait_on_buffer(log_blocks[i]); reiserfs_write_lock(sb); if (!buffer_uptodate(log_blocks[i])) { reiserfs_warning(sb, "journal-1212", "REPLAY FAILURE fsck required! " "buffer write failed"); brelse_array(log_blocks + i, get_desc_trans_len(desc) - i); brelse_array(real_blocks, get_desc_trans_len(desc)); brelse(c_bh); brelse(d_bh); kfree(log_blocks); kfree(real_blocks); return -1; } memcpy(real_blocks[i]->b_data, log_blocks[i]->b_data, real_blocks[i]->b_size); set_buffer_uptodate(real_blocks[i]); brelse(log_blocks[i]); } /* flush out the real blocks */ for (i = 0; i < get_desc_trans_len(desc); i++) { set_buffer_dirty(real_blocks[i]); write_dirty_buffer(real_blocks[i], WRITE); } for (i = 0; i < get_desc_trans_len(desc); i++) { wait_on_buffer(real_blocks[i]); if (!buffer_uptodate(real_blocks[i])) { reiserfs_warning(sb, "journal-1226", "REPLAY FAILURE, fsck required! " "buffer write failed"); brelse_array(real_blocks + i, get_desc_trans_len(desc) - i); brelse(c_bh); brelse(d_bh); kfree(log_blocks); kfree(real_blocks); return -1; } brelse(real_blocks[i]); } cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(sb) + ((trans_offset + get_desc_trans_len(desc) + 2) % SB_ONDISK_JOURNAL_SIZE(sb)); reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1095: setting journal " "start to offset %ld", cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb)); /* init starting values for the first transaction, in case this is the last transaction to be replayed. 
*/
	journal->j_start = cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb);
	journal->j_last_flush_trans_id = trans_id;
	journal->j_trans_id = trans_id + 1;
	/* check for trans_id overflow */
	if (journal->j_trans_id == 0)
		journal->j_trans_id = 10;
	brelse(c_bh);
	brelse(d_bh);
	kfree(log_blocks);
	kfree(real_blocks);
	return 0;
}

/* This function reads blocks starting from block and to max_block of bufsize
   size (but no more than BUFNR blocks at a time). This proved to improve
   mounting speed on self-rebuilding raid5 arrays at least.
   Right now it is only used from journal code. But later we might use it
   from other places.
   Note: Do not use journal_getblk/sb_getblk functions here!
   Returns the (uptodate) buffer head for 'block' with an elevated
   reference count, or NULL if the read failed.  The read-ahead buffers
   are released before returning; only bhlist[0] is handed to the caller. */
static struct buffer_head *reiserfs_breada(struct block_device *dev,
					   b_blocknr_t block, int bufsize,
					   b_blocknr_t max_block)
{
	struct buffer_head *bhlist[BUFNR];
	unsigned int blocks = BUFNR;
	struct buffer_head *bh;
	int i, j;

	/* if the requested block is already valid, no read-ahead needed */
	bh = __getblk(dev, block, bufsize);
	if (buffer_uptodate(bh))
		return (bh);

	/* clamp the read-ahead window so it does not run past max_block */
	if (block + BUFNR > max_block) {
		blocks = max_block - block;
	}
	bhlist[0] = bh;
	j = 1;
	/* gather the run of consecutive not-uptodate buffers; stop at the
	   first one that is already valid */
	for (i = 1; i < blocks; i++) {
		bh = __getblk(dev, block + i, bufsize);
		if (buffer_uptodate(bh)) {
			brelse(bh);
			break;
		} else
			bhlist[j++] = bh;
	}
	/* submit the whole run as one batched read */
	ll_rw_block(READ, j, bhlist);
	/* drop the read-ahead references; I/O completion keeps its own */
	for (i = 1; i < j; i++)
		brelse(bhlist[i]);
	bh = bhlist[0];
	/* only wait for the block the caller actually asked for */
	wait_on_buffer(bh);
	if (buffer_uptodate(bh))
		return bh;
	brelse(bh);
	return NULL;
}

/*
** read and replay the log
** on a clean unmount, the journal header's next unflushed pointer will be to an invalid
** transaction.  This tests that before finding all the transactions in the log, which makes normal mount times fast.
**
** After a crash, this starts with the next unflushed transaction, and replays until it finds one too old, or invalid.
**
** On exit, it sets things up so the first transaction will work correctly.
*/ static int journal_read(struct super_block *sb) { struct reiserfs_journal *journal = SB_JOURNAL(sb); struct reiserfs_journal_desc *desc; unsigned int oldest_trans_id = 0; unsigned int oldest_invalid_trans_id = 0; time_t start; unsigned long oldest_start = 0; unsigned long cur_dblock = 0; unsigned long newest_mount_id = 9; struct buffer_head *d_bh; struct reiserfs_journal_header *jh; int valid_journal_header = 0; int replay_count = 0; int continue_replay = 1; int ret; char b[BDEVNAME_SIZE]; cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(sb); reiserfs_info(sb, "checking transaction log (%s)\n", bdevname(journal->j_dev_bd, b)); start = get_seconds(); /* step 1, read in the journal header block. Check the transaction it says ** is the first unflushed, and if that transaction is not valid, ** replay is done */ journal->j_header_bh = journal_bread(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + SB_ONDISK_JOURNAL_SIZE(sb)); if (!journal->j_header_bh) { return 1; } jh = (struct reiserfs_journal_header *)(journal->j_header_bh->b_data); if (le32_to_cpu(jh->j_first_unflushed_offset) < SB_ONDISK_JOURNAL_SIZE(sb) && le32_to_cpu(jh->j_last_flush_trans_id) > 0) { oldest_start = SB_ONDISK_JOURNAL_1st_BLOCK(sb) + le32_to_cpu(jh->j_first_unflushed_offset); oldest_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1; newest_mount_id = le32_to_cpu(jh->j_mount_id); reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1153: found in " "header: first_unflushed_offset %d, last_flushed_trans_id " "%lu", le32_to_cpu(jh->j_first_unflushed_offset), le32_to_cpu(jh->j_last_flush_trans_id)); valid_journal_header = 1; /* now, we try to read the first unflushed offset. If it is not valid, ** there is nothing more we can do, and it makes no sense to read ** through the whole log. 
*/ d_bh = journal_bread(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + le32_to_cpu(jh->j_first_unflushed_offset)); ret = journal_transaction_is_valid(sb, d_bh, NULL, NULL); if (!ret) { continue_replay = 0; } brelse(d_bh); goto start_log_replay; } /* ok, there are transactions that need to be replayed. start with the first log block, find ** all the valid transactions, and pick out the oldest. */ while (continue_replay && cur_dblock < (SB_ONDISK_JOURNAL_1st_BLOCK(sb) + SB_ONDISK_JOURNAL_SIZE(sb))) { /* Note that it is required for blocksize of primary fs device and journal device to be the same */ d_bh = reiserfs_breada(journal->j_dev_bd, cur_dblock, sb->s_blocksize, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + SB_ONDISK_JOURNAL_SIZE(sb)); ret = journal_transaction_is_valid(sb, d_bh, &oldest_invalid_trans_id, &newest_mount_id); if (ret == 1) { desc = (struct reiserfs_journal_desc *)d_bh->b_data; if (oldest_start == 0) { /* init all oldest_ values */ oldest_trans_id = get_desc_trans_id(desc); oldest_start = d_bh->b_blocknr; newest_mount_id = get_desc_mount_id(desc); reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1179: Setting " "oldest_start to offset %llu, trans_id %lu", oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK (sb), oldest_trans_id); } else if (oldest_trans_id > get_desc_trans_id(desc)) { /* one we just read was older */ oldest_trans_id = get_desc_trans_id(desc); oldest_start = d_bh->b_blocknr; reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1180: Resetting " "oldest_start to offset %lu, trans_id %lu", oldest_start - SB_ONDISK_JOURNAL_1st_BLOCK (sb), oldest_trans_id); } if (newest_mount_id < get_desc_mount_id(desc)) { newest_mount_id = get_desc_mount_id(desc); reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1299: Setting " "newest_mount_id to %d", get_desc_mount_id(desc)); } cur_dblock += get_desc_trans_len(desc) + 2; } else { cur_dblock++; } brelse(d_bh); } start_log_replay: cur_dblock = oldest_start; if (oldest_trans_id) { reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1206: 
Starting replay " "from offset %llu, trans_id %lu", cur_dblock - SB_ONDISK_JOURNAL_1st_BLOCK(sb), oldest_trans_id); } replay_count = 0; while (continue_replay && oldest_trans_id > 0) { ret = journal_read_transaction(sb, cur_dblock, oldest_start, oldest_trans_id, newest_mount_id); if (ret < 0) { return ret; } else if (ret != 0) { break; } cur_dblock = SB_ONDISK_JOURNAL_1st_BLOCK(sb) + journal->j_start; replay_count++; if (cur_dblock == oldest_start) break; } if (oldest_trans_id == 0) { reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1225: No valid " "transactions found"); } /* j_start does not get set correctly if we don't replay any transactions. ** if we had a valid journal_header, set j_start to the first unflushed transaction value, ** copy the trans_id from the header */ if (valid_journal_header && replay_count == 0) { journal->j_start = le32_to_cpu(jh->j_first_unflushed_offset); journal->j_trans_id = le32_to_cpu(jh->j_last_flush_trans_id) + 1; /* check for trans_id overflow */ if (journal->j_trans_id == 0) journal->j_trans_id = 10; journal->j_last_flush_trans_id = le32_to_cpu(jh->j_last_flush_trans_id); journal->j_mount_id = le32_to_cpu(jh->j_mount_id) + 1; } else { journal->j_mount_id = newest_mount_id + 1; } reiserfs_debug(sb, REISERFS_DEBUG_CODE, "journal-1299: Setting " "newest_mount_id to %lu", journal->j_mount_id); journal->j_first_unflushed_offset = journal->j_start; if (replay_count > 0) { reiserfs_info(sb, "replayed %d transactions in %lu seconds\n", replay_count, get_seconds() - start); } if (!bdev_read_only(sb->s_bdev) && _update_journal_header_block(sb, journal->j_start, journal->j_last_flush_trans_id)) { /* replay failed, caller must call free_journal_ram and abort ** the mount */ return -1; } return 0; } static struct reiserfs_journal_list *alloc_journal_list(struct super_block *s) { struct reiserfs_journal_list *jl; jl = kzalloc(sizeof(struct reiserfs_journal_list), GFP_NOFS | __GFP_NOFAIL); INIT_LIST_HEAD(&jl->j_list); 
INIT_LIST_HEAD(&jl->j_working_list); INIT_LIST_HEAD(&jl->j_tail_bh_list); INIT_LIST_HEAD(&jl->j_bh_list); mutex_init(&jl->j_commit_mutex); SB_JOURNAL(s)->j_num_lists++; get_journal_list(jl); return jl; } static void journal_list_init(struct super_block *sb) { SB_JOURNAL(sb)->j_current_jl = alloc_journal_list(sb); } static void release_journal_dev(struct super_block *super, struct reiserfs_journal *journal) { if (journal->j_dev_bd != NULL) { blkdev_put(journal->j_dev_bd, journal->j_dev_mode); journal->j_dev_bd = NULL; } } static int journal_init_dev(struct super_block *super, struct reiserfs_journal *journal, const char *jdev_name) { int result; dev_t jdev; fmode_t blkdev_mode = FMODE_READ | FMODE_WRITE | FMODE_EXCL; char b[BDEVNAME_SIZE]; result = 0; journal->j_dev_bd = NULL; jdev = SB_ONDISK_JOURNAL_DEVICE(super) ? new_decode_dev(SB_ONDISK_JOURNAL_DEVICE(super)) : super->s_dev; if (bdev_read_only(super->s_bdev)) blkdev_mode = FMODE_READ; /* there is no "jdev" option and journal is on separate device */ if ((!jdev_name || !jdev_name[0])) { if (jdev == super->s_dev) blkdev_mode &= ~FMODE_EXCL; journal->j_dev_bd = blkdev_get_by_dev(jdev, blkdev_mode, journal); journal->j_dev_mode = blkdev_mode; if (IS_ERR(journal->j_dev_bd)) { result = PTR_ERR(journal->j_dev_bd); journal->j_dev_bd = NULL; reiserfs_warning(super, "sh-458", "cannot init journal device '%s': %i", __bdevname(jdev, b), result); return result; } else if (jdev != super->s_dev) set_blocksize(journal->j_dev_bd, super->s_blocksize); return 0; } journal->j_dev_mode = blkdev_mode; journal->j_dev_bd = blkdev_get_by_path(jdev_name, blkdev_mode, journal); if (IS_ERR(journal->j_dev_bd)) { result = PTR_ERR(journal->j_dev_bd); journal->j_dev_bd = NULL; reiserfs_warning(super, "journal_init_dev: Cannot open '%s': %i", jdev_name, result); return result; } set_blocksize(journal->j_dev_bd, super->s_blocksize); reiserfs_info(super, "journal_init_dev: journal device: %s\n", bdevname(journal->j_dev_bd, b)); return 0; } /** * 
When creating/tuning a file system user can assign some
 * journal params within boundaries which depend on the ratio
 * blocksize/standard_blocksize.
 *
 * For blocks >= standard_blocksize transaction size should
 * be not less then JOURNAL_TRANS_MIN_DEFAULT, and not more
 * then JOURNAL_TRANS_MAX_DEFAULT.
 *
 * For blocks < standard_blocksize these boundaries should be
 * decreased proportionally.
 */
#define REISERFS_STANDARD_BLKSIZE (4096)

/* Validate user-specified journal parameters, or fill in defaults when the
 * on-disk fields are zero (old mkreiserfs).  Returns 0 if the parameters
 * are acceptable, 1 on a bad/corrupt value (mount should fail). */
static int check_advise_trans_params(struct super_block *sb,
				     struct reiserfs_journal *journal)
{
	if (journal->j_trans_max) {
		/* Non-default journal params.
		   Do sanity check for them. */
		int ratio = 1;
		/* scale limits down for blocksizes below the standard 4K */
		if (sb->s_blocksize < REISERFS_STANDARD_BLKSIZE)
			ratio = REISERFS_STANDARD_BLKSIZE / sb->s_blocksize;

		if (journal->j_trans_max > JOURNAL_TRANS_MAX_DEFAULT / ratio ||
		    journal->j_trans_max < JOURNAL_TRANS_MIN_DEFAULT / ratio ||
		    SB_ONDISK_JOURNAL_SIZE(sb) / journal->j_trans_max <
		    JOURNAL_MIN_RATIO) {
			reiserfs_warning(sb, "sh-462",
					 "bad transaction max size (%u). "
					 "FSCK?", journal->j_trans_max);
			return 1;
		}
		/* j_max_batch must keep the default batch/trans ratio */
		if (journal->j_max_batch != (journal->j_trans_max) *
		    JOURNAL_MAX_BATCH_DEFAULT / JOURNAL_TRANS_MAX_DEFAULT) {
			reiserfs_warning(sb, "sh-463",
					 "bad transaction max batch (%u). "
					 "FSCK?", journal->j_max_batch);
			return 1;
		}
	} else {
		/* Default journal params.
		   The file system was created by old version
		   of mkreiserfs, so some fields contain zeros,
		   and we need to advise proper values for them */
		if (sb->s_blocksize != REISERFS_STANDARD_BLKSIZE) {
			reiserfs_warning(sb, "sh-464", "bad blocksize (%u)",
					 sb->s_blocksize);
			return 1;
		}
		journal->j_trans_max = JOURNAL_TRANS_MAX_DEFAULT;
		journal->j_max_batch = JOURNAL_MAX_BATCH_DEFAULT;
		journal->j_max_commit_age = JOURNAL_MAX_COMMIT_AGE;
	}
	return 0;
}

/*
** must be called once on fs mount.
calls journal_read for you */ int journal_init(struct super_block *sb, const char *j_dev_name, int old_format, unsigned int commit_max_age) { int num_cnodes = SB_ONDISK_JOURNAL_SIZE(sb) * 2; struct buffer_head *bhjh; struct reiserfs_super_block *rs; struct reiserfs_journal_header *jh; struct reiserfs_journal *journal; struct reiserfs_journal_list *jl; char b[BDEVNAME_SIZE]; int ret; journal = SB_JOURNAL(sb) = vzalloc(sizeof(struct reiserfs_journal)); if (!journal) { reiserfs_warning(sb, "journal-1256", "unable to get memory for journal structure"); return 1; } INIT_LIST_HEAD(&journal->j_bitmap_nodes); INIT_LIST_HEAD(&journal->j_prealloc_list); INIT_LIST_HEAD(&journal->j_working_list); INIT_LIST_HEAD(&journal->j_journal_list); journal->j_persistent_trans = 0; if (reiserfs_allocate_list_bitmaps(sb, journal->j_list_bitmap, reiserfs_bmap_count(sb))) goto free_and_return; allocate_bitmap_nodes(sb); /* reserved for journal area support */ SB_JOURNAL_1st_RESERVED_BLOCK(sb) = (old_format ? REISERFS_OLD_DISK_OFFSET_IN_BYTES / sb->s_blocksize + reiserfs_bmap_count(sb) + 1 : REISERFS_DISK_OFFSET_IN_BYTES / sb->s_blocksize + 2); /* Sanity check to see is the standard journal fitting within first bitmap (actual for small blocksizes) */ if (!SB_ONDISK_JOURNAL_DEVICE(sb) && (SB_JOURNAL_1st_RESERVED_BLOCK(sb) + SB_ONDISK_JOURNAL_SIZE(sb) > sb->s_blocksize * 8)) { reiserfs_warning(sb, "journal-1393", "journal does not fit for area addressed " "by first of bitmap blocks. It starts at " "%u and its size is %u. 
Block size %ld", SB_JOURNAL_1st_RESERVED_BLOCK(sb), SB_ONDISK_JOURNAL_SIZE(sb), sb->s_blocksize); goto free_and_return; } if (journal_init_dev(sb, journal, j_dev_name) != 0) { reiserfs_warning(sb, "sh-462", "unable to initialize jornal device"); goto free_and_return; } rs = SB_DISK_SUPER_BLOCK(sb); /* read journal header */ bhjh = journal_bread(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) + SB_ONDISK_JOURNAL_SIZE(sb)); if (!bhjh) { reiserfs_warning(sb, "sh-459", "unable to read journal header"); goto free_and_return; } jh = (struct reiserfs_journal_header *)(bhjh->b_data); /* make sure that journal matches to the super block */ if (is_reiserfs_jr(rs) && (le32_to_cpu(jh->jh_journal.jp_journal_magic) != sb_jp_journal_magic(rs))) { reiserfs_warning(sb, "sh-460", "journal header magic %x (device %s) does " "not match to magic found in super block %x", jh->jh_journal.jp_journal_magic, bdevname(journal->j_dev_bd, b), sb_jp_journal_magic(rs)); brelse(bhjh); goto free_and_return; } journal->j_trans_max = le32_to_cpu(jh->jh_journal.jp_journal_trans_max); journal->j_max_batch = le32_to_cpu(jh->jh_journal.jp_journal_max_batch); journal->j_max_commit_age = le32_to_cpu(jh->jh_journal.jp_journal_max_commit_age); journal->j_max_trans_age = JOURNAL_MAX_TRANS_AGE; if (check_advise_trans_params(sb, journal) != 0) goto free_and_return; journal->j_default_max_commit_age = journal->j_max_commit_age; if (commit_max_age != 0) { journal->j_max_commit_age = commit_max_age; journal->j_max_trans_age = commit_max_age; } reiserfs_info(sb, "journal params: device %s, size %u, " "journal first block %u, max trans len %u, max batch %u, " "max commit age %u, max trans age %u\n", bdevname(journal->j_dev_bd, b), SB_ONDISK_JOURNAL_SIZE(sb), SB_ONDISK_JOURNAL_1st_BLOCK(sb), journal->j_trans_max, journal->j_max_batch, journal->j_max_commit_age, journal->j_max_trans_age); brelse(bhjh); journal->j_list_bitmap_index = 0; journal_list_init(sb); memset(journal->j_list_hash_table, 0, JOURNAL_HASH_SIZE * sizeof(struct 
reiserfs_journal_cnode *)); INIT_LIST_HEAD(&journal->j_dirty_buffers); spin_lock_init(&journal->j_dirty_buffers_lock); journal->j_start = 0; journal->j_len = 0; journal->j_len_alloc = 0; atomic_set(&(journal->j_wcount), 0); atomic_set(&(journal->j_async_throttle), 0); journal->j_bcount = 0; journal->j_trans_start_time = 0; journal->j_last = NULL; journal->j_first = NULL; init_waitqueue_head(&(journal->j_join_wait)); mutex_init(&journal->j_mutex); mutex_init(&journal->j_flush_mutex); journal->j_trans_id = 10; journal->j_mount_id = 10; journal->j_state = 0; atomic_set(&(journal->j_jlock), 0); journal->j_cnode_free_list = allocate_cnodes(num_cnodes); journal->j_cnode_free_orig = journal->j_cnode_free_list; journal->j_cnode_free = journal->j_cnode_free_list ? num_cnodes : 0; journal->j_cnode_used = 0; journal->j_must_wait = 0; if (journal->j_cnode_free == 0) { reiserfs_warning(sb, "journal-2004", "Journal cnode memory " "allocation failed (%ld bytes). Journal is " "too large for available memory. Usually " "this is due to a journal that is too large.", sizeof (struct reiserfs_journal_cnode) * num_cnodes); goto free_and_return; } init_journal_hash(sb); jl = journal->j_current_jl; /* * get_list_bitmap() may call flush_commit_list() which * requires the lock. Calling flush_commit_list() shouldn't happen * this early but I like to be paranoid. */ reiserfs_write_lock(sb); jl->j_list_bitmap = get_list_bitmap(sb, jl); reiserfs_write_unlock(sb); if (!jl->j_list_bitmap) { reiserfs_warning(sb, "journal-2005", "get_list_bitmap failed for journal list 0"); goto free_and_return; } /* * Journal_read needs to be inspected in order to push down * the lock further inside (or even remove it). 
*/ reiserfs_write_lock(sb); ret = journal_read(sb); reiserfs_write_unlock(sb); if (ret < 0) { reiserfs_warning(sb, "reiserfs-2006", "Replay Failure, unable to mount"); goto free_and_return; } reiserfs_mounted_fs_count++; if (reiserfs_mounted_fs_count <= 1) commit_wq = alloc_workqueue("reiserfs", WQ_MEM_RECLAIM, 0); INIT_DELAYED_WORK(&journal->j_work, flush_async_commits); journal->j_work_sb = sb; return 0; free_and_return: free_journal_ram(sb); return 1; } /* ** test for a polite end of the current transaction. Used by file_write, and should ** be used by delete to make sure they don't write more than can fit inside a single ** transaction */ int journal_transaction_should_end(struct reiserfs_transaction_handle *th, int new_alloc) { struct reiserfs_journal *journal = SB_JOURNAL(th->t_super); time_t now = get_seconds(); /* cannot restart while nested */ BUG_ON(!th->t_trans_id); if (th->t_refcount > 1) return 0; if (journal->j_must_wait > 0 || (journal->j_len_alloc + new_alloc) >= journal->j_max_batch || atomic_read(&(journal->j_jlock)) || (now - journal->j_trans_start_time) > journal->j_max_trans_age || journal->j_cnode_free < (journal->j_trans_max * 3)) { return 1; } journal->j_len_alloc += new_alloc; th->t_blocks_allocated += new_alloc ; return 0; } /* this must be called inside a transaction */ void reiserfs_block_writes(struct reiserfs_transaction_handle *th) { struct reiserfs_journal *journal = SB_JOURNAL(th->t_super); BUG_ON(!th->t_trans_id); journal->j_must_wait = 1; set_bit(J_WRITERS_BLOCKED, &journal->j_state); return; } /* this must be called without a transaction started */ void reiserfs_allow_writes(struct super_block *s) { struct reiserfs_journal *journal = SB_JOURNAL(s); clear_bit(J_WRITERS_BLOCKED, &journal->j_state); wake_up(&journal->j_join_wait); } /* this must be called without a transaction started */ void reiserfs_wait_on_write_block(struct super_block *s) { struct reiserfs_journal *journal = SB_JOURNAL(s); wait_event(journal->j_join_wait, 
!test_bit(J_WRITERS_BLOCKED, &journal->j_state)); } static void queue_log_writer(struct super_block *s) { wait_queue_t wait; struct reiserfs_journal *journal = SB_JOURNAL(s); set_bit(J_WRITERS_QUEUED, &journal->j_state); /* * we don't want to use wait_event here because * we only want to wait once. */ init_waitqueue_entry(&wait, current); add_wait_queue(&journal->j_join_wait, &wait); set_current_state(TASK_UNINTERRUPTIBLE); if (test_bit(J_WRITERS_QUEUED, &journal->j_state)) { reiserfs_write_unlock(s); schedule(); reiserfs_write_lock(s); } __set_current_state(TASK_RUNNING); remove_wait_queue(&journal->j_join_wait, &wait); } static void wake_queued_writers(struct super_block *s) { struct reiserfs_journal *journal = SB_JOURNAL(s); if (test_and_clear_bit(J_WRITERS_QUEUED, &journal->j_state)) wake_up(&journal->j_join_wait); } static void let_transaction_grow(struct super_block *sb, unsigned int trans_id) { struct reiserfs_journal *journal = SB_JOURNAL(sb); unsigned long bcount = journal->j_bcount; while (1) { reiserfs_write_unlock(sb); schedule_timeout_uninterruptible(1); reiserfs_write_lock(sb); journal->j_current_jl->j_state |= LIST_COMMIT_PENDING; while ((atomic_read(&journal->j_wcount) > 0 || atomic_read(&journal->j_jlock)) && journal->j_trans_id == trans_id) { queue_log_writer(sb); } if (journal->j_trans_id != trans_id) break; if (bcount == journal->j_bcount) break; bcount = journal->j_bcount; } } /* join == true if you must join an existing transaction. ** join == false if you can deal with waiting for others to finish ** ** this will block until the transaction is joinable. send the number of blocks you ** expect to use in nblocks. 
*/
/* Core of journal_begin/journal_join: reserve room for nblocks in the
 * current transaction, waiting or forcing a commit first when the journal
 * is full, blocked, or too old.  'join' is one of JBEGIN_REG/JOIN/ABORT.
 * Returns 0 on success or a journal errno on failure (handle is zeroed
 * except t_super). */
static int do_journal_begin_r(struct reiserfs_transaction_handle *th,
			      struct super_block *sb, unsigned long nblocks,
			      int join)
{
	time_t now = get_seconds();
	unsigned int old_trans_id;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_transaction_handle myth;
	int sched_count = 0;
	int retval;

	reiserfs_check_lock_depth(sb, "journal_begin");
	BUG_ON(nblocks > journal->j_trans_max);

	PROC_INFO_INC(sb, journal.journal_being);
	/* set here for journal_join */
	th->t_refcount = 1;
	th->t_super = sb;

      relock:
	lock_journal(sb);
	if (join != JBEGIN_ABORT && reiserfs_is_journal_aborted(journal)) {
		unlock_journal(sb);
		retval = journal->j_errno;
		goto out_fail;
	}
	journal->j_bcount++;

	if (test_bit(J_WRITERS_BLOCKED, &journal->j_state)) {
		unlock_journal(sb);
		/* drop the fs lock while sleeping so the blocker can run */
		reiserfs_write_unlock(sb);
		reiserfs_wait_on_write_block(sb);
		reiserfs_write_lock(sb);
		PROC_INFO_INC(sb, journal.journal_relock_writers);
		goto relock;
	}
	now = get_seconds();

	/* if there is no room in the journal OR
	 ** if this transaction is too old, and we weren't called joinable, wait for it to finish before beginning
	 ** we don't sleep if there aren't other writers
	 */

	if ((!join && journal->j_must_wait > 0) ||
	    (!join
	     && (journal->j_len_alloc + nblocks + 2) >= journal->j_max_batch)
	    || (!join && atomic_read(&journal->j_wcount) > 0
		&& journal->j_trans_start_time > 0
		&& (now - journal->j_trans_start_time) >
		journal->j_max_trans_age) || (!join
					      && atomic_read(&journal->j_jlock))
	    || (!join && journal->j_cnode_free < (journal->j_trans_max * 3))) {

		old_trans_id = journal->j_trans_id;
		unlock_journal(sb);	/* allow others to finish this transaction */

		if (!join && (journal->j_len_alloc + nblocks + 2) >=
		    journal->j_max_batch &&
		    ((journal->j_len + nblocks + 2) * 100) <
		    (journal->j_len_alloc * 75)) {
			if (atomic_read(&journal->j_wcount) > 10) {
				sched_count++;
				queue_log_writer(sb);
				goto relock;
			}
		}
		/* don't mess with joining the transaction if all we have to do is
		 * wait for someone else to do a commit
		 */
		if (atomic_read(&journal->j_jlock)) {
			while (journal->j_trans_id == old_trans_id &&
			       atomic_read(&journal->j_jlock)) {
				queue_log_writer(sb);
			}
			goto relock;
		}
		/* join the old transaction ourselves just to end it */
		retval = journal_join(&myth, sb, 1);
		if (retval)
			goto out_fail;

		/* someone might have ended the transaction while we joined */
		if (old_trans_id != journal->j_trans_id) {
			retval = do_journal_end(&myth, sb, 1, 0);
		} else {
			retval = do_journal_end(&myth, sb, 1, COMMIT_NOW);
		}

		if (retval)
			goto out_fail;

		PROC_INFO_INC(sb, journal.journal_relock_wcount);
		goto relock;
	}
	/* we are the first writer, set trans_id */
	if (journal->j_trans_start_time == 0) {
		journal->j_trans_start_time = get_seconds();
	}
	atomic_inc(&(journal->j_wcount));
	journal->j_len_alloc += nblocks;
	th->t_blocks_logged = 0;
	th->t_blocks_allocated = nblocks;
	th->t_trans_id = journal->j_trans_id;
	unlock_journal(sb);
	INIT_LIST_HEAD(&th->t_list);
	return 0;

      out_fail:
	memset(th, 0, sizeof(*th));
	/* Re-set th->t_super, so we can properly keep track of how many
	 * persistent transactions there are. We need to do this so if this
	 * call is part of a failed restart_transaction, we can free it later */
	th->t_super = sb;
	return retval;
}

/* Begin (or nest into) a transaction whose handle outlives the current
 * task context.  Returns the handle, or NULL on allocation/begin failure.
 * The handle is freed by reiserfs_end_persistent_transaction(). */
struct reiserfs_transaction_handle *reiserfs_persistent_transaction(struct
								    super_block
								    *s,
								    int nblocks)
{
	int ret;
	struct reiserfs_transaction_handle *th;

	/* if we're nesting into an existing transaction.  It will be
	 ** persistent on its own
	 */
	if (reiserfs_transaction_running(s)) {
		th = current->journal_info;
		th->t_refcount++;
		BUG_ON(th->t_refcount < 2);

		return th;
	}
	th = kmalloc(sizeof(struct reiserfs_transaction_handle), GFP_NOFS);
	if (!th)
		return NULL;
	ret = journal_begin(th, s, nblocks);
	if (ret) {
		kfree(th);
		return NULL;
	}

	SB_JOURNAL(s)->j_persistent_trans++;
	return th;
}

/* End a handle obtained from reiserfs_persistent_transaction(); frees the
 * handle once its refcount drops to zero.  Returns the journal_end result,
 * or -EIO if the handle never had a live transaction. */
int reiserfs_end_persistent_transaction(struct reiserfs_transaction_handle *th)
{
	struct super_block *s = th->t_super;
	int ret = 0;
	if (th->t_trans_id)
		ret = journal_end(th, th->t_super, th->t_blocks_allocated);
	else
		ret = -EIO;
	if (th->t_refcount == 0) {
		SB_JOURNAL(s)->j_persistent_trans--;
		kfree(th);
	}
	return ret;
}

/* join the running transaction unconditionally (JBEGIN_JOIN) */
static int journal_join(struct reiserfs_transaction_handle *th,
			struct super_block *sb, unsigned long nblocks)
{
	struct reiserfs_transaction_handle *cur_th = current->journal_info;

	/* this keeps do_journal_end from NULLing out the current->journal_info
	 ** pointer
	 */
	th->t_handle_save = cur_th;
	BUG_ON(cur_th && cur_th->t_refcount > 1);
	return do_journal_begin_r(th, sb, nblocks, JBEGIN_JOIN);
}

/* like journal_join, but allowed even when the journal is aborted */
int journal_join_abort(struct reiserfs_transaction_handle *th,
		       struct super_block *sb, unsigned long nblocks)
{
	struct reiserfs_transaction_handle *cur_th = current->journal_info;

	/* this keeps do_journal_end from NULLing out the current->journal_info
	 ** pointer
	 */
	th->t_handle_save = cur_th;
	BUG_ON(cur_th && cur_th->t_refcount > 1);
	return do_journal_begin_r(th, sb, nblocks, JBEGIN_ABORT);
}

/* Public transaction begin: handles nesting into an already-running
 * transaction (same fs bumps the refcount; different fs saves/restores the
 * handle) and keeps current->journal_info consistent on failure. */
int journal_begin(struct reiserfs_transaction_handle *th,
		  struct super_block *sb, unsigned long nblocks)
{
	struct reiserfs_transaction_handle *cur_th = current->journal_info;
	int ret;

	th->t_handle_save = NULL;
	if (cur_th) {
		/* we are nesting into the current transaction */
		if (cur_th->t_super == sb) {
			BUG_ON(!cur_th->t_refcount);
			cur_th->t_refcount++;
			memcpy(th, cur_th, sizeof(*th));
			if (th->t_refcount <= 1)
				reiserfs_warning(sb, "reiserfs-2005",
						 "BAD: refcount <= 1, but "
						 "journal_info != 0");
			return 0;
		} else {
			/* we've ended up with a handle from a different filesystem.
			 ** save it and restore on journal_end.  This should never
			 ** really happen... */
			reiserfs_warning(sb, "clm-2100",
					 "nesting info a different FS");
			th->t_handle_save = current->journal_info;
			current->journal_info = th;
		}
	} else {
		current->journal_info = th;
	}
	ret = do_journal_begin_r(th, sb, nblocks, JBEGIN_REG);
	BUG_ON(current->journal_info != th);

	/* I guess this boils down to being the reciprocal of clm-2100 above.
	 * If do_journal_begin_r fails, we need to put it back, since journal_end
	 * won't be called to do it. */
	if (ret)
		current->journal_info = th->t_handle_save;
	else
		BUG_ON(!th->t_refcount);

	return ret;
}

/*
** puts bh into the current transaction.  If it was already there, reorders removes the
** old pointers from the hash, and puts new ones in (to make sure replay happen in the right order).
**
** if it was dirty, cleans and files onto the clean list.  I can't let it be dirty again until the
** transaction is committed.
**
** if j_len, is bigger than j_len_alloc, it pushes j_len_alloc to 10 + j_len.
*/
int journal_mark_dirty(struct reiserfs_transaction_handle *th,
		       struct super_block *sb, struct buffer_head *bh)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_cnode *cn = NULL;
	int count_already_incd = 0;
	int prepared = 0;
	BUG_ON(!th->t_trans_id);

	PROC_INFO_INC(sb, journal.mark_dirty);
	if (th->t_trans_id != journal->j_trans_id) {
		reiserfs_panic(th->t_super, "journal-1577",
			       "handle trans id %ld != current trans id %ld",
			       th->t_trans_id, journal->j_trans_id);
	}

	prepared = test_clear_buffer_journal_prepared(bh);
	clear_buffer_journal_restore_dirty(bh);
	/* already in this transaction, we are done */
	if (buffer_journaled(bh)) {
		PROC_INFO_INC(sb, journal.mark_dirty_already);
		return 0;
	}

	/* this must be turned into a panic instead of a warning.  We can't allow
	 ** a dirty or journal_dirty or locked buffer to be logged, as some changes
	 ** could get to disk too early.  NOT GOOD.
	 */
	if (!prepared || buffer_dirty(bh)) {
		reiserfs_warning(sb, "journal-1777",
				 "buffer %llu bad state "
				 "%cPREPARED %cLOCKED %cDIRTY %cJDIRTY_WAIT",
				 (unsigned long long)bh->b_blocknr,
				 prepared ? ' ' : '!',
				 buffer_locked(bh) ? ' ' : '!',
				 buffer_dirty(bh) ? ' ' : '!',
				 buffer_journal_dirty(bh) ? ' ' : '!');
	}

	if (atomic_read(&(journal->j_wcount)) <= 0) {
		reiserfs_warning(sb, "journal-1409",
				 "returning because j_wcount was %d",
				 atomic_read(&(journal->j_wcount)));
		return 1;
	}
	/* this error means I've screwed up, and we've overflowed the transaction.
	 ** Nothing can be done here, except make the FS readonly or panic.
	 */
	if (journal->j_len >= journal->j_trans_max) {
		reiserfs_panic(th->t_super, "journal-1413",
			       "j_len (%lu) is too big",
			       journal->j_len);
	}

	if (buffer_journal_dirty(bh)) {
		/* bh already holds a journal reference; don't take another */
		count_already_incd = 1;
		PROC_INFO_INC(sb, journal.mark_dirty_notjournal);
		clear_buffer_journal_dirty(bh);
	}

	if (journal->j_len > journal->j_len_alloc) {
		journal->j_len_alloc = journal->j_len + JOURNAL_PER_BALANCE_CNT;
	}

	set_buffer_journaled(bh);

	/* now put this guy on the end */
	/* NOTE(review): cn is always NULL here, so this branch always runs;
	 * the condition looks like a leftover from an earlier version */
	if (!cn) {
		cn = get_cnode(sb);
		if (!cn) {
			reiserfs_panic(sb, "journal-4", "get_cnode failed!");
		}

		if (th->t_blocks_logged == th->t_blocks_allocated) {
			th->t_blocks_allocated += JOURNAL_PER_BALANCE_CNT;
			journal->j_len_alloc += JOURNAL_PER_BALANCE_CNT;
		}
		th->t_blocks_logged++;
		journal->j_len++;

		cn->bh = bh;
		cn->blocknr = bh->b_blocknr;
		cn->sb = sb;
		cn->jlist = NULL;
		insert_journal_hash(journal->j_hash_table, cn);
		if (!count_already_incd) {
			get_bh(bh);
		}
	}
	/* append the cnode to the running transaction's list */
	cn->next = NULL;
	cn->prev = journal->j_last;
	cn->bh = bh;
	if (journal->j_last) {
		journal->j_last->next = cn;
		journal->j_last = cn;
	} else {
		journal->j_first = cn;
		journal->j_last = cn;
	}
	reiserfs_schedule_old_flush(sb);
	return 0;
}

/* Drop one reference on the handle; the transaction only really ends
 * (do_journal_end) when the refcount reaches zero. */
int journal_end(struct reiserfs_transaction_handle *th,
		struct super_block *sb, unsigned long nblocks)
{
	if (!current->journal_info && th->t_refcount > 1)
		reiserfs_warning(sb, "REISER-NESTING",
				 "th NULL, refcount %d", th->t_refcount);

	if (!th->t_trans_id) {
		WARN_ON(1);
		return -EIO;
	}

	th->t_refcount--;
	if (th->t_refcount > 0) {
		struct reiserfs_transaction_handle *cur_th =
		    current->journal_info;

		/* we aren't allowed to close a nested transaction on a different
		 ** filesystem from the one in the task struct
		 */
		BUG_ON(cur_th->t_super != th->t_super);

		if (th != cur_th) {
			memcpy(current->journal_info, th, sizeof(*th));
			th->t_trans_id = 0;
		}
		return 0;
	} else {
		return do_journal_end(th, sb, nblocks, 0);
	}
}

/* removes from the current transaction, relsing and descrementing any counters.
** also files the removed buffer directly onto the clean list
**
** called by journal_mark_freed when a block has been deleted
**
** returns 1 if it cleaned and relsed the buffer. 0 otherwise
*/
static int remove_from_transaction(struct super_block *sb,
				   b_blocknr_t blocknr, int already_cleaned)
{
	struct buffer_head *bh;
	struct reiserfs_journal_cnode *cn;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	int ret = 0;

	cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr);
	if (!cn || !cn->bh) {
		return ret;
	}
	bh = cn->bh;
	/* unlink the cnode from the transaction's doubly-linked list */
	if (cn->prev) {
		cn->prev->next = cn->next;
	}
	if (cn->next) {
		cn->next->prev = cn->prev;
	}
	if (cn == journal->j_first) {
		journal->j_first = cn->next;
	}
	if (cn == journal->j_last) {
		journal->j_last = cn->prev;
	}
	if (bh)
		remove_journal_hash(sb, journal->j_hash_table, NULL,
				    bh->b_blocknr, 0);
	clear_buffer_journaled(bh);	/* don't log this one */

	if (!already_cleaned) {
		clear_buffer_journal_dirty(bh);
		clear_buffer_dirty(bh);
		clear_buffer_journal_test(bh);
		put_bh(bh);
		if (atomic_read(&(bh->b_count)) < 0) {
			reiserfs_warning(sb, "journal-1752",
					 "b_count < 0");
		}
		ret = 1;
	}
	journal->j_len--;
	journal->j_len_alloc--;
	free_cnode(sb, cn);
	return ret;
}

/*
** for any cnode in a journal list, it can only be dirtied of all the
** transactions that include it are committed to disk.
** this checks through each transaction, and returns 1 if you are allowed to dirty,
** and 0 if you aren't
**
** it is called by dirty_journal_list, which is called after flush_commit_list has gotten all the log
** blocks for a given transaction on disk
**
*/
static int can_dirty(struct reiserfs_journal_cnode *cn)
{
	struct super_block *sb = cn->sb;
	b_blocknr_t blocknr = cn->blocknr;
	struct reiserfs_journal_cnode *cur = cn->hprev;
	int can_dirty = 1;

	/* first test hprev.   These are all newer than cn, so any node here
	 ** with the same block number and dev means this node can't be sent
	 ** to disk right now.
	 */
	while (cur && can_dirty) {
		if (cur->jlist && cur->bh && cur->blocknr && cur->sb == sb &&
		    cur->blocknr == blocknr) {
			can_dirty = 0;
		}
		cur = cur->hprev;
	}
	/* then test hnext.  These are all older than cn.  As long as they
	 ** are committed to the log, it is safe to write cn to disk
	 */
	cur = cn->hnext;
	while (cur && can_dirty) {
		if (cur->jlist && cur->jlist->j_len > 0 &&
		    atomic_read(&(cur->jlist->j_commit_left)) > 0 && cur->bh &&
		    cur->blocknr && cur->sb == sb && cur->blocknr == blocknr) {
			can_dirty = 0;
		}
		cur = cur->hnext;
	}
	return can_dirty;
}

/* syncs the commit blocks, but does not force the real buffers to disk
** will wait until the current transaction is done/committed before returning
*/
int journal_end_sync(struct reiserfs_transaction_handle *th,
		     struct super_block *sb, unsigned long nblocks)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	BUG_ON(!th->t_trans_id);
	/* you can sync while nested, very, very bad */
	BUG_ON(th->t_refcount > 1);
	if (journal->j_len == 0) {
		/* make sure there is something to commit: log the super */
		reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1);
		journal_mark_dirty(th, sb, SB_BUFFER_WITH_SB(sb));
	}
	return do_journal_end(th, sb, nblocks, COMMIT_NOW | WAIT);
}

/*
** writeback the pending async commits to disk
*/
static void flush_async_commits(struct work_struct *work)
{
	struct reiserfs_journal *journal =
	    container_of(work, struct reiserfs_journal, j_work.work);
	struct super_block *sb = journal->j_work_sb;
	struct reiserfs_journal_list *jl;
	struct list_head *entry;

	reiserfs_write_lock(sb);
	if (!list_empty(&journal->j_journal_list)) {
		/* last entry is the youngest, commit it and you get everything */
		entry = journal->j_journal_list.prev;
		jl = JOURNAL_LIST_ENTRY(entry);
		flush_commit_list(sb, jl, 1);
	}
	reiserfs_write_unlock(sb);
}

/*
** flushes any old transactions to disk
** ends the current transaction if it is too old
*/
void reiserfs_flush_old_commits(struct super_block *sb)
{
	time_t now;
	struct reiserfs_transaction_handle th;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	now = get_seconds();
	/* safety check so we don't flush while we are replaying the log during
	 * mount
	 */
	if (list_empty(&journal->j_journal_list))
		return;

	/* check the current transaction.  If there are no writers, and it is
	 * too old, finish it, and force the commit blocks to disk
	 */
	if (atomic_read(&journal->j_wcount) <= 0 &&
	    journal->j_trans_start_time > 0 &&
	    journal->j_len > 0 &&
	    (now - journal->j_trans_start_time) > journal->j_max_trans_age) {
		if (!journal_join(&th, sb, 1)) {
			reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
						     1);
			journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb));

			/* we're only being called from kreiserfsd, it makes no sense to do
			 ** an async commit so that kreiserfsd can do it later
			 */
			do_journal_end(&th, sb, 1, COMMIT_NOW | WAIT);
		}
	}
}

/*
** returns 0 if do_journal_end should return right away, returns 1 if do_journal_end should finish the commit
**
** if the current transaction is too old, but still has writers, this will wait on j_join_wait until all
** the writers are done.  By the time it wakes up, the transaction it was called has already ended, so it just
** flushes the commit list and returns 0.
**
** Won't batch when flush or commit_now is set.  Also won't batch when others are waiting on j_join_wait.
**
** Note, we can't allow the journal_end to proceed while there are still writers in the log.
*/
static int check_journal_end(struct reiserfs_transaction_handle *th,
			     struct super_block *sb, unsigned long nblocks,
			     int flags)
{
	time_t now;
	int flush = flags & FLUSH_ALL;
	int commit_now = flags & COMMIT_NOW;
	int wait_on_commit = flags & WAIT;
	struct reiserfs_journal_list *jl;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);

	BUG_ON(!th->t_trans_id);

	if (th->t_trans_id != journal->j_trans_id) {
		reiserfs_panic(th->t_super, "journal-1577",
			       "handle trans id %ld != current trans id %ld",
			       th->t_trans_id, journal->j_trans_id);
	}

	journal->j_len_alloc -= (th->t_blocks_allocated - th->t_blocks_logged);
	if (atomic_read(&(journal->j_wcount)) > 0) {	/* <= 0 is allowed.  unmounting might not call begin */
		atomic_dec(&(journal->j_wcount));
	}

	/* BUG, deal with case where j_len is 0, but people previously freed blocks need to be released
	** will be dealt with by next transaction that actually writes something, but should be taken
	** care of in this trans
	*/
	BUG_ON(journal->j_len == 0);

	/* if wcount > 0, and we are called to with flush or commit_now,
	** we wait on j_join_wait.  We will wake up when the last writer has
	** finished the transaction, and started it on its way to the disk.
	** Then, we flush the commit or journal list, and just return 0
	** because the rest of journal end was already done for this transaction.
	*/
	if (atomic_read(&(journal->j_wcount)) > 0) {
		if (flush || commit_now) {
			unsigned trans_id;

			jl = journal->j_current_jl;
			trans_id = jl->j_trans_id;
			if (wait_on_commit)
				jl->j_state |= LIST_COMMIT_PENDING;
			atomic_set(&(journal->j_jlock), 1);
			if (flush) {
				journal->j_next_full_flush = 1;
			}
			unlock_journal(sb);

			/* sleep while the current transaction is still j_jlocked */
			while (journal->j_trans_id == trans_id) {
				if (atomic_read(&journal->j_jlock)) {
					queue_log_writer(sb);
				} else {
					/* re-assert the jlock under the journal
					 * lock if the transaction still hasn't
					 * ended, so new writers keep blocking */
					lock_journal(sb);
					if (journal->j_trans_id == trans_id) {
						atomic_set(&(journal->j_jlock),
							   1);
					}
					unlock_journal(sb);
				}
			}
			BUG_ON(journal->j_trans_id == trans_id);

			if (commit_now
			    && journal_list_still_alive(sb, trans_id)
			    && wait_on_commit) {
				flush_commit_list(sb, jl, 1);
			}
			return 0;
		}
		unlock_journal(sb);
		return 0;
	}

	/* deal with old transactions where we are the last writers */
	now = get_seconds();
	if ((now - journal->j_trans_start_time) > journal->j_max_trans_age) {
		commit_now = 1;
		journal->j_next_async_flush = 1;
	}
	/* don't batch when someone is waiting on j_join_wait */
	/* don't batch when syncing the commit or flushing the whole trans */
	if (!(journal->j_must_wait > 0) && !(atomic_read(&(journal->j_jlock)))
	    && !flush && !commit_now && (journal->j_len < journal->j_max_batch)
	    && journal->j_len_alloc < journal->j_max_batch
	    && journal->j_cnode_free > (journal->j_trans_max * 3)) {
		/* batching: leave the transaction open for more writers */
		journal->j_bcount++;
		unlock_journal(sb);
		return 0;
	}

	if (journal->j_start > SB_ONDISK_JOURNAL_SIZE(sb)) {
		reiserfs_panic(sb, "journal-003",
			       "j_start (%ld) is too high",
			       journal->j_start);
	}
	return 1;
}

/*
** Does all the work that makes deleting blocks safe.
** when deleting a block mark BH_JNew, just remove it from the current transaction, clean its buffer_head and move on.
**
** otherwise:
** set a bit for the block in the journal bitmap.  That will prevent it from being allocated for unformatted nodes
** before this transaction has finished.
**
** mark any cnodes for this block as BLOCK_FREED, and clear their bh pointers.  That will prevent any old transactions with
** this block from trying to flush to the real location.  Since we aren't removing the cnode from the journal_list_hash,
** the block can't be reallocated yet.
**
** Then remove it from the current transaction, decrementing any counters and filing it on the clean list.
*/
int journal_mark_freed(struct reiserfs_transaction_handle *th,
		       struct super_block *sb, b_blocknr_t blocknr)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_cnode *cn = NULL;
	struct buffer_head *bh = NULL;
	struct reiserfs_list_bitmap *jb = NULL;
	int cleaned = 0;

	BUG_ON(!th->t_trans_id);

	cn = get_journal_hash_dev(sb, journal->j_hash_table, blocknr);
	if (cn && cn->bh) {
		bh = cn->bh;
		get_bh(bh);	/* hold a ref so bh survives until release below */
	}
	/* if it is journal new, we just remove it from this transaction */
	if (bh && buffer_journal_new(bh)) {
		clear_buffer_journal_new(bh);
		clear_prepared_bits(bh);
		reiserfs_clean_and_file_buffer(bh);
		cleaned = remove_from_transaction(sb, blocknr, cleaned);
	} else {
		/* set the bit for this block in the journal bitmap for this transaction */
		jb = journal->j_current_jl->j_list_bitmap;
		if (!jb) {
			reiserfs_panic(sb, "journal-1702",
				       "journal_list_bitmap is NULL");
		}
		set_bit_in_list_bitmap(sb, blocknr, jb);

		/* Note, the entire while loop is not allowed to schedule.  */

		if (bh) {
			clear_prepared_bits(bh);
			reiserfs_clean_and_file_buffer(bh);
		}
		cleaned = remove_from_transaction(sb, blocknr, cleaned);

		/* find all older transactions with this block, make sure they don't try to write it out */
		cn = get_journal_hash_dev(sb, journal->j_list_hash_table,
					  blocknr);
		while (cn) {
			if (sb == cn->sb && blocknr == cn->blocknr) {
				set_bit(BLOCK_FREED, &cn->state);
				if (cn->bh) {
					if (!cleaned) {
						/* remove_from_transaction will brelse the buffer if it was
						** in the current trans
						*/
						clear_buffer_journal_dirty(cn->
									   bh);
						clear_buffer_dirty(cn->bh);
						clear_buffer_journal_test(cn->
									  bh);
						cleaned = 1;
						put_bh(cn->bh);
						if (atomic_read
						    (&(cn->bh->b_count)) < 0) {
							reiserfs_warning(sb,
								 "journal-2138",
								 "cn->bh->b_count < 0");
						}
					}
					if (cn->jlist) {	/* since we are clearing the bh, we MUST dec nonzerolen */
						atomic_dec(&
							   (cn->jlist->
							    j_nonzerolen));
					}
					cn->bh = NULL;
				}
			}
			cn = cn->hnext;
		}
	}

	if (bh)
		release_buffer_page(bh);	/* get_hash grabs the buffer */
	return 0;
}

/* Stamp the inode with the current transaction so a later fsync/commit can
 * tell which journal list it must flush. */
void reiserfs_update_inode_transaction(struct inode *inode)
{
	struct reiserfs_journal *journal = SB_JOURNAL(inode->i_sb);
	REISERFS_I(inode)->i_jl = journal->j_current_jl;
	REISERFS_I(inode)->i_trans_id = journal->j_trans_id;
}

/*
 * returns -1 on error, 0 if no commits/barriers were done and 1
 * if a transaction was actually committed and the barrier was done
 */
static int __commit_trans_jl(struct inode *inode, unsigned long id,
			     struct reiserfs_journal_list *jl)
{
	struct reiserfs_transaction_handle th;
	struct super_block *sb = inode->i_sb;
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	int ret = 0;

	/* is it from the current transaction, or from an unknown transaction? */
	if (id == journal->j_trans_id) {
		jl = journal->j_current_jl;
		/* try to let other writers come in and grow this transaction */
		let_transaction_grow(sb, id);
		if (journal->j_trans_id != id) {
			goto flush_commit_only;
		}

		ret = journal_begin(&th, sb, 1);
		if (ret)
			return ret;

		/* someone might have ended this transaction while we joined */
		if (journal->j_trans_id != id) {
			reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb),
						     1);
			journal_mark_dirty(&th, sb, SB_BUFFER_WITH_SB(sb));
			ret = journal_end(&th, sb, 1);
			goto flush_commit_only;
		}

		ret = journal_end_sync(&th, sb, 1);
		if (!ret)
			ret = 1;

	} else {
		/* this gets tricky, we have to make sure the journal list in
		 * the inode still exists.  We know the list is still around
		 * if we've got a larger transaction id than the oldest list
		 */
	      flush_commit_only:
		if (journal_list_still_alive(inode->i_sb, id)) {
			/*
			 * we only set ret to 1 when we know for sure
			 * the barrier hasn't been started yet on the commit
			 * block.
			 */
			if (atomic_read(&jl->j_commit_left) > 1)
				ret = 1;
			flush_commit_list(sb, jl, 1);
			if (journal->j_errno)
				ret = journal->j_errno;
		}
	}
	/* otherwise the list is gone, and long since committed */
	return ret;
}

/* Commit whatever transaction last touched this inode (fsync helper). */
int reiserfs_commit_for_inode(struct inode *inode)
{
	unsigned int id = REISERFS_I(inode)->i_trans_id;
	struct reiserfs_journal_list *jl = REISERFS_I(inode)->i_jl;

	/* for the whole inode, assume unset id means it was
	 * changed in the current transaction.  More conservative
	 */
	if (!id || !jl) {
		reiserfs_update_inode_transaction(inode);
		id = REISERFS_I(inode)->i_trans_id;
		/* jl will be updated in __commit_trans_jl */
	}

	return __commit_trans_jl(inode, id, jl);
}

/* Undo reiserfs_prepare_for_journal(): re-dirty the buffer if it was dirty
 * before being prepared and it is now safe to write it (see can_dirty). */
void reiserfs_restore_prepared_buffer(struct super_block *sb,
				      struct buffer_head *bh)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	PROC_INFO_INC(sb, journal.restore_prepared);
	if (!bh) {
		return;
	}
	if (test_clear_buffer_journal_restore_dirty(bh) &&
	    buffer_journal_dirty(bh)) {
		struct reiserfs_journal_cnode *cn;
		cn = get_journal_hash_dev(sb,
					  journal->j_list_hash_table,
					  bh->b_blocknr);
		if (cn && can_dirty(cn)) {
			set_buffer_journal_test(bh);
			mark_buffer_dirty(bh);
		}
	}
	clear_buffer_journal_prepared(bh);
}

extern struct tree_balance *cur_tb;
/*
** before we can change a metadata block, we have to make sure it won't
** be written to disk while we are altering it.  So, we must:
** clean it
** wait on it.
**
*/
int reiserfs_prepare_for_journal(struct super_block *sb,
				 struct buffer_head *bh, int wait)
{
	PROC_INFO_INC(sb, journal.prepare);

	if (!trylock_buffer(bh)) {
		if (!wait)
			return 0;
		lock_buffer(bh);
	}
	set_buffer_journal_prepared(bh);
	if (test_clear_buffer_dirty(bh) && buffer_journal_dirty(bh)) {
		/* remember the dirty state so restore_prepared can redo it */
		clear_buffer_journal_test(bh);
		set_buffer_journal_restore_dirty(bh);
	}
	unlock_buffer(bh);
	return 1;
}

/* Flush journal lists that have aged past 4x the max transaction age.
 * Stops at the first list that is too young or not yet committed, since
 * the list is ordered oldest-first. */
static void flush_old_journal_lists(struct super_block *s)
{
	struct reiserfs_journal *journal = SB_JOURNAL(s);
	struct reiserfs_journal_list *jl;
	struct list_head *entry;
	time_t now = get_seconds();

	while (!list_empty(&journal->j_journal_list)) {
		entry = journal->j_journal_list.next;
		jl = JOURNAL_LIST_ENTRY(entry);
		/* this check should always be run, to send old lists to disk */
		if (jl->j_timestamp < (now - (JOURNAL_MAX_TRANS_AGE * 4)) &&
		    atomic_read(&jl->j_commit_left) == 0 &&
		    test_transaction(s, jl)) {
			flush_used_journal_lists(s, jl);
		} else {
			break;
		}
	}
}

/*
** long and ugly.
If flush, will not return until all commit
** blocks and all real buffers in the trans are on disk.
** If no_async, won't return until all commit blocks are on disk.
**
** keep reading, there are comments as you go along
**
** If the journal is aborted, we just clean up.  Things like flushing
** journal lists, etc just won't happen.
*/
static int do_journal_end(struct reiserfs_transaction_handle *th,
			  struct super_block *sb, unsigned long nblocks,
			  int flags)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	struct reiserfs_journal_cnode *cn, *next, *jl_cn;
	struct reiserfs_journal_cnode *last_cn = NULL;
	struct reiserfs_journal_desc *desc;
	struct reiserfs_journal_commit *commit;
	struct buffer_head *c_bh;	/* commit bh */
	struct buffer_head *d_bh;	/* desc bh */
	int cur_write_start = 0;	/* start index of current log write */
	int old_start;
	int i;
	int flush;
	int wait_on_commit;
	struct reiserfs_journal_list *jl, *temp_jl;
	struct list_head *entry, *safe;
	unsigned long jindex;
	unsigned int commit_trans_id;
	int trans_half;

	BUG_ON(th->t_refcount > 1);
	BUG_ON(!th->t_trans_id);

	/* protect flush_older_commits from doing mistakes if the
	   transaction ID counter gets overflowed.  */
	if (th->t_trans_id == ~0U)
		flags |= FLUSH_ALL | COMMIT_NOW | WAIT;
	flush = flags & FLUSH_ALL;
	wait_on_commit = flags & WAIT;

	current->journal_info = th->t_handle_save;
	reiserfs_check_lock_depth(sb, "journal end");
	if (journal->j_len == 0) {
		/* empty transaction: log the superblock buffer so j_len != 0 */
		reiserfs_prepare_for_journal(sb, SB_BUFFER_WITH_SB(sb), 1);
		journal_mark_dirty(th, sb, SB_BUFFER_WITH_SB(sb));
	}

	lock_journal(sb);
	if (journal->j_next_full_flush) {
		flags |= FLUSH_ALL;
		flush = 1;
	}
	if (journal->j_next_async_flush) {
		flags |= COMMIT_NOW | WAIT;
		wait_on_commit = 1;
	}

	/* check_journal_end locks the journal, and unlocks if it does not return 1
	** it tells us if we should continue with the journal_end, or just return
	*/
	if (!check_journal_end(th, sb, nblocks, flags)) {
		reiserfs_schedule_old_flush(sb);
		wake_queued_writers(sb);
		reiserfs_async_progress_wait(sb);
		goto out;
	}

	/* check_journal_end might set these, check again */
	if (journal->j_next_full_flush) {
		flush = 1;
	}

	/*
	** j must wait means we have to flush the log blocks, and the real blocks for
	** this transaction
	*/
	if (journal->j_must_wait > 0) {
		flush = 1;
	}
#ifdef REISERFS_PREALLOCATE
	/* quota ops might need to nest, setup the journal_info pointer for them
	 * and raise the refcount so that it is > 0. */
	current->journal_info = th;
	th->t_refcount++;
	reiserfs_discard_all_prealloc(th);	/* it should not involve new blocks into
						 * the transaction */
	th->t_refcount--;
	current->journal_info = th->t_handle_save;
#endif

	/* setup description block */
	d_bh =
	    journal_getblk(sb,
			   SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
			   journal->j_start);
	set_buffer_uptodate(d_bh);
	desc = (struct reiserfs_journal_desc *)(d_bh)->b_data;
	memset(d_bh->b_data, 0, d_bh->b_size);
	memcpy(get_journal_desc_magic(d_bh), JOURNAL_DESC_MAGIC, 8);
	set_desc_trans_id(desc, journal->j_trans_id);

	/* setup commit block.  Don't write (keep it clean too) this one
	   until after everyone else is written */
	c_bh = journal_getblk(sb, SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
			      ((journal->j_start + journal->j_len +
				1) % SB_ONDISK_JOURNAL_SIZE(sb)));
	commit = (struct reiserfs_journal_commit *)c_bh->b_data;
	memset(c_bh->b_data, 0, c_bh->b_size);
	set_commit_trans_id(commit, journal->j_trans_id);
	set_buffer_uptodate(c_bh);

	/* init this journal list */
	jl = journal->j_current_jl;

	/* we lock the commit before doing anything because
	 * we want to make sure nobody tries to run flush_commit_list until
	 * the new transaction is fully setup, and we've already flushed the
	 * ordered bh list
	 */
	reiserfs_mutex_lock_safe(&jl->j_commit_mutex, sb);

	/* save the transaction id in case we need to commit it later */
	commit_trans_id = jl->j_trans_id;

	atomic_set(&jl->j_older_commits_done, 0);
	jl->j_trans_id = journal->j_trans_id;
	jl->j_timestamp = journal->j_trans_start_time;
	jl->j_commit_bh = c_bh;
	jl->j_start = journal->j_start;
	jl->j_len = journal->j_len;
	atomic_set(&jl->j_nonzerolen, journal->j_len);
	/* + 2 accounts for the desc and commit blocks themselves */
	atomic_set(&jl->j_commit_left, journal->j_len + 2);
	jl->j_realblock = NULL;

	/* The ENTIRE FOR LOOP MUST not cause schedule to occur.
	 **  for each real block, add it to the journal list hash,
	 ** copy into real block index array in the commit or desc block
	 */
	trans_half = journal_trans_half(sb->s_blocksize);
	for (i = 0, cn = journal->j_first; cn; cn = cn->next, i++) {
		if (buffer_journaled(cn->bh)) {
			jl_cn = get_cnode(sb);
			if (!jl_cn) {
				reiserfs_panic(sb, "journal-1676",
					       "get_cnode returned NULL");
			}
			if (i == 0) {
				jl->j_realblock = jl_cn;
			}
			jl_cn->prev = last_cn;
			jl_cn->next = NULL;
			if (last_cn) {
				last_cn->next = jl_cn;
			}
			last_cn = jl_cn;
			/* make sure the block we are trying to log is not a block
			   of journal or reserved area */

			if (is_block_in_log_or_reserved_area
			    (sb, cn->bh->b_blocknr)) {
				reiserfs_panic(sb, "journal-2332",
					       "Trying to log block %lu, "
					       "which is a log block",
					       cn->bh->b_blocknr);
			}
			jl_cn->blocknr = cn->bh->b_blocknr;
			jl_cn->state = 0;
			jl_cn->sb = sb;
			jl_cn->bh = cn->bh;
			jl_cn->jlist = jl;
			insert_journal_hash(journal->j_list_hash_table, jl_cn);
			if (i < trans_half) {
				desc->j_realblock[i] =
				    cpu_to_le32(cn->bh->b_blocknr);
			} else {
				commit->j_realblock[i - trans_half] =
				    cpu_to_le32(cn->bh->b_blocknr);
			}
		} else {
			i--;	/* keep i counting only journaled buffers */
		}
	}
	set_desc_trans_len(desc, journal->j_len);
	set_desc_mount_id(desc, journal->j_mount_id);
	set_desc_trans_id(desc, journal->j_trans_id);
	set_commit_trans_len(commit, journal->j_len);

	/* special check in case all buffers in the journal
	 * were marked for not logging */
	BUG_ON(journal->j_len == 0);

	/* we're about to dirty all the log blocks, mark the description block
	 * dirty now too.  Don't mark the commit block dirty until all the
	 * others are on disk
	 */
	mark_buffer_dirty(d_bh);

	/* first data block is j_start + 1, so add one to cur_write_start
	   wherever you use it */
	cur_write_start = journal->j_start;
	cn = journal->j_first;
	jindex = 1;	/* start at one so we don't get the desc again */
	while (cn) {
		clear_buffer_journal_new(cn->bh);
		/* copy all the real blocks into log area.  dirty log blocks */
		if (buffer_journaled(cn->bh)) {
			struct buffer_head *tmp_bh;
			char *addr;
			struct page *page;
			tmp_bh =
			    journal_getblk(sb,
					   SB_ONDISK_JOURNAL_1st_BLOCK(sb) +
					   ((cur_write_start +
					     jindex) %
					    SB_ONDISK_JOURNAL_SIZE(sb)));
			set_buffer_uptodate(tmp_bh);
			page = cn->bh->b_page;
			addr = kmap(page);
			memcpy(tmp_bh->b_data,
			       addr + offset_in_page(cn->bh->b_data),
			       cn->bh->b_size);
			kunmap(page);
			mark_buffer_dirty(tmp_bh);
			jindex++;
			set_buffer_journal_dirty(cn->bh);
			clear_buffer_journaled(cn->bh);
		} else {
			/* JDirty cleared sometime during transaction.
			   don't log this one */
			reiserfs_warning(sb, "journal-2048",
					 "BAD, buffer in journal hash, "
					 "but not JDirty!");
			brelse(cn->bh);
		}
		next = cn->next;
		free_cnode(sb, cn);
		cn = next;
		reiserfs_write_unlock(sb);
		cond_resched();
		reiserfs_write_lock(sb);
	}

	/* we are done with both the c_bh and d_bh, but
	 ** c_bh must be written after all other commit blocks,
	 ** so we dirty/relse c_bh in flush_commit_list, with commit_left <= 1.
	 */

	journal->j_current_jl = alloc_journal_list(sb);

	/* now it is safe to insert this transaction on the main list */
	list_add_tail(&jl->j_list, &journal->j_journal_list);
	list_add_tail(&jl->j_working_list, &journal->j_working_list);
	journal->j_num_work_lists++;

	/* reset journal values for the next transaction */
	old_start = journal->j_start;
	journal->j_start =
	    (journal->j_start + journal->j_len +
	     2) % SB_ONDISK_JOURNAL_SIZE(sb);
	atomic_set(&(journal->j_wcount), 0);
	journal->j_bcount = 0;
	journal->j_last = NULL;
	journal->j_first = NULL;
	journal->j_len = 0;
	journal->j_trans_start_time = 0;
	/* check for trans_id overflow */
	if (++journal->j_trans_id == 0)
		journal->j_trans_id = 10;
	journal->j_current_jl->j_trans_id = journal->j_trans_id;
	journal->j_must_wait = 0;
	journal->j_len_alloc = 0;
	journal->j_next_full_flush = 0;
	journal->j_next_async_flush = 0;
	init_journal_hash(sb);

	// make sure reiserfs_add_jh sees the new current_jl before we
	// write out the tails
	smp_mb();

	/* tail conversion targets have to hit the disk before we end the
	 * transaction.  Otherwise a later transaction might repack the tail
	 * before this transaction commits, leaving the data block unflushed and
	 * clean, if we crash before the later transaction commits, the data block
	 * is lost.
	 */
	if (!list_empty(&jl->j_tail_bh_list)) {
		reiserfs_write_unlock(sb);
		write_ordered_buffers(&journal->j_dirty_buffers_lock,
				      journal, jl, &jl->j_tail_bh_list);
		reiserfs_write_lock(sb);
	}
	BUG_ON(!list_empty(&jl->j_tail_bh_list));
	mutex_unlock(&jl->j_commit_mutex);

	/* honor the flush wishes from the caller, simple commits can
	 ** be done outside the journal lock, they are done below
	 **
	 ** if we don't flush the commit list right now, we put it into
	 ** the work queue so the people waiting on the async progress work
	 ** queue don't wait for this proc to flush journal lists and such.
	 */
	if (flush) {
		flush_commit_list(sb, jl, 1);
		flush_journal_list(sb, jl, 1);
	} else if (!(jl->j_state & LIST_COMMIT_PENDING))
		queue_delayed_work(commit_wq, &journal->j_work, HZ / 10);

	/* if the next transaction has any chance of wrapping, flush
	 ** transactions that might get overwritten.  If any journal lists
	 ** are very old flush them as well.
	 */
      first_jl:
	list_for_each_safe(entry, safe, &journal->j_journal_list) {
		temp_jl = JOURNAL_LIST_ENTRY(entry);
		if (journal->j_start <= temp_jl->j_start) {
			if ((journal->j_start + journal->j_trans_max + 1) >=
			    temp_jl->j_start) {
				flush_used_journal_lists(sb, temp_jl);
				goto first_jl;
			} else if ((journal->j_start +
				    journal->j_trans_max + 1) <
				   SB_ONDISK_JOURNAL_SIZE(sb)) {
				/* if we don't cross into the next transaction and we don't
				 * wrap, there is no way we can overlap any later transactions
				 * break now
				 */
				break;
			}
		} else if ((journal->j_start +
			    journal->j_trans_max + 1) >
			   SB_ONDISK_JOURNAL_SIZE(sb)) {
			if (((journal->j_start + journal->j_trans_max + 1) %
			     SB_ONDISK_JOURNAL_SIZE(sb)) >=
			    temp_jl->j_start) {
				flush_used_journal_lists(sb, temp_jl);
				goto first_jl;
			} else {
				/* we don't overlap anything from out start to the end of the
				 * log, and our wrapped portion doesn't overlap anything at
				 * the start of the log.  We can break
				 */
				break;
			}
		}
	}
	flush_old_journal_lists(sb);

	journal->j_current_jl->j_list_bitmap =
	    get_list_bitmap(sb, journal->j_current_jl);

	if (!(journal->j_current_jl->j_list_bitmap)) {
		reiserfs_panic(sb, "journal-1996",
			       "could not get a list bitmap");
	}

	atomic_set(&(journal->j_jlock), 0);
	unlock_journal(sb);
	/* wake up anybody waiting to join. */
	clear_bit(J_WRITERS_QUEUED, &journal->j_state);
	wake_up(&(journal->j_join_wait));

	if (!flush && wait_on_commit &&
	    journal_list_still_alive(sb, commit_trans_id)) {
		flush_commit_list(sb, jl, 1);
	}
      out:
	reiserfs_check_lock_depth(sb, "journal end2");

	memset(th, 0, sizeof(*th));
	/* Re-set th->t_super, so we can properly keep track of how many
	 * persistent transactions there are. We need to do this so if this
	 * call is part of a failed restart_transaction, we can free it later */
	th->t_super = sb;

	return journal->j_errno;
}

/* Send the file system read only and refuse new transactions */
void reiserfs_abort_journal(struct super_block *sb, int errno)
{
	struct reiserfs_journal *journal = SB_JOURNAL(sb);
	if (test_bit(J_ABORTED, &journal->j_state))
		return;

	if (!journal->j_errno)
		journal->j_errno = errno;

	sb->s_flags |= MS_RDONLY;
	set_bit(J_ABORTED, &journal->j_state);

#ifdef CONFIG_REISERFS_CHECK
	dump_stack();
#endif
}
gpl-2.0
alexax66/kernel_samsung_a3xelte
drivers/input/keyboard/adp5589-keys.c
2409
30335
/* * Description: keypad driver for ADP5589, ADP5585 * I2C QWERTY Keypad and IO Expander * Bugs: Enter bugs at http://blackfin.uclinux.org/ * * Copyright (C) 2010-2011 Analog Devices Inc. * Licensed under the GPL-2. */ #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/workqueue.h> #include <linux/errno.h> #include <linux/pm.h> #include <linux/platform_device.h> #include <linux/input.h> #include <linux/i2c.h> #include <linux/gpio.h> #include <linux/slab.h> #include <linux/input/adp5589.h> /* ADP5589/ADP5585 Common Registers */ #define ADP5589_5_ID 0x00 #define ADP5589_5_INT_STATUS 0x01 #define ADP5589_5_STATUS 0x02 #define ADP5589_5_FIFO_1 0x03 #define ADP5589_5_FIFO_2 0x04 #define ADP5589_5_FIFO_3 0x05 #define ADP5589_5_FIFO_4 0x06 #define ADP5589_5_FIFO_5 0x07 #define ADP5589_5_FIFO_6 0x08 #define ADP5589_5_FIFO_7 0x09 #define ADP5589_5_FIFO_8 0x0A #define ADP5589_5_FIFO_9 0x0B #define ADP5589_5_FIFO_10 0x0C #define ADP5589_5_FIFO_11 0x0D #define ADP5589_5_FIFO_12 0x0E #define ADP5589_5_FIFO_13 0x0F #define ADP5589_5_FIFO_14 0x10 #define ADP5589_5_FIFO_15 0x11 #define ADP5589_5_FIFO_16 0x12 #define ADP5589_5_GPI_INT_STAT_A 0x13 #define ADP5589_5_GPI_INT_STAT_B 0x14 /* ADP5589 Registers */ #define ADP5589_GPI_INT_STAT_C 0x15 #define ADP5589_GPI_STATUS_A 0x16 #define ADP5589_GPI_STATUS_B 0x17 #define ADP5589_GPI_STATUS_C 0x18 #define ADP5589_RPULL_CONFIG_A 0x19 #define ADP5589_RPULL_CONFIG_B 0x1A #define ADP5589_RPULL_CONFIG_C 0x1B #define ADP5589_RPULL_CONFIG_D 0x1C #define ADP5589_RPULL_CONFIG_E 0x1D #define ADP5589_GPI_INT_LEVEL_A 0x1E #define ADP5589_GPI_INT_LEVEL_B 0x1F #define ADP5589_GPI_INT_LEVEL_C 0x20 #define ADP5589_GPI_EVENT_EN_A 0x21 #define ADP5589_GPI_EVENT_EN_B 0x22 #define ADP5589_GPI_EVENT_EN_C 0x23 #define ADP5589_GPI_INTERRUPT_EN_A 0x24 #define ADP5589_GPI_INTERRUPT_EN_B 0x25 #define ADP5589_GPI_INTERRUPT_EN_C 0x26 #define ADP5589_DEBOUNCE_DIS_A 0x27 #define 
ADP5589_DEBOUNCE_DIS_B 0x28 #define ADP5589_DEBOUNCE_DIS_C 0x29 #define ADP5589_GPO_DATA_OUT_A 0x2A #define ADP5589_GPO_DATA_OUT_B 0x2B #define ADP5589_GPO_DATA_OUT_C 0x2C #define ADP5589_GPO_OUT_MODE_A 0x2D #define ADP5589_GPO_OUT_MODE_B 0x2E #define ADP5589_GPO_OUT_MODE_C 0x2F #define ADP5589_GPIO_DIRECTION_A 0x30 #define ADP5589_GPIO_DIRECTION_B 0x31 #define ADP5589_GPIO_DIRECTION_C 0x32 #define ADP5589_UNLOCK1 0x33 #define ADP5589_UNLOCK2 0x34 #define ADP5589_EXT_LOCK_EVENT 0x35 #define ADP5589_UNLOCK_TIMERS 0x36 #define ADP5589_LOCK_CFG 0x37 #define ADP5589_RESET1_EVENT_A 0x38 #define ADP5589_RESET1_EVENT_B 0x39 #define ADP5589_RESET1_EVENT_C 0x3A #define ADP5589_RESET2_EVENT_A 0x3B #define ADP5589_RESET2_EVENT_B 0x3C #define ADP5589_RESET_CFG 0x3D #define ADP5589_PWM_OFFT_LOW 0x3E #define ADP5589_PWM_OFFT_HIGH 0x3F #define ADP5589_PWM_ONT_LOW 0x40 #define ADP5589_PWM_ONT_HIGH 0x41 #define ADP5589_PWM_CFG 0x42 #define ADP5589_CLOCK_DIV_CFG 0x43 #define ADP5589_LOGIC_1_CFG 0x44 #define ADP5589_LOGIC_2_CFG 0x45 #define ADP5589_LOGIC_FF_CFG 0x46 #define ADP5589_LOGIC_INT_EVENT_EN 0x47 #define ADP5589_POLL_PTIME_CFG 0x48 #define ADP5589_PIN_CONFIG_A 0x49 #define ADP5589_PIN_CONFIG_B 0x4A #define ADP5589_PIN_CONFIG_C 0x4B #define ADP5589_PIN_CONFIG_D 0x4C #define ADP5589_GENERAL_CFG 0x4D #define ADP5589_INT_EN 0x4E /* ADP5585 Registers */ #define ADP5585_GPI_STATUS_A 0x15 #define ADP5585_GPI_STATUS_B 0x16 #define ADP5585_RPULL_CONFIG_A 0x17 #define ADP5585_RPULL_CONFIG_B 0x18 #define ADP5585_RPULL_CONFIG_C 0x19 #define ADP5585_RPULL_CONFIG_D 0x1A #define ADP5585_GPI_INT_LEVEL_A 0x1B #define ADP5585_GPI_INT_LEVEL_B 0x1C #define ADP5585_GPI_EVENT_EN_A 0x1D #define ADP5585_GPI_EVENT_EN_B 0x1E #define ADP5585_GPI_INTERRUPT_EN_A 0x1F #define ADP5585_GPI_INTERRUPT_EN_B 0x20 #define ADP5585_DEBOUNCE_DIS_A 0x21 #define ADP5585_DEBOUNCE_DIS_B 0x22 #define ADP5585_GPO_DATA_OUT_A 0x23 #define ADP5585_GPO_DATA_OUT_B 0x24 #define ADP5585_GPO_OUT_MODE_A 0x25 #define 
ADP5585_GPO_OUT_MODE_B 0x26 #define ADP5585_GPIO_DIRECTION_A 0x27 #define ADP5585_GPIO_DIRECTION_B 0x28 #define ADP5585_RESET1_EVENT_A 0x29 #define ADP5585_RESET1_EVENT_B 0x2A #define ADP5585_RESET1_EVENT_C 0x2B #define ADP5585_RESET2_EVENT_A 0x2C #define ADP5585_RESET2_EVENT_B 0x2D #define ADP5585_RESET_CFG 0x2E #define ADP5585_PWM_OFFT_LOW 0x2F #define ADP5585_PWM_OFFT_HIGH 0x30 #define ADP5585_PWM_ONT_LOW 0x31 #define ADP5585_PWM_ONT_HIGH 0x32 #define ADP5585_PWM_CFG 0x33 #define ADP5585_LOGIC_CFG 0x34 #define ADP5585_LOGIC_FF_CFG 0x35 #define ADP5585_LOGIC_INT_EVENT_EN 0x36 #define ADP5585_POLL_PTIME_CFG 0x37 #define ADP5585_PIN_CONFIG_A 0x38 #define ADP5585_PIN_CONFIG_B 0x39 #define ADP5585_PIN_CONFIG_D 0x3A #define ADP5585_GENERAL_CFG 0x3B #define ADP5585_INT_EN 0x3C /* ID Register */ #define ADP5589_5_DEVICE_ID_MASK 0xF #define ADP5589_5_MAN_ID_MASK 0xF #define ADP5589_5_MAN_ID_SHIFT 4 #define ADP5589_5_MAN_ID 0x02 /* GENERAL_CFG Register */ #define OSC_EN (1 << 7) #define CORE_CLK(x) (((x) & 0x3) << 5) #define LCK_TRK_LOGIC (1 << 4) /* ADP5589 only */ #define LCK_TRK_GPI (1 << 3) /* ADP5589 only */ #define INT_CFG (1 << 1) #define RST_CFG (1 << 0) /* INT_EN Register */ #define LOGIC2_IEN (1 << 5) /* ADP5589 only */ #define LOGIC1_IEN (1 << 4) #define LOCK_IEN (1 << 3) /* ADP5589 only */ #define OVRFLOW_IEN (1 << 2) #define GPI_IEN (1 << 1) #define EVENT_IEN (1 << 0) /* Interrupt Status Register */ #define LOGIC2_INT (1 << 5) /* ADP5589 only */ #define LOGIC1_INT (1 << 4) #define LOCK_INT (1 << 3) /* ADP5589 only */ #define OVRFLOW_INT (1 << 2) #define GPI_INT (1 << 1) #define EVENT_INT (1 << 0) /* STATUS Register */ #define LOGIC2_STAT (1 << 7) /* ADP5589 only */ #define LOGIC1_STAT (1 << 6) #define LOCK_STAT (1 << 5) /* ADP5589 only */ #define KEC 0xF /* PIN_CONFIG_D Register */ #define C4_EXTEND_CFG (1 << 6) /* RESET2 */ #define R4_EXTEND_CFG (1 << 5) /* RESET1 */ /* LOCK_CFG */ #define LOCK_EN (1 << 0) #define PTIME_MASK 0x3 #define LTIME_MASK 0x3 /* 
ADP5589 only */

/* Key Event Register xy */
#define KEY_EV_PRESSED		(1 << 7)
#define KEY_EV_MASK		(0x7F)

#define KEYP_MAX_EVENT		16
#define ADP5589_MAXGPIO		19
#define ADP5585_MAXGPIO		11 /* 10 on the ADP5585-01, 11 on ADP5585-02 */

enum {
	ADP5589,
	ADP5585_01,
	ADP5585_02
};

/* Per-variant constants and register-mapping callbacks so the common code
 * can drive either the ADP5589 or the ADP5585. */
struct adp_constants {
	u8 maxgpio;
	u8 keymapsize;
	u8 gpi_pin_row_base;
	u8 gpi_pin_row_end;
	u8 gpi_pin_col_base;
	u8 gpi_pin_base;
	u8 gpi_pin_end;
	u8 gpimapsize_max;
	u8 max_row_num;
	u8 max_col_num;
	u8 row_mask;
	u8 col_mask;
	u8 col_shift;
	u8 c4_extend_cfg;
	u8 (*bank) (u8 offset);	/* GPIO number -> register bank index */
	u8 (*bit) (u8 offset);	/* GPIO number -> bit mask within bank */
	u8 (*reg) (u8 reg);	/* ADP5589 register -> variant register */
};

/* Driver state for one keypad/IO-expander chip. */
struct adp5589_kpad {
	struct i2c_client *client;
	struct input_dev *input;
	const struct adp_constants *var;
	unsigned short keycode[ADP5589_KEYMAPSIZE];
	const struct adp5589_gpi_map *gpimap;
	unsigned short gpimapsize;
	unsigned extend_cfg;
	bool is_adp5585;
	bool adp5585_support_row5;
#ifdef CONFIG_GPIOLIB
	unsigned char gpiomap[ADP5589_MAXGPIO];
	bool export_gpio;
	struct gpio_chip gc;
	struct mutex gpio_lock;	/* Protect cached dir, dat_out */
	u8 dat_out[3];
	u8 dir[3];
#endif
};

/*
 *  ADP5589 / ADP5585 derivative / variant handling
 */

/* ADP5589 */

/* 3 banks (A/B/C) of 8 GPIOs each: bank is simply offset / 8 */
static unsigned char adp5589_bank(unsigned char offset)
{
	return offset >> 3;
}

static unsigned char adp5589_bit(unsigned char offset)
{
	return 1u << (offset & 0x7);
}

/* ADP5589 register map is the reference map: identity */
static unsigned char adp5589_reg(unsigned char reg)
{
	return reg;
}

static const struct adp_constants const_adp5589 = {
	.maxgpio = ADP5589_MAXGPIO,
	.keymapsize = ADP5589_KEYMAPSIZE,
	.gpi_pin_row_base = ADP5589_GPI_PIN_ROW_BASE,
	.gpi_pin_row_end = ADP5589_GPI_PIN_ROW_END,
	.gpi_pin_col_base = ADP5589_GPI_PIN_COL_BASE,
	.gpi_pin_base = ADP5589_GPI_PIN_BASE,
	.gpi_pin_end = ADP5589_GPI_PIN_END,
	.gpimapsize_max = ADP5589_GPIMAPSIZE_MAX,
	.c4_extend_cfg = 12,
	.max_row_num = ADP5589_MAX_ROW_NUM,
	.max_col_num = ADP5589_MAX_COL_NUM,
	.row_mask = ADP5589_ROW_MASK,
	.col_mask = ADP5589_COL_MASK,
	.col_shift = ADP5589_COL_SHIFT,
	.bank = adp5589_bank,
	.bit = adp5589_bit,
	.reg = adp5589_reg,
};

/* ADP5585 */

/* two banks: rows in bank 0, columns in bank 1 */
static unsigned char adp5585_bank(unsigned char offset)
{
	return offset > ADP5585_MAX_ROW_NUM;
}

static unsigned char adp5585_bit(unsigned char offset)
{
	return (offset > ADP5585_MAX_ROW_NUM) ?
		1u << (offset - ADP5585_COL_SHIFT) : 1u << offset;
}

/* Translation table: ADP5589 register address -> ADP5585 equivalent */
static const unsigned char adp5585_reg_lut[] = {
	[ADP5589_GPI_STATUS_A] = ADP5585_GPI_STATUS_A,
	[ADP5589_GPI_STATUS_B] = ADP5585_GPI_STATUS_B,
	[ADP5589_RPULL_CONFIG_A] = ADP5585_RPULL_CONFIG_A,
	[ADP5589_RPULL_CONFIG_B] = ADP5585_RPULL_CONFIG_B,
	[ADP5589_RPULL_CONFIG_C] = ADP5585_RPULL_CONFIG_C,
	[ADP5589_RPULL_CONFIG_D] = ADP5585_RPULL_CONFIG_D,
	[ADP5589_GPI_INT_LEVEL_A] = ADP5585_GPI_INT_LEVEL_A,
	[ADP5589_GPI_INT_LEVEL_B] = ADP5585_GPI_INT_LEVEL_B,
	[ADP5589_GPI_EVENT_EN_A] = ADP5585_GPI_EVENT_EN_A,
	[ADP5589_GPI_EVENT_EN_B] = ADP5585_GPI_EVENT_EN_B,
	[ADP5589_GPI_INTERRUPT_EN_A] = ADP5585_GPI_INTERRUPT_EN_A,
	[ADP5589_GPI_INTERRUPT_EN_B] = ADP5585_GPI_INTERRUPT_EN_B,
	[ADP5589_DEBOUNCE_DIS_A] = ADP5585_DEBOUNCE_DIS_A,
	[ADP5589_DEBOUNCE_DIS_B] = ADP5585_DEBOUNCE_DIS_B,
	[ADP5589_GPO_DATA_OUT_A] = ADP5585_GPO_DATA_OUT_A,
	[ADP5589_GPO_DATA_OUT_B] = ADP5585_GPO_DATA_OUT_B,
	[ADP5589_GPO_OUT_MODE_A] = ADP5585_GPO_OUT_MODE_A,
	[ADP5589_GPO_OUT_MODE_B] = ADP5585_GPO_OUT_MODE_B,
	[ADP5589_GPIO_DIRECTION_A] = ADP5585_GPIO_DIRECTION_A,
	[ADP5589_GPIO_DIRECTION_B] = ADP5585_GPIO_DIRECTION_B,
	[ADP5589_RESET1_EVENT_A] = ADP5585_RESET1_EVENT_A,
	[ADP5589_RESET1_EVENT_B] = ADP5585_RESET1_EVENT_B,
	[ADP5589_RESET1_EVENT_C] = ADP5585_RESET1_EVENT_C,
	[ADP5589_RESET2_EVENT_A] = ADP5585_RESET2_EVENT_A,
	[ADP5589_RESET2_EVENT_B] = ADP5585_RESET2_EVENT_B,
	[ADP5589_RESET_CFG] = ADP5585_RESET_CFG,
	[ADP5589_PWM_OFFT_LOW] = ADP5585_PWM_OFFT_LOW,
	[ADP5589_PWM_OFFT_HIGH] = ADP5585_PWM_OFFT_HIGH,
	[ADP5589_PWM_ONT_LOW] = ADP5585_PWM_ONT_LOW,
	[ADP5589_PWM_ONT_HIGH] = ADP5585_PWM_ONT_HIGH,
	[ADP5589_PWM_CFG] = ADP5585_PWM_CFG,
	[ADP5589_LOGIC_1_CFG] = ADP5585_LOGIC_CFG,
	[ADP5589_LOGIC_FF_CFG] = ADP5585_LOGIC_FF_CFG,
	[ADP5589_LOGIC_INT_EVENT_EN] = ADP5585_LOGIC_INT_EVENT_EN,
	[ADP5589_POLL_PTIME_CFG] = ADP5585_POLL_PTIME_CFG,
	[ADP5589_PIN_CONFIG_A] = ADP5585_PIN_CONFIG_A,
	[ADP5589_PIN_CONFIG_B] = ADP5585_PIN_CONFIG_B,
	[ADP5589_PIN_CONFIG_D] = ADP5585_PIN_CONFIG_D,
	[ADP5589_GENERAL_CFG] = ADP5585_GENERAL_CFG,
	[ADP5589_INT_EN] = ADP5585_INT_EN,
};

static unsigned char adp5585_reg(unsigned char reg)
{
	return adp5585_reg_lut[reg];
}

static const struct adp_constants const_adp5585 = {
	.maxgpio = ADP5585_MAXGPIO,
	.keymapsize = ADP5585_KEYMAPSIZE,
	.gpi_pin_row_base = ADP5585_GPI_PIN_ROW_BASE,
	.gpi_pin_row_end = ADP5585_GPI_PIN_ROW_END,
	.gpi_pin_col_base = ADP5585_GPI_PIN_COL_BASE,
	.gpi_pin_base = ADP5585_GPI_PIN_BASE,
	.gpi_pin_end = ADP5585_GPI_PIN_END,
	.gpimapsize_max = ADP5585_GPIMAPSIZE_MAX,
	.c4_extend_cfg = 10,
	.max_row_num = ADP5585_MAX_ROW_NUM,
	.max_col_num = ADP5585_MAX_COL_NUM,
	.row_mask = ADP5585_ROW_MASK,
	.col_mask = ADP5585_COL_MASK,
	.col_shift = ADP5585_COL_SHIFT,
	.bank = adp5585_bank,
	.bit = adp5585_bit,
	.reg = adp5585_reg,
};

/* SMBus byte read; returns the byte or a negative errno (logged) */
static int adp5589_read(struct i2c_client *client, u8 reg)
{
	int ret = i2c_smbus_read_byte_data(client, reg);

	if (ret < 0)
		dev_err(&client->dev, "Read Error\n");

	return ret;
}

static int adp5589_write(struct i2c_client *client, u8 reg, u8 val)
{
	return i2c_smbus_write_byte_data(client, reg, val);
}

#ifdef CONFIG_GPIOLIB
static int adp5589_gpio_get_value(struct gpio_chip *chip, unsigned off)
{
	struct adp5589_kpad *kpad = container_of(chip, struct adp5589_kpad, gc);
	unsigned int bank = kpad->var->bank(kpad->gpiomap[off]);
	unsigned int bit = kpad->var->bit(kpad->gpiomap[off]);

	return !!(adp5589_read(kpad->client,
			       kpad->var->reg(ADP5589_GPI_STATUS_A) + bank) &
			       bit);
}

static void adp5589_gpio_set_value(struct gpio_chip *chip,
				   unsigned off, int val)
{
	struct adp5589_kpad *kpad = container_of(chip, struct adp5589_kpad, gc);
	unsigned int bank = kpad->var->bank(kpad->gpiomap[off]);
	unsigned int bit = kpad->var->bit(kpad->gpiomap[off]);

	mutex_lock(&kpad->gpio_lock);

	if (val)
kpad->dat_out[bank] |= bit; else kpad->dat_out[bank] &= ~bit; adp5589_write(kpad->client, kpad->var->reg(ADP5589_GPO_DATA_OUT_A) + bank, kpad->dat_out[bank]); mutex_unlock(&kpad->gpio_lock); } static int adp5589_gpio_direction_input(struct gpio_chip *chip, unsigned off) { struct adp5589_kpad *kpad = container_of(chip, struct adp5589_kpad, gc); unsigned int bank = kpad->var->bank(kpad->gpiomap[off]); unsigned int bit = kpad->var->bit(kpad->gpiomap[off]); int ret; mutex_lock(&kpad->gpio_lock); kpad->dir[bank] &= ~bit; ret = adp5589_write(kpad->client, kpad->var->reg(ADP5589_GPIO_DIRECTION_A) + bank, kpad->dir[bank]); mutex_unlock(&kpad->gpio_lock); return ret; } static int adp5589_gpio_direction_output(struct gpio_chip *chip, unsigned off, int val) { struct adp5589_kpad *kpad = container_of(chip, struct adp5589_kpad, gc); unsigned int bank = kpad->var->bank(kpad->gpiomap[off]); unsigned int bit = kpad->var->bit(kpad->gpiomap[off]); int ret; mutex_lock(&kpad->gpio_lock); kpad->dir[bank] |= bit; if (val) kpad->dat_out[bank] |= bit; else kpad->dat_out[bank] &= ~bit; ret = adp5589_write(kpad->client, kpad->var->reg(ADP5589_GPO_DATA_OUT_A) + bank, kpad->dat_out[bank]); ret |= adp5589_write(kpad->client, kpad->var->reg(ADP5589_GPIO_DIRECTION_A) + bank, kpad->dir[bank]); mutex_unlock(&kpad->gpio_lock); return ret; } static int adp5589_build_gpiomap(struct adp5589_kpad *kpad, const struct adp5589_kpad_platform_data *pdata) { bool pin_used[ADP5589_MAXGPIO]; int n_unused = 0; int i; memset(pin_used, false, sizeof(pin_used)); for (i = 0; i < kpad->var->maxgpio; i++) if (pdata->keypad_en_mask & (1 << i)) pin_used[i] = true; for (i = 0; i < kpad->gpimapsize; i++) pin_used[kpad->gpimap[i].pin - kpad->var->gpi_pin_base] = true; if (kpad->extend_cfg & R4_EXTEND_CFG) pin_used[4] = true; if (kpad->extend_cfg & C4_EXTEND_CFG) pin_used[kpad->var->c4_extend_cfg] = true; if (!kpad->adp5585_support_row5) pin_used[5] = true; for (i = 0; i < kpad->var->maxgpio; i++) if (!pin_used[i]) 
kpad->gpiomap[n_unused++] = i; return n_unused; } static int adp5589_gpio_add(struct adp5589_kpad *kpad) { struct device *dev = &kpad->client->dev; const struct adp5589_kpad_platform_data *pdata = dev->platform_data; const struct adp5589_gpio_platform_data *gpio_data = pdata->gpio_data; int i, error; if (!gpio_data) return 0; kpad->gc.ngpio = adp5589_build_gpiomap(kpad, pdata); if (kpad->gc.ngpio == 0) { dev_info(dev, "No unused gpios left to export\n"); return 0; } kpad->export_gpio = true; kpad->gc.direction_input = adp5589_gpio_direction_input; kpad->gc.direction_output = adp5589_gpio_direction_output; kpad->gc.get = adp5589_gpio_get_value; kpad->gc.set = adp5589_gpio_set_value; kpad->gc.can_sleep = 1; kpad->gc.base = gpio_data->gpio_start; kpad->gc.label = kpad->client->name; kpad->gc.owner = THIS_MODULE; mutex_init(&kpad->gpio_lock); error = gpiochip_add(&kpad->gc); if (error) { dev_err(dev, "gpiochip_add failed, err: %d\n", error); return error; } for (i = 0; i <= kpad->var->bank(kpad->var->maxgpio); i++) { kpad->dat_out[i] = adp5589_read(kpad->client, kpad->var->reg( ADP5589_GPO_DATA_OUT_A) + i); kpad->dir[i] = adp5589_read(kpad->client, kpad->var->reg( ADP5589_GPIO_DIRECTION_A) + i); } if (gpio_data->setup) { error = gpio_data->setup(kpad->client, kpad->gc.base, kpad->gc.ngpio, gpio_data->context); if (error) dev_warn(dev, "setup failed, %d\n", error); } return 0; } static void adp5589_gpio_remove(struct adp5589_kpad *kpad) { struct device *dev = &kpad->client->dev; const struct adp5589_kpad_platform_data *pdata = dev->platform_data; const struct adp5589_gpio_platform_data *gpio_data = pdata->gpio_data; int error; if (!kpad->export_gpio) return; if (gpio_data->teardown) { error = gpio_data->teardown(kpad->client, kpad->gc.base, kpad->gc.ngpio, gpio_data->context); if (error) dev_warn(dev, "teardown failed %d\n", error); } error = gpiochip_remove(&kpad->gc); if (error) dev_warn(dev, "gpiochip_remove failed %d\n", error); } #else static inline int 
adp5589_gpio_add(struct adp5589_kpad *kpad) { return 0; } static inline void adp5589_gpio_remove(struct adp5589_kpad *kpad) { } #endif static void adp5589_report_switches(struct adp5589_kpad *kpad, int key, int key_val) { int i; for (i = 0; i < kpad->gpimapsize; i++) { if (key_val == kpad->gpimap[i].pin) { input_report_switch(kpad->input, kpad->gpimap[i].sw_evt, key & KEY_EV_PRESSED); break; } } } static void adp5589_report_events(struct adp5589_kpad *kpad, int ev_cnt) { int i; for (i = 0; i < ev_cnt; i++) { int key = adp5589_read(kpad->client, ADP5589_5_FIFO_1 + i); int key_val = key & KEY_EV_MASK; if (key_val >= kpad->var->gpi_pin_base && key_val <= kpad->var->gpi_pin_end) { adp5589_report_switches(kpad, key, key_val); } else { input_report_key(kpad->input, kpad->keycode[key_val - 1], key & KEY_EV_PRESSED); } } } static irqreturn_t adp5589_irq(int irq, void *handle) { struct adp5589_kpad *kpad = handle; struct i2c_client *client = kpad->client; int status, ev_cnt; status = adp5589_read(client, ADP5589_5_INT_STATUS); if (status & OVRFLOW_INT) /* Unlikely and should never happen */ dev_err(&client->dev, "Event Overflow Error\n"); if (status & EVENT_INT) { ev_cnt = adp5589_read(client, ADP5589_5_STATUS) & KEC; if (ev_cnt) { adp5589_report_events(kpad, ev_cnt); input_sync(kpad->input); } } adp5589_write(client, ADP5589_5_INT_STATUS, status); /* Status is W1C */ return IRQ_HANDLED; } static int adp5589_get_evcode(struct adp5589_kpad *kpad, unsigned short key) { int i; for (i = 0; i < kpad->var->keymapsize; i++) if (key == kpad->keycode[i]) return (i + 1) | KEY_EV_PRESSED; dev_err(&kpad->client->dev, "RESET/UNLOCK key not in keycode map\n"); return -EINVAL; } static int adp5589_setup(struct adp5589_kpad *kpad) { struct i2c_client *client = kpad->client; const struct adp5589_kpad_platform_data *pdata = client->dev.platform_data; u8 (*reg) (u8) = kpad->var->reg; unsigned char evt_mode1 = 0, evt_mode2 = 0, evt_mode3 = 0; unsigned char pull_mask = 0; int i, ret; ret = 
adp5589_write(client, reg(ADP5589_PIN_CONFIG_A), pdata->keypad_en_mask & kpad->var->row_mask); ret |= adp5589_write(client, reg(ADP5589_PIN_CONFIG_B), (pdata->keypad_en_mask >> kpad->var->col_shift) & kpad->var->col_mask); if (!kpad->is_adp5585) ret |= adp5589_write(client, ADP5589_PIN_CONFIG_C, (pdata->keypad_en_mask >> 16) & 0xFF); if (!kpad->is_adp5585 && pdata->en_keylock) { ret |= adp5589_write(client, ADP5589_UNLOCK1, pdata->unlock_key1); ret |= adp5589_write(client, ADP5589_UNLOCK2, pdata->unlock_key2); ret |= adp5589_write(client, ADP5589_UNLOCK_TIMERS, pdata->unlock_timer & LTIME_MASK); ret |= adp5589_write(client, ADP5589_LOCK_CFG, LOCK_EN); } for (i = 0; i < KEYP_MAX_EVENT; i++) ret |= adp5589_read(client, ADP5589_5_FIFO_1 + i); for (i = 0; i < pdata->gpimapsize; i++) { unsigned short pin = pdata->gpimap[i].pin; if (pin <= kpad->var->gpi_pin_row_end) { evt_mode1 |= (1 << (pin - kpad->var->gpi_pin_row_base)); } else { evt_mode2 |= ((1 << (pin - kpad->var->gpi_pin_col_base)) & 0xFF); if (!kpad->is_adp5585) evt_mode3 |= ((1 << (pin - kpad->var->gpi_pin_col_base)) >> 8); } } if (pdata->gpimapsize) { ret |= adp5589_write(client, reg(ADP5589_GPI_EVENT_EN_A), evt_mode1); ret |= adp5589_write(client, reg(ADP5589_GPI_EVENT_EN_B), evt_mode2); if (!kpad->is_adp5585) ret |= adp5589_write(client, reg(ADP5589_GPI_EVENT_EN_C), evt_mode3); } if (pdata->pull_dis_mask & pdata->pullup_en_100k & pdata->pullup_en_300k & pdata->pulldown_en_300k) dev_warn(&client->dev, "Conflicting pull resistor config\n"); for (i = 0; i <= kpad->var->max_row_num; i++) { unsigned val = 0, bit = (1 << i); if (pdata->pullup_en_300k & bit) val = 0; else if (pdata->pulldown_en_300k & bit) val = 1; else if (pdata->pullup_en_100k & bit) val = 2; else if (pdata->pull_dis_mask & bit) val = 3; pull_mask |= val << (2 * (i & 0x3)); if (i == 3 || i == kpad->var->max_row_num) { ret |= adp5589_write(client, reg(ADP5585_RPULL_CONFIG_A) + (i >> 2), pull_mask); pull_mask = 0; } } for (i = 0; i <= 
kpad->var->max_col_num; i++) { unsigned val = 0, bit = 1 << (i + kpad->var->col_shift); if (pdata->pullup_en_300k & bit) val = 0; else if (pdata->pulldown_en_300k & bit) val = 1; else if (pdata->pullup_en_100k & bit) val = 2; else if (pdata->pull_dis_mask & bit) val = 3; pull_mask |= val << (2 * (i & 0x3)); if (i == 3 || i == kpad->var->max_col_num) { ret |= adp5589_write(client, reg(ADP5585_RPULL_CONFIG_C) + (i >> 2), pull_mask); pull_mask = 0; } } if (pdata->reset1_key_1 && pdata->reset1_key_2 && pdata->reset1_key_3) { ret |= adp5589_write(client, reg(ADP5589_RESET1_EVENT_A), adp5589_get_evcode(kpad, pdata->reset1_key_1)); ret |= adp5589_write(client, reg(ADP5589_RESET1_EVENT_B), adp5589_get_evcode(kpad, pdata->reset1_key_2)); ret |= adp5589_write(client, reg(ADP5589_RESET1_EVENT_C), adp5589_get_evcode(kpad, pdata->reset1_key_3)); kpad->extend_cfg |= R4_EXTEND_CFG; } if (pdata->reset2_key_1 && pdata->reset2_key_2) { ret |= adp5589_write(client, reg(ADP5589_RESET2_EVENT_A), adp5589_get_evcode(kpad, pdata->reset2_key_1)); ret |= adp5589_write(client, reg(ADP5589_RESET2_EVENT_B), adp5589_get_evcode(kpad, pdata->reset2_key_2)); kpad->extend_cfg |= C4_EXTEND_CFG; } if (kpad->extend_cfg) { ret |= adp5589_write(client, reg(ADP5589_RESET_CFG), pdata->reset_cfg); ret |= adp5589_write(client, reg(ADP5589_PIN_CONFIG_D), kpad->extend_cfg); } ret |= adp5589_write(client, reg(ADP5589_DEBOUNCE_DIS_A), pdata->debounce_dis_mask & kpad->var->row_mask); ret |= adp5589_write(client, reg(ADP5589_DEBOUNCE_DIS_B), (pdata->debounce_dis_mask >> kpad->var->col_shift) & kpad->var->col_mask); if (!kpad->is_adp5585) ret |= adp5589_write(client, reg(ADP5589_DEBOUNCE_DIS_C), (pdata->debounce_dis_mask >> 16) & 0xFF); ret |= adp5589_write(client, reg(ADP5589_POLL_PTIME_CFG), pdata->scan_cycle_time & PTIME_MASK); ret |= adp5589_write(client, ADP5589_5_INT_STATUS, (kpad->is_adp5585 ? 0 : LOGIC2_INT) | LOGIC1_INT | OVRFLOW_INT | (kpad->is_adp5585 ? 
0 : LOCK_INT) | GPI_INT | EVENT_INT); /* Status is W1C */ ret |= adp5589_write(client, reg(ADP5589_GENERAL_CFG), INT_CFG | OSC_EN | CORE_CLK(3)); ret |= adp5589_write(client, reg(ADP5589_INT_EN), OVRFLOW_IEN | GPI_IEN | EVENT_IEN); if (ret < 0) { dev_err(&client->dev, "Write Error\n"); return ret; } return 0; } static void adp5589_report_switch_state(struct adp5589_kpad *kpad) { int gpi_stat_tmp, pin_loc; int i; int gpi_stat1 = adp5589_read(kpad->client, kpad->var->reg(ADP5589_GPI_STATUS_A)); int gpi_stat2 = adp5589_read(kpad->client, kpad->var->reg(ADP5589_GPI_STATUS_B)); int gpi_stat3 = !kpad->is_adp5585 ? adp5589_read(kpad->client, ADP5589_GPI_STATUS_C) : 0; for (i = 0; i < kpad->gpimapsize; i++) { unsigned short pin = kpad->gpimap[i].pin; if (pin <= kpad->var->gpi_pin_row_end) { gpi_stat_tmp = gpi_stat1; pin_loc = pin - kpad->var->gpi_pin_row_base; } else if ((pin - kpad->var->gpi_pin_col_base) < 8) { gpi_stat_tmp = gpi_stat2; pin_loc = pin - kpad->var->gpi_pin_col_base; } else { gpi_stat_tmp = gpi_stat3; pin_loc = pin - kpad->var->gpi_pin_col_base - 8; } if (gpi_stat_tmp < 0) { dev_err(&kpad->client->dev, "Can't read GPIO_DAT_STAT switch %d, default to OFF\n", pin); gpi_stat_tmp = 0; } input_report_switch(kpad->input, kpad->gpimap[i].sw_evt, !(gpi_stat_tmp & (1 << pin_loc))); } input_sync(kpad->input); } static int adp5589_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct adp5589_kpad *kpad; const struct adp5589_kpad_platform_data *pdata = client->dev.platform_data; struct input_dev *input; unsigned int revid; int ret, i; int error; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE_DATA)) { dev_err(&client->dev, "SMBUS Byte Data not Supported\n"); return -EIO; } if (!pdata) { dev_err(&client->dev, "no platform data?\n"); return -EINVAL; } kpad = kzalloc(sizeof(*kpad), GFP_KERNEL); if (!kpad) return -ENOMEM; switch (id->driver_data) { case ADP5585_02: kpad->adp5585_support_row5 = true; case ADP5585_01: kpad->is_adp5585 = 
true; kpad->var = &const_adp5585; break; case ADP5589: kpad->var = &const_adp5589; break; } if (!((pdata->keypad_en_mask & kpad->var->row_mask) && (pdata->keypad_en_mask >> kpad->var->col_shift)) || !pdata->keymap) { dev_err(&client->dev, "no rows, cols or keymap from pdata\n"); error = -EINVAL; goto err_free_mem; } if (pdata->keymapsize != kpad->var->keymapsize) { dev_err(&client->dev, "invalid keymapsize\n"); error = -EINVAL; goto err_free_mem; } if (!pdata->gpimap && pdata->gpimapsize) { dev_err(&client->dev, "invalid gpimap from pdata\n"); error = -EINVAL; goto err_free_mem; } if (pdata->gpimapsize > kpad->var->gpimapsize_max) { dev_err(&client->dev, "invalid gpimapsize\n"); error = -EINVAL; goto err_free_mem; } for (i = 0; i < pdata->gpimapsize; i++) { unsigned short pin = pdata->gpimap[i].pin; if (pin < kpad->var->gpi_pin_base || pin > kpad->var->gpi_pin_end) { dev_err(&client->dev, "invalid gpi pin data\n"); error = -EINVAL; goto err_free_mem; } if ((1 << (pin - kpad->var->gpi_pin_row_base)) & pdata->keypad_en_mask) { dev_err(&client->dev, "invalid gpi row/col data\n"); error = -EINVAL; goto err_free_mem; } } if (!client->irq) { dev_err(&client->dev, "no IRQ?\n"); error = -EINVAL; goto err_free_mem; } input = input_allocate_device(); if (!input) { error = -ENOMEM; goto err_free_mem; } kpad->client = client; kpad->input = input; ret = adp5589_read(client, ADP5589_5_ID); if (ret < 0) { error = ret; goto err_free_input; } revid = (u8) ret & ADP5589_5_DEVICE_ID_MASK; input->name = client->name; input->phys = "adp5589-keys/input0"; input->dev.parent = &client->dev; input_set_drvdata(input, kpad); input->id.bustype = BUS_I2C; input->id.vendor = 0x0001; input->id.product = 0x0001; input->id.version = revid; input->keycodesize = sizeof(kpad->keycode[0]); input->keycodemax = pdata->keymapsize; input->keycode = kpad->keycode; memcpy(kpad->keycode, pdata->keymap, pdata->keymapsize * input->keycodesize); kpad->gpimap = pdata->gpimap; kpad->gpimapsize = 
pdata->gpimapsize; /* setup input device */ __set_bit(EV_KEY, input->evbit); if (pdata->repeat) __set_bit(EV_REP, input->evbit); for (i = 0; i < input->keycodemax; i++) __set_bit(kpad->keycode[i] & KEY_MAX, input->keybit); __clear_bit(KEY_RESERVED, input->keybit); if (kpad->gpimapsize) __set_bit(EV_SW, input->evbit); for (i = 0; i < kpad->gpimapsize; i++) __set_bit(kpad->gpimap[i].sw_evt, input->swbit); error = input_register_device(input); if (error) { dev_err(&client->dev, "unable to register input device\n"); goto err_free_input; } error = request_threaded_irq(client->irq, NULL, adp5589_irq, IRQF_TRIGGER_FALLING | IRQF_ONESHOT, client->dev.driver->name, kpad); if (error) { dev_err(&client->dev, "irq %d busy?\n", client->irq); goto err_unreg_dev; } error = adp5589_setup(kpad); if (error) goto err_free_irq; if (kpad->gpimapsize) adp5589_report_switch_state(kpad); error = adp5589_gpio_add(kpad); if (error) goto err_free_irq; device_init_wakeup(&client->dev, 1); i2c_set_clientdata(client, kpad); dev_info(&client->dev, "Rev.%d keypad, irq %d\n", revid, client->irq); return 0; err_free_irq: free_irq(client->irq, kpad); err_unreg_dev: input_unregister_device(input); input = NULL; err_free_input: input_free_device(input); err_free_mem: kfree(kpad); return error; } static int adp5589_remove(struct i2c_client *client) { struct adp5589_kpad *kpad = i2c_get_clientdata(client); adp5589_write(client, kpad->var->reg(ADP5589_GENERAL_CFG), 0); free_irq(client->irq, kpad); input_unregister_device(kpad->input); adp5589_gpio_remove(kpad); kfree(kpad); return 0; } #ifdef CONFIG_PM_SLEEP static int adp5589_suspend(struct device *dev) { struct adp5589_kpad *kpad = dev_get_drvdata(dev); struct i2c_client *client = kpad->client; disable_irq(client->irq); if (device_may_wakeup(&client->dev)) enable_irq_wake(client->irq); return 0; } static int adp5589_resume(struct device *dev) { struct adp5589_kpad *kpad = dev_get_drvdata(dev); struct i2c_client *client = kpad->client; if 
(device_may_wakeup(&client->dev)) disable_irq_wake(client->irq); enable_irq(client->irq); return 0; } #endif static SIMPLE_DEV_PM_OPS(adp5589_dev_pm_ops, adp5589_suspend, adp5589_resume); static const struct i2c_device_id adp5589_id[] = { {"adp5589-keys", ADP5589}, {"adp5585-keys", ADP5585_01}, {"adp5585-02-keys", ADP5585_02}, /* Adds ROW5 to ADP5585 */ {} }; MODULE_DEVICE_TABLE(i2c, adp5589_id); static struct i2c_driver adp5589_driver = { .driver = { .name = KBUILD_MODNAME, .owner = THIS_MODULE, .pm = &adp5589_dev_pm_ops, }, .probe = adp5589_probe, .remove = adp5589_remove, .id_table = adp5589_id, }; module_i2c_driver(adp5589_driver); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>"); MODULE_DESCRIPTION("ADP5589/ADP5585 Keypad driver");
gpl-2.0
xixi012023/sultan-kernel-vivid-homeslice-ION
arch/arm/mach-gemini/board-wbd222.c
2665
2857
/* * Support for Wiliboard WBD-222 * * Copyright (C) 2009 Imre Kaloz <kaloz@openwrt.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/leds.h> #include <linux/input.h> #include <linux/skbuff.h> #include <linux/gpio_keys.h> #include <linux/mdio-gpio.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include "common.h" static struct gpio_keys_button wbd222_keys[] = { { .code = KEY_SETUP, .gpio = 5, .active_low = 1, .desc = "reset", .type = EV_KEY, }, }; static struct gpio_keys_platform_data wbd222_keys_data = { .buttons = wbd222_keys, .nbuttons = ARRAY_SIZE(wbd222_keys), }; static struct platform_device wbd222_keys_device = { .name = "gpio-keys", .id = -1, .dev = { .platform_data = &wbd222_keys_data, }, }; static struct gpio_led wbd222_leds[] = { { .name = "L3red", .gpio = 1, }, { .name = "L4green", .gpio = 2, }, { .name = "L4red", .gpio = 3, }, { .name = "L3green", .gpio = 5, }, }; static struct gpio_led_platform_data wbd222_leds_data = { .num_leds = ARRAY_SIZE(wbd222_leds), .leds = wbd222_leds, }; static struct platform_device wbd222_leds_device = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &wbd222_leds_data, }, }; static struct sys_timer wbd222_timer = { .init = gemini_timer_init, }; static struct mtd_partition wbd222_partitions[] = { { .name = "RedBoot", .offset = 0, .size = 0x020000, .mask_flags = MTD_WRITEABLE, } , { .name = "kernel", .offset = 0x020000, .size = 0x100000, } , { .name = "rootfs", .offset = 0x120000, .size = 0x6a0000, } , { .name = "VCTL", .offset = 0x7c0000, .size = 0x010000, .mask_flags = MTD_WRITEABLE, } , { .name = "cfg", .offset = 
0x7d0000, .size = 0x010000, .mask_flags = MTD_WRITEABLE, } , { .name = "FIS", .offset = 0x7e0000, .size = 0x010000, .mask_flags = MTD_WRITEABLE, } }; #define wbd222_num_partitions ARRAY_SIZE(wbd222_partitions) static void __init wbd222_init(void) { gemini_gpio_init(); platform_register_uart(); platform_register_pflash(SZ_8M, wbd222_partitions, wbd222_num_partitions); platform_device_register(&wbd222_leds_device); platform_device_register(&wbd222_keys_device); platform_register_rtc(); } MACHINE_START(WBD222, "Wiliboard WBD-222") .boot_params = 0x100, .map_io = gemini_map_io, .init_irq = gemini_init_irq, .timer = &wbd222_timer, .init_machine = wbd222_init, MACHINE_END
gpl-2.0
omnirom/android_kernel_asus_tegra3
drivers/net/ax88796.c
2921
24433
/* drivers/net/ax88796.c * * Copyright 2005,2007 Simtec Electronics * Ben Dooks <ben@simtec.co.uk> * * Asix AX88796 10/100 Ethernet controller support * Based on ne.c, by Donald Becker, et-al. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/isapnp.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/timer.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/mdio-bitbang.h> #include <linux/phy.h> #include <linux/eeprom_93cx6.h> #include <linux/slab.h> #include <net/ax88796.h> #include <asm/system.h> /* Rename the lib8390.c functions to show that they are in this driver */ #define __ei_open ax_ei_open #define __ei_close ax_ei_close #define __ei_poll ax_ei_poll #define __ei_start_xmit ax_ei_start_xmit #define __ei_tx_timeout ax_ei_tx_timeout #define __ei_get_stats ax_ei_get_stats #define __ei_set_multicast_list ax_ei_set_multicast_list #define __ei_interrupt ax_ei_interrupt #define ____alloc_ei_netdev ax__alloc_ei_netdev #define __NS8390_init ax_NS8390_init /* force unsigned long back to 'void __iomem *' */ #define ax_convert_addr(_a) ((void __force __iomem *)(_a)) #define ei_inb(_a) readb(ax_convert_addr(_a)) #define ei_outb(_v, _a) writeb(_v, ax_convert_addr(_a)) #define ei_inb_p(_a) ei_inb(_a) #define ei_outb_p(_v, _a) ei_outb(_v, _a) /* define EI_SHIFT() to take into account our register offsets */ #define EI_SHIFT(x) (ei_local->reg_offset[(x)]) /* Ensure we have our RCR base value */ #define AX88796_PLATFORM static unsigned char version[] = "ax88796.c: Copyright 2005,2007 Simtec Electronics\n"; #include "lib8390.c" #define DRV_NAME "ax88796" #define DRV_VERSION "1.00" /* from ne.c 
*/ #define NE_CMD EI_SHIFT(0x00) #define NE_RESET EI_SHIFT(0x1f) #define NE_DATAPORT EI_SHIFT(0x10) #define NE1SM_START_PG 0x20 /* First page of TX buffer */ #define NE1SM_STOP_PG 0x40 /* Last page +1 of RX ring */ #define NESM_START_PG 0x40 /* First page of TX buffer */ #define NESM_STOP_PG 0x80 /* Last page +1 of RX ring */ #define AX_GPOC_PPDSET BIT(6) /* device private data */ struct ax_device { struct mii_bus *mii_bus; struct mdiobb_ctrl bb_ctrl; struct phy_device *phy_dev; void __iomem *addr_memr; u8 reg_memr; int link; int speed; int duplex; void __iomem *map2; const struct ax_plat_data *plat; unsigned char running; unsigned char resume_open; unsigned int irqflags; u32 reg_offsets[0x20]; }; static inline struct ax_device *to_ax_dev(struct net_device *dev) { struct ei_device *ei_local = netdev_priv(dev); return (struct ax_device *)(ei_local + 1); } /* * ax_initial_check * * do an initial probe for the card to check wether it exists * and is functional */ static int ax_initial_check(struct net_device *dev) { struct ei_device *ei_local = netdev_priv(dev); void __iomem *ioaddr = ei_local->mem; int reg0; int regd; reg0 = ei_inb(ioaddr); if (reg0 == 0xFF) return -ENODEV; ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ioaddr + E8390_CMD); regd = ei_inb(ioaddr + 0x0d); ei_outb(0xff, ioaddr + 0x0d); ei_outb(E8390_NODMA + E8390_PAGE0, ioaddr + E8390_CMD); ei_inb(ioaddr + EN0_COUNTER0); /* Clear the counter by reading. */ if (ei_inb(ioaddr + EN0_COUNTER0) != 0) { ei_outb(reg0, ioaddr); ei_outb(regd, ioaddr + 0x0d); /* Restore the old values. */ return -ENODEV; } return 0; } /* * Hard reset the card. This used to pause for the same period that a * 8390 reset command required, but that shouldn't be necessary. 
*/ static void ax_reset_8390(struct net_device *dev) { struct ei_device *ei_local = netdev_priv(dev); unsigned long reset_start_time = jiffies; void __iomem *addr = (void __iomem *)dev->base_addr; if (ei_debug > 1) netdev_dbg(dev, "resetting the 8390 t=%ld\n", jiffies); ei_outb(ei_inb(addr + NE_RESET), addr + NE_RESET); ei_local->txing = 0; ei_local->dmaing = 0; /* This check _should_not_ be necessary, omit eventually. */ while ((ei_inb(addr + EN0_ISR) & ENISR_RESET) == 0) { if (jiffies - reset_start_time > 2 * HZ / 100) { netdev_warn(dev, "%s: did not complete.\n", __func__); break; } } ei_outb(ENISR_RESET, addr + EN0_ISR); /* Ack intr. */ } static void ax_get_8390_hdr(struct net_device *dev, struct e8390_pkt_hdr *hdr, int ring_page) { struct ei_device *ei_local = netdev_priv(dev); void __iomem *nic_base = ei_local->mem; /* This *shouldn't* happen. If it does, it's the last thing you'll see */ if (ei_local->dmaing) { netdev_err(dev, "DMAing conflict in %s " "[DMAstat:%d][irqlock:%d].\n", __func__, ei_local->dmaing, ei_local->irqlock); return; } ei_local->dmaing |= 0x01; ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_START, nic_base + NE_CMD); ei_outb(sizeof(struct e8390_pkt_hdr), nic_base + EN0_RCNTLO); ei_outb(0, nic_base + EN0_RCNTHI); ei_outb(0, nic_base + EN0_RSARLO); /* On page boundary */ ei_outb(ring_page, nic_base + EN0_RSARHI); ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); if (ei_local->word16) readsw(nic_base + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr) >> 1); else readsb(nic_base + NE_DATAPORT, hdr, sizeof(struct e8390_pkt_hdr)); ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ ei_local->dmaing &= ~0x01; le16_to_cpus(&hdr->count); } /* * Block input and output, similar to the Crynwr packet driver. If * you are porting to a new ethercard, look at the packet driver * source for hints. The NEx000 doesn't share the on-board packet * memory -- you have to put the packet out through the "remote DMA" * dataport using ei_outb. 
*/ static void ax_block_input(struct net_device *dev, int count, struct sk_buff *skb, int ring_offset) { struct ei_device *ei_local = netdev_priv(dev); void __iomem *nic_base = ei_local->mem; char *buf = skb->data; if (ei_local->dmaing) { netdev_err(dev, "DMAing conflict in %s " "[DMAstat:%d][irqlock:%d].\n", __func__, ei_local->dmaing, ei_local->irqlock); return; } ei_local->dmaing |= 0x01; ei_outb(E8390_NODMA+E8390_PAGE0+E8390_START, nic_base + NE_CMD); ei_outb(count & 0xff, nic_base + EN0_RCNTLO); ei_outb(count >> 8, nic_base + EN0_RCNTHI); ei_outb(ring_offset & 0xff, nic_base + EN0_RSARLO); ei_outb(ring_offset >> 8, nic_base + EN0_RSARHI); ei_outb(E8390_RREAD+E8390_START, nic_base + NE_CMD); if (ei_local->word16) { readsw(nic_base + NE_DATAPORT, buf, count >> 1); if (count & 0x01) buf[count-1] = ei_inb(nic_base + NE_DATAPORT); } else { readsb(nic_base + NE_DATAPORT, buf, count); } ei_local->dmaing &= ~1; } static void ax_block_output(struct net_device *dev, int count, const unsigned char *buf, const int start_page) { struct ei_device *ei_local = netdev_priv(dev); void __iomem *nic_base = ei_local->mem; unsigned long dma_start; /* * Round the count up for word writes. Do we need to do this? * What effect will an odd byte count have on the 8390? I * should check someday. */ if (ei_local->word16 && (count & 0x01)) count++; /* This *shouldn't* happen. If it does, it's the last thing you'll see */ if (ei_local->dmaing) { netdev_err(dev, "DMAing conflict in %s." "[DMAstat:%d][irqlock:%d]\n", __func__, ei_local->dmaing, ei_local->irqlock); return; } ei_local->dmaing |= 0x01; /* We should already be in page 0, but to be safe... */ ei_outb(E8390_PAGE0+E8390_START+E8390_NODMA, nic_base + NE_CMD); ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Now the normal output. 
*/ ei_outb(count & 0xff, nic_base + EN0_RCNTLO); ei_outb(count >> 8, nic_base + EN0_RCNTHI); ei_outb(0x00, nic_base + EN0_RSARLO); ei_outb(start_page, nic_base + EN0_RSARHI); ei_outb(E8390_RWRITE+E8390_START, nic_base + NE_CMD); if (ei_local->word16) writesw(nic_base + NE_DATAPORT, buf, count >> 1); else writesb(nic_base + NE_DATAPORT, buf, count); dma_start = jiffies; while ((ei_inb(nic_base + EN0_ISR) & ENISR_RDC) == 0) { if (jiffies - dma_start > 2 * HZ / 100) { /* 20ms */ netdev_warn(dev, "timeout waiting for Tx RDC.\n"); ax_reset_8390(dev); ax_NS8390_init(dev, 1); break; } } ei_outb(ENISR_RDC, nic_base + EN0_ISR); /* Ack intr. */ ei_local->dmaing &= ~0x01; } /* definitions for accessing MII/EEPROM interface */ #define AX_MEMR EI_SHIFT(0x14) #define AX_MEMR_MDC BIT(0) #define AX_MEMR_MDIR BIT(1) #define AX_MEMR_MDI BIT(2) #define AX_MEMR_MDO BIT(3) #define AX_MEMR_EECS BIT(4) #define AX_MEMR_EEI BIT(5) #define AX_MEMR_EEO BIT(6) #define AX_MEMR_EECLK BIT(7) static void ax_handle_link_change(struct net_device *dev) { struct ax_device *ax = to_ax_dev(dev); struct phy_device *phy_dev = ax->phy_dev; int status_change = 0; if (phy_dev->link && ((ax->speed != phy_dev->speed) || (ax->duplex != phy_dev->duplex))) { ax->speed = phy_dev->speed; ax->duplex = phy_dev->duplex; status_change = 1; } if (phy_dev->link != ax->link) { if (!phy_dev->link) { ax->speed = 0; ax->duplex = -1; } ax->link = phy_dev->link; status_change = 1; } if (status_change) phy_print_status(phy_dev); } static int ax_mii_probe(struct net_device *dev) { struct ax_device *ax = to_ax_dev(dev); struct phy_device *phy_dev = NULL; int ret; /* find the first phy */ phy_dev = phy_find_first(ax->mii_bus); if (!phy_dev) { netdev_err(dev, "no PHY found\n"); return -ENODEV; } ret = phy_connect_direct(dev, phy_dev, ax_handle_link_change, 0, PHY_INTERFACE_MODE_MII); if (ret) { netdev_err(dev, "Could not attach to PHY\n"); return ret; } /* mask with MAC supported features */ phy_dev->supported &= 
PHY_BASIC_FEATURES; phy_dev->advertising = phy_dev->supported; ax->phy_dev = phy_dev; netdev_info(dev, "PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n", phy_dev->drv->name, dev_name(&phy_dev->dev), phy_dev->irq); return 0; } static void ax_phy_switch(struct net_device *dev, int on) { struct ei_device *ei_local = netdev_priv(dev); struct ax_device *ax = to_ax_dev(dev); u8 reg_gpoc = ax->plat->gpoc_val; if (!!on) reg_gpoc &= ~AX_GPOC_PPDSET; else reg_gpoc |= AX_GPOC_PPDSET; ei_outb(reg_gpoc, ei_local->mem + EI_SHIFT(0x17)); } static int ax_open(struct net_device *dev) { struct ax_device *ax = to_ax_dev(dev); int ret; netdev_dbg(dev, "open\n"); ret = request_irq(dev->irq, ax_ei_interrupt, ax->irqflags, dev->name, dev); if (ret) goto failed_request_irq; /* turn the phy on (if turned off) */ ax_phy_switch(dev, 1); ret = ax_mii_probe(dev); if (ret) goto failed_mii_probe; phy_start(ax->phy_dev); ret = ax_ei_open(dev); if (ret) goto failed_ax_ei_open; ax->running = 1; return 0; failed_ax_ei_open: phy_disconnect(ax->phy_dev); failed_mii_probe: ax_phy_switch(dev, 0); free_irq(dev->irq, dev); failed_request_irq: return ret; } static int ax_close(struct net_device *dev) { struct ax_device *ax = to_ax_dev(dev); netdev_dbg(dev, "close\n"); ax->running = 0; wmb(); ax_ei_close(dev); /* turn the phy off */ ax_phy_switch(dev, 0); phy_disconnect(ax->phy_dev); free_irq(dev->irq, dev); return 0; } static int ax_ioctl(struct net_device *dev, struct ifreq *req, int cmd) { struct ax_device *ax = to_ax_dev(dev); struct phy_device *phy_dev = ax->phy_dev; if (!netif_running(dev)) return -EINVAL; if (!phy_dev) return -ENODEV; return phy_mii_ioctl(phy_dev, req, cmd); } /* ethtool ops */ static void ax_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct platform_device *pdev = to_platform_device(dev->dev.parent); strcpy(info->driver, DRV_NAME); strcpy(info->version, DRV_VERSION); strcpy(info->bus_info, pdev->name); } static int ax_get_settings(struct net_device *dev, 
struct ethtool_cmd *cmd) { struct ax_device *ax = to_ax_dev(dev); struct phy_device *phy_dev = ax->phy_dev; if (!phy_dev) return -ENODEV; return phy_ethtool_gset(phy_dev, cmd); } static int ax_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) { struct ax_device *ax = to_ax_dev(dev); struct phy_device *phy_dev = ax->phy_dev; if (!phy_dev) return -ENODEV; return phy_ethtool_sset(phy_dev, cmd); } static const struct ethtool_ops ax_ethtool_ops = { .get_drvinfo = ax_get_drvinfo, .get_settings = ax_get_settings, .set_settings = ax_set_settings, .get_link = ethtool_op_get_link, }; #ifdef CONFIG_AX88796_93CX6 static void ax_eeprom_register_read(struct eeprom_93cx6 *eeprom) { struct ei_device *ei_local = eeprom->data; u8 reg = ei_inb(ei_local->mem + AX_MEMR); eeprom->reg_data_in = reg & AX_MEMR_EEI; eeprom->reg_data_out = reg & AX_MEMR_EEO; /* Input pin */ eeprom->reg_data_clock = reg & AX_MEMR_EECLK; eeprom->reg_chip_select = reg & AX_MEMR_EECS; } static void ax_eeprom_register_write(struct eeprom_93cx6 *eeprom) { struct ei_device *ei_local = eeprom->data; u8 reg = ei_inb(ei_local->mem + AX_MEMR); reg &= ~(AX_MEMR_EEI | AX_MEMR_EECLK | AX_MEMR_EECS); if (eeprom->reg_data_in) reg |= AX_MEMR_EEI; if (eeprom->reg_data_clock) reg |= AX_MEMR_EECLK; if (eeprom->reg_chip_select) reg |= AX_MEMR_EECS; ei_outb(reg, ei_local->mem + AX_MEMR); udelay(10); } #endif static const struct net_device_ops ax_netdev_ops = { .ndo_open = ax_open, .ndo_stop = ax_close, .ndo_do_ioctl = ax_ioctl, .ndo_start_xmit = ax_ei_start_xmit, .ndo_tx_timeout = ax_ei_tx_timeout, .ndo_get_stats = ax_ei_get_stats, .ndo_set_multicast_list = ax_ei_set_multicast_list, .ndo_validate_addr = eth_validate_addr, .ndo_set_mac_address = eth_mac_addr, .ndo_change_mtu = eth_change_mtu, #ifdef CONFIG_NET_POLL_CONTROLLER .ndo_poll_controller = ax_ei_poll, #endif }; static void ax_bb_mdc(struct mdiobb_ctrl *ctrl, int level) { struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); if (level) 
ax->reg_memr |= AX_MEMR_MDC; else ax->reg_memr &= ~AX_MEMR_MDC; ei_outb(ax->reg_memr, ax->addr_memr); } static void ax_bb_dir(struct mdiobb_ctrl *ctrl, int output) { struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); if (output) ax->reg_memr &= ~AX_MEMR_MDIR; else ax->reg_memr |= AX_MEMR_MDIR; ei_outb(ax->reg_memr, ax->addr_memr); } static void ax_bb_set_data(struct mdiobb_ctrl *ctrl, int value) { struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); if (value) ax->reg_memr |= AX_MEMR_MDO; else ax->reg_memr &= ~AX_MEMR_MDO; ei_outb(ax->reg_memr, ax->addr_memr); } static int ax_bb_get_data(struct mdiobb_ctrl *ctrl) { struct ax_device *ax = container_of(ctrl, struct ax_device, bb_ctrl); int reg_memr = ei_inb(ax->addr_memr); return reg_memr & AX_MEMR_MDI ? 1 : 0; } static struct mdiobb_ops bb_ops = { .owner = THIS_MODULE, .set_mdc = ax_bb_mdc, .set_mdio_dir = ax_bb_dir, .set_mdio_data = ax_bb_set_data, .get_mdio_data = ax_bb_get_data, }; /* setup code */ static int ax_mii_init(struct net_device *dev) { struct platform_device *pdev = to_platform_device(dev->dev.parent); struct ei_device *ei_local = netdev_priv(dev); struct ax_device *ax = to_ax_dev(dev); int err, i; ax->bb_ctrl.ops = &bb_ops; ax->addr_memr = ei_local->mem + AX_MEMR; ax->mii_bus = alloc_mdio_bitbang(&ax->bb_ctrl); if (!ax->mii_bus) { err = -ENOMEM; goto out; } ax->mii_bus->name = "ax88796_mii_bus"; ax->mii_bus->parent = dev->dev.parent; snprintf(ax->mii_bus->id, MII_BUS_ID_SIZE, "%x", pdev->id); ax->mii_bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL); if (!ax->mii_bus->irq) { err = -ENOMEM; goto out_free_mdio_bitbang; } for (i = 0; i < PHY_MAX_ADDR; i++) ax->mii_bus->irq[i] = PHY_POLL; err = mdiobus_register(ax->mii_bus); if (err) goto out_free_irq; return 0; out_free_irq: kfree(ax->mii_bus->irq); out_free_mdio_bitbang: free_mdio_bitbang(ax->mii_bus); out: return err; } static void ax_initial_setup(struct net_device *dev, struct ei_device *ei_local) { void 
__iomem *ioaddr = ei_local->mem; struct ax_device *ax = to_ax_dev(dev); /* Select page 0 */ ei_outb(E8390_NODMA + E8390_PAGE0 + E8390_STOP, ioaddr + E8390_CMD); /* set to byte access */ ei_outb(ax->plat->dcr_val & ~1, ioaddr + EN0_DCFG); ei_outb(ax->plat->gpoc_val, ioaddr + EI_SHIFT(0x17)); } /* * ax_init_dev * * initialise the specified device, taking care to note the MAC * address it may already have (if configured), ensure * the device is ready to be used by lib8390.c and registerd with * the network layer. */ static int ax_init_dev(struct net_device *dev) { struct ei_device *ei_local = netdev_priv(dev); struct ax_device *ax = to_ax_dev(dev); void __iomem *ioaddr = ei_local->mem; unsigned int start_page; unsigned int stop_page; int ret; int i; ret = ax_initial_check(dev); if (ret) goto err_out; /* setup goes here */ ax_initial_setup(dev, ei_local); /* read the mac from the card prom if we need it */ if (ax->plat->flags & AXFLG_HAS_EEPROM) { unsigned char SA_prom[32]; for (i = 0; i < sizeof(SA_prom); i += 2) { SA_prom[i] = ei_inb(ioaddr + NE_DATAPORT); SA_prom[i + 1] = ei_inb(ioaddr + NE_DATAPORT); } if (ax->plat->wordlength == 2) for (i = 0; i < 16; i++) SA_prom[i] = SA_prom[i+i]; memcpy(dev->dev_addr, SA_prom, 6); } #ifdef CONFIG_AX88796_93CX6 if (ax->plat->flags & AXFLG_HAS_93CX6) { unsigned char mac_addr[6]; struct eeprom_93cx6 eeprom; eeprom.data = ei_local; eeprom.register_read = ax_eeprom_register_read; eeprom.register_write = ax_eeprom_register_write; eeprom.width = PCI_EEPROM_WIDTH_93C56; eeprom_93cx6_multiread(&eeprom, 0, (__le16 __force *)mac_addr, sizeof(mac_addr) >> 1); memcpy(dev->dev_addr, mac_addr, 6); } #endif if (ax->plat->wordlength == 2) { /* We must set the 8390 for word mode. 
*/ ei_outb(ax->plat->dcr_val, ei_local->mem + EN0_DCFG); start_page = NESM_START_PG; stop_page = NESM_STOP_PG; } else { start_page = NE1SM_START_PG; stop_page = NE1SM_STOP_PG; } /* load the mac-address from the device */ if (ax->plat->flags & AXFLG_MAC_FROMDEV) { ei_outb(E8390_NODMA + E8390_PAGE1 + E8390_STOP, ei_local->mem + E8390_CMD); /* 0x61 */ for (i = 0; i < ETHER_ADDR_LEN; i++) dev->dev_addr[i] = ei_inb(ioaddr + EN1_PHYS_SHIFT(i)); } if ((ax->plat->flags & AXFLG_MAC_FROMPLATFORM) && ax->plat->mac_addr) memcpy(dev->dev_addr, ax->plat->mac_addr, ETHER_ADDR_LEN); ax_reset_8390(dev); ei_local->name = "AX88796"; ei_local->tx_start_page = start_page; ei_local->stop_page = stop_page; ei_local->word16 = (ax->plat->wordlength == 2); ei_local->rx_start_page = start_page + TX_PAGES; #ifdef PACKETBUF_MEMSIZE /* Allow the packet buffer size to be overridden by know-it-alls. */ ei_local->stop_page = ei_local->tx_start_page + PACKETBUF_MEMSIZE; #endif ei_local->reset_8390 = &ax_reset_8390; ei_local->block_input = &ax_block_input; ei_local->block_output = &ax_block_output; ei_local->get_8390_hdr = &ax_get_8390_hdr; ei_local->priv = 0; dev->netdev_ops = &ax_netdev_ops; dev->ethtool_ops = &ax_ethtool_ops; ret = ax_mii_init(dev); if (ret) goto out_irq; ax_NS8390_init(dev, 0); ret = register_netdev(dev); if (ret) goto out_irq; netdev_info(dev, "%dbit, irq %d, %lx, MAC: %pM\n", ei_local->word16 ? 
16 : 8, dev->irq, dev->base_addr, dev->dev_addr); return 0; out_irq: /* cleanup irq */ free_irq(dev->irq, dev); err_out: return ret; } static int ax_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); struct ei_device *ei_local = netdev_priv(dev); struct ax_device *ax = to_ax_dev(dev); struct resource *mem; unregister_netdev(dev); free_irq(dev->irq, dev); iounmap(ei_local->mem); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(mem->start, resource_size(mem)); if (ax->map2) { iounmap(ax->map2); mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); release_mem_region(mem->start, resource_size(mem)); } free_netdev(dev); return 0; } /* * ax_probe * * This is the entry point when the platform device system uses to * notify us of a new device to attach to. Allocate memory, find the * resources and information passed, and map the necessary registers. */ static int ax_probe(struct platform_device *pdev) { struct net_device *dev; struct ei_device *ei_local; struct ax_device *ax; struct resource *irq, *mem, *mem2; resource_size_t mem_size, mem2_size = 0; int ret = 0; dev = ax__alloc_ei_netdev(sizeof(struct ax_device)); if (dev == NULL) return -ENOMEM; /* ok, let's setup our device */ SET_NETDEV_DEV(dev, &pdev->dev); ei_local = netdev_priv(dev); ax = to_ax_dev(dev); ax->plat = pdev->dev.platform_data; platform_set_drvdata(pdev, dev); ei_local->rxcr_base = ax->plat->rcr_val; /* find the platform resources */ irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); if (!irq) { dev_err(&pdev->dev, "no IRQ specified\n"); ret = -ENXIO; goto exit_mem; } dev->irq = irq->start; ax->irqflags = irq->flags & IRQF_TRIGGER_MASK; mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(&pdev->dev, "no MEM specified\n"); ret = -ENXIO; goto exit_mem; } mem_size = resource_size(mem); /* * setup the register offsets from either the platform data or * by using the size of the resource provided */ if 
(ax->plat->reg_offsets) ei_local->reg_offset = ax->plat->reg_offsets; else { ei_local->reg_offset = ax->reg_offsets; for (ret = 0; ret < 0x18; ret++) ax->reg_offsets[ret] = (mem_size / 0x18) * ret; } if (!request_mem_region(mem->start, mem_size, pdev->name)) { dev_err(&pdev->dev, "cannot reserve registers\n"); ret = -ENXIO; goto exit_mem; } ei_local->mem = ioremap(mem->start, mem_size); dev->base_addr = (unsigned long)ei_local->mem; if (ei_local->mem == NULL) { dev_err(&pdev->dev, "Cannot ioremap area %pR\n", mem); ret = -ENXIO; goto exit_req; } /* look for reset area */ mem2 = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (!mem2) { if (!ax->plat->reg_offsets) { for (ret = 0; ret < 0x20; ret++) ax->reg_offsets[ret] = (mem_size / 0x20) * ret; } } else { mem2_size = resource_size(mem2); if (!request_mem_region(mem2->start, mem2_size, pdev->name)) { dev_err(&pdev->dev, "cannot reserve registers\n"); ret = -ENXIO; goto exit_mem1; } ax->map2 = ioremap(mem2->start, mem2_size); if (!ax->map2) { dev_err(&pdev->dev, "cannot map reset register\n"); ret = -ENXIO; goto exit_mem2; } ei_local->reg_offset[0x1f] = ax->map2 - ei_local->mem; } /* got resources, now initialise and register device */ ret = ax_init_dev(dev); if (!ret) return 0; if (!ax->map2) goto exit_mem1; iounmap(ax->map2); exit_mem2: release_mem_region(mem2->start, mem2_size); exit_mem1: iounmap(ei_local->mem); exit_req: release_mem_region(mem->start, mem_size); exit_mem: free_netdev(dev); return ret; } /* suspend and resume */ #ifdef CONFIG_PM static int ax_suspend(struct platform_device *dev, pm_message_t state) { struct net_device *ndev = platform_get_drvdata(dev); struct ax_device *ax = to_ax_dev(ndev); ax->resume_open = ax->running; netif_device_detach(ndev); ax_close(ndev); return 0; } static int ax_resume(struct platform_device *pdev) { struct net_device *ndev = platform_get_drvdata(pdev); struct ax_device *ax = to_ax_dev(ndev); ax_initial_setup(ndev, netdev_priv(ndev)); ax_NS8390_init(ndev, 
ax->resume_open); netif_device_attach(ndev); if (ax->resume_open) ax_open(ndev); return 0; } #else #define ax_suspend NULL #define ax_resume NULL #endif static struct platform_driver axdrv = { .driver = { .name = "ax88796", .owner = THIS_MODULE, }, .probe = ax_probe, .remove = ax_remove, .suspend = ax_suspend, .resume = ax_resume, }; static int __init axdrv_init(void) { return platform_driver_register(&axdrv); } static void __exit axdrv_exit(void) { platform_driver_unregister(&axdrv); } module_init(axdrv_init); module_exit(axdrv_exit); MODULE_DESCRIPTION("AX88796 10/100 Ethernet platform driver"); MODULE_AUTHOR("Ben Dooks, <ben@simtec.co.uk>"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:ax88796");
gpl-2.0
TeamWin/android_kernel_lge_msm8974
arch/powerpc/platforms/pseries/pci_dlpar.c
4457
5825
/* * PCI Dynamic LPAR, PCI Hot Plug and PCI EEH recovery code * for RPA-compliant PPC64 platform. * Copyright (C) 2003 Linda Xie <lxie@us.ibm.com> * Copyright (C) 2005 International Business Machines * * Updates, 2005, John Rose <johnrose@austin.ibm.com> * Updates, 2005, Linas Vepstas <linas@austin.ibm.com> * * All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/pci.h> #include <linux/export.h> #include <asm/pci-bridge.h> #include <asm/ppc-pci.h> #include <asm/firmware.h> #include <asm/eeh.h> static struct pci_bus * find_bus_among_children(struct pci_bus *bus, struct device_node *dn) { struct pci_bus *child = NULL; struct list_head *tmp; struct device_node *busdn; busdn = pci_bus_to_OF_node(bus); if (busdn == dn) return bus; list_for_each(tmp, &bus->children) { child = find_bus_among_children(pci_bus_b(tmp), dn); if (child) break; }; return child; } struct pci_bus * pcibios_find_pci_bus(struct device_node *dn) { struct pci_dn *pdn = dn->data; if (!pdn || !pdn->phb || !pdn->phb->bus) return NULL; return find_bus_among_children(pdn->phb->bus, dn); } EXPORT_SYMBOL_GPL(pcibios_find_pci_bus); /** * pcibios_remove_pci_devices - remove all devices under this bus * * Remove all of the PCI devices under this bus both from the * linux pci device tree, and from the powerpc EEH address cache. */ void pcibios_remove_pci_devices(struct pci_bus *bus) { struct pci_dev *dev, *tmp; struct pci_bus *child_bus; /* First go down child busses */ list_for_each_entry(child_bus, &bus->children, node) pcibios_remove_pci_devices(child_bus); pr_debug("PCI: Removing devices on bus %04x:%02x\n", pci_domain_nr(bus), bus->number); list_for_each_entry_safe(dev, tmp, &bus->devices, bus_list) { pr_debug(" * Removing %s...\n", pci_name(dev)); eeh_remove_bus_device(dev); pci_stop_and_remove_bus_device(dev); } } EXPORT_SYMBOL_GPL(pcibios_remove_pci_devices); /** * pcibios_add_pci_devices - adds new pci devices to bus * * This routine will find and fixup new pci devices under * the indicated bus. This routine presumes that there * might already be some devices under this bridge, so * it carefully tries to add only new devices. (And that * is how this routine differs from other, similar pcibios * routines.) 
*/ void pcibios_add_pci_devices(struct pci_bus * bus) { int slotno, num, mode, pass, max; struct pci_dev *dev; struct device_node *dn = pci_bus_to_OF_node(bus); eeh_add_device_tree_early(dn); mode = PCI_PROBE_NORMAL; if (ppc_md.pci_probe_mode) mode = ppc_md.pci_probe_mode(bus); if (mode == PCI_PROBE_DEVTREE) { /* use ofdt-based probe */ of_rescan_bus(dn, bus); } else if (mode == PCI_PROBE_NORMAL) { /* use legacy probe */ slotno = PCI_SLOT(PCI_DN(dn->child)->devfn); num = pci_scan_slot(bus, PCI_DEVFN(slotno, 0)); if (!num) return; pcibios_setup_bus_devices(bus); max = bus->secondary; for (pass=0; pass < 2; pass++) list_for_each_entry(dev, &bus->devices, bus_list) { if (dev->hdr_type == PCI_HEADER_TYPE_BRIDGE || dev->hdr_type == PCI_HEADER_TYPE_CARDBUS) max = pci_scan_bridge(bus, dev, max, pass); } } pcibios_finish_adding_to_bus(bus); } EXPORT_SYMBOL_GPL(pcibios_add_pci_devices); struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn) { struct pci_controller *phb; pr_debug("PCI: Initializing new hotplug PHB %s\n", dn->full_name); phb = pcibios_alloc_controller(dn); if (!phb) return NULL; rtas_setup_phb(phb); pci_process_bridge_OF_ranges(phb, dn, 0); pci_devs_phb_init_dynamic(phb); /* Create EEH devices for the PHB */ eeh_dev_phb_init_dynamic(phb); if (dn->child) eeh_add_device_tree_early(dn); pcibios_scan_phb(phb); pcibios_finish_adding_to_bus(phb->bus); return phb; } EXPORT_SYMBOL_GPL(init_phb_dynamic); /* RPA-specific bits for removing PHBs */ int remove_phb_dynamic(struct pci_controller *phb) { struct pci_bus *b = phb->bus; struct resource *res; int rc, i; pr_debug("PCI: Removing PHB %04x:%02x...\n", pci_domain_nr(b), b->number); /* We cannot to remove a root bus that has children */ if (!(list_empty(&b->children) && list_empty(&b->devices))) return -EBUSY; /* We -know- there aren't any child devices anymore at this stage * and thus, we can safely unmap the IO space as it's not in use */ res = &phb->io_resource; if (res->flags & IORESOURCE_IO) { 
rc = pcibios_unmap_io_space(b); if (rc) { printk(KERN_ERR "%s: failed to unmap IO on bus %s\n", __func__, b->name); return 1; } } /* Unregister the bridge device from sysfs and remove the PCI bus */ device_unregister(b->bridge); phb->bus = NULL; pci_remove_bus(b); /* Now release the IO resource */ if (res->flags & IORESOURCE_IO) release_resource(res); /* Release memory resources */ for (i = 0; i < 3; ++i) { res = &phb->mem_resources[i]; if (!(res->flags & IORESOURCE_MEM)) continue; release_resource(res); } /* Free pci_controller data structure */ pcibios_free_controller(phb); return 0; } EXPORT_SYMBOL_GPL(remove_phb_dynamic);
gpl-2.0
jejecule/kernel_despair_find7
arch/blackfin/kernel/dma-mapping.c
4713
3397
/* * Dynamic DMA mapping support * * Copyright 2005-2009 Analog Devices Inc. * * Licensed under the GPL-2 or later */ #include <linux/types.h> #include <linux/gfp.h> #include <linux/string.h> #include <linux/spinlock.h> #include <linux/dma-mapping.h> #include <linux/scatterlist.h> #include <linux/export.h> static spinlock_t dma_page_lock; static unsigned long *dma_page; static unsigned int dma_pages; static unsigned long dma_base; static unsigned long dma_size; static unsigned int dma_initialized; static void dma_alloc_init(unsigned long start, unsigned long end) { spin_lock_init(&dma_page_lock); dma_initialized = 0; dma_page = (unsigned long *)__get_free_page(GFP_KERNEL); memset(dma_page, 0, PAGE_SIZE); dma_base = PAGE_ALIGN(start); dma_size = PAGE_ALIGN(end) - PAGE_ALIGN(start); dma_pages = dma_size >> PAGE_SHIFT; memset((void *)dma_base, 0, DMA_UNCACHED_REGION); dma_initialized = 1; printk(KERN_INFO "%s: dma_page @ 0x%p - %d pages at 0x%08lx\n", __func__, dma_page, dma_pages, dma_base); } static inline unsigned int get_pages(size_t size) { return ((size - 1) >> PAGE_SHIFT) + 1; } static unsigned long __alloc_dma_pages(unsigned int pages) { unsigned long ret = 0, flags; int i, count = 0; if (dma_initialized == 0) dma_alloc_init(_ramend - DMA_UNCACHED_REGION, _ramend); spin_lock_irqsave(&dma_page_lock, flags); for (i = 0; i < dma_pages;) { if (test_bit(i++, dma_page) == 0) { if (++count == pages) { while (count--) __set_bit(--i, dma_page); ret = dma_base + (i << PAGE_SHIFT); break; } } else count = 0; } spin_unlock_irqrestore(&dma_page_lock, flags); return ret; } static void __free_dma_pages(unsigned long addr, unsigned int pages) { unsigned long page = (addr - dma_base) >> PAGE_SHIFT; unsigned long flags; int i; if ((page + pages) > dma_pages) { printk(KERN_ERR "%s: freeing outside range.\n", __func__); BUG(); } spin_lock_irqsave(&dma_page_lock, flags); for (i = page; i < page + pages; i++) __clear_bit(i, dma_page); spin_unlock_irqrestore(&dma_page_lock, flags); 
} void *dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, gfp_t gfp) { void *ret; ret = (void *)__alloc_dma_pages(get_pages(size)); if (ret) { memset(ret, 0, size); *dma_handle = virt_to_phys(ret); } return ret; } EXPORT_SYMBOL(dma_alloc_coherent); void dma_free_coherent(struct device *dev, size_t size, void *vaddr, dma_addr_t dma_handle) { __free_dma_pages((unsigned long)vaddr, get_pages(size)); } EXPORT_SYMBOL(dma_free_coherent); /* * Streaming DMA mappings */ void __dma_sync(dma_addr_t addr, size_t size, enum dma_data_direction dir) { __dma_sync_inline(addr, size, dir); } EXPORT_SYMBOL(__dma_sync); int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents, enum dma_data_direction direction) { int i; for (i = 0; i < nents; i++, sg++) { sg->dma_address = (dma_addr_t) sg_virt(sg); __dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction); } return nents; } EXPORT_SYMBOL(dma_map_sg); void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems, enum dma_data_direction direction) { int i; for (i = 0; i < nelems; i++, sg++) { sg->dma_address = (dma_addr_t) sg_virt(sg); __dma_sync(sg_dma_address(sg), sg_dma_len(sg), direction); } } EXPORT_SYMBOL(dma_sync_sg_for_device);
gpl-2.0
bashrc/linux-sunxi
drivers/scsi/aacraid/rkt.c
8041
3111
/* * Adaptec AAC series RAID controller driver * (c) Copyright 2001 Red Hat Inc. * * based on the old aacraid driver that is.. * Adaptec aacraid device driver for Linux. * * Copyright (c) 2000-2010 Adaptec, Inc. * 2010 PMC-Sierra, Inc. (aacraid@pmc-sierra.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; see the file COPYING. If not, write to * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * * Module Name: * rkt.c * * Abstract: Hardware miniport for Drawbridge specific hardware functions. * */ #include <linux/blkdev.h> #include <scsi/scsi_host.h> #include "aacraid.h" #define AAC_NUM_IO_FIB_RKT (246 - AAC_NUM_MGT_FIB) /** * aac_rkt_select_comm - Select communications method * @dev: Adapter * @comm: communications method */ static int aac_rkt_select_comm(struct aac_dev *dev, int comm) { int retval; retval = aac_rx_select_comm(dev, comm); if (comm == AAC_COMM_MESSAGE) { /* * FIB Setup has already been done, but we can minimize the * damage by at least ensuring the OS never issues more * commands than we can handle. The Rocket adapters currently * can only handle 246 commands and 8 AIFs at the same time, * and in fact do notify us accordingly if we negotiate the * FIB size. The problem that causes us to add this check is * to ensure that we do not overdo it with the adapter when a * hard coded FIB override is being utilized. This special * case warrants this half baked, but convenient, check here. 
*/ if (dev->scsi_host_ptr->can_queue > AAC_NUM_IO_FIB_RKT) { dev->init->MaxIoCommands = cpu_to_le32(AAC_NUM_IO_FIB_RKT + AAC_NUM_MGT_FIB); dev->scsi_host_ptr->can_queue = AAC_NUM_IO_FIB_RKT; } } return retval; } /** * aac_rkt_ioremap * @size: mapping resize request * */ static int aac_rkt_ioremap(struct aac_dev * dev, u32 size) { if (!size) { iounmap(dev->regs.rkt); return 0; } dev->base = dev->regs.rkt = ioremap(dev->scsi_host_ptr->base, size); if (dev->base == NULL) return -1; dev->IndexRegs = &dev->regs.rkt->IndexRegs; return 0; } /** * aac_rkt_init - initialize an i960 based AAC card * @dev: device to configure * * Allocate and set up resources for the i960 based AAC variants. The * device_interface in the commregion will be allocated and linked * to the comm region. */ int aac_rkt_init(struct aac_dev *dev) { /* * Fill in the function dispatch table. */ dev->a_ops.adapter_ioremap = aac_rkt_ioremap; dev->a_ops.adapter_comm = aac_rkt_select_comm; return _aac_rx_init(dev); }
gpl-2.0
jpihet/linux-omap
drivers/ps3/sys-manager-core.c
8553
2032
/* * PS3 System Manager core. * * Copyright (C) 2007 Sony Computer Entertainment Inc. * Copyright 2007 Sony Corp. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/kernel.h> #include <linux/export.h> #include <asm/lv1call.h> #include <asm/ps3.h> /** * Staticly linked routines that allow late binding of a loaded sys-manager * module. */ static struct ps3_sys_manager_ops ps3_sys_manager_ops; /** * ps3_register_sys_manager_ops - Bind ps3_sys_manager_ops to a module. * @ops: struct ps3_sys_manager_ops. * * To be called from ps3_sys_manager_probe() and ps3_sys_manager_remove() to * register call back ops for power control. Copies data to the static * variable ps3_sys_manager_ops. */ void ps3_sys_manager_register_ops(const struct ps3_sys_manager_ops *ops) { BUG_ON(!ops); BUG_ON(!ops->dev); ps3_sys_manager_ops = ops ? *ops : ps3_sys_manager_ops; } EXPORT_SYMBOL_GPL(ps3_sys_manager_register_ops); void ps3_sys_manager_power_off(void) { if (ps3_sys_manager_ops.power_off) ps3_sys_manager_ops.power_off(ps3_sys_manager_ops.dev); ps3_sys_manager_halt(); } void ps3_sys_manager_restart(void) { if (ps3_sys_manager_ops.restart) ps3_sys_manager_ops.restart(ps3_sys_manager_ops.dev); ps3_sys_manager_halt(); } void ps3_sys_manager_halt(void) { pr_emerg("System Halted, OK to turn off power\n"); local_irq_disable(); while (1) lv1_pause(1); }
gpl-2.0
Ken-Liu/OpenScrKernel_For_XC210
arch/x86/kernel/init_task.c
8809
1278
#include <linux/mm.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/init.h> #include <linux/init_task.h> #include <linux/fs.h> #include <linux/mqueue.h> #include <asm/uaccess.h> #include <asm/pgtable.h> #include <asm/desc.h> static struct signal_struct init_signals = INIT_SIGNALS(init_signals); static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand); /* * Initial thread structure. * * We need to make sure that this is THREAD_SIZE aligned due to the * way process stacks are handled. This is done by having a special * "init_task" linker map entry.. */ union thread_union init_thread_union __init_task_data = { INIT_THREAD_INFO(init_task) }; /* * Initial task structure. * * All other task structs will be allocated on slabs in fork.c */ struct task_struct init_task = INIT_TASK(init_task); EXPORT_SYMBOL(init_task); /* * per-CPU TSS segments. Threads are completely 'soft' on Linux, * no more per-task TSS's. The TSS size is kept cacheline-aligned * so they are allowed to end up in the .data..cacheline_aligned * section. Since TSS's are completely CPU-local, we want them * on exact cacheline boundaries, to eliminate cacheline ping-pong. */ DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
gpl-2.0
lujji/JXD-7800b-JB-kernel
drivers/video/aty/mach64_accel.c
9321
11876
/* * ATI Mach64 Hardware Acceleration */ #include <linux/delay.h> #include <linux/fb.h> #include <video/mach64.h> #include "atyfb.h" /* * Generic Mach64 routines */ /* this is for DMA GUI engine! work in progress */ typedef struct { u32 frame_buf_offset; u32 system_mem_addr; u32 command; u32 reserved; } BM_DESCRIPTOR_ENTRY; #define LAST_DESCRIPTOR (1 << 31) #define SYSTEM_TO_FRAME_BUFFER 0 static u32 rotation24bpp(u32 dx, u32 direction) { u32 rotation; if (direction & DST_X_LEFT_TO_RIGHT) { rotation = (dx / 4) % 6; } else { rotation = ((dx + 2) / 4) % 6; } return ((rotation << 8) | DST_24_ROTATION_ENABLE); } void aty_reset_engine(const struct atyfb_par *par) { /* reset engine */ aty_st_le32(GEN_TEST_CNTL, aty_ld_le32(GEN_TEST_CNTL, par) & ~(GUI_ENGINE_ENABLE | HWCURSOR_ENABLE), par); /* enable engine */ aty_st_le32(GEN_TEST_CNTL, aty_ld_le32(GEN_TEST_CNTL, par) | GUI_ENGINE_ENABLE, par); /* ensure engine is not locked up by clearing any FIFO or */ /* HOST errors */ aty_st_le32(BUS_CNTL, aty_ld_le32(BUS_CNTL, par) | BUS_HOST_ERR_ACK | BUS_FIFO_ERR_ACK, par); } static void reset_GTC_3D_engine(const struct atyfb_par *par) { aty_st_le32(SCALE_3D_CNTL, 0xc0, par); mdelay(GTC_3D_RESET_DELAY); aty_st_le32(SETUP_CNTL, 0x00, par); mdelay(GTC_3D_RESET_DELAY); aty_st_le32(SCALE_3D_CNTL, 0x00, par); mdelay(GTC_3D_RESET_DELAY); } void aty_init_engine(struct atyfb_par *par, struct fb_info *info) { u32 pitch_value; u32 vxres; /* determine modal information from global mode structure */ pitch_value = info->fix.line_length / (info->var.bits_per_pixel / 8); vxres = info->var.xres_virtual; if (info->var.bits_per_pixel == 24) { /* In 24 bpp, the engine is in 8 bpp - this requires that all */ /* horizontal coordinates and widths must be adjusted */ pitch_value *= 3; vxres *= 3; } /* On GTC (RagePro), we need to reset the 3D engine before */ if (M64_HAS(RESET_3D)) reset_GTC_3D_engine(par); /* Reset engine, enable, and clear any engine errors */ aty_reset_engine(par); /* Ensure that vga 
page pointers are set to zero - the upper */ /* page pointers are set to 1 to handle overflows in the */ /* lower page */ aty_st_le32(MEM_VGA_WP_SEL, 0x00010000, par); aty_st_le32(MEM_VGA_RP_SEL, 0x00010000, par); /* ---- Setup standard engine context ---- */ /* All GUI registers here are FIFOed - therefore, wait for */ /* the appropriate number of empty FIFO entries */ wait_for_fifo(14, par); /* enable all registers to be loaded for context loads */ aty_st_le32(CONTEXT_MASK, 0xFFFFFFFF, par); /* set destination pitch to modal pitch, set offset to zero */ aty_st_le32(DST_OFF_PITCH, (pitch_value / 8) << 22, par); /* zero these registers (set them to a known state) */ aty_st_le32(DST_Y_X, 0, par); aty_st_le32(DST_HEIGHT, 0, par); aty_st_le32(DST_BRES_ERR, 0, par); aty_st_le32(DST_BRES_INC, 0, par); aty_st_le32(DST_BRES_DEC, 0, par); /* set destination drawing attributes */ aty_st_le32(DST_CNTL, DST_LAST_PEL | DST_Y_TOP_TO_BOTTOM | DST_X_LEFT_TO_RIGHT, par); /* set source pitch to modal pitch, set offset to zero */ aty_st_le32(SRC_OFF_PITCH, (pitch_value / 8) << 22, par); /* set these registers to a known state */ aty_st_le32(SRC_Y_X, 0, par); aty_st_le32(SRC_HEIGHT1_WIDTH1, 1, par); aty_st_le32(SRC_Y_X_START, 0, par); aty_st_le32(SRC_HEIGHT2_WIDTH2, 1, par); /* set source pixel retrieving attributes */ aty_st_le32(SRC_CNTL, SRC_LINE_X_LEFT_TO_RIGHT, par); /* set host attributes */ wait_for_fifo(13, par); aty_st_le32(HOST_CNTL, 0, par); /* set pattern attributes */ aty_st_le32(PAT_REG0, 0, par); aty_st_le32(PAT_REG1, 0, par); aty_st_le32(PAT_CNTL, 0, par); /* set scissors to modal size */ aty_st_le32(SC_LEFT, 0, par); aty_st_le32(SC_TOP, 0, par); aty_st_le32(SC_BOTTOM, par->crtc.vyres - 1, par); aty_st_le32(SC_RIGHT, vxres - 1, par); /* set background color to minimum value (usually BLACK) */ aty_st_le32(DP_BKGD_CLR, 0, par); /* set foreground color to maximum value (usually WHITE) */ aty_st_le32(DP_FRGD_CLR, 0xFFFFFFFF, par); /* set write mask to effect all pixel 
bits */ aty_st_le32(DP_WRITE_MASK, 0xFFFFFFFF, par); /* set foreground mix to overpaint and background mix to */ /* no-effect */ aty_st_le32(DP_MIX, FRGD_MIX_S | BKGD_MIX_D, par); /* set primary source pixel channel to foreground color */ /* register */ aty_st_le32(DP_SRC, FRGD_SRC_FRGD_CLR, par); /* set compare functionality to false (no-effect on */ /* destination) */ wait_for_fifo(3, par); aty_st_le32(CLR_CMP_CLR, 0, par); aty_st_le32(CLR_CMP_MASK, 0xFFFFFFFF, par); aty_st_le32(CLR_CMP_CNTL, 0, par); /* set pixel depth */ wait_for_fifo(2, par); aty_st_le32(DP_PIX_WIDTH, par->crtc.dp_pix_width, par); aty_st_le32(DP_CHAIN_MASK, par->crtc.dp_chain_mask, par); wait_for_fifo(5, par); aty_st_le32(SCALE_3D_CNTL, 0, par); aty_st_le32(Z_CNTL, 0, par); aty_st_le32(CRTC_INT_CNTL, aty_ld_le32(CRTC_INT_CNTL, par) & ~0x20, par); aty_st_le32(GUI_TRAJ_CNTL, 0x100023, par); /* insure engine is idle before leaving */ wait_for_idle(par); } /* * Accelerated functions */ static inline void draw_rect(s16 x, s16 y, u16 width, u16 height, struct atyfb_par *par) { /* perform rectangle fill */ wait_for_fifo(2, par); aty_st_le32(DST_Y_X, (x << 16) | y, par); aty_st_le32(DST_HEIGHT_WIDTH, (width << 16) | height, par); par->blitter_may_be_busy = 1; } void atyfb_copyarea(struct fb_info *info, const struct fb_copyarea *area) { struct atyfb_par *par = (struct atyfb_par *) info->par; u32 dy = area->dy, sy = area->sy, direction = DST_LAST_PEL; u32 sx = area->sx, dx = area->dx, width = area->width, rotation = 0; if (par->asleep) return; if (!area->width || !area->height) return; if (!par->accel_flags) { cfb_copyarea(info, area); return; } if (info->var.bits_per_pixel == 24) { /* In 24 bpp, the engine is in 8 bpp - this requires that all */ /* horizontal coordinates and widths must be adjusted */ sx *= 3; dx *= 3; width *= 3; } if (area->sy < area->dy) { dy += area->height - 1; sy += area->height - 1; } else direction |= DST_Y_TOP_TO_BOTTOM; if (sx < dx) { dx += width - 1; sx += width - 1; } else 
direction |= DST_X_LEFT_TO_RIGHT; if (info->var.bits_per_pixel == 24) { rotation = rotation24bpp(dx, direction); } wait_for_fifo(4, par); aty_st_le32(DP_SRC, FRGD_SRC_BLIT, par); aty_st_le32(SRC_Y_X, (sx << 16) | sy, par); aty_st_le32(SRC_HEIGHT1_WIDTH1, (width << 16) | area->height, par); aty_st_le32(DST_CNTL, direction | rotation, par); draw_rect(dx, dy, width, area->height, par); } void atyfb_fillrect(struct fb_info *info, const struct fb_fillrect *rect) { struct atyfb_par *par = (struct atyfb_par *) info->par; u32 color, dx = rect->dx, width = rect->width, rotation = 0; if (par->asleep) return; if (!rect->width || !rect->height) return; if (!par->accel_flags) { cfb_fillrect(info, rect); return; } if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) color = ((u32 *)(info->pseudo_palette))[rect->color]; else color = rect->color; if (info->var.bits_per_pixel == 24) { /* In 24 bpp, the engine is in 8 bpp - this requires that all */ /* horizontal coordinates and widths must be adjusted */ dx *= 3; width *= 3; rotation = rotation24bpp(dx, DST_X_LEFT_TO_RIGHT); } wait_for_fifo(3, par); aty_st_le32(DP_FRGD_CLR, color, par); aty_st_le32(DP_SRC, BKGD_SRC_BKGD_CLR | FRGD_SRC_FRGD_CLR | MONO_SRC_ONE, par); aty_st_le32(DST_CNTL, DST_LAST_PEL | DST_Y_TOP_TO_BOTTOM | DST_X_LEFT_TO_RIGHT | rotation, par); draw_rect(dx, rect->dy, width, rect->height, par); } void atyfb_imageblit(struct fb_info *info, const struct fb_image *image) { struct atyfb_par *par = (struct atyfb_par *) info->par; u32 src_bytes, dx = image->dx, dy = image->dy, width = image->width; u32 pix_width_save, pix_width, host_cntl, rotation = 0, src, mix; if (par->asleep) return; if (!image->width || !image->height) return; if (!par->accel_flags || (image->depth != 1 && info->var.bits_per_pixel != image->depth)) { cfb_imageblit(info, image); return; } pix_width = pix_width_save = aty_ld_le32(DP_PIX_WIDTH, par); host_cntl = aty_ld_le32(HOST_CNTL, par) | HOST_BYTE_ALIGN; switch 
(image->depth) { case 1: pix_width &= ~(BYTE_ORDER_MASK | HOST_MASK); pix_width |= (BYTE_ORDER_MSB_TO_LSB | HOST_1BPP); break; case 4: pix_width &= ~(BYTE_ORDER_MASK | HOST_MASK); pix_width |= (BYTE_ORDER_MSB_TO_LSB | HOST_4BPP); break; case 8: pix_width &= ~HOST_MASK; pix_width |= HOST_8BPP; break; case 15: pix_width &= ~HOST_MASK; pix_width |= HOST_15BPP; break; case 16: pix_width &= ~HOST_MASK; pix_width |= HOST_16BPP; break; case 24: pix_width &= ~HOST_MASK; pix_width |= HOST_24BPP; break; case 32: pix_width &= ~HOST_MASK; pix_width |= HOST_32BPP; break; } if (info->var.bits_per_pixel == 24) { /* In 24 bpp, the engine is in 8 bpp - this requires that all */ /* horizontal coordinates and widths must be adjusted */ dx *= 3; width *= 3; rotation = rotation24bpp(dx, DST_X_LEFT_TO_RIGHT); pix_width &= ~DST_MASK; pix_width |= DST_8BPP; /* * since Rage 3D IIc we have DP_HOST_TRIPLE_EN bit * this hwaccelerated triple has an issue with not aligned data */ if (M64_HAS(HW_TRIPLE) && image->width % 8 == 0) pix_width |= DP_HOST_TRIPLE_EN; } if (image->depth == 1) { u32 fg, bg; if (info->fix.visual == FB_VISUAL_TRUECOLOR || info->fix.visual == FB_VISUAL_DIRECTCOLOR) { fg = ((u32*)(info->pseudo_palette))[image->fg_color]; bg = ((u32*)(info->pseudo_palette))[image->bg_color]; } else { fg = image->fg_color; bg = image->bg_color; } wait_for_fifo(2, par); aty_st_le32(DP_BKGD_CLR, bg, par); aty_st_le32(DP_FRGD_CLR, fg, par); src = MONO_SRC_HOST | FRGD_SRC_FRGD_CLR | BKGD_SRC_BKGD_CLR; mix = FRGD_MIX_S | BKGD_MIX_S; } else { src = MONO_SRC_ONE | FRGD_SRC_HOST; mix = FRGD_MIX_D_XOR_S | BKGD_MIX_D; } wait_for_fifo(6, par); aty_st_le32(DP_WRITE_MASK, 0xFFFFFFFF, par); aty_st_le32(DP_PIX_WIDTH, pix_width, par); aty_st_le32(DP_MIX, mix, par); aty_st_le32(DP_SRC, src, par); aty_st_le32(HOST_CNTL, host_cntl, par); aty_st_le32(DST_CNTL, DST_Y_TOP_TO_BOTTOM | DST_X_LEFT_TO_RIGHT | rotation, par); draw_rect(dx, dy, width, image->height, par); src_bytes = (((image->width * image->depth) + 7) 
/ 8) * image->height; /* manual triple each pixel */ if (info->var.bits_per_pixel == 24 && !(pix_width & DP_HOST_TRIPLE_EN)) { int inbit, outbit, mult24, byte_id_in_dword, width; u8 *pbitmapin = (u8*)image->data, *pbitmapout; u32 hostdword; for (width = image->width, inbit = 7, mult24 = 0; src_bytes; ) { for (hostdword = 0, pbitmapout = (u8*)&hostdword, byte_id_in_dword = 0; byte_id_in_dword < 4 && src_bytes; byte_id_in_dword++, pbitmapout++) { for (outbit = 7; outbit >= 0; outbit--) { *pbitmapout |= (((*pbitmapin >> inbit) & 1) << outbit); mult24++; /* next bit */ if (mult24 == 3) { mult24 = 0; inbit--; width--; } /* next byte */ if (inbit < 0 || width == 0) { src_bytes--; pbitmapin++; inbit = 7; if (width == 0) { width = image->width; outbit = 0; } } } } wait_for_fifo(1, par); aty_st_le32(HOST_DATA0, hostdword, par); } } else { u32 *pbitmap, dwords = (src_bytes + 3) / 4; for (pbitmap = (u32*)(image->data); dwords; dwords--, pbitmap++) { wait_for_fifo(1, par); aty_st_le32(HOST_DATA0, le32_to_cpup(pbitmap), par); } } /* restore pix_width */ wait_for_fifo(1, par); aty_st_le32(DP_PIX_WIDTH, pix_width_save, par); }
gpl-2.0
baselsayeh/Kyleopen-4.4
net/ceph/buffer.c
10857
1396
#include <linux/ceph/ceph_debug.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/ceph/buffer.h>
#include <linux/ceph/decode.h>

/*
 * Allocate a reference-counted ceph_buffer with a data area of @len
 * bytes, using @gfp for both allocations.  Returns NULL on failure.
 * The caller holds the initial kref; drop it through ceph_buffer_release.
 */
struct ceph_buffer *ceph_buffer_new(size_t len, gfp_t gfp)
{
	struct ceph_buffer *b;

	b = kmalloc(sizeof(*b), gfp);
	if (!b)
		return NULL;

	/*
	 * Try a physically contiguous allocation first; __GFP_NOWARN
	 * suppresses the failure warning because we have a vmalloc
	 * fallback for large or fragmented requests.
	 */
	b->vec.iov_base = kmalloc(len, gfp | __GFP_NOWARN);
	if (b->vec.iov_base) {
		b->is_vmalloc = false;
	} else {
		/* Fall back to virtually contiguous memory. */
		b->vec.iov_base = __vmalloc(len, gfp | __GFP_HIGHMEM, PAGE_KERNEL);
		if (!b->vec.iov_base) {
			kfree(b);
			return NULL;
		}
		/* Remembered so release uses the matching free routine. */
		b->is_vmalloc = true;
	}

	kref_init(&b->kref);
	b->alloc_len = len;
	b->vec.iov_len = len;
	dout("buffer_new %p\n", b);
	return b;
}
EXPORT_SYMBOL(ceph_buffer_new);

/*
 * kref release callback for ceph_buffer: free the data area with the
 * allocator that produced it (vfree vs kfree, per is_vmalloc), then
 * free the struct itself.
 */
void ceph_buffer_release(struct kref *kref)
{
	struct ceph_buffer *b = container_of(kref, struct ceph_buffer, kref);

	dout("buffer_release %p\n", b);
	if (b->vec.iov_base) {
		if (b->is_vmalloc)
			vfree(b->vec.iov_base);
		else
			kfree(b->vec.iov_base);
	}
	kfree(b);
}
EXPORT_SYMBOL(ceph_buffer_release);

/*
 * Decode a 32-bit-length-prefixed buffer from the byte stream between
 * *p and end into a freshly allocated ceph_buffer (*b), advancing *p.
 * Returns 0 on success, -EINVAL on a truncated stream, -ENOMEM on
 * allocation failure.
 */
int ceph_decode_buffer(struct ceph_buffer **b, void **p, void *end)
{
	size_t len;

	/*
	 * ceph_decode_need() is a macro that jumps to the "bad" label
	 * when fewer than the requested bytes remain before @end.
	 */
	ceph_decode_need(p, end, sizeof(u32), bad);
	len = ceph_decode_32(p);
	dout("decode_buffer len %d\n", (int)len);
	/* Re-check bounds against the decoded payload length. */
	ceph_decode_need(p, end, len, bad);
	*b = ceph_buffer_new(len, GFP_NOFS);
	if (!*b)
		return -ENOMEM;
	ceph_decode_copy(p, (*b)->vec.iov_base, len);
	return 0;
bad:
	return -EINVAL;
}
gpl-2.0
CyanogenMod/lge-kernel-sniper
arch/mips/fw/arc/init.c
11881
1267
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * PROM library initialisation code.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 */
#include <linux/init.h>
#include <linux/kernel.h>

#include <asm/bootinfo.h>
#include <asm/sgialib.h>
#include <asm/smp-ops.h>

#undef DEBUG_PROM_INIT

/* Master romvec interface. */
struct linux_romvec *romvec;
int prom_argc;
LONG *_prom_argv, *_prom_envp;

/*
 * Early ARC firmware setup: latch the firmware-provided romvec and the
 * raw argc/argv/envp the loader handed over, sanity-check the system
 * parameter block magic, then pull the command line, architecture
 * identity and memory map from the PROM.
 */
void __init prom_init(void)
{
	PSYSTEM_PARAMETER_BLOCK pb = PROMBLOCK;

	romvec = ROMVECTOR;

	/* fw_arg0..2 carry the registers the firmware passed to the kernel. */
	prom_argc = fw_arg0;
	_prom_argv = (LONG *) fw_arg1;
	_prom_envp = (LONG *) fw_arg2;

	/*
	 * 0x53435241 is "ARCS" stored little-endian; anything else means
	 * we were not started by ARC firmware and no romvec call is safe,
	 * so spin rather than touch bogus vectors.
	 */
	if (pb->magic != 0x53435241) {
		printk(KERN_CRIT "Aieee, bad prom vector magic %08lx\n",
		       (unsigned long) pb->magic);
		while (1)
			;
	}

	prom_init_cmdline();
	prom_identify_arch();
	printk(KERN_INFO "PROMLIB: ARC firmware Version %d Revision %d\n",
	       pb->ver, pb->rev);
	prom_meminit();

#ifdef DEBUG_PROM_INIT
	{
		/*
		 * Declared here so this path actually compiles when the
		 * #undef above is flipped to a #define: the original code
		 * referenced 'c' and 'cnt' without declaring them anywhere.
		 */
		char c;
		ULONG cnt;

		pr_info("Press a key to reboot\n");
		ArcRead(0, &c, 1, &cnt);
		ArcEnterInteractiveMode();
	}
#endif
#ifdef CONFIG_SGI_IP27
	{
		extern struct plat_smp_ops ip27_smp_ops;

		register_smp_ops(&ip27_smp_ops);
	}
#endif
}
gpl-2.0
MSM8226-Samsung/kernel_samsung_msm8226
fs/jfs/jfs_uniupr.c
14953
7707
/* * Copyright (C) International Business Machines Corp., 2000-2002 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include "jfs_unicode.h" /* * Latin upper case */ signed char UniUpperTable[512] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 000-00f */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 010-01f */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 020-02f */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 030-03f */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 040-04f */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 050-05f */ 0,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32, /* 060-06f */ -32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32, 0, 0, 0, 0, 0, /* 070-07f */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 080-08f */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 090-09f */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0a0-0af */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0b0-0bf */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0c0-0cf */ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 0d0-0df */ -32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32, /* 0e0-0ef */ -32,-32,-32,-32,-32,-32,-32, 0,-32,-32,-32,-32,-32,-32,-32,121, /* 0f0-0ff */ 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 100-10f */ 
0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 110-11f */ 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 120-12f */ 0, 0, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1, 0, /* 130-13f */ -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1, /* 140-14f */ 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 150-15f */ 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 160-16f */ 0, -1, 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, -1, 0, -1, 0, /* 170-17f */ 0, 0, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, -1, 0, 0, 0, /* 180-18f */ 0, 0, -1, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, /* 190-19f */ 0, -1, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, 0, -1, 0, 0, /* 1a0-1af */ -1, 0, 0, 0, -1, 0, -1, 0, 0, -1, 0, 0, 0, -1, 0, 0, /* 1b0-1bf */ 0, 0, 0, 0, 0, -1, -2, 0, -1, -2, 0, -1, -2, 0, -1, 0, /* 1c0-1cf */ -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1,-79, 0, -1, /* 1d0-1df */ 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e0-1ef */ 0, 0, -1, -2, 0, -1, 0, 0, 0, -1, 0, -1, 0, -1, 0, -1, /* 1f0-1ff */ }; /* Upper case range - Greek */ static signed char UniCaseRangeU03a0[47] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,-38,-37,-37,-37, /* 3a0-3af */ 0,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32, /* 3b0-3bf */ -32,-32,-31,-32,-32,-32,-32,-32,-32,-32,-32,-32,-64,-63,-63, }; /* Upper case range - Cyrillic */ static signed char UniCaseRangeU0430[48] = { -32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32, /* 430-43f */ -32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32, /* 440-44f */ 0,-80,-80,-80,-80,-80,-80,-80,-80,-80,-80,-80,-80, 0,-80,-80, /* 450-45f */ }; /* Upper case range - Extended cyrillic */ static signed char UniCaseRangeU0490[61] = { 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 490-49f */ 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 4a0-4af */ 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 4b0-4bf */ 0, 0, -1, 0, -1, 0, 0, 0, -1, 0, 0, 0, -1, }; /* Upper case range - 
Extended latin and greek */ static signed char UniCaseRangeU1e00[509] = { 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e00-1e0f */ 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e10-1e1f */ 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e20-1e2f */ 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e30-1e3f */ 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e40-1e4f */ 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e50-1e5f */ 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e60-1e6f */ 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e70-1e7f */ 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1e80-1e8f */ 0, -1, 0, -1, 0, -1, 0, 0, 0, 0, 0,-59, 0, -1, 0, -1, /* 1e90-1e9f */ 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1ea0-1eaf */ 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1eb0-1ebf */ 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1ec0-1ecf */ 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1ed0-1edf */ 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, /* 1ee0-1eef */ 0, -1, 0, -1, 0, -1, 0, -1, 0, -1, 0, 0, 0, 0, 0, 0, /* 1ef0-1eff */ 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f00-1f0f */ 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f10-1f1f */ 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f20-1f2f */ 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f30-1f3f */ 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f40-1f4f */ 0, 8, 0, 8, 0, 8, 0, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f50-1f5f */ 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f60-1f6f */ 74, 74, 86, 86, 86, 86,100,100, 0, 0,112,112,126,126, 0, 0, /* 1f70-1f7f */ 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f80-1f8f */ 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1f90-1f9f */ 8, 8, 8, 8, 8, 8, 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, /* 1fa0-1faf */ 8, 8, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1fb0-1fbf 
*/ 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1fc0-1fcf */ 8, 8, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1fd0-1fdf */ 8, 8, 0, 0, 0, 7, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 1fe0-1fef */ 0, 0, 0, 9, 0, 0, 0, 0, 0, 0, 0, 0, 0, }; /* Upper case range - Wide latin */ static signed char UniCaseRangeUff40[27] = { 0,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32, /* ff40-ff4f */ -32,-32,-32,-32,-32,-32,-32,-32,-32,-32,-32, }; /* * Upper Case Range */ UNICASERANGE UniUpperRange[] = { { 0x03a0, 0x03ce, UniCaseRangeU03a0 }, { 0x0430, 0x045f, UniCaseRangeU0430 }, { 0x0490, 0x04cc, UniCaseRangeU0490 }, { 0x1e00, 0x1ffc, UniCaseRangeU1e00 }, { 0xff40, 0xff5a, UniCaseRangeUff40 }, { 0 } };
gpl-2.0
viaembedded/vab820-kernel-bsp-old
arch/mips/ralink/cevt-rt3352.c
1386
3701
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2013 by John Crispin <blogic@openwrt.org>
 */

#include <linux/clockchips.h>
#include <linux/clocksource.h>
#include <linux/interrupt.h>
#include <linux/reset.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/of_address.h>

#include <asm/mach-ralink/ralink_regs.h>

#define SYSTICK_FREQ		(50 * 1000)

#define SYSTICK_CONFIG		0x00
#define SYSTICK_COMPARE		0x04
#define SYSTICK_COUNT		0x08

/* route systick irq to mips irq 7 instead of the r4k-timer */
#define CFG_EXT_STK_EN		0x2
/* enable the counter */
#define CFG_CNT_EN		0x1

struct systick_device {
	void __iomem *membase;		/* mapped systick register block */
	struct clock_event_device dev;
	int irq_requested;		/* non-zero while our irqaction is installed */
	int freq_scale;
};

static void systick_set_clock_mode(enum clock_event_mode mode,
				   struct clock_event_device *evt);

/*
 * Program the next tick: the compare register must hold the current
 * counter value plus @delta, wrapped at SYSTICK_FREQ since the counter
 * itself wraps there.
 */
static int systick_next_event(unsigned long delta,
			      struct clock_event_device *evt)
{
	struct systick_device *sdev;
	u32 count;

	sdev = container_of(evt, struct systick_device, dev);
	count = ioread32(sdev->membase + SYSTICK_COUNT);
	count = (count + delta) % SYSTICK_FREQ;
	/*
	 * Write the wrapped target itself.  The original wrote
	 * "count + delta" here, adding delta a second time on top of the
	 * already-updated count, so every event was programmed late.
	 */
	iowrite32(count, sdev->membase + SYSTICK_COMPARE);

	return 0;
}

static void systick_event_handler(struct clock_event_device *dev)
{
	/* nothing to do here */
}

/* Timer irq: forward to whatever handler the clockevents core installed. */
static irqreturn_t systick_interrupt(int irq, void *dev_id)
{
	struct clock_event_device *dev = (struct clock_event_device *) dev_id;

	dev->event_handler(dev);

	return IRQ_HANDLED;
}

static struct systick_device systick = {
	.dev = {
		/*
		 * cevt-r4k uses 300, make sure systick
		 * gets used if available
		 */
		.rating		= 310,
		.features	= CLOCK_EVT_FEAT_ONESHOT,
		.set_next_event	= systick_next_event,
		.set_mode	= systick_set_clock_mode,
		.event_handler	= systick_event_handler,
	},
};

static struct irqaction systick_irqaction = {
	.handler	= systick_interrupt,
	.flags		= IRQF_PERCPU | IRQF_TIMER,
	.dev_id		= &systick.dev,
};

/*
 * Mode switch: ONESHOT installs the timer irq (once) and enables the
 * counter; SHUTDOWN releases the irq and stops the counter.
 */
static void systick_set_clock_mode(enum clock_event_mode mode,
				   struct clock_event_device *evt)
{
	struct systick_device *sdev;

	sdev = container_of(evt, struct systick_device, dev);

	switch (mode) {
	case CLOCK_EVT_MODE_ONESHOT:
		if (!sdev->irq_requested)
			setup_irq(systick.dev.irq, &systick_irqaction);
		sdev->irq_requested = 1;
		iowrite32(CFG_EXT_STK_EN | CFG_CNT_EN,
			  systick.membase + SYSTICK_CONFIG);
		break;

	case CLOCK_EVT_MODE_SHUTDOWN:
		/*
		 * NOTE(review): pairs setup_irq() with free_irq(), and the
		 * cookie passed is the irqaction rather than its .dev_id —
		 * kept exactly as the original behaved; verify against the
		 * upstream rework if this teardown path is ever exercised.
		 */
		if (sdev->irq_requested)
			free_irq(systick.dev.irq, &systick_irqaction);
		sdev->irq_requested = 0;
		iowrite32(0, systick.membase + SYSTICK_CONFIG);
		break;

	default:
		pr_err("%s: Unhandeled mips clock_mode\n", systick.dev.name);
		break;
	}
}

/*
 * Devicetree probe: map the register block, size the clockevent
 * (mult/shift for SYSTICK_FREQ, 15-bit max delta), wire up the irq,
 * and register both a clocksource (free-running COUNT register) and
 * the clockevent device.
 */
static void __init ralink_systick_init(struct device_node *np)
{
	systick.membase = of_iomap(np, 0);
	if (!systick.membase)
		return;

	systick_irqaction.name = np->name;
	systick.dev.name = np->name;
	clockevents_calc_mult_shift(&systick.dev, SYSTICK_FREQ, 60);
	systick.dev.max_delta_ns = clockevent_delta2ns(0x7fff, &systick.dev);
	systick.dev.min_delta_ns = clockevent_delta2ns(0x3, &systick.dev);
	systick.dev.irq = irq_of_parse_and_map(np, 0);
	if (!systick.dev.irq) {
		pr_err("%s: request_irq failed", np->name);
		return;
	}

	clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
			      SYSTICK_FREQ, 301, 16,
			      clocksource_mmio_readl_up);

	clockevents_register_device(&systick.dev);

	pr_info("%s: running - mult: %d, shift: %d\n",
		np->name, systick.dev.mult, systick.dev.shift);
}

CLOCKSOURCE_OF_DECLARE(systick, "ralink,cevt-systick", ralink_systick_init);
gpl-2.0
zarboz/android_kernel_flounder
arch/alpha/kernel/traps.c
1898
27624
/* * arch/alpha/kernel/traps.c * * (C) Copyright 1994 Linus Torvalds */ /* * This file initializes the trap entry points */ #include <linux/jiffies.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/tty.h> #include <linux/delay.h> #include <linux/module.h> #include <linux/init.h> #include <linux/kallsyms.h> #include <linux/ratelimit.h> #include <asm/gentrap.h> #include <asm/uaccess.h> #include <asm/unaligned.h> #include <asm/sysinfo.h> #include <asm/hwrpb.h> #include <asm/mmu_context.h> #include <asm/special_insns.h> #include "proto.h" /* Work-around for some SRMs which mishandle opDEC faults. */ static int opDEC_fix; static void __cpuinit opDEC_check(void) { __asm__ __volatile__ ( /* Load the address of... */ " br $16, 1f\n" /* A stub instruction fault handler. Just add 4 to the pc and continue. */ " ldq $16, 8($sp)\n" " addq $16, 4, $16\n" " stq $16, 8($sp)\n" " call_pal %[rti]\n" /* Install the instruction fault handler. */ "1: lda $17, 3\n" " call_pal %[wrent]\n" /* With that in place, the fault from the round-to-minf fp insn will arrive either at the "lda 4" insn (bad) or one past that (good). This places the correct fixup in %0. 
*/ " lda %[fix], 0\n" " cvttq/svm $f31,$f31\n" " lda %[fix], 4" : [fix] "=r" (opDEC_fix) : [rti] "n" (PAL_rti), [wrent] "n" (PAL_wrent) : "$0", "$1", "$16", "$17", "$22", "$23", "$24", "$25"); if (opDEC_fix) printk("opDEC fixup enabled.\n"); } void dik_show_regs(struct pt_regs *regs, unsigned long *r9_15) { printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n", regs->pc, regs->r26, regs->ps, print_tainted()); print_symbol("pc is at %s\n", regs->pc); print_symbol("ra is at %s\n", regs->r26 ); printk("v0 = %016lx t0 = %016lx t1 = %016lx\n", regs->r0, regs->r1, regs->r2); printk("t2 = %016lx t3 = %016lx t4 = %016lx\n", regs->r3, regs->r4, regs->r5); printk("t5 = %016lx t6 = %016lx t7 = %016lx\n", regs->r6, regs->r7, regs->r8); if (r9_15) { printk("s0 = %016lx s1 = %016lx s2 = %016lx\n", r9_15[9], r9_15[10], r9_15[11]); printk("s3 = %016lx s4 = %016lx s5 = %016lx\n", r9_15[12], r9_15[13], r9_15[14]); printk("s6 = %016lx\n", r9_15[15]); } printk("a0 = %016lx a1 = %016lx a2 = %016lx\n", regs->r16, regs->r17, regs->r18); printk("a3 = %016lx a4 = %016lx a5 = %016lx\n", regs->r19, regs->r20, regs->r21); printk("t8 = %016lx t9 = %016lx t10= %016lx\n", regs->r22, regs->r23, regs->r24); printk("t11= %016lx pv = %016lx at = %016lx\n", regs->r25, regs->r27, regs->r28); printk("gp = %016lx sp = %p\n", regs->gp, regs+1); #if 0 __halt(); #endif } #if 0 static char * ireg_name[] = {"v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6", "t7", "s0", "s1", "s2", "s3", "s4", "s5", "s6", "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9", "t10", "t11", "ra", "pv", "at", "gp", "sp", "zero"}; #endif static void dik_show_code(unsigned int *pc) { long i; printk("Code:"); for (i = -6; i < 2; i++) { unsigned int insn; if (__get_user(insn, (unsigned int __user *)pc + i)) break; printk("%c%08x%c", i ? ' ' : '<', insn, i ? 
' ' : '>'); } printk("\n"); } static void dik_show_trace(unsigned long *sp) { long i = 0; printk("Trace:\n"); while (0x1ff8 & (unsigned long) sp) { extern char _stext[], _etext[]; unsigned long tmp = *sp; sp++; if (tmp < (unsigned long) &_stext) continue; if (tmp >= (unsigned long) &_etext) continue; printk("[<%lx>]", tmp); print_symbol(" %s", tmp); printk("\n"); if (i > 40) { printk(" ..."); break; } } printk("\n"); } static int kstack_depth_to_print = 24; void show_stack(struct task_struct *task, unsigned long *sp) { unsigned long *stack; int i; /* * debugging aid: "show_stack(NULL);" prints the * back trace for this cpu. */ if(sp==NULL) sp=(unsigned long*)&sp; stack = sp; for(i=0; i < kstack_depth_to_print; i++) { if (((long) stack & (THREAD_SIZE-1)) == 0) break; if (i && ((i % 4) == 0)) printk("\n "); printk("%016lx ", *stack++); } printk("\n"); dik_show_trace(sp); } void die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15) { if (regs->ps & 8) return; #ifdef CONFIG_SMP printk("CPU %d ", hard_smp_processor_id()); #endif printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err); dik_show_regs(regs, r9_15); add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE); dik_show_trace((unsigned long *)(regs+1)); dik_show_code((unsigned int *)regs->pc); if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) { printk("die_if_kernel recursion detected.\n"); local_irq_enable(); while (1); } do_exit(SIGSEGV); } #ifndef CONFIG_MATHEMU static long dummy_emul(void) { return 0; } long (*alpha_fp_emul_imprecise)(struct pt_regs *regs, unsigned long writemask) = (void *)dummy_emul; long (*alpha_fp_emul) (unsigned long pc) = (void *)dummy_emul; #else long alpha_fp_emul_imprecise(struct pt_regs *regs, unsigned long writemask); long alpha_fp_emul (unsigned long pc); #endif asmlinkage void do_entArith(unsigned long summary, unsigned long write_mask, struct pt_regs *regs) { long si_code = FPE_FLTINV; siginfo_t info; if (summary & 1) { /* Software-completion 
summary bit is set, so try to emulate the instruction. If the processor supports precise exceptions, we don't have to search. */ if (!amask(AMASK_PRECISE_TRAP)) si_code = alpha_fp_emul(regs->pc - 4); else si_code = alpha_fp_emul_imprecise(regs, write_mask); if (si_code == 0) return; } die_if_kernel("Arithmetic fault", regs, 0, NULL); info.si_signo = SIGFPE; info.si_errno = 0; info.si_code = si_code; info.si_addr = (void __user *) regs->pc; send_sig_info(SIGFPE, &info, current); } asmlinkage void do_entIF(unsigned long type, struct pt_regs *regs) { siginfo_t info; int signo, code; if ((regs->ps & ~IPL_MAX) == 0) { if (type == 1) { const unsigned int *data = (const unsigned int *) regs->pc; printk("Kernel bug at %s:%d\n", (const char *)(data[1] | (long)data[2] << 32), data[0]); } die_if_kernel((type == 1 ? "Kernel Bug" : "Instruction fault"), regs, type, NULL); } switch (type) { case 0: /* breakpoint */ info.si_signo = SIGTRAP; info.si_errno = 0; info.si_code = TRAP_BRKPT; info.si_trapno = 0; info.si_addr = (void __user *) regs->pc; if (ptrace_cancel_bpt(current)) { regs->pc -= 4; /* make pc point to former bpt */ } send_sig_info(SIGTRAP, &info, current); return; case 1: /* bugcheck */ info.si_signo = SIGTRAP; info.si_errno = 0; info.si_code = __SI_FAULT; info.si_addr = (void __user *) regs->pc; info.si_trapno = 0; send_sig_info(SIGTRAP, &info, current); return; case 2: /* gentrap */ info.si_addr = (void __user *) regs->pc; info.si_trapno = regs->r16; switch ((long) regs->r16) { case GEN_INTOVF: signo = SIGFPE; code = FPE_INTOVF; break; case GEN_INTDIV: signo = SIGFPE; code = FPE_INTDIV; break; case GEN_FLTOVF: signo = SIGFPE; code = FPE_FLTOVF; break; case GEN_FLTDIV: signo = SIGFPE; code = FPE_FLTDIV; break; case GEN_FLTUND: signo = SIGFPE; code = FPE_FLTUND; break; case GEN_FLTINV: signo = SIGFPE; code = FPE_FLTINV; break; case GEN_FLTINE: signo = SIGFPE; code = FPE_FLTRES; break; case GEN_ROPRAND: signo = SIGFPE; code = __SI_FAULT; break; case GEN_DECOVF: case 
GEN_DECDIV: case GEN_DECINV: case GEN_ASSERTERR: case GEN_NULPTRERR: case GEN_STKOVF: case GEN_STRLENERR: case GEN_SUBSTRERR: case GEN_RANGERR: case GEN_SUBRNG: case GEN_SUBRNG1: case GEN_SUBRNG2: case GEN_SUBRNG3: case GEN_SUBRNG4: case GEN_SUBRNG5: case GEN_SUBRNG6: case GEN_SUBRNG7: default: signo = SIGTRAP; code = __SI_FAULT; break; } info.si_signo = signo; info.si_errno = 0; info.si_code = code; info.si_addr = (void __user *) regs->pc; send_sig_info(signo, &info, current); return; case 4: /* opDEC */ if (implver() == IMPLVER_EV4) { long si_code; /* The some versions of SRM do not handle the opDEC properly - they return the PC of the opDEC fault, not the instruction after as the Alpha architecture requires. Here we fix it up. We do this by intentionally causing an opDEC fault during the boot sequence and testing if we get the correct PC. If not, we set a flag to correct it every time through. */ regs->pc += opDEC_fix; /* EV4 does not implement anything except normal rounding. Everything else will come here as an illegal instruction. Emulate them. */ si_code = alpha_fp_emul(regs->pc - 4); if (si_code == 0) return; if (si_code > 0) { info.si_signo = SIGFPE; info.si_errno = 0; info.si_code = si_code; info.si_addr = (void __user *) regs->pc; send_sig_info(SIGFPE, &info, current); return; } } break; case 3: /* FEN fault */ /* Irritating users can call PAL_clrfen to disable the FPU for the process. The kernel will then trap in do_switch_stack and undo_switch_stack when we try to save and restore the FP registers. Given that GCC by default generates code that uses the FP registers, PAL_clrfen is not useful except for DoS attacks. So turn the bleeding FPU back on and be done with it. 
*/ current_thread_info()->pcb.flags |= 1; __reload_thread(&current_thread_info()->pcb); return; case 5: /* illoc */ default: /* unexpected instruction-fault type */ ; } info.si_signo = SIGILL; info.si_errno = 0; info.si_code = ILL_ILLOPC; info.si_addr = (void __user *) regs->pc; send_sig_info(SIGILL, &info, current); } /* There is an ifdef in the PALcode in MILO that enables a "kernel debugging entry point" as an unprivileged call_pal. We don't want to have anything to do with it, but unfortunately several versions of MILO included in distributions have it enabled, and if we don't put something on the entry point we'll oops. */ asmlinkage void do_entDbg(struct pt_regs *regs) { siginfo_t info; die_if_kernel("Instruction fault", regs, 0, NULL); info.si_signo = SIGILL; info.si_errno = 0; info.si_code = ILL_ILLOPC; info.si_addr = (void __user *) regs->pc; force_sig_info(SIGILL, &info, current); } /* * entUna has a different register layout to be reasonably simple. It * needs access to all the integer registers (the kernel doesn't use * fp-regs), and it needs to have them in order for simpler access. * * Due to the non-standard register layout (and because we don't want * to handle floating-point regs), user-mode unaligned accesses are * handled separately by do_entUnaUser below. * * Oh, btw, we don't handle the "gp" register correctly, but if we fault * on a gp-register unaligned load/store, something is _very_ wrong * in the kernel anyway.. */ struct allregs { unsigned long regs[32]; unsigned long ps, pc, gp, a0, a1, a2; }; struct unaligned_stat { unsigned long count, va, pc; } unaligned[2]; /* Macro for exception fixup code to access integer registers. */ #define una_reg(r) (_regs[(r) >= 16 && (r) <= 18 ? 
(r)+19 : (r)]) asmlinkage void do_entUna(void * va, unsigned long opcode, unsigned long reg, struct allregs *regs) { long error, tmp1, tmp2, tmp3, tmp4; unsigned long pc = regs->pc - 4; unsigned long *_regs = regs->regs; const struct exception_table_entry *fixup; unaligned[0].count++; unaligned[0].va = (unsigned long) va; unaligned[0].pc = pc; /* We don't want to use the generic get/put unaligned macros as we want to trap exceptions. Only if we actually get an exception will we decide whether we should have caught it. */ switch (opcode) { case 0x0c: /* ldwu */ __asm__ __volatile__( "1: ldq_u %1,0(%3)\n" "2: ldq_u %2,1(%3)\n" " extwl %1,%3,%1\n" " extwh %2,%3,%2\n" "3:\n" ".section __ex_table,\"a\"\n" " .long 1b - .\n" " lda %1,3b-1b(%0)\n" " .long 2b - .\n" " lda %2,3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) goto got_exception; una_reg(reg) = tmp1|tmp2; return; case 0x28: /* ldl */ __asm__ __volatile__( "1: ldq_u %1,0(%3)\n" "2: ldq_u %2,3(%3)\n" " extll %1,%3,%1\n" " extlh %2,%3,%2\n" "3:\n" ".section __ex_table,\"a\"\n" " .long 1b - .\n" " lda %1,3b-1b(%0)\n" " .long 2b - .\n" " lda %2,3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) goto got_exception; una_reg(reg) = (int)(tmp1|tmp2); return; case 0x29: /* ldq */ __asm__ __volatile__( "1: ldq_u %1,0(%3)\n" "2: ldq_u %2,7(%3)\n" " extql %1,%3,%1\n" " extqh %2,%3,%2\n" "3:\n" ".section __ex_table,\"a\"\n" " .long 1b - .\n" " lda %1,3b-1b(%0)\n" " .long 2b - .\n" " lda %2,3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) goto got_exception; una_reg(reg) = tmp1|tmp2; return; /* Note that the store sequences do not indicate that they change memory because it _should_ be affecting nothing in this context. (Otherwise we have other, much larger, problems.) 
*/ case 0x0d: /* stw */ __asm__ __volatile__( "1: ldq_u %2,1(%5)\n" "2: ldq_u %1,0(%5)\n" " inswh %6,%5,%4\n" " inswl %6,%5,%3\n" " mskwh %2,%5,%2\n" " mskwl %1,%5,%1\n" " or %2,%4,%2\n" " or %1,%3,%1\n" "3: stq_u %2,1(%5)\n" "4: stq_u %1,0(%5)\n" "5:\n" ".section __ex_table,\"a\"\n" " .long 1b - .\n" " lda %2,5b-1b(%0)\n" " .long 2b - .\n" " lda %1,5b-2b(%0)\n" " .long 3b - .\n" " lda $31,5b-3b(%0)\n" " .long 4b - .\n" " lda $31,5b-4b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) : "r"(va), "r"(una_reg(reg)), "0"(0)); if (error) goto got_exception; return; case 0x2c: /* stl */ __asm__ __volatile__( "1: ldq_u %2,3(%5)\n" "2: ldq_u %1,0(%5)\n" " inslh %6,%5,%4\n" " insll %6,%5,%3\n" " msklh %2,%5,%2\n" " mskll %1,%5,%1\n" " or %2,%4,%2\n" " or %1,%3,%1\n" "3: stq_u %2,3(%5)\n" "4: stq_u %1,0(%5)\n" "5:\n" ".section __ex_table,\"a\"\n" " .long 1b - .\n" " lda %2,5b-1b(%0)\n" " .long 2b - .\n" " lda %1,5b-2b(%0)\n" " .long 3b - .\n" " lda $31,5b-3b(%0)\n" " .long 4b - .\n" " lda $31,5b-4b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) : "r"(va), "r"(una_reg(reg)), "0"(0)); if (error) goto got_exception; return; case 0x2d: /* stq */ __asm__ __volatile__( "1: ldq_u %2,7(%5)\n" "2: ldq_u %1,0(%5)\n" " insqh %6,%5,%4\n" " insql %6,%5,%3\n" " mskqh %2,%5,%2\n" " mskql %1,%5,%1\n" " or %2,%4,%2\n" " or %1,%3,%1\n" "3: stq_u %2,7(%5)\n" "4: stq_u %1,0(%5)\n" "5:\n" ".section __ex_table,\"a\"\n\t" " .long 1b - .\n" " lda %2,5b-1b(%0)\n" " .long 2b - .\n" " lda %1,5b-2b(%0)\n" " .long 3b - .\n" " lda $31,5b-3b(%0)\n" " .long 4b - .\n" " lda $31,5b-4b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) : "r"(va), "r"(una_reg(reg)), "0"(0)); if (error) goto got_exception; return; } printk("Bad unaligned kernel access at %016lx: %p %lx %lu\n", pc, va, opcode, reg); do_exit(SIGSEGV); got_exception: /* Ok, we caught the exception, but we don't want it. 
Is there someone to pass it along to? */ if ((fixup = search_exception_tables(pc)) != 0) { unsigned long newpc; newpc = fixup_exception(una_reg, fixup, pc); printk("Forwarding unaligned exception at %lx (%lx)\n", pc, newpc); regs->pc = newpc; return; } /* * Yikes! No one to forward the exception to. * Since the registers are in a weird format, dump them ourselves. */ printk("%s(%d): unhandled unaligned exception\n", current->comm, task_pid_nr(current)); printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx\n", pc, una_reg(26), regs->ps); printk("r0 = %016lx r1 = %016lx r2 = %016lx\n", una_reg(0), una_reg(1), una_reg(2)); printk("r3 = %016lx r4 = %016lx r5 = %016lx\n", una_reg(3), una_reg(4), una_reg(5)); printk("r6 = %016lx r7 = %016lx r8 = %016lx\n", una_reg(6), una_reg(7), una_reg(8)); printk("r9 = %016lx r10= %016lx r11= %016lx\n", una_reg(9), una_reg(10), una_reg(11)); printk("r12= %016lx r13= %016lx r14= %016lx\n", una_reg(12), una_reg(13), una_reg(14)); printk("r15= %016lx\n", una_reg(15)); printk("r16= %016lx r17= %016lx r18= %016lx\n", una_reg(16), una_reg(17), una_reg(18)); printk("r19= %016lx r20= %016lx r21= %016lx\n", una_reg(19), una_reg(20), una_reg(21)); printk("r22= %016lx r23= %016lx r24= %016lx\n", una_reg(22), una_reg(23), una_reg(24)); printk("r25= %016lx r27= %016lx r28= %016lx\n", una_reg(25), una_reg(27), una_reg(28)); printk("gp = %016lx sp = %p\n", regs->gp, regs+1); dik_show_code((unsigned int *)pc); dik_show_trace((unsigned long *)(regs+1)); if (test_and_set_thread_flag (TIF_DIE_IF_KERNEL)) { printk("die_if_kernel recursion detected.\n"); local_irq_enable(); while (1); } do_exit(SIGSEGV); } /* * Convert an s-floating point value in memory format to the * corresponding value in register format. The exponent * needs to be remapped to preserve non-finite values * (infinities, not-a-numbers, denormals). 
*/ static inline unsigned long s_mem_to_reg (unsigned long s_mem) { unsigned long frac = (s_mem >> 0) & 0x7fffff; unsigned long sign = (s_mem >> 31) & 0x1; unsigned long exp_msb = (s_mem >> 30) & 0x1; unsigned long exp_low = (s_mem >> 23) & 0x7f; unsigned long exp; exp = (exp_msb << 10) | exp_low; /* common case */ if (exp_msb) { if (exp_low == 0x7f) { exp = 0x7ff; } } else { if (exp_low == 0x00) { exp = 0x000; } else { exp |= (0x7 << 7); } } return (sign << 63) | (exp << 52) | (frac << 29); } /* * Convert an s-floating point value in register format to the * corresponding value in memory format. */ static inline unsigned long s_reg_to_mem (unsigned long s_reg) { return ((s_reg >> 62) << 30) | ((s_reg << 5) >> 34); } /* * Handle user-level unaligned fault. Handling user-level unaligned * faults is *extremely* slow and produces nasty messages. A user * program *should* fix unaligned faults ASAP. * * Notice that we have (almost) the regular kernel stack layout here, * so finding the appropriate registers is a little more difficult * than in the kernel case. * * Finally, we handle regular integer load/stores only. In * particular, load-linked/store-conditionally and floating point * load/stores are not supported. The former make no sense with * unaligned faults (they are guaranteed to fail) and I don't think * the latter will occur in any decent program. * * Sigh. We *do* have to handle some FP operations, because GCC will * uses them as temporary storage for integer memory to memory copies. * However, we need to deal with stt/ldt and sts/lds only. 
*/ #define OP_INT_MASK ( 1L << 0x28 | 1L << 0x2c /* ldl stl */ \ | 1L << 0x29 | 1L << 0x2d /* ldq stq */ \ | 1L << 0x0c | 1L << 0x0d /* ldwu stw */ \ | 1L << 0x0a | 1L << 0x0e ) /* ldbu stb */ #define OP_WRITE_MASK ( 1L << 0x26 | 1L << 0x27 /* sts stt */ \ | 1L << 0x2c | 1L << 0x2d /* stl stq */ \ | 1L << 0x0d | 1L << 0x0e ) /* stw stb */ #define R(x) ((size_t) &((struct pt_regs *)0)->x) static int unauser_reg_offsets[32] = { R(r0), R(r1), R(r2), R(r3), R(r4), R(r5), R(r6), R(r7), R(r8), /* r9 ... r15 are stored in front of regs. */ -56, -48, -40, -32, -24, -16, -8, R(r16), R(r17), R(r18), R(r19), R(r20), R(r21), R(r22), R(r23), R(r24), R(r25), R(r26), R(r27), R(r28), R(gp), 0, 0 }; #undef R asmlinkage void do_entUnaUser(void __user * va, unsigned long opcode, unsigned long reg, struct pt_regs *regs) { static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5); unsigned long tmp1, tmp2, tmp3, tmp4; unsigned long fake_reg, *reg_addr = &fake_reg; siginfo_t info; long error; /* Check the UAC bits to decide what the user wants us to do with the unaliged access. */ if (!(current_thread_info()->status & TS_UAC_NOPRINT)) { if (__ratelimit(&ratelimit)) { printk("%s(%d): unaligned trap at %016lx: %p %lx %ld\n", current->comm, task_pid_nr(current), regs->pc - 4, va, opcode, reg); } } if ((current_thread_info()->status & TS_UAC_SIGBUS)) goto give_sigbus; /* Not sure why you'd want to use this, but... */ if ((current_thread_info()->status & TS_UAC_NOFIX)) return; /* Don't bother reading ds in the access check since we already know that this came from the user. Also rely on the fact that the page at TASK_SIZE is unmapped and so can't be touched anyway. 
*/ if (!__access_ok((unsigned long)va, 0, USER_DS)) goto give_sigsegv; ++unaligned[1].count; unaligned[1].va = (unsigned long)va; unaligned[1].pc = regs->pc - 4; if ((1L << opcode) & OP_INT_MASK) { /* it's an integer load/store */ if (reg < 30) { reg_addr = (unsigned long *) ((char *)regs + unauser_reg_offsets[reg]); } else if (reg == 30) { /* usp in PAL regs */ fake_reg = rdusp(); } else { /* zero "register" */ fake_reg = 0; } } /* We don't want to use the generic get/put unaligned macros as we want to trap exceptions. Only if we actually get an exception will we decide whether we should have caught it. */ switch (opcode) { case 0x0c: /* ldwu */ __asm__ __volatile__( "1: ldq_u %1,0(%3)\n" "2: ldq_u %2,1(%3)\n" " extwl %1,%3,%1\n" " extwh %2,%3,%2\n" "3:\n" ".section __ex_table,\"a\"\n" " .long 1b - .\n" " lda %1,3b-1b(%0)\n" " .long 2b - .\n" " lda %2,3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) goto give_sigsegv; *reg_addr = tmp1|tmp2; break; case 0x22: /* lds */ __asm__ __volatile__( "1: ldq_u %1,0(%3)\n" "2: ldq_u %2,3(%3)\n" " extll %1,%3,%1\n" " extlh %2,%3,%2\n" "3:\n" ".section __ex_table,\"a\"\n" " .long 1b - .\n" " lda %1,3b-1b(%0)\n" " .long 2b - .\n" " lda %2,3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) goto give_sigsegv; alpha_write_fp_reg(reg, s_mem_to_reg((int)(tmp1|tmp2))); return; case 0x23: /* ldt */ __asm__ __volatile__( "1: ldq_u %1,0(%3)\n" "2: ldq_u %2,7(%3)\n" " extql %1,%3,%1\n" " extqh %2,%3,%2\n" "3:\n" ".section __ex_table,\"a\"\n" " .long 1b - .\n" " lda %1,3b-1b(%0)\n" " .long 2b - .\n" " lda %2,3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) goto give_sigsegv; alpha_write_fp_reg(reg, tmp1|tmp2); return; case 0x28: /* ldl */ __asm__ __volatile__( "1: ldq_u %1,0(%3)\n" "2: ldq_u %2,3(%3)\n" " extll %1,%3,%1\n" " extlh %2,%3,%2\n" "3:\n" ".section __ex_table,\"a\"\n" " .long 1b - .\n" " lda 
%1,3b-1b(%0)\n" " .long 2b - .\n" " lda %2,3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) goto give_sigsegv; *reg_addr = (int)(tmp1|tmp2); break; case 0x29: /* ldq */ __asm__ __volatile__( "1: ldq_u %1,0(%3)\n" "2: ldq_u %2,7(%3)\n" " extql %1,%3,%1\n" " extqh %2,%3,%2\n" "3:\n" ".section __ex_table,\"a\"\n" " .long 1b - .\n" " lda %1,3b-1b(%0)\n" " .long 2b - .\n" " lda %2,3b-2b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2) : "r"(va), "0"(0)); if (error) goto give_sigsegv; *reg_addr = tmp1|tmp2; break; /* Note that the store sequences do not indicate that they change memory because it _should_ be affecting nothing in this context. (Otherwise we have other, much larger, problems.) */ case 0x0d: /* stw */ __asm__ __volatile__( "1: ldq_u %2,1(%5)\n" "2: ldq_u %1,0(%5)\n" " inswh %6,%5,%4\n" " inswl %6,%5,%3\n" " mskwh %2,%5,%2\n" " mskwl %1,%5,%1\n" " or %2,%4,%2\n" " or %1,%3,%1\n" "3: stq_u %2,1(%5)\n" "4: stq_u %1,0(%5)\n" "5:\n" ".section __ex_table,\"a\"\n" " .long 1b - .\n" " lda %2,5b-1b(%0)\n" " .long 2b - .\n" " lda %1,5b-2b(%0)\n" " .long 3b - .\n" " lda $31,5b-3b(%0)\n" " .long 4b - .\n" " lda $31,5b-4b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) : "r"(va), "r"(*reg_addr), "0"(0)); if (error) goto give_sigsegv; return; case 0x26: /* sts */ fake_reg = s_reg_to_mem(alpha_read_fp_reg(reg)); /* FALLTHRU */ case 0x2c: /* stl */ __asm__ __volatile__( "1: ldq_u %2,3(%5)\n" "2: ldq_u %1,0(%5)\n" " inslh %6,%5,%4\n" " insll %6,%5,%3\n" " msklh %2,%5,%2\n" " mskll %1,%5,%1\n" " or %2,%4,%2\n" " or %1,%3,%1\n" "3: stq_u %2,3(%5)\n" "4: stq_u %1,0(%5)\n" "5:\n" ".section __ex_table,\"a\"\n" " .long 1b - .\n" " lda %2,5b-1b(%0)\n" " .long 2b - .\n" " lda %1,5b-2b(%0)\n" " .long 3b - .\n" " lda $31,5b-3b(%0)\n" " .long 4b - .\n" " lda $31,5b-4b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) : "r"(va), "r"(*reg_addr), "0"(0)); if 
(error) goto give_sigsegv; return; case 0x27: /* stt */ fake_reg = alpha_read_fp_reg(reg); /* FALLTHRU */ case 0x2d: /* stq */ __asm__ __volatile__( "1: ldq_u %2,7(%5)\n" "2: ldq_u %1,0(%5)\n" " insqh %6,%5,%4\n" " insql %6,%5,%3\n" " mskqh %2,%5,%2\n" " mskql %1,%5,%1\n" " or %2,%4,%2\n" " or %1,%3,%1\n" "3: stq_u %2,7(%5)\n" "4: stq_u %1,0(%5)\n" "5:\n" ".section __ex_table,\"a\"\n\t" " .long 1b - .\n" " lda %2,5b-1b(%0)\n" " .long 2b - .\n" " lda %1,5b-2b(%0)\n" " .long 3b - .\n" " lda $31,5b-3b(%0)\n" " .long 4b - .\n" " lda $31,5b-4b(%0)\n" ".previous" : "=r"(error), "=&r"(tmp1), "=&r"(tmp2), "=&r"(tmp3), "=&r"(tmp4) : "r"(va), "r"(*reg_addr), "0"(0)); if (error) goto give_sigsegv; return; default: /* What instruction were you trying to use, exactly? */ goto give_sigbus; } /* Only integer loads should get here; everyone else returns early. */ if (reg == 30) wrusp(fake_reg); return; give_sigsegv: regs->pc -= 4; /* make pc point to faulting insn */ info.si_signo = SIGSEGV; info.si_errno = 0; /* We need to replicate some of the logic in mm/fault.c, since we don't have access to the fault code in the exception handling return path. */ if (!__access_ok((unsigned long)va, 0, USER_DS)) info.si_code = SEGV_ACCERR; else { struct mm_struct *mm = current->mm; down_read(&mm->mmap_sem); if (find_vma(mm, (unsigned long)va)) info.si_code = SEGV_ACCERR; else info.si_code = SEGV_MAPERR; up_read(&mm->mmap_sem); } info.si_addr = va; send_sig_info(SIGSEGV, &info, current); return; give_sigbus: regs->pc -= 4; info.si_signo = SIGBUS; info.si_errno = 0; info.si_code = BUS_ADRALN; info.si_addr = va; send_sig_info(SIGBUS, &info, current); return; } void __cpuinit trap_init(void) { /* Tell PAL-code what global pointer we want in the kernel. */ register unsigned long gptr __asm__("$29"); wrkgp(gptr); /* Hack for Multia (UDB) and JENSEN: some of their SRMs have a bug in the handling of the opDEC fault. Fix it up if so. 
*/ if (implver() == IMPLVER_EV4) opDEC_check(); wrent(entArith, 1); wrent(entMM, 2); wrent(entIF, 3); wrent(entUna, 4); wrent(entSys, 5); wrent(entDbg, 6); }
gpl-2.0
Jazz-823/kernel_sony_togari-216-
drivers/coresight/coresight-funnel.c
2154
6598
/* Copyright (c) 2011-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/clk.h> #include <linux/of_coresight.h> #include <linux/coresight.h> #include "coresight-priv.h" #define funnel_writel(drvdata, val, off) \ __raw_writel((val), drvdata->base + off) #define funnel_readl(drvdata, off) \ __raw_readl(drvdata->base + off) #define FUNNEL_LOCK(drvdata) \ do { \ mb(); \ funnel_writel(drvdata, 0x0, CORESIGHT_LAR); \ } while (0) #define FUNNEL_UNLOCK(drvdata) \ do { \ funnel_writel(drvdata, CORESIGHT_UNLOCK, CORESIGHT_LAR); \ mb(); \ } while (0) #define FUNNEL_FUNCTL (0x000) #define FUNNEL_PRICTL (0x004) #define FUNNEL_ITATBDATA0 (0xEEC) #define FUNNEL_ITATBCTR2 (0xEF0) #define FUNNEL_ITATBCTR1 (0xEF4) #define FUNNEL_ITATBCTR0 (0xEF8) #define FUNNEL_HOLDTIME_MASK (0xF00) #define FUNNEL_HOLDTIME_SHFT (0x8) #define FUNNEL_HOLDTIME (0x7 << FUNNEL_HOLDTIME_SHFT) struct funnel_drvdata { void __iomem *base; struct device *dev; struct coresight_device *csdev; struct clk *clk; uint32_t priority; }; static void __funnel_enable(struct funnel_drvdata *drvdata, int port) { uint32_t functl; FUNNEL_UNLOCK(drvdata); functl = funnel_readl(drvdata, FUNNEL_FUNCTL); functl &= ~FUNNEL_HOLDTIME_MASK; functl |= FUNNEL_HOLDTIME; functl |= (1 << port); funnel_writel(drvdata, functl, FUNNEL_FUNCTL); 
funnel_writel(drvdata, drvdata->priority, FUNNEL_PRICTL); FUNNEL_LOCK(drvdata); } static int funnel_enable(struct coresight_device *csdev, int inport, int outport) { struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); int ret; ret = clk_prepare_enable(drvdata->clk); if (ret) return ret; __funnel_enable(drvdata, inport); dev_info(drvdata->dev, "FUNNEL inport %d enabled\n", inport); return 0; } static void __funnel_disable(struct funnel_drvdata *drvdata, int inport) { uint32_t functl; FUNNEL_UNLOCK(drvdata); functl = funnel_readl(drvdata, FUNNEL_FUNCTL); functl &= ~(1 << inport); funnel_writel(drvdata, functl, FUNNEL_FUNCTL); FUNNEL_LOCK(drvdata); } static void funnel_disable(struct coresight_device *csdev, int inport, int outport) { struct funnel_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent); __funnel_disable(drvdata, inport); clk_disable_unprepare(drvdata->clk); dev_info(drvdata->dev, "FUNNEL inport %d disabled\n", inport); } static const struct coresight_ops_link funnel_link_ops = { .enable = funnel_enable, .disable = funnel_disable, }; static const struct coresight_ops funnel_cs_ops = { .link_ops = &funnel_link_ops, }; static ssize_t funnel_show_priority(struct device *dev, struct device_attribute *attr, char *buf) { struct funnel_drvdata *drvdata = dev_get_drvdata(dev->parent); unsigned long val = drvdata->priority; return scnprintf(buf, PAGE_SIZE, "%#lx\n", val); } static ssize_t funnel_store_priority(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct funnel_drvdata *drvdata = dev_get_drvdata(dev->parent); unsigned long val; if (sscanf(buf, "%lx", &val) != 1) return -EINVAL; drvdata->priority = val; return size; } static DEVICE_ATTR(priority, S_IRUGO | S_IWUSR, funnel_show_priority, funnel_store_priority); static struct attribute *funnel_attrs[] = { &dev_attr_priority.attr, NULL, }; static struct attribute_group funnel_attr_grp = { .attrs = funnel_attrs, }; static const struct attribute_group 
*funnel_attr_grps[] = { &funnel_attr_grp, NULL, }; static int __devinit funnel_probe(struct platform_device *pdev) { int ret; struct device *dev = &pdev->dev; struct coresight_platform_data *pdata; struct funnel_drvdata *drvdata; struct resource *res; struct coresight_desc *desc; if (coresight_fuse_access_disabled()) return -EPERM; if (pdev->dev.of_node) { pdata = of_get_coresight_platform_data(dev, pdev->dev.of_node); if (IS_ERR(pdata)) return PTR_ERR(pdata); pdev->dev.platform_data = pdata; } drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL); if (!drvdata) return -ENOMEM; drvdata->dev = &pdev->dev; platform_set_drvdata(pdev, drvdata); res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "funnel-base"); if (!res) return -ENODEV; drvdata->base = devm_ioremap(dev, res->start, resource_size(res)); if (!drvdata->base) return -ENOMEM; drvdata->clk = devm_clk_get(dev, "core_clk"); if (IS_ERR(drvdata->clk)) return PTR_ERR(drvdata->clk); ret = clk_set_rate(drvdata->clk, CORESIGHT_CLK_RATE_TRACE); if (ret) return ret; desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL); if (!desc) return -ENOMEM; desc->type = CORESIGHT_DEV_TYPE_LINK; desc->subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_MERG; desc->ops = &funnel_cs_ops; desc->pdata = pdev->dev.platform_data; desc->dev = &pdev->dev; desc->groups = funnel_attr_grps; desc->owner = THIS_MODULE; drvdata->csdev = coresight_register(desc); if (IS_ERR(drvdata->csdev)) return PTR_ERR(drvdata->csdev); dev_info(dev, "FUNNEL initialized\n"); return 0; } static int __devexit funnel_remove(struct platform_device *pdev) { struct funnel_drvdata *drvdata = platform_get_drvdata(pdev); coresight_unregister(drvdata->csdev); return 0; } static struct of_device_id funnel_match[] = { {.compatible = "arm,coresight-funnel"}, {} }; static struct platform_driver funnel_driver = { .probe = funnel_probe, .remove = __devexit_p(funnel_remove), .driver = { .name = "coresight-funnel", .owner = THIS_MODULE, .of_match_table = funnel_match, }, 
}; static int __init funnel_init(void) { return platform_driver_register(&funnel_driver); } module_init(funnel_init); static void __exit funnel_exit(void) { platform_driver_unregister(&funnel_driver); } module_exit(funnel_exit); MODULE_LICENSE("GPL v2"); MODULE_DESCRIPTION("CoreSight Funnel driver");
gpl-2.0
locke12456/linux
fs/jfs/resize.c
2154
15073
/* * Copyright (C) International Business Machines Corp., 2000-2004 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include <linux/buffer_head.h> #include <linux/quotaops.h> #include "jfs_incore.h" #include "jfs_filsys.h" #include "jfs_metapage.h" #include "jfs_dinode.h" #include "jfs_imap.h" #include "jfs_dmap.h" #include "jfs_superblock.h" #include "jfs_txnmgr.h" #include "jfs_debug.h" #define BITSPERPAGE (PSIZE << 3) #define L2MEGABYTE 20 #define MEGABYTE (1 << L2MEGABYTE) #define MEGABYTE32 (MEGABYTE << 5) /* convert block number to bmap file page number */ #define BLKTODMAPN(b)\ (((b) >> 13) + ((b) >> 23) + ((b) >> 33) + 3 + 1) /* * jfs_extendfs() * * function: extend file system; * * |-------------------------------|----------|----------| * file system space fsck inline log * workspace space * * input: * new LVSize: in LV blocks (required) * new LogSize: in LV blocks (optional) * new FSSize: in LV blocks (optional) * * new configuration: * 1. set new LogSize as specified or default from new LVSize; * 2. compute new FSCKSize from new LVSize; * 3. 
set new FSSize as MIN(FSSize, LVSize-(LogSize+FSCKSize)) where * assert(new FSSize >= old FSSize), * i.e., file system must not be shrunk; */ int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize) { int rc = 0; struct jfs_sb_info *sbi = JFS_SBI(sb); struct inode *ipbmap = sbi->ipbmap; struct inode *ipbmap2; struct inode *ipimap = sbi->ipimap; struct jfs_log *log = sbi->log; struct bmap *bmp = sbi->bmap; s64 newLogAddress, newFSCKAddress; int newFSCKSize; s64 newMapSize = 0, mapSize; s64 XAddress, XSize, nblocks, xoff, xaddr, t64; s64 oldLVSize; s64 newFSSize; s64 VolumeSize; int newNpages = 0, nPages, newPage, xlen, t32; int tid; int log_formatted = 0; struct inode *iplist[1]; struct jfs_superblock *j_sb, *j_sb2; s64 old_agsize; int agsizechanged = 0; struct buffer_head *bh, *bh2; /* If the volume hasn't grown, get out now */ if (sbi->mntflag & JFS_INLINELOG) oldLVSize = addressPXD(&sbi->logpxd) + lengthPXD(&sbi->logpxd); else oldLVSize = addressPXD(&sbi->fsckpxd) + lengthPXD(&sbi->fsckpxd); if (oldLVSize >= newLVSize) { printk(KERN_WARNING "jfs_extendfs: volume hasn't grown, returning\n"); goto out; } VolumeSize = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits; if (VolumeSize) { if (newLVSize > VolumeSize) { printk(KERN_WARNING "jfs_extendfs: invalid size\n"); rc = -EINVAL; goto out; } } else { /* check the device */ bh = sb_bread(sb, newLVSize - 1); if (!bh) { printk(KERN_WARNING "jfs_extendfs: invalid size\n"); rc = -EINVAL; goto out; } bforget(bh); } /* Can't extend write-protected drive */ if (isReadOnly(ipbmap)) { printk(KERN_WARNING "jfs_extendfs: read-only file system\n"); rc = -EROFS; goto out; } /* * reconfigure LV spaces * --------------------- * * validate new size, or, if not specified, determine new size */ /* * reconfigure inline log space: */ if ((sbi->mntflag & JFS_INLINELOG)) { if (newLogSize == 0) { /* * no size specified: default to 1/256 of aggregate * size; rounded up to a megabyte boundary; */ newLogSize = newLVSize >> 
8; t32 = (1 << (20 - sbi->l2bsize)) - 1; newLogSize = (newLogSize + t32) & ~t32; newLogSize = min(newLogSize, MEGABYTE32 >> sbi->l2bsize); } else { /* * convert the newLogSize to fs blocks. * * Since this is given in megabytes, it will always be * an even number of pages. */ newLogSize = (newLogSize * MEGABYTE) >> sbi->l2bsize; } } else newLogSize = 0; newLogAddress = newLVSize - newLogSize; /* * reconfigure fsck work space: * * configure it to the end of the logical volume regardless of * whether file system extends to the end of the aggregate; * Need enough 4k pages to cover: * - 1 bit per block in aggregate rounded up to BPERDMAP boundary * - 1 extra page to handle control page and intermediate level pages * - 50 extra pages for the chkdsk service log */ t64 = ((newLVSize - newLogSize + BPERDMAP - 1) >> L2BPERDMAP) << L2BPERDMAP; t32 = DIV_ROUND_UP(t64, BITSPERPAGE) + 1 + 50; newFSCKSize = t32 << sbi->l2nbperpage; newFSCKAddress = newLogAddress - newFSCKSize; /* * compute new file system space; */ newFSSize = newLVSize - newLogSize - newFSCKSize; /* file system cannot be shrunk */ if (newFSSize < bmp->db_mapsize) { rc = -EINVAL; goto out; } /* * If we're expanding enough that the inline log does not overlap * the old one, we can format the new log before we quiesce the * filesystem. */ if ((sbi->mntflag & JFS_INLINELOG) && (newLogAddress > oldLVSize)) { if ((rc = lmLogFormat(log, newLogAddress, newLogSize))) goto out; log_formatted = 1; } /* * quiesce file system * * (prepare to move the inline log and to prevent map update) * * block any new transactions and wait for completion of * all wip transactions and flush modified pages s.t. * on-disk file system is in consistent state and * log is not required for recovery. 
*/ txQuiesce(sb); /* Reset size of direct inode */ sbi->direct_inode->i_size = sb->s_bdev->bd_inode->i_size; if (sbi->mntflag & JFS_INLINELOG) { /* * deactivate old inline log */ lmLogShutdown(log); /* * mark on-disk super block for fs in transition; * * update on-disk superblock for the new space configuration * of inline log space and fsck work space descriptors: * N.B. FS descriptor is NOT updated; * * crash recovery: * logredo(): if FM_EXTENDFS, return to fsck() for cleanup; * fsck(): if FM_EXTENDFS, reformat inline log and fsck * workspace from superblock inline log descriptor and fsck * workspace descriptor; */ /* read in superblock */ if ((rc = readSuper(sb, &bh))) goto error_out; j_sb = (struct jfs_superblock *)bh->b_data; /* mark extendfs() in progress */ j_sb->s_state |= cpu_to_le32(FM_EXTENDFS); j_sb->s_xsize = cpu_to_le64(newFSSize); PXDaddress(&j_sb->s_xfsckpxd, newFSCKAddress); PXDlength(&j_sb->s_xfsckpxd, newFSCKSize); PXDaddress(&j_sb->s_xlogpxd, newLogAddress); PXDlength(&j_sb->s_xlogpxd, newLogSize); /* synchronously update superblock */ mark_buffer_dirty(bh); sync_dirty_buffer(bh); brelse(bh); /* * format new inline log synchronously; * * crash recovery: if log move in progress, * reformat log and exit success; */ if (!log_formatted) if ((rc = lmLogFormat(log, newLogAddress, newLogSize))) goto error_out; /* * activate new log */ log->base = newLogAddress; log->size = newLogSize >> (L2LOGPSIZE - sb->s_blocksize_bits); if ((rc = lmLogInit(log))) goto error_out; } /* * extend block allocation map * --------------------------- * * extendfs() for new extension, retry after crash recovery; * * note: both logredo() and fsck() rebuild map from * the bitmap and configuration parameter from superblock * (disregarding all other control information in the map); * * superblock: * s_size: aggregate size in physical blocks; */ /* * compute the new block allocation map configuration * * map dinode: * di_size: map file size in byte; * di_nblocks: number of blocks 
allocated for map file; * di_mapsize: number of blocks in aggregate (covered by map); * map control page: * db_mapsize: number of blocks in aggregate (covered by map); */ newMapSize = newFSSize; /* number of data pages of new bmap file: * roundup new size to full dmap page boundary and * add 1 extra dmap page for next extendfs() */ t64 = (newMapSize - 1) + BPERDMAP; newNpages = BLKTODMAPN(t64) + 1; /* * extend map from current map (WITHOUT growing mapfile) * * map new extension with unmapped part of the last partial * dmap page, if applicable, and extra page(s) allocated * at end of bmap by mkfs() or previous extendfs(); */ extendBmap: /* compute number of blocks requested to extend */ mapSize = bmp->db_mapsize; XAddress = mapSize; /* eXtension Address */ XSize = newMapSize - mapSize; /* eXtension Size */ old_agsize = bmp->db_agsize; /* We need to know if this changes */ /* compute number of blocks that can be extended by current mapfile */ t64 = dbMapFileSizeToMapSize(ipbmap); if (mapSize > t64) { printk(KERN_ERR "jfs_extendfs: mapSize (0x%Lx) > t64 (0x%Lx)\n", (long long) mapSize, (long long) t64); rc = -EIO; goto error_out; } nblocks = min(t64 - mapSize, XSize); /* * update map pages for new extension: * * update/init dmap and bubble up the control hierarchy * incrementally fold up dmaps into upper levels; * update bmap control page; */ if ((rc = dbExtendFS(ipbmap, XAddress, nblocks))) goto error_out; agsizechanged |= (bmp->db_agsize != old_agsize); /* * the map now has extended to cover additional nblocks: * dn_mapsize = oldMapsize + nblocks; */ /* ipbmap->i_mapsize += nblocks; */ XSize -= nblocks; /* * grow map file to cover remaining extension * and/or one extra dmap page for next extendfs(); * * allocate new map pages and its backing blocks, and * update map file xtree */ /* compute number of data pages of current bmap file */ nPages = ipbmap->i_size >> L2PSIZE; /* need to grow map file ? 
*/ if (nPages == newNpages) goto finalizeBmap; /* * grow bmap file for the new map pages required: * * allocate growth at the start of newly extended region; * bmap file only grows sequentially, i.e., both data pages * and possibly xtree index pages may grow in append mode, * s.t. logredo() can reconstruct pre-extension state * by washing away bmap file of pages outside s_size boundary; */ /* * journal map file growth as if a regular file growth: * (note: bmap is created with di_mode = IFJOURNAL|IFREG); * * journaling of bmap file growth is not required since * logredo() do/can not use log records of bmap file growth * but it provides careful write semantics, pmap update, etc.; */ /* synchronous write of data pages: bmap data pages are * cached in meta-data cache, and not written out * by txCommit(); */ filemap_fdatawait(ipbmap->i_mapping); filemap_write_and_wait(ipbmap->i_mapping); diWriteSpecial(ipbmap, 0); newPage = nPages; /* first new page number */ xoff = newPage << sbi->l2nbperpage; xlen = (newNpages - nPages) << sbi->l2nbperpage; xlen = min(xlen, (int) nblocks) & ~(sbi->nbperpage - 1); xaddr = XAddress; tid = txBegin(sb, COMMIT_FORCE); if ((rc = xtAppend(tid, ipbmap, 0, xoff, nblocks, &xlen, &xaddr, 0))) { txEnd(tid); goto error_out; } /* update bmap file size */ ipbmap->i_size += xlen << sbi->l2bsize; inode_add_bytes(ipbmap, xlen << sbi->l2bsize); iplist[0] = ipbmap; rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE); txEnd(tid); if (rc) goto error_out; /* * map file has been grown now to cover extension to further out; * di_size = new map file size; * * if huge extension, the previous extension based on previous * map file size may not have been sufficient to cover whole extension * (it could have been used up for new map pages), * but the newly grown map file now covers lot bigger new free space * available for further extension of map; */ /* any more blocks to extend ? 
*/ if (XSize) goto extendBmap; finalizeBmap: /* finalize bmap */ dbFinalizeBmap(ipbmap); /* * update inode allocation map * --------------------------- * * move iag lists from old to new iag; * agstart field is not updated for logredo() to reconstruct * iag lists if system crash occurs. * (computation of ag number from agstart based on agsize * will correctly identify the new ag); */ /* if new AG size the same as old AG size, done! */ if (agsizechanged) { if ((rc = diExtendFS(ipimap, ipbmap))) goto error_out; /* finalize imap */ if ((rc = diSync(ipimap))) goto error_out; } /* * finalize * -------- * * extension is committed when on-disk super block is * updated with new descriptors: logredo will recover * crash before it to pre-extension state; */ /* sync log to skip log replay of bmap file growth transaction; */ /* lmLogSync(log, 1); */ /* * synchronous write bmap global control page; * for crash before completion of write * logredo() will recover to pre-extendfs state; * for crash after completion of write, * logredo() will recover post-extendfs state; */ if ((rc = dbSync(ipbmap))) goto error_out; /* * copy primary bmap inode to secondary bmap inode */ ipbmap2 = diReadSpecial(sb, BMAP_I, 1); if (ipbmap2 == NULL) { printk(KERN_ERR "jfs_extendfs: diReadSpecial(bmap) failed\n"); goto error_out; } memcpy(&JFS_IP(ipbmap2)->i_xtroot, &JFS_IP(ipbmap)->i_xtroot, 288); ipbmap2->i_size = ipbmap->i_size; ipbmap2->i_blocks = ipbmap->i_blocks; diWriteSpecial(ipbmap2, 1); diFreeSpecial(ipbmap2); /* * update superblock */ if ((rc = readSuper(sb, &bh))) goto error_out; j_sb = (struct jfs_superblock *)bh->b_data; /* mark extendfs() completion */ j_sb->s_state &= cpu_to_le32(~FM_EXTENDFS); j_sb->s_size = cpu_to_le64(bmp->db_mapsize << le16_to_cpu(j_sb->s_l2bfactor)); j_sb->s_agsize = cpu_to_le32(bmp->db_agsize); /* update inline log space descriptor */ if (sbi->mntflag & JFS_INLINELOG) { PXDaddress(&(j_sb->s_logpxd), newLogAddress); PXDlength(&(j_sb->s_logpxd), newLogSize); } /* 
record log's mount serial number */ j_sb->s_logserial = cpu_to_le32(log->serial); /* update fsck work space descriptor */ PXDaddress(&(j_sb->s_fsckpxd), newFSCKAddress); PXDlength(&(j_sb->s_fsckpxd), newFSCKSize); j_sb->s_fscklog = 1; /* sb->s_fsckloglen remains the same */ /* Update secondary superblock */ bh2 = sb_bread(sb, SUPER2_OFF >> sb->s_blocksize_bits); if (bh2) { j_sb2 = (struct jfs_superblock *)bh2->b_data; memcpy(j_sb2, j_sb, sizeof (struct jfs_superblock)); mark_buffer_dirty(bh); sync_dirty_buffer(bh2); brelse(bh2); } /* write primary superblock */ mark_buffer_dirty(bh); sync_dirty_buffer(bh); brelse(bh); goto resume; error_out: jfs_error(sb, "\n"); resume: /* * resume file system transactions */ txResume(sb); out: return rc; }
gpl-2.0
ZhizhouTian/kernel-stable
fs/jfs/resize.c
2154
15073
/* * Copyright (C) International Business Machines Corp., 2000-2004 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/fs.h> #include <linux/buffer_head.h> #include <linux/quotaops.h> #include "jfs_incore.h" #include "jfs_filsys.h" #include "jfs_metapage.h" #include "jfs_dinode.h" #include "jfs_imap.h" #include "jfs_dmap.h" #include "jfs_superblock.h" #include "jfs_txnmgr.h" #include "jfs_debug.h" #define BITSPERPAGE (PSIZE << 3) #define L2MEGABYTE 20 #define MEGABYTE (1 << L2MEGABYTE) #define MEGABYTE32 (MEGABYTE << 5) /* convert block number to bmap file page number */ #define BLKTODMAPN(b)\ (((b) >> 13) + ((b) >> 23) + ((b) >> 33) + 3 + 1) /* * jfs_extendfs() * * function: extend file system; * * |-------------------------------|----------|----------| * file system space fsck inline log * workspace space * * input: * new LVSize: in LV blocks (required) * new LogSize: in LV blocks (optional) * new FSSize: in LV blocks (optional) * * new configuration: * 1. set new LogSize as specified or default from new LVSize; * 2. compute new FSCKSize from new LVSize; * 3. 
set new FSSize as MIN(FSSize, LVSize-(LogSize+FSCKSize)) where
 *	assert(new FSSize >= old FSSize),
 *	i.e., file system must not be shrunk;
 */
int jfs_extendfs(struct super_block *sb, s64 newLVSize, int newLogSize)
{
	int rc = 0;
	struct jfs_sb_info *sbi = JFS_SBI(sb);
	struct inode *ipbmap = sbi->ipbmap;
	struct inode *ipbmap2;
	struct inode *ipimap = sbi->ipimap;
	struct jfs_log *log = sbi->log;
	struct bmap *bmp = sbi->bmap;
	s64 newLogAddress, newFSCKAddress;
	int newFSCKSize;
	s64 newMapSize = 0, mapSize;
	s64 XAddress, XSize, nblocks, xoff, xaddr, t64;
	s64 oldLVSize;
	s64 newFSSize;
	s64 VolumeSize;
	int newNpages = 0, nPages, newPage, xlen, t32;
	int tid;
	int log_formatted = 0;
	struct inode *iplist[1];
	struct jfs_superblock *j_sb, *j_sb2;
	s64 old_agsize;
	int agsizechanged = 0;
	struct buffer_head *bh, *bh2;

	/* If the volume hasn't grown, get out now */
	if (sbi->mntflag & JFS_INLINELOG)
		oldLVSize = addressPXD(&sbi->logpxd) + lengthPXD(&sbi->logpxd);
	else
		oldLVSize = addressPXD(&sbi->fsckpxd) +
		    lengthPXD(&sbi->fsckpxd);

	if (oldLVSize >= newLVSize) {
		printk(KERN_WARNING
		       "jfs_extendfs: volume hasn't grown, returning\n");
		goto out;
	}

	VolumeSize = sb->s_bdev->bd_inode->i_size >> sb->s_blocksize_bits;

	if (VolumeSize) {
		if (newLVSize > VolumeSize) {
			printk(KERN_WARNING "jfs_extendfs: invalid size\n");
			rc = -EINVAL;
			goto out;
		}
	} else {
		/* check the device by reading its proposed last block */
		bh = sb_bread(sb, newLVSize - 1);
		if (!bh) {
			printk(KERN_WARNING "jfs_extendfs: invalid size\n");
			rc = -EINVAL;
			goto out;
		}
		bforget(bh);
	}

	/* Can't extend write-protected drive */
	if (isReadOnly(ipbmap)) {
		printk(KERN_WARNING "jfs_extendfs: read-only file system\n");
		rc = -EROFS;
		goto out;
	}

	/*
	 * reconfigure LV spaces
	 * ---------------------
	 *
	 * validate new size, or, if not specified, determine new size
	 */

	/*
	 * reconfigure inline log space:
	 */
	if ((sbi->mntflag & JFS_INLINELOG)) {
		if (newLogSize == 0) {
			/*
			 * no size specified: default to 1/256 of aggregate
			 * size; rounded up to a megabyte boundary;
			 */
			newLogSize = newLVSize >> 8;
			t32 = (1 << (20 - sbi->l2bsize)) - 1;
			newLogSize = (newLogSize + t32) & ~t32;
			newLogSize =
			    min(newLogSize, MEGABYTE32 >> sbi->l2bsize);
		} else {
			/*
			 * convert the newLogSize to fs blocks.
			 *
			 * Since this is given in megabytes, it will always be
			 * an even number of pages.
			 */
			newLogSize = (newLogSize * MEGABYTE) >> sbi->l2bsize;
		}

	} else
		newLogSize = 0;

	newLogAddress = newLVSize - newLogSize;

	/*
	 * reconfigure fsck work space:
	 *
	 * configure it to the end of the logical volume regardless of
	 * whether file system extends to the end of the aggregate;
	 * Need enough 4k pages to cover:
	 *  - 1 bit per block in aggregate rounded up to BPERDMAP boundary
	 *  - 1 extra page to handle control page and intermediate level pages
	 *  - 50 extra pages for the chkdsk service log
	 */
	t64 = ((newLVSize - newLogSize + BPERDMAP - 1) >> L2BPERDMAP)
	    << L2BPERDMAP;
	t32 = DIV_ROUND_UP(t64, BITSPERPAGE) + 1 + 50;
	newFSCKSize = t32 << sbi->l2nbperpage;
	newFSCKAddress = newLogAddress - newFSCKSize;

	/*
	 * compute new file system space;
	 */
	newFSSize = newLVSize - newLogSize - newFSCKSize;

	/* file system cannot be shrunk */
	if (newFSSize < bmp->db_mapsize) {
		rc = -EINVAL;
		goto out;
	}

	/*
	 * If we're expanding enough that the inline log does not overlap
	 * the old one, we can format the new log before we quiesce the
	 * filesystem.
	 */
	if ((sbi->mntflag & JFS_INLINELOG) && (newLogAddress > oldLVSize)) {
		if ((rc = lmLogFormat(log, newLogAddress, newLogSize)))
			goto out;
		log_formatted = 1;
	}
	/*
	 * quiesce file system
	 *
	 * (prepare to move the inline log and to prevent map update)
	 *
	 * block any new transactions and wait for completion of
	 * all wip transactions and flush modified pages s.t.
	 * on-disk file system is in consistent state and
	 * log is not required for recovery.
	 */
	txQuiesce(sb);

	/* Reset size of direct inode */
	sbi->direct_inode->i_size = sb->s_bdev->bd_inode->i_size;

	if (sbi->mntflag & JFS_INLINELOG) {
		/*
		 * deactivate old inline log
		 */
		lmLogShutdown(log);

		/*
		 * mark on-disk super block for fs in transition;
		 *
		 * update on-disk superblock for the new space configuration
		 * of inline log space and fsck work space descriptors:
		 * N.B. FS descriptor is NOT updated;
		 *
		 * crash recovery:
		 * logredo(): if FM_EXTENDFS, return to fsck() for cleanup;
		 * fsck(): if FM_EXTENDFS, reformat inline log and fsck
		 * workspace from superblock inline log descriptor and fsck
		 * workspace descriptor;
		 */

		/* read in superblock */
		if ((rc = readSuper(sb, &bh)))
			goto error_out;
		j_sb = (struct jfs_superblock *)bh->b_data;

		/* mark extendfs() in progress */
		j_sb->s_state |= cpu_to_le32(FM_EXTENDFS);
		j_sb->s_xsize = cpu_to_le64(newFSSize);
		PXDaddress(&j_sb->s_xfsckpxd, newFSCKAddress);
		PXDlength(&j_sb->s_xfsckpxd, newFSCKSize);
		PXDaddress(&j_sb->s_xlogpxd, newLogAddress);
		PXDlength(&j_sb->s_xlogpxd, newLogSize);

		/* synchronously update superblock */
		mark_buffer_dirty(bh);
		sync_dirty_buffer(bh);
		brelse(bh);

		/*
		 * format new inline log synchronously;
		 *
		 * crash recovery: if log move in progress,
		 * reformat log and exit success;
		 */
		if (!log_formatted)
			if ((rc = lmLogFormat(log, newLogAddress, newLogSize)))
				goto error_out;

		/*
		 * activate new log
		 */
		log->base = newLogAddress;
		log->size = newLogSize >> (L2LOGPSIZE - sb->s_blocksize_bits);
		if ((rc = lmLogInit(log)))
			goto error_out;
	}

	/*
	 * extend block allocation map
	 * ---------------------------
	 *
	 * extendfs() for new extension, retry after crash recovery;
	 *
	 * note: both logredo() and fsck() rebuild map from
	 * the bitmap and configuration parameter from superblock
	 * (disregarding all other control information in the map);
	 *
	 * superblock:
	 *  s_size: aggregate size in physical blocks;
	 */
	/*
	 *	compute the new block allocation map configuration
	 *
	 * map dinode:
	 *  di_size: map file size in byte;
	 *  di_nblocks: number of blocks allocated for map file;
	 *  di_mapsize: number of blocks in aggregate (covered by map);
	 * map control page:
	 *  db_mapsize: number of blocks in aggregate (covered by map);
	 */
	newMapSize = newFSSize;
	/* number of data pages of new bmap file:
	 * roundup new size to full dmap page boundary and
	 * add 1 extra dmap page for next extendfs()
	 */
	t64 = (newMapSize - 1) + BPERDMAP;
	newNpages = BLKTODMAPN(t64) + 1;

	/*
	 *	extend map from current map (WITHOUT growing mapfile)
	 *
	 * map new extension with unmapped part of the last partial
	 * dmap page, if applicable, and extra page(s) allocated
	 * at end of bmap by mkfs() or previous extendfs();
	 */
      extendBmap:
	/* compute number of blocks requested to extend */
	mapSize = bmp->db_mapsize;
	XAddress = mapSize;	/* eXtension Address */
	XSize = newMapSize - mapSize;	/* eXtension Size */
	old_agsize = bmp->db_agsize;	/* We need to know if this changes */

	/* compute number of blocks that can be extended by current mapfile */
	t64 = dbMapFileSizeToMapSize(ipbmap);
	if (mapSize > t64) {
		printk(KERN_ERR "jfs_extendfs: mapSize (0x%Lx) > t64 (0x%Lx)\n",
		       (long long) mapSize, (long long) t64);
		rc = -EIO;
		goto error_out;
	}
	nblocks = min(t64 - mapSize, XSize);

	/*
	 * update map pages for new extension:
	 *
	 * update/init dmap and bubble up the control hierarchy
	 * incrementally fold up dmaps into upper levels;
	 * update bmap control page;
	 */
	if ((rc = dbExtendFS(ipbmap, XAddress, nblocks)))
		goto error_out;

	agsizechanged |= (bmp->db_agsize != old_agsize);

	/*
	 * the map now has extended to cover additional nblocks:
	 * dn_mapsize = oldMapsize + nblocks;
	 */
	/* ipbmap->i_mapsize += nblocks; */
	XSize -= nblocks;

	/*
	 *	grow map file to cover remaining extension
	 *	and/or one extra dmap page for next extendfs();
	 *
	 * allocate new map pages and its backing blocks, and
	 * update map file xtree
	 */
	/* compute number of data pages of current bmap file */
	nPages = ipbmap->i_size >> L2PSIZE;

	/* need to grow map file ? */
	if (nPages == newNpages)
		goto finalizeBmap;

	/*
	 * grow bmap file for the new map pages required:
	 *
	 * allocate growth at the start of newly extended region;
	 * bmap file only grows sequentially, i.e., both data pages
	 * and possibly xtree index pages may grow in append mode,
	 * s.t. logredo() can reconstruct pre-extension state
	 * by washing away bmap file of pages outside s_size boundary;
	 */
	/*
	 * journal map file growth as if a regular file growth:
	 * (note: bmap is created with di_mode = IFJOURNAL|IFREG);
	 *
	 * journaling of bmap file growth is not required since
	 * logredo() do/can not use log records of bmap file growth
	 * but it provides careful write semantics, pmap update, etc.;
	 */
	/* synchronous write of data pages: bmap data pages are
	 * cached in meta-data cache, and not written out
	 * by txCommit();
	 */
	filemap_fdatawait(ipbmap->i_mapping);
	filemap_write_and_wait(ipbmap->i_mapping);
	diWriteSpecial(ipbmap, 0);

	newPage = nPages;	/* first new page number */
	xoff = newPage << sbi->l2nbperpage;
	xlen = (newNpages - nPages) << sbi->l2nbperpage;
	xlen = min(xlen, (int) nblocks) & ~(sbi->nbperpage - 1);
	xaddr = XAddress;

	tid = txBegin(sb, COMMIT_FORCE);

	if ((rc = xtAppend(tid, ipbmap, 0, xoff, nblocks, &xlen, &xaddr, 0))) {
		txEnd(tid);
		goto error_out;
	}
	/* update bmap file size */
	ipbmap->i_size += xlen << sbi->l2bsize;
	inode_add_bytes(ipbmap, xlen << sbi->l2bsize);

	iplist[0] = ipbmap;
	rc = txCommit(tid, 1, &iplist[0], COMMIT_FORCE);

	txEnd(tid);

	if (rc)
		goto error_out;

	/*
	 * map file has been grown now to cover extension to further out;
	 * di_size = new map file size;
	 *
	 * if huge extension, the previous extension based on previous
	 * map file size may not have been sufficient to cover whole extension
	 * (it could have been used up for new map pages),
	 * but the newly grown map file now covers lot bigger new free space
	 * available for further extension of map;
	 */
	/* any more blocks to extend ? */
	if (XSize)
		goto extendBmap;

      finalizeBmap:
	/* finalize bmap */
	dbFinalizeBmap(ipbmap);

	/*
	 * update inode allocation map
	 * ---------------------------
	 *
	 * move iag lists from old to new iag;
	 * agstart field is not updated for logredo() to reconstruct
	 * iag lists if system crash occurs.
	 * (computation of ag number from agstart based on agsize
	 * will correctly identify the new ag);
	 */
	/* if new AG size the same as old AG size, done! */
	if (agsizechanged) {
		if ((rc = diExtendFS(ipimap, ipbmap)))
			goto error_out;

		/* finalize imap */
		if ((rc = diSync(ipimap)))
			goto error_out;
	}

	/*
	 * finalize
	 * --------
	 *
	 * extension is committed when on-disk super block is
	 * updated with new descriptors: logredo will recover
	 * crash before it to pre-extension state;
	 */

	/* sync log to skip log replay of bmap file growth transaction; */
	/* lmLogSync(log, 1); */

	/*
	 * synchronous write bmap global control page;
	 * for crash before completion of write
	 * logredo() will recover to pre-extendfs state;
	 * for crash after completion of write,
	 * logredo() will recover post-extendfs state;
	 */
	if ((rc = dbSync(ipbmap)))
		goto error_out;

	/*
	 * copy primary bmap inode to secondary bmap inode
	 */
	ipbmap2 = diReadSpecial(sb, BMAP_I, 1);
	if (ipbmap2 == NULL) {
		printk(KERN_ERR "jfs_extendfs: diReadSpecial(bmap) failed\n");
		goto error_out;
	}
	memcpy(&JFS_IP(ipbmap2)->i_xtroot, &JFS_IP(ipbmap)->i_xtroot, 288);
	ipbmap2->i_size = ipbmap->i_size;
	ipbmap2->i_blocks = ipbmap->i_blocks;

	diWriteSpecial(ipbmap2, 1);
	diFreeSpecial(ipbmap2);

	/*
	 *	update superblock
	 */
	if ((rc = readSuper(sb, &bh)))
		goto error_out;
	j_sb = (struct jfs_superblock *)bh->b_data;

	/* mark extendfs() completion */
	j_sb->s_state &= cpu_to_le32(~FM_EXTENDFS);
	j_sb->s_size = cpu_to_le64(bmp->db_mapsize <<
				   le16_to_cpu(j_sb->s_l2bfactor));
	j_sb->s_agsize = cpu_to_le32(bmp->db_agsize);

	/* update inline log space descriptor */
	if (sbi->mntflag & JFS_INLINELOG) {
		PXDaddress(&(j_sb->s_logpxd), newLogAddress);
		PXDlength(&(j_sb->s_logpxd), newLogSize);
	}

	/* record log's mount serial number */
	j_sb->s_logserial = cpu_to_le32(log->serial);

	/* update fsck work space descriptor */
	PXDaddress(&(j_sb->s_fsckpxd), newFSCKAddress);
	PXDlength(&(j_sb->s_fsckpxd), newFSCKSize);
	j_sb->s_fscklog = 1;
	/* sb->s_fsckloglen remains the same */

	/* Update secondary superblock */
	bh2 = sb_bread(sb, SUPER2_OFF >> sb->s_blocksize_bits);
	if (bh2) {
		j_sb2 = (struct jfs_superblock *)bh2->b_data;
		memcpy(j_sb2, j_sb, sizeof (struct jfs_superblock));

		/* BUGFIX: the original dirtied bh (the primary buffer)
		 * here, so the secondary superblock copy was synced
		 * without ever being marked dirty; dirty bh2 instead. */
		mark_buffer_dirty(bh2);
		sync_dirty_buffer(bh2);
		brelse(bh2);
	}

	/* write primary superblock */
	mark_buffer_dirty(bh);
	sync_dirty_buffer(bh);
	brelse(bh);

	goto resume;

      error_out:
	jfs_error(sb, "\n");

      resume:
	/*
	 *	resume file system transactions
	 */
	txResume(sb);

      out:
	return rc;
}
gpl-2.0
Zero015/UniKernel
drivers/misc/cb710/core.c
2410
8482
/*
 * cb710/core.c
 *
 * Copyright by Michał Mirosław, 2008-2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/spinlock.h>
#include <linux/idr.h>
#include <linux/cb710.h>
#include <linux/gfp.h>

/* allocator for per-chip platform_device ids, shared by all CB710 chips */
static DEFINE_IDA(cb710_ida);
static DEFINE_SPINLOCK(cb710_ida_lock);

/* Read-modify-write a 32-bit PCI config register: keep the bits in @mask,
 * then flip the bits in @xor. */
void cb710_pci_update_config_reg(struct pci_dev *pdev,
	int reg, uint32_t mask, uint32_t xor)
{
	u32 rval;

	pci_read_config_dword(pdev, reg, &rval);
	rval = (rval & mask) ^ xor;
	pci_write_config_dword(pdev, reg, rval);
}
EXPORT_SYMBOL_GPL(cb710_pci_update_config_reg);

/* Some magic writes based on Windows driver init code */
static int cb710_pci_configure(struct pci_dev *pdev)
{
	/* function 0 of the same slot carries the shared config registers */
	unsigned int devfn = PCI_DEVFN(PCI_SLOT(pdev->devfn), 0);
	struct pci_dev *pdev0;
	u32 val;

	cb710_pci_update_config_reg(pdev, 0x48,
		~0x000000FF, 0x0000003F);
	pci_read_config_dword(pdev, 0x48, &val);
	/* bit 31 set means the chip is already configured */
	if (val & 0x80000000)
		return 0;

	pdev0 = pci_get_slot(pdev->bus, devfn);
	if (!pdev0)
		return -ENODEV;

	if (pdev0->vendor == PCI_VENDOR_ID_ENE
	    && pdev0->device == PCI_DEVICE_ID_ENE_720) {
		cb710_pci_update_config_reg(pdev0, 0x8C,
			~0x00F00000, 0x00100000);
		cb710_pci_update_config_reg(pdev0, 0xB0,
			~0x08000000, 0x08000000);
	}

	cb710_pci_update_config_reg(pdev0, 0x8C,
		~0x00000F00, 0x00000200);
	cb710_pci_update_config_reg(pdev0, 0x90,
		~0x00060000, 0x00040000);

	pci_dev_put(pdev0);

	return 0;
}

/* Shared IRQ handler: poll every registered slot's handler under irq_lock. */
static irqreturn_t cb710_irq_handler(int irq, void *data)
{
	struct cb710_chip *chip = data;
	struct cb710_slot *slot = &chip->slot[0];
	irqreturn_t handled = IRQ_NONE;
	unsigned nr;

	spin_lock(&chip->irq_lock); /* incl. smp_rmb() */

	for (nr = chip->slots; nr; ++slot, --nr) {
		cb710_irq_handler_t handler_func = slot->irq_handler;
		if (handler_func && handler_func(slot))
			handled = IRQ_HANDLED;
	}

	spin_unlock(&chip->irq_lock);

	return handled;
}

/* platform_device release callback for a slot; with debug assumptions on,
 * drops the chip's slot refcount so teardown invariants can be checked. */
static void cb710_release_slot(struct device *dev)
{
#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
	struct cb710_slot *slot = cb710_pdev_to_slot(to_platform_device(dev));
	struct cb710_chip *chip = cb710_slot_to_chip(slot);

	/* slot struct can be freed now */
	atomic_dec(&chip->slot_refs_count);
#endif
}

/* Register one card-reader slot as a platform device.
 * NOTE(review): the ++chip->slots / smp_wmb() ordering before
 * platform_device_register() is load-bearing for cb710_irq_handler();
 * do not reorder. */
static int cb710_register_slot(struct cb710_chip *chip,
	unsigned slot_mask, unsigned io_offset, const char *name)
{
	int nr = chip->slots;
	struct cb710_slot *slot = &chip->slot[nr];
	int err;

	dev_dbg(cb710_chip_dev(chip),
		"register: %s.%d; slot %d; mask %d; IO offset: 0x%02X\n",
		name, chip->platform_id, nr, slot_mask, io_offset);

	/* slot->irq_handler == NULL here; this needs to be
	 * seen before platform_device_register() */
	++chip->slots;
	smp_wmb();

	slot->iobase = chip->iobase + io_offset;
	slot->pdev.name = name;
	slot->pdev.id = chip->platform_id;
	slot->pdev.dev.parent = &chip->pdev->dev;
	slot->pdev.dev.release = cb710_release_slot;

	err = platform_device_register(&slot->pdev);

#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
	atomic_inc(&chip->slot_refs_count);
#endif

	if (err) {
		/* device_initialize() called from platform_device_register()
		 * wants this on error path */
		platform_device_put(&slot->pdev);

		/* slot->irq_handler == NULL here anyway, so no lock needed */
		--chip->slots;
		return err;
	}

	chip->slot_mask |= slot_mask;

	return 0;
}

/* Unregister the most recently registered slot if @slot_mask matches.
 * Slots are always torn down in reverse registration order. */
static void cb710_unregister_slot(struct cb710_chip *chip,
	unsigned slot_mask)
{
	int nr = chip->slots - 1;

	if (!(chip->slot_mask & slot_mask))
		return;

	platform_device_unregister(&chip->slot[nr].pdev);

	/* complementary to spin_unlock() in cb710_set_irq_handler() */
	smp_rmb();
	BUG_ON(chip->slot[nr].irq_handler != NULL);

	/* slot->irq_handler == NULL here, so no lock needed */
	--chip->slots;
	chip->slot_mask &= ~slot_mask;
}

/* Install (or clear, with handler == NULL) a slot's interrupt handler. */
void cb710_set_irq_handler(struct cb710_slot *slot,
	cb710_irq_handler_t handler)
{
	struct cb710_chip *chip = cb710_slot_to_chip(slot);
	unsigned long flags;

	spin_lock_irqsave(&chip->irq_lock, flags);
	slot->irq_handler = handler;
	spin_unlock_irqrestore(&chip->irq_lock, flags);
}
EXPORT_SYMBOL_GPL(cb710_set_irq_handler);

#ifdef CONFIG_PM

static int cb710_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct cb710_chip *chip = pci_get_drvdata(pdev);

	/* IRQ was devm-requested; it is released here and re-requested
	 * in cb710_resume() so it is not delivered while suspended */
	free_irq(pdev->irq, chip);
	pci_save_state(pdev);
	pci_disable_device(pdev);
	if (state.event & PM_EVENT_SLEEP)
		pci_set_power_state(pdev, PCI_D3hot);
	return 0;
}

static int cb710_resume(struct pci_dev *pdev)
{
	struct cb710_chip *chip = pci_get_drvdata(pdev);
	int err;

	pci_set_power_state(pdev, PCI_D0);
	pci_restore_state(pdev);
	err = pcim_enable_device(pdev);
	if (err)
		return err;

	return devm_request_irq(&pdev->dev, pdev->irq,
		cb710_irq_handler, IRQF_SHARED, KBUILD_MODNAME, chip);
}

#endif /* CONFIG_PM */

/* Probe: configure the chip, discover which slots (MMC/MS/SM) it exposes
 * from config reg 0x48 bits 28..30, and register one platform device per
 * slot. Uses managed (devm_/pcim_) resources for everything but the IDA id. */
static int cb710_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct cb710_chip *chip;
	unsigned long flags;
	u32 val;
	int err;
	int n = 0;

	err = cb710_pci_configure(pdev);
	if (err)
		return err;

	/* this is actually magic... */
	pci_read_config_dword(pdev, 0x48, &val);
	if (!(val & 0x80000000)) {
		pci_write_config_dword(pdev, 0x48, val|0x71000000);
		pci_read_config_dword(pdev, 0x48, &val);
	}

	dev_dbg(&pdev->dev, "PCI config[0x48] = 0x%08X\n", val);
	if (!(val & 0x70000000))
		return -ENODEV;
	val = (val >> 28) & 7;
	if (val & CB710_SLOT_MMC)
		++n;
	if (val & CB710_SLOT_MS)
		++n;
	if (val & CB710_SLOT_SM)
		++n;

	/* chip struct has a flexible array of n slot structs at its tail */
	chip = devm_kzalloc(&pdev->dev,
		sizeof(*chip) + n * sizeof(*chip->slot), GFP_KERNEL);
	if (!chip)
		return -ENOMEM;

	err = pcim_enable_device(pdev);
	if (err)
		return err;

	err = pcim_iomap_regions(pdev, 0x0001, KBUILD_MODNAME);
	if (err)
		return err;

	spin_lock_init(&chip->irq_lock);
	chip->pdev = pdev;
	chip->iobase = pcim_iomap_table(pdev)[0];

	pci_set_drvdata(pdev, chip);

	err = devm_request_irq(&pdev->dev, pdev->irq,
		cb710_irq_handler, IRQF_SHARED, KBUILD_MODNAME, chip);
	if (err)
		return err;

	/* old-style IDA allocation loop: retry while -EAGAIN */
	do {
		if (!ida_pre_get(&cb710_ida, GFP_KERNEL))
			return -ENOMEM;

		spin_lock_irqsave(&cb710_ida_lock, flags);
		err = ida_get_new(&cb710_ida, &chip->platform_id);
		spin_unlock_irqrestore(&cb710_ida_lock, flags);

		if (err && err != -EAGAIN)
			return err;
	} while (err);

	dev_info(&pdev->dev, "id %d, IO 0x%p, IRQ %d\n",
		chip->platform_id, chip->iobase, pdev->irq);

	if (val & CB710_SLOT_MMC) {	/* MMC/SD slot */
		err = cb710_register_slot(chip,
			CB710_SLOT_MMC, 0x00, "cb710-mmc");
		if (err)
			return err;
	}

	if (val & CB710_SLOT_MS) {	/* MemoryStick slot */
		err = cb710_register_slot(chip,
			CB710_SLOT_MS, 0x40, "cb710-ms");
		if (err)
			goto unreg_mmc;
	}

	if (val & CB710_SLOT_SM) {	/* SmartMedia slot */
		err = cb710_register_slot(chip,
			CB710_SLOT_SM, 0x60, "cb710-sm");
		if (err)
			goto unreg_ms;
	}

	return 0;
unreg_ms:
	cb710_unregister_slot(chip, CB710_SLOT_MS);
unreg_mmc:
	cb710_unregister_slot(chip, CB710_SLOT_MMC);

#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
	BUG_ON(atomic_read(&chip->slot_refs_count) != 0);
#endif
	return err;
}

/* Remove: unregister all slots (reverse order) and release the IDA id;
 * devm/pcim resources are released by the driver core. */
static void cb710_remove_one(struct pci_dev *pdev)
{
	struct cb710_chip *chip = pci_get_drvdata(pdev);
	unsigned long flags;

	cb710_unregister_slot(chip, CB710_SLOT_SM);
	cb710_unregister_slot(chip, CB710_SLOT_MS);
	cb710_unregister_slot(chip, CB710_SLOT_MMC);
#ifdef CONFIG_CB710_DEBUG_ASSUMPTIONS
	BUG_ON(atomic_read(&chip->slot_refs_count) != 0);
#endif

	spin_lock_irqsave(&cb710_ida_lock, flags);
	ida_remove(&cb710_ida, chip->platform_id);
	spin_unlock_irqrestore(&cb710_ida_lock, flags);
}

static const struct pci_device_id cb710_pci_tbl[] = {
	{ PCI_VENDOR_ID_ENE, PCI_DEVICE_ID_ENE_CB710_FLASH,
		PCI_ANY_ID, PCI_ANY_ID, },
	{ 0, }
};

static struct pci_driver cb710_driver = {
	.name = KBUILD_MODNAME,
	.id_table = cb710_pci_tbl,
	.probe = cb710_probe,
	.remove = cb710_remove_one,
#ifdef CONFIG_PM
	.suspend = cb710_suspend,
	.resume = cb710_resume,
#endif
};

static int __init cb710_init_module(void)
{
	return pci_register_driver(&cb710_driver);
}

static void __exit cb710_cleanup_module(void)
{
	pci_unregister_driver(&cb710_driver);
	ida_destroy(&cb710_ida);
}

module_init(cb710_init_module);
module_exit(cb710_cleanup_module);

MODULE_AUTHOR("Michał Mirosław <mirq-linux@rere.qmqm.pl>");
MODULE_DESCRIPTION("ENE CB710 memory card reader driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, cb710_pci_tbl);
gpl-2.0
razaina/android-kernel-samsung-smdk4412
drivers/staging/tidspbridge/core/wdt.c
3434
3165
/* * wdt.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * IO dispatcher for a shared memory channel driver. * * Copyright (C) 2010 Texas Instruments, Inc. * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #include <linux/types.h> #include <dspbridge/dbdefs.h> #include <dspbridge/dspdeh.h> #include <dspbridge/dev.h> #include <dspbridge/_chnl_sm.h> #include <dspbridge/wdt.h> #include <dspbridge/host_os.h> #ifdef CONFIG_TIDSPBRIDGE_WDT3 #define OMAP34XX_WDT3_BASE (L4_PER_34XX_BASE + 0x30000) static struct dsp_wdt_setting dsp_wdt; void dsp_wdt_dpc(unsigned long data) { struct deh_mgr *deh_mgr; dev_get_deh_mgr(dev_get_first(), &deh_mgr); if (deh_mgr) bridge_deh_notify(deh_mgr, DSP_WDTOVERFLOW, 0); } irqreturn_t dsp_wdt_isr(int irq, void *data) { u32 value; /* ack wdt3 interrupt */ value = __raw_readl(dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET); __raw_writel(value, dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET); tasklet_schedule(&dsp_wdt.wdt3_tasklet); return IRQ_HANDLED; } int dsp_wdt_init(void) { int ret = 0; dsp_wdt.sm_wdt = NULL; dsp_wdt.reg_base = OMAP2_L4_IO_ADDRESS(OMAP34XX_WDT3_BASE); tasklet_init(&dsp_wdt.wdt3_tasklet, dsp_wdt_dpc, 0); dsp_wdt.fclk = clk_get(NULL, "wdt3_fck"); if (dsp_wdt.fclk) { dsp_wdt.iclk = clk_get(NULL, "wdt3_ick"); if (!dsp_wdt.iclk) { clk_put(dsp_wdt.fclk); dsp_wdt.fclk = NULL; ret = -EFAULT; } } else ret = -EFAULT; if (!ret) ret = request_irq(INT_34XX_WDT3_IRQ, dsp_wdt_isr, 0, "dsp_wdt", &dsp_wdt); /* Disable at this moment, it will be enabled when DSP starts */ if (!ret) disable_irq(INT_34XX_WDT3_IRQ); return ret; } void dsp_wdt_sm_set(void *data) { dsp_wdt.sm_wdt = data; 
dsp_wdt.sm_wdt->wdt_overflow = CONFIG_TIDSPBRIDGE_WDT_TIMEOUT; } void dsp_wdt_exit(void) { free_irq(INT_34XX_WDT3_IRQ, &dsp_wdt); tasklet_kill(&dsp_wdt.wdt3_tasklet); if (dsp_wdt.fclk) clk_put(dsp_wdt.fclk); if (dsp_wdt.iclk) clk_put(dsp_wdt.iclk); dsp_wdt.fclk = NULL; dsp_wdt.iclk = NULL; dsp_wdt.sm_wdt = NULL; dsp_wdt.reg_base = NULL; } void dsp_wdt_enable(bool enable) { u32 tmp; static bool wdt_enable; if (wdt_enable == enable || !dsp_wdt.fclk || !dsp_wdt.iclk) return; wdt_enable = enable; if (enable) { clk_enable(dsp_wdt.fclk); clk_enable(dsp_wdt.iclk); dsp_wdt.sm_wdt->wdt_setclocks = 1; tmp = __raw_readl(dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET); __raw_writel(tmp, dsp_wdt.reg_base + OMAP3_WDT3_ISR_OFFSET); enable_irq(INT_34XX_WDT3_IRQ); } else { disable_irq(INT_34XX_WDT3_IRQ); dsp_wdt.sm_wdt->wdt_setclocks = 0; clk_disable(dsp_wdt.iclk); clk_disable(dsp_wdt.fclk); } } #else void dsp_wdt_enable(bool enable) { } void dsp_wdt_sm_set(void *data) { } int dsp_wdt_init(void) { return 0; } void dsp_wdt_exit(void) { } #endif
gpl-2.0
cm-3470/android_kernel_samsung_degaslte
arch/arm/mach-exynos/mach-exynos4-dt.c
4714
3116
/*
 * Samsung's Exynos4210 flattened device tree enabled machine
 *
 * Copyright (c) 2010-2011 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 * Copyright (c) 2010-2011 Linaro Ltd.
 *		www.linaro.org
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/of_platform.h>
#include <linux/serial_core.h>

#include <asm/mach/arch.h>
#include <asm/hardware/gic.h>
#include <mach/map.h>

#include <plat/cpu.h>
#include <plat/regs-serial.h>

#include "common.h"

/*
 * The following lookup table is used to override device names when devices
 * are registered from device tree. This is temporarily added to enable
 * device tree support addition for the Exynos4 architecture.
 *
 * For drivers that require platform data to be provided from the machine
 * file, a platform data pointer can also be supplied along with the
 * devices names. Usually, the platform data elements that cannot be parsed
 * from the device tree by the drivers (example: function pointers) are
 * supplied. But it should be noted that this is a temporary mechanism and
 * at some point, the drivers should be capable of parsing all the platform
 * data from the device tree.
 */
static const struct of_dev_auxdata exynos4210_auxdata_lookup[] __initconst = {
	/* four UARTs, four SDHCI hosts, one I2C controller, two PDMA units;
	 * each entry maps a DT compatible+address to a legacy device name */
	OF_DEV_AUXDATA("samsung,exynos4210-uart", EXYNOS4_PA_UART0,
				"exynos4210-uart.0", NULL),
	OF_DEV_AUXDATA("samsung,exynos4210-uart", EXYNOS4_PA_UART1,
				"exynos4210-uart.1", NULL),
	OF_DEV_AUXDATA("samsung,exynos4210-uart", EXYNOS4_PA_UART2,
				"exynos4210-uart.2", NULL),
	OF_DEV_AUXDATA("samsung,exynos4210-uart", EXYNOS4_PA_UART3,
				"exynos4210-uart.3", NULL),
	OF_DEV_AUXDATA("samsung,exynos4210-sdhci", EXYNOS4_PA_HSMMC(0),
				"exynos4-sdhci.0", NULL),
	OF_DEV_AUXDATA("samsung,exynos4210-sdhci", EXYNOS4_PA_HSMMC(1),
				"exynos4-sdhci.1", NULL),
	OF_DEV_AUXDATA("samsung,exynos4210-sdhci", EXYNOS4_PA_HSMMC(2),
				"exynos4-sdhci.2", NULL),
	OF_DEV_AUXDATA("samsung,exynos4210-sdhci", EXYNOS4_PA_HSMMC(3),
				"exynos4-sdhci.3", NULL),
	OF_DEV_AUXDATA("samsung,s3c2440-i2c", EXYNOS4_PA_IIC(0),
				"s3c2440-i2c.0", NULL),
	OF_DEV_AUXDATA("arm,pl330", EXYNOS4_PA_PDMA0, "dma-pl330.0", NULL),
	OF_DEV_AUXDATA("arm,pl330", EXYNOS4_PA_PDMA1, "dma-pl330.1", NULL),
	{},
};

/* Map static IO regions and set up clocks from the 24 MHz crystal. */
static void __init exynos4210_dt_map_io(void)
{
	exynos_init_io(NULL, 0);
	s3c24xx_init_clocks(24000000);
}

/* Populate platform devices from the device tree, applying the name
 * overrides in exynos4210_auxdata_lookup above. */
static void __init exynos4210_dt_machine_init(void)
{
	of_platform_populate(NULL, of_default_bus_match_table,
				exynos4210_auxdata_lookup, NULL);
}

static char const *exynos4210_dt_compat[] __initdata = {
	"samsung,exynos4210",
	NULL
};

DT_MACHINE_START(EXYNOS4210_DT, "Samsung Exynos4 (Flattened Device Tree)")
	/* Maintainer: Thomas Abraham <thomas.abraham@linaro.org> */
	.init_irq	= exynos4_init_irq,
	.map_io		= exynos4210_dt_map_io,
	.handle_irq	= gic_handle_irq,
	.init_machine	= exynos4210_dt_machine_init,
	.timer		= &exynos4_timer,
	.dt_compat	= exynos4210_dt_compat,
	.restart	= exynos4_restart,
MACHINE_END
gpl-2.0
SinxOner/android_kernel_lge_p710
arch/um/os-Linux/util.c
4970
3308
/* * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <errno.h> #include <signal.h> #include <string.h> #include <termios.h> #include <wait.h> #include <sys/mman.h> #include <sys/utsname.h> #include "os.h" void stack_protections(unsigned long address) { if (mprotect((void *) address, UM_THREAD_SIZE, PROT_READ | PROT_WRITE | PROT_EXEC) < 0) panic("protecting stack failed, errno = %d", errno); } int raw(int fd) { struct termios tt; int err; CATCH_EINTR(err = tcgetattr(fd, &tt)); if (err < 0) return -errno; cfmakeraw(&tt); CATCH_EINTR(err = tcsetattr(fd, TCSADRAIN, &tt)); if (err < 0) return -errno; /* * XXX tcsetattr could have applied only some changes * (and cfmakeraw() is a set of changes) */ return 0; } void setup_machinename(char *machine_out) { struct utsname host; uname(&host); #ifdef UML_CONFIG_UML_X86 # ifndef UML_CONFIG_64BIT if (!strcmp(host.machine, "x86_64")) { strcpy(machine_out, "i686"); return; } # else if (!strcmp(host.machine, "i686")) { strcpy(machine_out, "x86_64"); return; } # endif #endif strcpy(machine_out, host.machine); } void setup_hostinfo(char *buf, int len) { struct utsname host; uname(&host); snprintf(buf, len, "%s %s %s %s %s", host.sysname, host.nodename, host.release, host.version, host.machine); } /* * We cannot use glibc's abort(). It makes use of tgkill() which * has no effect within UML's kernel threads. * After that glibc would execute an invalid instruction to kill * the calling process and UML crashes with SIGSEGV. 
*/ static inline void __attribute__ ((noreturn)) uml_abort(void) { sigset_t sig; fflush(NULL); if (!sigemptyset(&sig) && !sigaddset(&sig, SIGABRT)) sigprocmask(SIG_UNBLOCK, &sig, 0); for (;;) if (kill(getpid(), SIGABRT) < 0) exit(127); } void os_dump_core(void) { int pid; signal(SIGSEGV, SIG_DFL); /* * We are about to SIGTERM this entire process group to ensure that * nothing is around to run after the kernel exits. The * kernel wants to abort, not die through SIGTERM, so we * ignore it here. */ signal(SIGTERM, SIG_IGN); kill(0, SIGTERM); /* * Most of the other processes associated with this UML are * likely sTopped, so give them a SIGCONT so they see the * SIGTERM. */ kill(0, SIGCONT); /* * Now, having sent signals to everyone but us, make sure they * die by ptrace. Processes can survive what's been done to * them so far - the mechanism I understand is receiving a * SIGSEGV and segfaulting immediately upon return. There is * always a SIGSEGV pending, and (I'm guessing) signals are * processed in numeric order so the SIGTERM (signal 15 vs * SIGSEGV being signal 11) is never handled. * * Run a waitpid loop until we get some kind of error. * Hopefully, it's ECHILD, but there's not a lot we can do if * it's something else. Tell os_kill_ptraced_process not to * wait for the child to report its death because there's * nothing reasonable to do if that fails. */ while ((pid = waitpid(-1, NULL, WNOHANG | __WALL)) > 0) os_kill_ptraced_process(pid, 0); uml_abort(); } void um_early_printk(const char *s, unsigned int n) { printf("%.*s", n, s); }
gpl-2.0
viaembedded/vab820-kernel-bsp
arch/um/drivers/pty.c
4970
3182
/* * Copyright (C) 2001 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <errno.h> #include <fcntl.h> #include <string.h> #include <termios.h> #include <sys/stat.h> #include "chan_user.h" #include "os.h" #include "um_malloc.h" struct pty_chan { void (*announce)(char *dev_name, int dev); int dev; int raw; struct termios tt; char dev_name[sizeof("/dev/pts/0123456\0")]; }; static void *pty_chan_init(char *str, int device, const struct chan_opts *opts) { struct pty_chan *data; data = uml_kmalloc(sizeof(*data), UM_GFP_KERNEL); if (data == NULL) return NULL; *data = ((struct pty_chan) { .announce = opts->announce, .dev = device, .raw = opts->raw }); return data; } static int pts_open(int input, int output, int primary, void *d, char **dev_out) { struct pty_chan *data = d; char *dev; int fd, err; fd = get_pty(); if (fd < 0) { err = -errno; printk(UM_KERN_ERR "open_pts : Failed to open pts\n"); return err; } if (data->raw) { CATCH_EINTR(err = tcgetattr(fd, &data->tt)); if (err) goto out_close; err = raw(fd); if (err) goto out_close; } dev = ptsname(fd); sprintf(data->dev_name, "%s", dev); *dev_out = data->dev_name; if (data->announce) (*data->announce)(dev, data->dev); return fd; out_close: close(fd); return err; } static int getmaster(char *line) { struct stat buf; char *pty, *bank, *cp; int master, err; pty = &line[strlen("/dev/ptyp")]; for (bank = "pqrs"; *bank; bank++) { line[strlen("/dev/pty")] = *bank; *pty = '0'; /* Did we hit the end ? 
*/ if ((stat(line, &buf) < 0) && (errno == ENOENT)) break; for (cp = "0123456789abcdef"; *cp; cp++) { *pty = *cp; master = open(line, O_RDWR); if (master >= 0) { char *tp = &line[strlen("/dev/")]; /* verify slave side is usable */ *tp = 't'; err = access(line, R_OK | W_OK); *tp = 'p'; if (!err) return master; close(master); } } } printk(UM_KERN_ERR "getmaster - no usable host pty devices\n"); return -ENOENT; } static int pty_open(int input, int output, int primary, void *d, char **dev_out) { struct pty_chan *data = d; int fd, err; char dev[sizeof("/dev/ptyxx\0")] = "/dev/ptyxx"; fd = getmaster(dev); if (fd < 0) return fd; if (data->raw) { err = raw(fd); if (err) { close(fd); return err; } } if (data->announce) (*data->announce)(dev, data->dev); sprintf(data->dev_name, "%s", dev); *dev_out = data->dev_name; return fd; } const struct chan_ops pty_ops = { .type = "pty", .init = pty_chan_init, .open = pty_open, .close = generic_close, .read = generic_read, .write = generic_write, .console_write = generic_console_write, .window_size = generic_window_size, .free = generic_free, .winch = 0, }; const struct chan_ops pts_ops = { .type = "pts", .init = pty_chan_init, .open = pts_open, .close = generic_close, .read = generic_read, .write = generic_write, .console_write = generic_console_write, .window_size = generic_window_size, .free = generic_free, .winch = 0, };
gpl-2.0
HighwindONE/Kernel_GoldStar
arch/powerpc/sysdev/axonram.c
5994
9433
/* * (C) Copyright IBM Deutschland Entwicklung GmbH 2006 * * Author: Maxim Shchetynin <maxim@de.ibm.com> * * Axon DDR2 device driver. * It registers one block device per Axon's DDR2 memory bank found on a system. * Block devices are called axonram?, their major and minor numbers are * available in /proc/devices, /proc/partitions or in /sys/block/axonram?/dev. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
*/ #include <linux/bio.h> #include <linux/blkdev.h> #include <linux/device.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/genhd.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/ioport.h> #include <linux/irq.h> #include <linux/irqreturn.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/mod_devicetable.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/types.h> #include <linux/of_device.h> #include <linux/of_platform.h> #include <asm/page.h> #include <asm/prom.h> #define AXON_RAM_MODULE_NAME "axonram" #define AXON_RAM_DEVICE_NAME "axonram" #define AXON_RAM_MINORS_PER_DISK 16 #define AXON_RAM_BLOCK_SHIFT PAGE_SHIFT #define AXON_RAM_BLOCK_SIZE 1 << AXON_RAM_BLOCK_SHIFT #define AXON_RAM_SECTOR_SHIFT 9 #define AXON_RAM_SECTOR_SIZE 1 << AXON_RAM_SECTOR_SHIFT #define AXON_RAM_IRQ_FLAGS IRQF_SHARED | IRQF_TRIGGER_RISING static int azfs_major, azfs_minor; struct axon_ram_bank { struct platform_device *device; struct gendisk *disk; unsigned int irq_id; unsigned long ph_addr; unsigned long io_addr; unsigned long size; unsigned long ecc_counter; }; static ssize_t axon_ram_sysfs_ecc(struct device *dev, struct device_attribute *attr, char *buf) { struct platform_device *device = to_platform_device(dev); struct axon_ram_bank *bank = device->dev.platform_data; BUG_ON(!bank); return sprintf(buf, "%ld\n", bank->ecc_counter); } static DEVICE_ATTR(ecc, S_IRUGO, axon_ram_sysfs_ecc, NULL); /** * axon_ram_irq_handler - interrupt handler for Axon RAM ECC * @irq: interrupt ID * @dev: pointer to of_device */ static irqreturn_t axon_ram_irq_handler(int irq, void *dev) { struct platform_device *device = dev; struct axon_ram_bank *bank = device->dev.platform_data; BUG_ON(!bank); dev_err(&device->dev, "Correctable memory error occurred\n"); bank->ecc_counter++; return IRQ_HANDLED; } /** * axon_ram_make_request - make_request() method for block device * @queue, @bio: see blk_queue_make_request() 
*/ static void axon_ram_make_request(struct request_queue *queue, struct bio *bio) { struct axon_ram_bank *bank = bio->bi_bdev->bd_disk->private_data; unsigned long phys_mem, phys_end; void *user_mem; struct bio_vec *vec; unsigned int transfered; unsigned short idx; phys_mem = bank->io_addr + (bio->bi_sector << AXON_RAM_SECTOR_SHIFT); phys_end = bank->io_addr + bank->size; transfered = 0; bio_for_each_segment(vec, bio, idx) { if (unlikely(phys_mem + vec->bv_len > phys_end)) { bio_io_error(bio); return; } user_mem = page_address(vec->bv_page) + vec->bv_offset; if (bio_data_dir(bio) == READ) memcpy(user_mem, (void *) phys_mem, vec->bv_len); else memcpy((void *) phys_mem, user_mem, vec->bv_len); phys_mem += vec->bv_len; transfered += vec->bv_len; } bio_endio(bio, 0); } /** * axon_ram_direct_access - direct_access() method for block device * @device, @sector, @data: see block_device_operations method */ static int axon_ram_direct_access(struct block_device *device, sector_t sector, void **kaddr, unsigned long *pfn) { struct axon_ram_bank *bank = device->bd_disk->private_data; loff_t offset; offset = sector; if (device->bd_part != NULL) offset += device->bd_part->start_sect; offset <<= AXON_RAM_SECTOR_SHIFT; if (offset >= bank->size) { dev_err(&bank->device->dev, "Access outside of address space\n"); return -ERANGE; } *kaddr = (void *)(bank->ph_addr + offset); *pfn = virt_to_phys(kaddr) >> PAGE_SHIFT; return 0; } static const struct block_device_operations axon_ram_devops = { .owner = THIS_MODULE, .direct_access = axon_ram_direct_access }; /** * axon_ram_probe - probe() method for platform driver * @device: see platform_driver method */ static int axon_ram_probe(struct platform_device *device) { static int axon_ram_bank_id = -1; struct axon_ram_bank *bank; struct resource resource; int rc = 0; axon_ram_bank_id++; dev_info(&device->dev, "Found memory controller on %s\n", device->dev.of_node->full_name); bank = kzalloc(sizeof(struct axon_ram_bank), GFP_KERNEL); if (bank 
== NULL) { dev_err(&device->dev, "Out of memory\n"); rc = -ENOMEM; goto failed; } device->dev.platform_data = bank; bank->device = device; if (of_address_to_resource(device->dev.of_node, 0, &resource) != 0) { dev_err(&device->dev, "Cannot access device tree\n"); rc = -EFAULT; goto failed; } bank->size = resource_size(&resource); if (bank->size == 0) { dev_err(&device->dev, "No DDR2 memory found for %s%d\n", AXON_RAM_DEVICE_NAME, axon_ram_bank_id); rc = -ENODEV; goto failed; } dev_info(&device->dev, "Register DDR2 memory device %s%d with %luMB\n", AXON_RAM_DEVICE_NAME, axon_ram_bank_id, bank->size >> 20); bank->ph_addr = resource.start; bank->io_addr = (unsigned long) ioremap_prot( bank->ph_addr, bank->size, _PAGE_NO_CACHE); if (bank->io_addr == 0) { dev_err(&device->dev, "ioremap() failed\n"); rc = -EFAULT; goto failed; } bank->disk = alloc_disk(AXON_RAM_MINORS_PER_DISK); if (bank->disk == NULL) { dev_err(&device->dev, "Cannot register disk\n"); rc = -EFAULT; goto failed; } bank->disk->major = azfs_major; bank->disk->first_minor = azfs_minor; bank->disk->fops = &axon_ram_devops; bank->disk->private_data = bank; bank->disk->driverfs_dev = &device->dev; sprintf(bank->disk->disk_name, "%s%d", AXON_RAM_DEVICE_NAME, axon_ram_bank_id); bank->disk->queue = blk_alloc_queue(GFP_KERNEL); if (bank->disk->queue == NULL) { dev_err(&device->dev, "Cannot register disk queue\n"); rc = -EFAULT; goto failed; } set_capacity(bank->disk, bank->size >> AXON_RAM_SECTOR_SHIFT); blk_queue_make_request(bank->disk->queue, axon_ram_make_request); blk_queue_logical_block_size(bank->disk->queue, AXON_RAM_SECTOR_SIZE); add_disk(bank->disk); bank->irq_id = irq_of_parse_and_map(device->dev.of_node, 0); if (bank->irq_id == NO_IRQ) { dev_err(&device->dev, "Cannot access ECC interrupt ID\n"); rc = -EFAULT; goto failed; } rc = request_irq(bank->irq_id, axon_ram_irq_handler, AXON_RAM_IRQ_FLAGS, bank->disk->disk_name, device); if (rc != 0) { dev_err(&device->dev, "Cannot register ECC interrupt 
handler\n"); bank->irq_id = NO_IRQ; rc = -EFAULT; goto failed; } rc = device_create_file(&device->dev, &dev_attr_ecc); if (rc != 0) { dev_err(&device->dev, "Cannot create sysfs file\n"); rc = -EFAULT; goto failed; } azfs_minor += bank->disk->minors; return 0; failed: if (bank != NULL) { if (bank->irq_id != NO_IRQ) free_irq(bank->irq_id, device); if (bank->disk != NULL) { if (bank->disk->major > 0) unregister_blkdev(bank->disk->major, bank->disk->disk_name); del_gendisk(bank->disk); } device->dev.platform_data = NULL; if (bank->io_addr != 0) iounmap((void __iomem *) bank->io_addr); kfree(bank); } return rc; } /** * axon_ram_remove - remove() method for platform driver * @device: see of_platform_driver method */ static int axon_ram_remove(struct platform_device *device) { struct axon_ram_bank *bank = device->dev.platform_data; BUG_ON(!bank || !bank->disk); device_remove_file(&device->dev, &dev_attr_ecc); free_irq(bank->irq_id, device); del_gendisk(bank->disk); iounmap((void __iomem *) bank->io_addr); kfree(bank); return 0; } static struct of_device_id axon_ram_device_id[] = { { .type = "dma-memory" }, {} }; static struct platform_driver axon_ram_driver = { .probe = axon_ram_probe, .remove = axon_ram_remove, .driver = { .name = AXON_RAM_MODULE_NAME, .owner = THIS_MODULE, .of_match_table = axon_ram_device_id, }, }; /** * axon_ram_init */ static int __init axon_ram_init(void) { azfs_major = register_blkdev(azfs_major, AXON_RAM_DEVICE_NAME); if (azfs_major < 0) { printk(KERN_ERR "%s cannot become block device major number\n", AXON_RAM_MODULE_NAME); return -EFAULT; } azfs_minor = 0; return platform_driver_register(&axon_ram_driver); } /** * axon_ram_exit */ static void __exit axon_ram_exit(void) { platform_driver_unregister(&axon_ram_driver); unregister_blkdev(azfs_major, AXON_RAM_DEVICE_NAME); } module_init(axon_ram_init); module_exit(axon_ram_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Maxim Shchetynin <maxim@de.ibm.com>"); MODULE_DESCRIPTION("Axon DDR2 RAM device 
driver for IBM Cell BE");
gpl-2.0
victor2002/a770k_kernel
drivers/video/msm/mddi_client_dummy.c
6506
2513
/* drivers/video/msm_fb/mddi_client_dummy.c * * Support for "dummy" mddi client devices which require no * special initialization code. * * Copyright (C) 2007 Google Incorporated * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <mach/msm_fb.h> struct panel_info { struct platform_device pdev; struct msm_panel_data panel_data; }; static int mddi_dummy_suspend(struct msm_panel_data *panel_data) { return 0; } static int mddi_dummy_resume(struct msm_panel_data *panel_data) { return 0; } static int mddi_dummy_blank(struct msm_panel_data *panel_data) { return 0; } static int mddi_dummy_unblank(struct msm_panel_data *panel_data) { return 0; } static int mddi_dummy_probe(struct platform_device *pdev) { struct msm_mddi_client_data *client_data = pdev->dev.platform_data; struct panel_info *panel = kzalloc(sizeof(struct panel_info), GFP_KERNEL); int ret; if (!panel) return -ENOMEM; platform_set_drvdata(pdev, panel); panel->panel_data.suspend = mddi_dummy_suspend; panel->panel_data.resume = mddi_dummy_resume; panel->panel_data.blank = mddi_dummy_blank; panel->panel_data.unblank = mddi_dummy_unblank; panel->panel_data.caps = MSMFB_CAP_PARTIAL_UPDATES; panel->pdev.name = "msm_panel"; panel->pdev.id = pdev->id; platform_device_add_resources(&panel->pdev, client_data->fb_resource, 1); panel->panel_data.fb_data = client_data->private_client_data; panel->pdev.dev.platform_data = &panel->panel_data; ret = platform_device_register(&panel->pdev); if (ret) { kfree(panel); return ret; } return 0; } 
static int mddi_dummy_remove(struct platform_device *pdev) { struct panel_info *panel = platform_get_drvdata(pdev); kfree(panel); return 0; } static struct platform_driver mddi_client_dummy = { .probe = mddi_dummy_probe, .remove = mddi_dummy_remove, .driver = { .name = "mddi_c_dummy" }, }; static int __init mddi_client_dummy_init(void) { platform_driver_register(&mddi_client_dummy); return 0; } module_init(mddi_client_dummy_init);
gpl-2.0
TeamHackDroid/android_kernel_samsung_galaxys2plus-common
arch/arm/mach-davinci/usb.c
8042
4106
/* * USB */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/dma-mapping.h> #include <linux/usb/musb.h> #include <mach/common.h> #include <mach/irqs.h> #include <mach/cputype.h> #include <mach/usb.h> #define DAVINCI_USB_OTG_BASE 0x01c64000 #define DA8XX_USB0_BASE 0x01e00000 #define DA8XX_USB1_BASE 0x01e25000 #if defined(CONFIG_USB_MUSB_HDRC) || defined(CONFIG_USB_MUSB_HDRC_MODULE) static struct musb_hdrc_eps_bits musb_eps[] = { { "ep1_tx", 8, }, { "ep1_rx", 8, }, { "ep2_tx", 8, }, { "ep2_rx", 8, }, { "ep3_tx", 5, }, { "ep3_rx", 5, }, { "ep4_tx", 5, }, { "ep4_rx", 5, }, }; static struct musb_hdrc_config musb_config = { .multipoint = true, .dyn_fifo = true, .soft_con = true, .dma = true, .num_eps = 5, .dma_channels = 8, .ram_bits = 10, .eps_bits = musb_eps, }; static struct musb_hdrc_platform_data usb_data = { #if defined(CONFIG_USB_MUSB_OTG) /* OTG requires a Mini-AB connector */ .mode = MUSB_OTG, #elif defined(CONFIG_USB_MUSB_PERIPHERAL) .mode = MUSB_PERIPHERAL, #elif defined(CONFIG_USB_MUSB_HOST) .mode = MUSB_HOST, #endif .clock = "usb", .config = &musb_config, }; static struct resource usb_resources[] = { { /* physical address */ .start = DAVINCI_USB_OTG_BASE, .end = DAVINCI_USB_OTG_BASE + 0x5ff, .flags = IORESOURCE_MEM, }, { .start = IRQ_USBINT, .flags = IORESOURCE_IRQ, .name = "mc" }, { /* placeholder for the dedicated CPPI IRQ */ .flags = IORESOURCE_IRQ, .name = "dma" }, }; static u64 usb_dmamask = DMA_BIT_MASK(32); static struct platform_device usb_dev = { .name = "musb-davinci", .id = -1, .dev = { .platform_data = &usb_data, .dma_mask = &usb_dmamask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .resource = usb_resources, .num_resources = ARRAY_SIZE(usb_resources), }; void __init davinci_setup_usb(unsigned mA, unsigned potpgt_ms) { usb_data.power = mA > 510 ? 255 : mA / 2; usb_data.potpgt = (potpgt_ms + 1) / 2; if (cpu_is_davinci_dm646x()) { /* Override the defaults as DM6467 uses different IRQs. 
*/ usb_dev.resource[1].start = IRQ_DM646X_USBINT; usb_dev.resource[2].start = IRQ_DM646X_USBDMAINT; } else /* other devices don't have dedicated CPPI IRQ */ usb_dev.num_resources = 2; platform_device_register(&usb_dev); } #ifdef CONFIG_ARCH_DAVINCI_DA8XX static struct resource da8xx_usb20_resources[] = { { .start = DA8XX_USB0_BASE, .end = DA8XX_USB0_BASE + SZ_64K - 1, .flags = IORESOURCE_MEM, }, { .start = IRQ_DA8XX_USB_INT, .flags = IORESOURCE_IRQ, .name = "mc", }, }; int __init da8xx_register_usb20(unsigned mA, unsigned potpgt) { usb_data.clock = "usb20"; usb_data.power = mA > 510 ? 255 : mA / 2; usb_data.potpgt = (potpgt + 1) / 2; usb_dev.resource = da8xx_usb20_resources; usb_dev.num_resources = ARRAY_SIZE(da8xx_usb20_resources); usb_dev.name = "musb-da8xx"; return platform_device_register(&usb_dev); } #endif /* CONFIG_DAVINCI_DA8XX */ #else void __init davinci_setup_usb(unsigned mA, unsigned potpgt_ms) { } #ifdef CONFIG_ARCH_DAVINCI_DA8XX int __init da8xx_register_usb20(unsigned mA, unsigned potpgt) { return 0; } #endif #endif /* CONFIG_USB_MUSB_HDRC */ #ifdef CONFIG_ARCH_DAVINCI_DA8XX static struct resource da8xx_usb11_resources[] = { [0] = { .start = DA8XX_USB1_BASE, .end = DA8XX_USB1_BASE + SZ_4K - 1, .flags = IORESOURCE_MEM, }, [1] = { .start = IRQ_DA8XX_IRQN, .end = IRQ_DA8XX_IRQN, .flags = IORESOURCE_IRQ, }, }; static u64 da8xx_usb11_dma_mask = DMA_BIT_MASK(32); static struct platform_device da8xx_usb11_device = { .name = "ohci", .id = 0, .dev = { .dma_mask = &da8xx_usb11_dma_mask, .coherent_dma_mask = DMA_BIT_MASK(32), }, .num_resources = ARRAY_SIZE(da8xx_usb11_resources), .resource = da8xx_usb11_resources, }; int __init da8xx_register_usb11(struct da8xx_ohci_root_hub *pdata) { da8xx_usb11_device.dev.platform_data = pdata; return platform_device_register(&da8xx_usb11_device); } #endif /* CONFIG_DAVINCI_DA8XX */
gpl-2.0
davidmueller13/bricked-hammerhead
drivers/char/agp/backend.c
8298
9086
/* * AGPGART driver backend routines. * Copyright (C) 2004 Silicon Graphics, Inc. * Copyright (C) 2002-2003 Dave Jones. * Copyright (C) 1999 Jeff Hartmann. * Copyright (C) 1999 Precision Insight, Inc. * Copyright (C) 1999 Xi Graphics, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included * in all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * JEFF HARTMANN, DAVE JONES, OR ANY OTHER CONTRIBUTORS BE LIABLE FOR ANY CLAIM, * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE * OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. * * TODO: * - Allocate more than order 0 pages to avoid too much linear map splitting. */ #include <linux/module.h> #include <linux/pci.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/pagemap.h> #include <linux/miscdevice.h> #include <linux/pm.h> #include <linux/agp_backend.h> #include <linux/agpgart.h> #include <linux/vmalloc.h> #include <asm/io.h> #include "agp.h" /* Due to XFree86 brain-damage, we can't go to 1.0 until they * fix some real stupidity. It's only by chance we can bump * past 0.99 at all due to some boolean logic error. 
*/ #define AGPGART_VERSION_MAJOR 0 #define AGPGART_VERSION_MINOR 103 static const struct agp_version agp_current_version = { .major = AGPGART_VERSION_MAJOR, .minor = AGPGART_VERSION_MINOR, }; struct agp_bridge_data *(*agp_find_bridge)(struct pci_dev *) = &agp_generic_find_bridge; struct agp_bridge_data *agp_bridge; LIST_HEAD(agp_bridges); EXPORT_SYMBOL(agp_bridge); EXPORT_SYMBOL(agp_bridges); EXPORT_SYMBOL(agp_find_bridge); /** * agp_backend_acquire - attempt to acquire an agp backend. * */ struct agp_bridge_data *agp_backend_acquire(struct pci_dev *pdev) { struct agp_bridge_data *bridge; bridge = agp_find_bridge(pdev); if (!bridge) return NULL; if (atomic_read(&bridge->agp_in_use)) return NULL; atomic_inc(&bridge->agp_in_use); return bridge; } EXPORT_SYMBOL(agp_backend_acquire); /** * agp_backend_release - release the lock on the agp backend. * * The caller must insure that the graphics aperture translation table * is read for use by another entity. * * (Ensure that all memory it bound is unbound.) 
*/ void agp_backend_release(struct agp_bridge_data *bridge) { if (bridge) atomic_dec(&bridge->agp_in_use); } EXPORT_SYMBOL(agp_backend_release); static const struct { int mem, agp; } maxes_table[] = { {0, 0}, {32, 4}, {64, 28}, {128, 96}, {256, 204}, {512, 440}, {1024, 942}, {2048, 1920}, {4096, 3932} }; static int agp_find_max(void) { long memory, index, result; #if PAGE_SHIFT < 20 memory = totalram_pages >> (20 - PAGE_SHIFT); #else memory = totalram_pages << (PAGE_SHIFT - 20); #endif index = 1; while ((memory > maxes_table[index].mem) && (index < 8)) index++; result = maxes_table[index - 1].agp + ( (memory - maxes_table[index - 1].mem) * (maxes_table[index].agp - maxes_table[index - 1].agp)) / (maxes_table[index].mem - maxes_table[index - 1].mem); result = result << (20 - PAGE_SHIFT); return result; } static int agp_backend_initialize(struct agp_bridge_data *bridge) { int size_value, rc, got_gatt=0, got_keylist=0; bridge->max_memory_agp = agp_find_max(); bridge->version = &agp_current_version; if (bridge->driver->needs_scratch_page) { struct page *page = bridge->driver->agp_alloc_page(bridge); if (!page) { dev_err(&bridge->dev->dev, "can't get memory for scratch page\n"); return -ENOMEM; } bridge->scratch_page_page = page; bridge->scratch_page_dma = page_to_phys(page); bridge->scratch_page = bridge->driver->mask_memory(bridge, bridge->scratch_page_dma, 0); } size_value = bridge->driver->fetch_size(); if (size_value == 0) { dev_err(&bridge->dev->dev, "can't determine aperture size\n"); rc = -EINVAL; goto err_out; } if (bridge->driver->create_gatt_table(bridge)) { dev_err(&bridge->dev->dev, "can't get memory for graphics translation table\n"); rc = -ENOMEM; goto err_out; } got_gatt = 1; bridge->key_list = vzalloc(PAGE_SIZE * 4); if (bridge->key_list == NULL) { dev_err(&bridge->dev->dev, "can't allocate memory for key lists\n"); rc = -ENOMEM; goto err_out; } got_keylist = 1; /* FIXME vmalloc'd memory not guaranteed contiguous */ if (bridge->driver->configure()) { 
dev_err(&bridge->dev->dev, "error configuring host chipset\n"); rc = -EINVAL; goto err_out; } INIT_LIST_HEAD(&bridge->mapped_list); spin_lock_init(&bridge->mapped_lock); return 0; err_out: if (bridge->driver->needs_scratch_page) { struct page *page = bridge->scratch_page_page; bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP); bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE); } if (got_gatt) bridge->driver->free_gatt_table(bridge); if (got_keylist) { vfree(bridge->key_list); bridge->key_list = NULL; } return rc; } /* cannot be __exit b/c as it could be called from __init code */ static void agp_backend_cleanup(struct agp_bridge_data *bridge) { if (bridge->driver->cleanup) bridge->driver->cleanup(); if (bridge->driver->free_gatt_table) bridge->driver->free_gatt_table(bridge); vfree(bridge->key_list); bridge->key_list = NULL; if (bridge->driver->agp_destroy_page && bridge->driver->needs_scratch_page) { struct page *page = bridge->scratch_page_page; bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_UNMAP); bridge->driver->agp_destroy_page(page, AGP_PAGE_DESTROY_FREE); } } /* When we remove the global variable agp_bridge from all drivers * then agp_alloc_bridge and agp_generic_find_bridge need to be updated */ struct agp_bridge_data *agp_alloc_bridge(void) { struct agp_bridge_data *bridge; bridge = kzalloc(sizeof(*bridge), GFP_KERNEL); if (!bridge) return NULL; atomic_set(&bridge->agp_in_use, 0); atomic_set(&bridge->current_memory_agp, 0); if (list_empty(&agp_bridges)) agp_bridge = bridge; return bridge; } EXPORT_SYMBOL(agp_alloc_bridge); void agp_put_bridge(struct agp_bridge_data *bridge) { kfree(bridge); if (list_empty(&agp_bridges)) agp_bridge = NULL; } EXPORT_SYMBOL(agp_put_bridge); int agp_add_bridge(struct agp_bridge_data *bridge) { int error; if (agp_off) { error = -ENODEV; goto err_put_bridge; } if (!bridge->dev) { printk (KERN_DEBUG PFX "Erk, registering with no pci_dev!\n"); error = -EINVAL; goto err_put_bridge; } /* Grab 
reference on the chipset driver. */ if (!try_module_get(bridge->driver->owner)) { dev_info(&bridge->dev->dev, "can't lock chipset driver\n"); error = -EINVAL; goto err_put_bridge; } error = agp_backend_initialize(bridge); if (error) { dev_info(&bridge->dev->dev, "agp_backend_initialize() failed\n"); goto err_out; } if (list_empty(&agp_bridges)) { error = agp_frontend_initialize(); if (error) { dev_info(&bridge->dev->dev, "agp_frontend_initialize() failed\n"); goto frontend_err; } dev_info(&bridge->dev->dev, "AGP aperture is %dM @ 0x%lx\n", bridge->driver->fetch_size(), bridge->gart_bus_addr); } list_add(&bridge->list, &agp_bridges); return 0; frontend_err: agp_backend_cleanup(bridge); err_out: module_put(bridge->driver->owner); err_put_bridge: agp_put_bridge(bridge); return error; } EXPORT_SYMBOL_GPL(agp_add_bridge); void agp_remove_bridge(struct agp_bridge_data *bridge) { agp_backend_cleanup(bridge); list_del(&bridge->list); if (list_empty(&agp_bridges)) agp_frontend_cleanup(); module_put(bridge->driver->owner); } EXPORT_SYMBOL_GPL(agp_remove_bridge); int agp_off; int agp_try_unsupported_boot; EXPORT_SYMBOL(agp_off); EXPORT_SYMBOL(agp_try_unsupported_boot); static int __init agp_init(void) { if (!agp_off) printk(KERN_INFO "Linux agpgart interface v%d.%d\n", AGPGART_VERSION_MAJOR, AGPGART_VERSION_MINOR); return 0; } static void __exit agp_exit(void) { } #ifndef MODULE static __init int agp_setup(char *s) { if (!strcmp(s,"off")) agp_off = 1; if (!strcmp(s,"try_unsupported")) agp_try_unsupported_boot = 1; return 1; } __setup("agp=", agp_setup); #endif MODULE_AUTHOR("Dave Jones <davej@redhat.com>"); MODULE_DESCRIPTION("AGP GART driver"); MODULE_LICENSE("GPL and additional rights"); MODULE_ALIAS_MISCDEV(AGPGART_MINOR); module_init(agp_init); module_exit(agp_exit);
gpl-2.0
c8813q-dev/android_kernel_huawei_c8813q
drivers/gpu/drm/radeon/atombios_i2c.c
1131
4061
/* * Copyright 2011 Advanced Micro Devices, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. 
* * Authors: Alex Deucher * */ #include "drmP.h" #include "radeon_drm.h" #include "radeon.h" #include "atom.h" extern void radeon_atom_copy_swap(u8 *dst, u8 *src, u8 num_bytes, bool to_le); #define TARGET_HW_I2C_CLOCK 50 /* these are a limitation of ProcessI2cChannelTransaction not the hw */ #define ATOM_MAX_HW_I2C_WRITE 2 #define ATOM_MAX_HW_I2C_READ 255 static int radeon_process_i2c_ch(struct radeon_i2c_chan *chan, u8 slave_addr, u8 flags, u8 *buf, u8 num) { struct drm_device *dev = chan->dev; struct radeon_device *rdev = dev->dev_private; PROCESS_I2C_CHANNEL_TRANSACTION_PS_ALLOCATION args; int index = GetIndexIntoMasterTable(COMMAND, ProcessI2cChannelTransaction); unsigned char *base; u16 out; memset(&args, 0, sizeof(args)); base = (unsigned char *)rdev->mode_info.atom_context->scratch; if (flags & HW_I2C_WRITE) { if (num > ATOM_MAX_HW_I2C_WRITE) { DRM_ERROR("hw i2c: tried to write too many bytes (%d vs 2)\n", num); return -EINVAL; } memcpy(&out, buf, num); args.lpI2CDataOut = cpu_to_le16(out); } else { if (num > ATOM_MAX_HW_I2C_READ) { DRM_ERROR("hw i2c: tried to read too many bytes (%d vs 255)\n", num); return -EINVAL; } } args.ucI2CSpeed = TARGET_HW_I2C_CLOCK; args.ucRegIndex = 0; args.ucTransBytes = num; args.ucSlaveAddr = slave_addr << 1; args.ucLineNumber = chan->rec.i2c_id; atom_execute_table(rdev->mode_info.atom_context, index, (uint32_t *)&args); /* error */ if (args.ucStatus != HW_ASSISTED_I2C_STATUS_SUCCESS) { DRM_DEBUG_KMS("hw_i2c error\n"); return -EIO; } if (!(flags & HW_I2C_WRITE)) radeon_atom_copy_swap(buf, base, num, false); return 0; } int radeon_atom_hw_i2c_xfer(struct i2c_adapter *i2c_adap, struct i2c_msg *msgs, int num) { struct radeon_i2c_chan *i2c = i2c_get_adapdata(i2c_adap); struct i2c_msg *p; int i, remaining, current_count, buffer_offset, max_bytes, ret; u8 buf = 0, flags; /* check for bus probe */ p = &msgs[0]; if ((num == 1) && (p->len == 0)) { ret = radeon_process_i2c_ch(i2c, p->addr, HW_I2C_WRITE, &buf, 1); if (ret) return ret; 
else return num; } for (i = 0; i < num; i++) { p = &msgs[i]; remaining = p->len; buffer_offset = 0; /* max_bytes are a limitation of ProcessI2cChannelTransaction not the hw */ if (p->flags & I2C_M_RD) { max_bytes = ATOM_MAX_HW_I2C_READ; flags = HW_I2C_READ; } else { max_bytes = ATOM_MAX_HW_I2C_WRITE; flags = HW_I2C_WRITE; } while (remaining) { if (remaining > max_bytes) current_count = max_bytes; else current_count = remaining; ret = radeon_process_i2c_ch(i2c, p->addr, flags, &p->buf[buffer_offset], current_count); if (ret) return ret; remaining -= current_count; buffer_offset += current_count; } } return num; } u32 radeon_atom_hw_i2c_func(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; }
gpl-2.0
vk2rq/linux-stable
drivers/char/ip2/i2lib.c
1643
66114
/******************************************************************************* * * (c) 1999 by Computone Corporation * ******************************************************************************** * * * PACKAGE: Linux tty Device Driver for IntelliPort family of multiport * serial I/O controllers. * * DESCRIPTION: High-level interface code for the device driver. Uses the * Extremely Low Level Interface Support (i2ellis.c). Provides an * interface to the standard loadware, to support drivers or * application code. (This is included source code, not a separate * compilation module.) * *******************************************************************************/ //------------------------------------------------------------------------------ // Note on Strategy: // Once the board has been initialized, it will interrupt us when: // 1) It has something in the fifo for us to read (incoming data, flow control // packets, or whatever). // 2) It has stripped whatever we have sent last time in the FIFO (and // consequently is ready for more). // // Note also that the buffer sizes declared in i2lib.h are VERY SMALL. This // worsens performance considerably, but is done so that a great many channels // might use only a little memory. //------------------------------------------------------------------------------ //------------------------------------------------------------------------------ // Revision History: // // 0.00 - 4/16/91 --- First Draft // 0.01 - 4/29/91 --- 1st beta release // 0.02 - 6/14/91 --- Changes to allow small model compilation // 0.03 - 6/17/91 MAG Break reporting protected from interrupts routines with // in-line asm added for moving data to/from ring buffers, // replacing a variety of methods used previously. // 0.04 - 6/21/91 MAG Initial flow-control packets not queued until // i2_enable_interrupts time. Former versions would enqueue // them at i2_init_channel time, before we knew how many // channels were supposed to exist! 
// 0.05 - 10/12/91 MAG Major changes: works through the ellis.c routines now; // supports new 16-bit protocol and expandable boards. // - 10/24/91 MAG Most changes in place and stable. // 0.06 - 2/20/92 MAG Format of CMD_HOTACK corrected: the command takes no // argument. // 0.07 -- 3/11/92 MAG Support added to store special packet types at interrupt // level (mostly responses to specific commands.) // 0.08 -- 3/30/92 MAG Support added for STAT_MODEM packet // 0.09 -- 6/24/93 MAG i2Link... needed to update number of boards BEFORE // turning on the interrupt. // 0.10 -- 6/25/93 MAG To avoid gruesome death from a bad board, we sanity check // some incoming. // // 1.1 - 12/25/96 AKM Linux version. // - 10/09/98 DMC Revised Linux version. //------------------------------------------------------------------------------ //************ //* Includes * //************ #include <linux/sched.h> #include "i2lib.h" //*********************** //* Function Prototypes * //*********************** static void i2QueueNeeds(i2eBordStrPtr, i2ChanStrPtr, int); static i2ChanStrPtr i2DeQueueNeeds(i2eBordStrPtr, int ); static void i2StripFifo(i2eBordStrPtr); static void i2StuffFifoBypass(i2eBordStrPtr); static void i2StuffFifoFlow(i2eBordStrPtr); static void i2StuffFifoInline(i2eBordStrPtr); static int i2RetryFlushOutput(i2ChanStrPtr); // Not a documented part of the library routines (careful...) but the Diagnostic // i2diag.c finds them useful to help the throughput in certain limited // single-threaded operations. 
static void iiSendPendingMail(i2eBordStrPtr); static void serviceOutgoingFifo(i2eBordStrPtr); // Functions defined in ip2.c as part of interrupt handling static void do_input(struct work_struct *); static void do_status(struct work_struct *); //*************** //* Debug Data * //*************** #ifdef DEBUG_FIFO unsigned char DBGBuf[0x4000]; unsigned short I = 0; static void WriteDBGBuf(char *s, unsigned char *src, unsigned short n ) { char *p = src; // XXX: We need a spin lock here if we ever use this again while (*s) { // copy label DBGBuf[I] = *s++; I = I++ & 0x3fff; } while (n--) { // copy data DBGBuf[I] = *p++; I = I++ & 0x3fff; } } static void fatality(i2eBordStrPtr pB ) { int i; for (i=0;i<sizeof(DBGBuf);i++) { if ((i%16) == 0) printk("\n%4x:",i); printk("%02x ",DBGBuf[i]); } printk("\n"); for (i=0;i<sizeof(DBGBuf);i++) { if ((i%16) == 0) printk("\n%4x:",i); if (DBGBuf[i] >= ' ' && DBGBuf[i] <= '~') { printk(" %c ",DBGBuf[i]); } else { printk(" . "); } } printk("\n"); printk("Last index %x\n",I); } #endif /* DEBUG_FIFO */ //******** //* Code * //******** static inline int i2Validate ( i2ChanStrPtr pCh ) { //ip2trace(pCh->port_index, ITRC_VERIFY,ITRC_ENTER,2,pCh->validity, // (CHANNEL_MAGIC | CHANNEL_SUPPORT)); return ((pCh->validity & (CHANNEL_MAGIC_BITS | CHANNEL_SUPPORT)) == (CHANNEL_MAGIC | CHANNEL_SUPPORT)); } static void iiSendPendingMail_t(unsigned long data) { i2eBordStrPtr pB = (i2eBordStrPtr)data; iiSendPendingMail(pB); } //****************************************************************************** // Function: iiSendPendingMail(pB) // Parameters: Pointer to a board structure // Returns: Nothing // // Description: // If any outgoing mail bits are set and there is outgoing mailbox is empty, // send the mail and clear the bits. 
//****************************************************************************** static void iiSendPendingMail(i2eBordStrPtr pB) { if (pB->i2eOutMailWaiting && (!pB->i2eWaitingForEmptyFifo) ) { if (iiTrySendMail(pB, pB->i2eOutMailWaiting)) { /* If we were already waiting for fifo to empty, * or just sent MB_OUT_STUFFED, then we are * still waiting for it to empty, until we should * receive an MB_IN_STRIPPED from the board. */ pB->i2eWaitingForEmptyFifo |= (pB->i2eOutMailWaiting & MB_OUT_STUFFED); pB->i2eOutMailWaiting = 0; pB->SendPendingRetry = 0; } else { /* The only time we hit this area is when "iiTrySendMail" has failed. That only occurs when the outbound mailbox is still busy with the last message. We take a short breather to let the board catch up with itself and then try again. 16 Retries is the limit - then we got a borked board. /\/\|=mhw=|\/\/ */ if( ++pB->SendPendingRetry < 16 ) { setup_timer(&pB->SendPendingTimer, iiSendPendingMail_t, (unsigned long)pB); mod_timer(&pB->SendPendingTimer, jiffies + 1); } else { printk( KERN_ERR "IP2: iiSendPendingMail unable to queue outbound mail\n" ); } } } } //****************************************************************************** // Function: i2InitChannels(pB, nChannels, pCh) // Parameters: Pointer to Ellis Board structure // Number of channels to initialize // Pointer to first element in an array of channel structures // Returns: Success or failure // // Description: // // This function patches pointers, back-pointers, and initializes all the // elements in the channel structure array. // // This should be run after the board structure is initialized, through having // loaded the standard loadware (otherwise it complains). // // In any case, it must be done before any serious work begins initializing the // irq's or sending commands... 
//
//******************************************************************************
static int
i2InitChannels ( i2eBordStrPtr pB, int nChannels, i2ChanStrPtr pCh)
{
	int index, stuffIndex;
	i2ChanStrPtr *ppCh;

	// Refuse to touch a board that is not validated and standard-loaded.
	if (pB->i2eValid != I2E_MAGIC) {
		I2_COMPLETE(pB, I2EE_BADMAGIC);
	}
	if (pB->i2eState != II_STATE_STDLOADED) {
		I2_COMPLETE(pB, I2EE_BADSTATE);
	}

	rwlock_init(&pB->read_fifo_spinlock);
	rwlock_init(&pB->write_fifo_spinlock);
	rwlock_init(&pB->Dbuf_spinlock);
	rwlock_init(&pB->Bbuf_spinlock);
	rwlock_init(&pB->Fbuf_spinlock);

	// NO LOCK needed yet - this is init

	pB->i2eChannelPtr = pCh;
	pB->i2eChannelCnt = nChannels;

	// Reset all three board-level channel queues (flow, data, bypass).
	pB->i2Fbuf_strip = pB->i2Fbuf_stuff = 0;
	pB->i2Dbuf_strip = pB->i2Dbuf_stuff = 0;
	pB->i2Bbuf_strip = pB->i2Bbuf_stuff = 0;

	pB->SendPendingRetry = 0;

	memset ( pCh, 0, sizeof (i2ChanStr) * nChannels );

	// Walk every possible port; only ports present in i2eChannelMap get a
	// channel structure. ppCh fills the flow-needs queue (i2Fbuf) longhand.
	for (index = stuffIndex = 0, ppCh = (i2ChanStrPtr *)(pB->i2Fbuf);
	      nChannels && index < ABS_MOST_PORTS;
	      index++)
	{
		if ( !(pB->i2eChannelMap[index >> 4] & (1 << (index & 0xf)) ) ) {
			continue;
		}
		rwlock_init(&pCh->Ibuf_spinlock);
		rwlock_init(&pCh->Obuf_spinlock);
		rwlock_init(&pCh->Cbuf_spinlock);
		rwlock_init(&pCh->Pbuf_spinlock);

		// NO LOCK needed yet - this is init
		// Set up validity flag according to support level
		if (pB->i2eGoodMap[index >> 4] & (1 << (index & 0xf)) ) {
			pCh->validity = CHANNEL_MAGIC | CHANNEL_SUPPORT;
		} else {
			pCh->validity = CHANNEL_MAGIC;
		}
		pCh->pMyBord = pB;          /* Back-pointer */

		// Prepare an outgoing flow-control packet to send as soon as the chance
		// occurs.
		if ( pCh->validity & CHANNEL_SUPPORT ) {
			pCh->infl.hd.i2sChannel = index;
			pCh->infl.hd.i2sCount = 5;
			pCh->infl.hd.i2sType = PTYPE_BYPASS;
			pCh->infl.fcmd = 37;	// flow-control command code
			pCh->infl.asof = 0;
			pCh->infl.room = IBUF_SIZE - 1;

			pCh->whenSendFlow = (IBUF_SIZE/5)*4; // when 80% full

		// The following is similar to calling i2QueueNeeds, except that this
		// is done in longhand, since we are setting up initial conditions on
		// many channels at once.
			pCh->channelNeeds = NEED_FLOW;  // Since starting from scratch
			pCh->sinceLastFlow = 0;         // No bytes received since last flow
			                                // control packet was queued
			stuffIndex++;
			*ppCh++ = pCh;      // List this channel as needing
			                    // initial flow control packet sent
		}

		// Don't allow anything to be sent until the status packets come in from
		// the board.

		pCh->outfl.asof = 0;
		pCh->outfl.room = 0;

		// Initialize all the ring buffers

		pCh->Ibuf_stuff = pCh->Ibuf_strip = 0;
		pCh->Obuf_stuff = pCh->Obuf_strip = 0;
		pCh->Cbuf_stuff = pCh->Cbuf_strip = 0;

		memset( &pCh->icount, 0, sizeof (struct async_icount) );
		pCh->hotKeyIn       = HOT_CLEAR;
		pCh->channelOptions = 0;
		pCh->bookMarks      = 0;
		init_waitqueue_head(&pCh->pBookmarkWait);

		init_waitqueue_head(&pCh->open_wait);
		init_waitqueue_head(&pCh->close_wait);
		init_waitqueue_head(&pCh->delta_msr_wait);

		// Set base and divisor so default custom rate is 9600
		pCh->BaudBase    = 921600;	// MAX for ST654, changed after we get
		pCh->BaudDivisor = 96;		// the boxids (UART types) later

		pCh->dataSetIn   = 0;
		pCh->dataSetOut  = 0;

		pCh->wopen       = 0;
		pCh->throttled   = 0;

		pCh->speed       = CBR_9600;

		pCh->flags    = 0;

		pCh->ClosingDelay     = 5*HZ/10;
		pCh->ClosingWaitTime  = 30*HZ;

		// Initialize task queue objects
		INIT_WORK(&pCh->tqueue_input, do_input);
		INIT_WORK(&pCh->tqueue_status, do_status);

#ifdef IP2DEBUG_TRACE
		pCh->trace = ip2trace;
#endif

		++pCh;
		--nChannels;
	}
	// No need to check for wrap here; this is initialization.
	pB->i2Fbuf_stuff = stuffIndex;
	I2_COMPLETE(pB, I2EE_GOOD);
}

//******************************************************************************
// Function:   i2DeQueueNeeds(pB, type)
// Parameters: Pointer to a board structure
//             type bit map: may include NEED_INLINE, NEED_BYPASS, or NEED_FLOW
// Returns:
//             Pointer to a channel structure
//
// Description: Returns pointer struct of next channel that needs service of
// the type specified. Otherwise returns a NULL reference.
// //****************************************************************************** static i2ChanStrPtr i2DeQueueNeeds(i2eBordStrPtr pB, int type) { unsigned short queueIndex; unsigned long flags; i2ChanStrPtr pCh = NULL; switch(type) { case NEED_INLINE: write_lock_irqsave(&pB->Dbuf_spinlock, flags); if ( pB->i2Dbuf_stuff != pB->i2Dbuf_strip) { queueIndex = pB->i2Dbuf_strip; pCh = pB->i2Dbuf[queueIndex]; queueIndex++; if (queueIndex >= CH_QUEUE_SIZE) { queueIndex = 0; } pB->i2Dbuf_strip = queueIndex; pCh->channelNeeds &= ~NEED_INLINE; } write_unlock_irqrestore(&pB->Dbuf_spinlock, flags); break; case NEED_BYPASS: write_lock_irqsave(&pB->Bbuf_spinlock, flags); if (pB->i2Bbuf_stuff != pB->i2Bbuf_strip) { queueIndex = pB->i2Bbuf_strip; pCh = pB->i2Bbuf[queueIndex]; queueIndex++; if (queueIndex >= CH_QUEUE_SIZE) { queueIndex = 0; } pB->i2Bbuf_strip = queueIndex; pCh->channelNeeds &= ~NEED_BYPASS; } write_unlock_irqrestore(&pB->Bbuf_spinlock, flags); break; case NEED_FLOW: write_lock_irqsave(&pB->Fbuf_spinlock, flags); if (pB->i2Fbuf_stuff != pB->i2Fbuf_strip) { queueIndex = pB->i2Fbuf_strip; pCh = pB->i2Fbuf[queueIndex]; queueIndex++; if (queueIndex >= CH_QUEUE_SIZE) { queueIndex = 0; } pB->i2Fbuf_strip = queueIndex; pCh->channelNeeds &= ~NEED_FLOW; } write_unlock_irqrestore(&pB->Fbuf_spinlock, flags); break; default: printk(KERN_ERR "i2DeQueueNeeds called with bad type:%x\n",type); break; } return pCh; } //****************************************************************************** // Function: i2QueueNeeds(pB, pCh, type) // Parameters: Pointer to a board structure // Pointer to a channel structure // type bit map: may include NEED_INLINE, NEED_BYPASS, or NEED_FLOW // Returns: Nothing // // Description: // For each type of need selected, if the given channel is not already in the // queue, adds it, and sets the flag indicating it is in the queue. 
//****************************************************************************** static void i2QueueNeeds(i2eBordStrPtr pB, i2ChanStrPtr pCh, int type) { unsigned short queueIndex; unsigned long flags; // We turn off all the interrupts during this brief process, since the // interrupt-level code might want to put things on the queue as well. switch (type) { case NEED_INLINE: write_lock_irqsave(&pB->Dbuf_spinlock, flags); if ( !(pCh->channelNeeds & NEED_INLINE) ) { pCh->channelNeeds |= NEED_INLINE; queueIndex = pB->i2Dbuf_stuff; pB->i2Dbuf[queueIndex++] = pCh; if (queueIndex >= CH_QUEUE_SIZE) queueIndex = 0; pB->i2Dbuf_stuff = queueIndex; } write_unlock_irqrestore(&pB->Dbuf_spinlock, flags); break; case NEED_BYPASS: write_lock_irqsave(&pB->Bbuf_spinlock, flags); if ((type & NEED_BYPASS) && !(pCh->channelNeeds & NEED_BYPASS)) { pCh->channelNeeds |= NEED_BYPASS; queueIndex = pB->i2Bbuf_stuff; pB->i2Bbuf[queueIndex++] = pCh; if (queueIndex >= CH_QUEUE_SIZE) queueIndex = 0; pB->i2Bbuf_stuff = queueIndex; } write_unlock_irqrestore(&pB->Bbuf_spinlock, flags); break; case NEED_FLOW: write_lock_irqsave(&pB->Fbuf_spinlock, flags); if ((type & NEED_FLOW) && !(pCh->channelNeeds & NEED_FLOW)) { pCh->channelNeeds |= NEED_FLOW; queueIndex = pB->i2Fbuf_stuff; pB->i2Fbuf[queueIndex++] = pCh; if (queueIndex >= CH_QUEUE_SIZE) queueIndex = 0; pB->i2Fbuf_stuff = queueIndex; } write_unlock_irqrestore(&pB->Fbuf_spinlock, flags); break; case NEED_CREDIT: pCh->channelNeeds |= NEED_CREDIT; break; default: printk(KERN_ERR "i2QueueNeeds called with bad type:%x\n",type); break; } return; } //****************************************************************************** // Function: i2QueueCommands(type, pCh, timeout, nCommands, pCs,...) 
// Parameters: type - PTYPE_BYPASS or PTYPE_INLINE
//             pointer to the channel structure
//             maximum period to wait
//             number of commands (n)
//             n commands
// Returns:    Number of commands sent, or -1 for error
//
// get board lock before calling
//
// Description:
// Queues up some commands to be sent to a channel. To send possibly several
// bypass or inline commands to the given channel. The timeout parameter
// indicates how many HUNDREDTHS OF SECONDS to wait until there is room:
// 0 = return immediately if no room, -ive = wait forever, +ive = number of
// 1/100 seconds to wait. Return values:
// -1 Some kind of nasty error: bad channel structure or invalid arguments.
//  0 No room to send all the commands
// (+) Number of commands sent
//******************************************************************************
static int
i2QueueCommands(int type, i2ChanStrPtr pCh, int timeout, int nCommands,
					cmdSyntaxPtr pCs0,...)
{
	int totalsize = 0;
	int blocksize;
	int lastended;
	cmdSyntaxPtr *ppCs;
	cmdSyntaxPtr pCs;
	int count;
	int flag;
	i2eBordStrPtr pB;

	unsigned short maxBlock;
	unsigned short maxBuff;
	short bufroom;
	unsigned short stuffIndex;
	unsigned char *pBuf;
	unsigned char *pInsert;
	unsigned char *pDest, *pSource;
	unsigned short channel;
	int cnt;
	unsigned long flags = 0;
	rwlock_t *lock_var_p = NULL;

	// Make sure the channel exists, otherwise do nothing
	if ( !i2Validate ( pCh ) ) {
		return -1;
	}

	ip2trace (CHANN, ITRC_QUEUE, ITRC_ENTER, 0 );

	pB = pCh->pMyBord;

	// Board must also exist, and THE INTERRUPT COMMAND ALREADY SENT
	if (pB->i2eValid != I2E_MAGIC || pB->i2eUsingIrq == I2_IRQ_UNDEFINED)
		return -2;
	// If the board has gone fatal, return bad, and also hit the trap routine if
	// it exists.
	if (pB->i2eFatal) {
		if ( pB->i2eFatalTrap ) {
			(*(pB)->i2eFatalTrap)(pB);
		}
		return -3;
	}
	// Set up some variables, Which buffers are we using? How big are they?
	switch(type)
	{
	case PTYPE_INLINE:
		flag = INL;
		maxBlock = MAX_OBUF_BLOCK;
		maxBuff = OBUF_SIZE;
		pBuf = pCh->Obuf;
		break;
	case PTYPE_BYPASS:
		flag = BYP;
		maxBlock = MAX_CBUF_BLOCK;
		maxBuff = CBUF_SIZE;
		pBuf = pCh->Cbuf;
		break;
	default:
		return -4;
	}
	// Determine the total size required for all the commands
	totalsize = blocksize = sizeof(i2CmdHeader);
	lastended = 0;
	ppCs = &pCs0;
	for ( count = nCommands; count; count--, ppCs++)
	{
		pCs = *ppCs;
		cnt = pCs->length;
		// Will a new block be needed for this one?
		// Two possible reasons: too
		// big or previous command has to be at the end of a packet.
		if ((blocksize + cnt > maxBlock) || lastended) {
			blocksize = sizeof(i2CmdHeader);
			totalsize += sizeof(i2CmdHeader);
		}
		totalsize += cnt;
		blocksize += cnt;

		// If this command had to end a block, then we will make sure to
		// account for it should there be any more blocks.
		lastended = pCs->flags & END;
	}
	// Wait (and retry) until the whole batch fits in the ring buffer.
	// NOTE: on the success path we break out of this loop while still
	// holding the write lock taken below - it is released at the end of
	// the function, after the commands are stuffed into the buffer.
	for (;;) {
		// Make sure any pending flush commands go out before we add more data.
		if ( !( pCh->flush_flags && i2RetryFlushOutput( pCh ) ) ) {
			// How much room (this time through) ?
			switch(type) {
			case PTYPE_INLINE:
				lock_var_p = &pCh->Obuf_spinlock;
				write_lock_irqsave(lock_var_p, flags);
				stuffIndex = pCh->Obuf_stuff;
				bufroom = pCh->Obuf_strip - stuffIndex;
				break;
			case PTYPE_BYPASS:
				lock_var_p = &pCh->Cbuf_spinlock;
				write_lock_irqsave(lock_var_p, flags);
				stuffIndex = pCh->Cbuf_stuff;
				bufroom = pCh->Cbuf_strip - stuffIndex;
				break;
			default:
				return -5;
			}
			if (--bufroom < 0) {
				bufroom += maxBuff;
			}

			ip2trace (CHANN, ITRC_QUEUE, 2, 1, bufroom );

			// Check for overflow
			if (totalsize <= bufroom) {
				// Normal Expected path - We still hold LOCK
				break; /* from for()- Enough room: goto proceed */
			}
			ip2trace(CHANN, ITRC_QUEUE, 3, 1, totalsize);
			write_unlock_irqrestore(lock_var_p, flags);
		} else
			ip2trace(CHANN, ITRC_QUEUE, 3, 1, totalsize);

		/* Prepare to wait for buffers to empty */
		serviceOutgoingFifo(pB);	// Dump what we got

		if (timeout == 0) {
			return 0;   // Tired of waiting
		}
		if (timeout > 0)
			timeout--;   // So negative values == forever

		if (!in_interrupt()) {
			schedule_timeout_interruptible(1);	// short nap
		} else {
			// we cannot sched/sleep in interrupt silly
			return 0;
		}
		if (signal_pending(current)) {
			return 0;   // Wake up! Time to die!!!
		}

		ip2trace (CHANN, ITRC_QUEUE, 4, 0 );

	}	// end of for(;;)

	// At this point we have room and the lock - stick them in.
	channel = pCh->infl.hd.i2sChannel;
	pInsert = &pBuf[stuffIndex];     // Pointer to start of packet
	pDest = CMD_OF(pInsert);         // Pointer to start of command

	// When we start counting, the block is the size of the header
	for (blocksize = sizeof(i2CmdHeader), count = nCommands,
			lastended = 0, ppCs = &pCs0;
		count;
		count--, ppCs++)
	{
		pCs = *ppCs;         // Points to command protocol structure

		// If this is a bookmark request command, post the fact that a bookmark
		// request is pending. NOTE THIS TRICK ONLY WORKS BECAUSE CMD_BMARK_REQ
		// has no parameters!  The more general solution would be to reference
		// pCs->cmd[0].
		if (pCs == CMD_BMARK_REQ) {
			pCh->bookMarks++;

			ip2trace (CHANN, ITRC_DRAIN, 30, 1, pCh->bookMarks );

		}
		cnt = pCs->length;

		// If this command would put us over the maximum block size or
		// if the last command had to be at the end of a block, we end
		// the existing block here and start a new one.
		if ((blocksize + cnt > maxBlock) || lastended) {

			ip2trace (CHANN, ITRC_QUEUE, 5, 0 );

			PTYPE_OF(pInsert) = type;
			CHANNEL_OF(pInsert) = channel;
			// count here does not include the header
			CMD_COUNT_OF(pInsert) = blocksize - sizeof(i2CmdHeader);
			stuffIndex += blocksize;
			if(stuffIndex >= maxBuff) {
				stuffIndex = 0;
				pInsert = pBuf;	// NOTE: redundant - overwritten just below
			}
			pInsert = &pBuf[stuffIndex];     // Pointer to start of next pkt
			pDest = CMD_OF(pInsert);
			blocksize = sizeof(i2CmdHeader);
		}
		// Now we know there is room for this one in the current block

		blocksize += cnt;       // Total bytes in this command
		pSource = pCs->cmd;     // Copy the command into the buffer
		while (cnt--) {
			*pDest++ = *pSource++;
		}
		// If this command had to end a block, then we will make sure to account
		// for it should there be any more blocks.
		lastended = pCs->flags & END;
	}	// end for
	// Clean up the final block by writing header, etc

	PTYPE_OF(pInsert) = type;
	CHANNEL_OF(pInsert) = channel;
	// count here does not include the header
	CMD_COUNT_OF(pInsert) = blocksize - sizeof(i2CmdHeader);
	stuffIndex += blocksize;
	if(stuffIndex >= maxBuff) {
		stuffIndex = 0;
		pInsert = pBuf;
	}
	// Updates the index, and post the need for service. When adding these to
	// the queue of channels, we turn off the interrupt while doing so,
	// because at interrupt level we might want to push a channel back to the
	// end of the queue.
	switch(type)
	{
	case PTYPE_INLINE:
		pCh->Obuf_stuff = stuffIndex;  // Store buffer pointer
		write_unlock_irqrestore(&pCh->Obuf_spinlock, flags);

		pB->debugInlineQueued++;
		// Add the channel pointer to list of channels needing service (first
		// come...), if it's not already there.
		i2QueueNeeds(pB, pCh, NEED_INLINE);
		break;

	case PTYPE_BYPASS:
		pCh->Cbuf_stuff = stuffIndex;  // Store buffer pointer
		write_unlock_irqrestore(&pCh->Cbuf_spinlock, flags);

		pB->debugBypassQueued++;
		// Add the channel pointer to list of channels needing service (first
		// come...), if it's not already there.
		i2QueueNeeds(pB, pCh, NEED_BYPASS);
		break;
	}

	ip2trace (CHANN, ITRC_QUEUE, ITRC_RETURN, 1, nCommands );

	return nCommands; // Good status: number of commands sent
}

//******************************************************************************
// Function:   i2GetStatus(pCh,resetBits)
// Parameters: Pointer to a channel structure
//             Bit map of status bits to clear
// Returns:    Bit map of current status bits
//
// Description:
// Returns the state of data set signals, and whether a break has been received,
// (see i2lib.h for bit-mapped result). resetBits is a bit-map of any status
// bits to be cleared: I2_BRK, I2_PAR, I2_FRA, I2_OVR,... These are cleared
// AFTER the condition is passed. If pCh does not point to a valid channel,
// returns -1 (which would be impossible otherwise.
//******************************************************************************
static int
i2GetStatus(i2ChanStrPtr pCh, int resetBits)
{
	unsigned short status;
	i2eBordStrPtr pB;

	ip2trace (CHANN, ITRC_STATUS, ITRC_ENTER, 2, pCh->dataSetIn, resetBits );

	// Make sure the channel exists, otherwise do nothing */
	if ( !i2Validate ( pCh ) )
		return -1;

	pB = pCh->pMyBord;

	status = pCh->dataSetIn;

	// Clear any specified error bits: but note that only actual error bits can
	// be cleared, regardless of the value passed.
	if (resetBits)
	{
		pCh->dataSetIn &= ~(resetBits & (I2_BRK | I2_PAR | I2_FRA | I2_OVR));
		// Delta bits are always consumed by a status read.
		pCh->dataSetIn &= ~(I2_DDCD | I2_DCTS | I2_DDSR | I2_DRI);
	}

	ip2trace (CHANN, ITRC_STATUS, ITRC_RETURN, 1, pCh->dataSetIn );

	return status;
}

//******************************************************************************
// Function:   i2Input(pChpDest,count)
// Parameters: Pointer to a channel structure
//             Pointer to data buffer
//             Number of bytes to read
// Returns:    Number of bytes read, or -1 for error
//
// Description:
// Strips data from the input buffer and writes it to pDest. If there is a
// collosal blunder, (invalid structure pointers or the like), returns -1.
// Otherwise, returns the number of bytes read.
//******************************************************************************
static int
i2Input(i2ChanStrPtr pCh)
{
	int amountToMove;
	unsigned short stripIndex;
	int count;
	unsigned long flags = 0;

	ip2trace (CHANN, ITRC_INPUT, ITRC_ENTER, 0);

	// Ensure channel structure seems real
	if ( !i2Validate( pCh ) ) {
		count = -1;
		goto i2Input_exit;
	}
	write_lock_irqsave(&pCh->Ibuf_spinlock, flags);

	// initialize some accelerators and private copies
	stripIndex = pCh->Ibuf_strip;

	count = pCh->Ibuf_stuff - stripIndex;

	// If buffer is empty or requested data count was 0, (trivial case) return
	// without any further thought.
	if ( count == 0 ) {
		write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
		goto i2Input_exit;
	}
	// Adjust for buffer wrap
	if ( count < 0 ) {
		count += IBUF_SIZE;
	}
	// Don't give more than can be taken by the line discipline
	amountToMove = pCh->pTTY->receive_room;
	if (count > amountToMove) {
		count = amountToMove;
	}
	// How much could we copy without a wrap?
	amountToMove = IBUF_SIZE - stripIndex;

	if (amountToMove > count) {
		amountToMove = count;
	}
	// Move the first block
	pCh->pTTY->ldisc->ops->receive_buf( pCh->pTTY,
		 &(pCh->Ibuf[stripIndex]), NULL, amountToMove );

	// If we needed to wrap, do the second data move
	if (count > amountToMove) {
		pCh->pTTY->ldisc->ops->receive_buf( pCh->pTTY,
		 pCh->Ibuf, NULL, count - amountToMove );
	}
	// Bump and wrap the stripIndex all at once by the amount of data read. This
	// method is good regardless of whether the data was in one or two pieces.
	stripIndex += count;
	if (stripIndex >= IBUF_SIZE) {
		stripIndex -= IBUF_SIZE;
	}
	pCh->Ibuf_strip = stripIndex;

	// Update our flow control information and possibly queue ourselves to send
	// it, depending on how much data has been stripped since the last time a
	// packet was sent.
	// NOTE: the lock is dropped BEFORE i2QueueNeeds, which takes its own lock.
	pCh->infl.asof += count;
	if ((pCh->sinceLastFlow += count) >= pCh->whenSendFlow) {
		pCh->sinceLastFlow -= pCh->whenSendFlow;
		write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
		i2QueueNeeds(pCh->pMyBord, pCh, NEED_FLOW);
	} else {
		write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags);
	}

i2Input_exit:

	ip2trace (CHANN, ITRC_INPUT, ITRC_RETURN, 1, count);

	return count;
}

//******************************************************************************
// Function:   i2InputFlush(pCh)
// Parameters: Pointer to a channel structure
// Returns:    Number of bytes stripped, or -1 for error
//
// Description:
// Strips any data from the input buffer. If there is a collosal blunder,
// (invalid structure pointers or the like), returns -1. Otherwise, returns the
// number of bytes stripped.
//****************************************************************************** static int i2InputFlush(i2ChanStrPtr pCh) { int count; unsigned long flags; // Ensure channel structure seems real if ( !i2Validate ( pCh ) ) return -1; ip2trace (CHANN, ITRC_INPUT, 10, 0); write_lock_irqsave(&pCh->Ibuf_spinlock, flags); count = pCh->Ibuf_stuff - pCh->Ibuf_strip; // Adjust for buffer wrap if (count < 0) { count += IBUF_SIZE; } // Expedient way to zero out the buffer pCh->Ibuf_strip = pCh->Ibuf_stuff; // Update our flow control information and possibly queue ourselves to send // it, depending on how much data has been stripped since the last time a // packet was sent. pCh->infl.asof += count; if ( (pCh->sinceLastFlow += count) >= pCh->whenSendFlow ) { pCh->sinceLastFlow -= pCh->whenSendFlow; write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags); i2QueueNeeds(pCh->pMyBord, pCh, NEED_FLOW); } else { write_unlock_irqrestore(&pCh->Ibuf_spinlock, flags); } ip2trace (CHANN, ITRC_INPUT, 19, 1, count); return count; } //****************************************************************************** // Function: i2InputAvailable(pCh) // Parameters: Pointer to a channel structure // Returns: Number of bytes available, or -1 for error // // Description: // If there is a collosal blunder, (invalid structure pointers or the like), // returns -1. Otherwise, returns the number of bytes stripped. Otherwise, // returns the number of bytes available in the buffer. 
//****************************************************************************** #if 0 static int i2InputAvailable(i2ChanStrPtr pCh) { int count; // Ensure channel structure seems real if ( !i2Validate ( pCh ) ) return -1; // initialize some accelerators and private copies read_lock_irqsave(&pCh->Ibuf_spinlock, flags); count = pCh->Ibuf_stuff - pCh->Ibuf_strip; read_unlock_irqrestore(&pCh->Ibuf_spinlock, flags); // Adjust for buffer wrap if (count < 0) { count += IBUF_SIZE; } return count; } #endif //****************************************************************************** // Function: i2Output(pCh, pSource, count) // Parameters: Pointer to channel structure // Pointer to source data // Number of bytes to send // Returns: Number of bytes sent, or -1 for error // // Description: // Queues the data at pSource to be sent as data packets to the board. If there // is a collosal blunder, (invalid structure pointers or the like), returns -1. // Otherwise, returns the number of bytes written. What if there is not enough // room for all the data? If pCh->channelOptions & CO_NBLOCK_WRITE is set, then // we transfer as many characters as we can now, then return. If this bit is // clear (default), routine will spin along until all the data is buffered. // Should this occur, the 1-ms delay routine is called while waiting to avoid // applications that one cannot break out of. 
//******************************************************************************
static int
i2Output(i2ChanStrPtr pCh, const char *pSource, int count)
{
	i2eBordStrPtr pB;
	unsigned char *pInsert;
	int amountToMove;
	int countOriginal = count;
	unsigned short channel;
	unsigned short stuffIndex;
	unsigned long flags;

	int bailout = 10;	// limits retries when the fifo will not drain

	ip2trace (CHANN, ITRC_OUTPUT, ITRC_ENTER, 2, count, 0 );

	// Ensure channel structure seems real
	if ( !i2Validate ( pCh ) )
		return -1;

	// initialize some accelerators and private copies
	pB = pCh->pMyBord;
	channel = pCh->infl.hd.i2sChannel;

	// If the board has gone fatal, return bad, and also hit the trap routine if
	// it exists.
	if (pB->i2eFatal) {
		if (pB->i2eFatalTrap) {
			(*(pB)->i2eFatalTrap)(pB);
		}
		return -1;
	}
	// Proceed as though we would do everything
	while ( count > 0 ) {

		// How much room in output buffer is there?
		read_lock_irqsave(&pCh->Obuf_spinlock, flags);
		amountToMove = pCh->Obuf_strip - pCh->Obuf_stuff - 1;
		read_unlock_irqrestore(&pCh->Obuf_spinlock, flags);
		if (amountToMove < 0) {
			amountToMove += OBUF_SIZE;
		}
		// Subtract off the headers size and see how much room there is for real
		// data. If this is negative, we will discover later.
		amountToMove -= sizeof (i2DataHeader);

		// Don't move more (now) than can go in a single packet
		if ( amountToMove > (int)(MAX_OBUF_BLOCK - sizeof(i2DataHeader)) ) {
			amountToMove = MAX_OBUF_BLOCK - sizeof(i2DataHeader);
		}
		// Don't move more than the count we were given
		if (amountToMove > count) {
			amountToMove = count;
		}
		// Now we know how much we must move: NB because the ring buffers have
		// an overflow area at the end, we needn't worry about wrapping in the
		// middle of a packet.

		// Small WINDOW here with no LOCK but I can't call Flush with LOCK
		// We would be flushing (or ending flush) anyway

		ip2trace (CHANN, ITRC_OUTPUT, 10, 1, amountToMove );

		if ( !(pCh->flush_flags && i2RetryFlushOutput(pCh) )
				&& amountToMove > 0 )
		{
			write_lock_irqsave(&pCh->Obuf_spinlock, flags);
			stuffIndex = pCh->Obuf_stuff;

			// Had room to move some data: don't know whether the block size,
			// buffer space, or what was the limiting factor...
			pInsert = &(pCh->Obuf[stuffIndex]);

			// Set up the header
			CHANNEL_OF(pInsert)     = channel;
			PTYPE_OF(pInsert)       = PTYPE_DATA;
			TAG_OF(pInsert)         = 0;
			ID_OF(pInsert)          = ID_ORDINARY_DATA;
			DATA_COUNT_OF(pInsert)  = amountToMove;

			// Move the data
			memcpy( (char*)(DATA_OF(pInsert)), pSource, amountToMove );
			// Adjust pointers and indices
			pSource					+= amountToMove;
			pCh->Obuf_char_count	+= amountToMove;
			stuffIndex 				+= amountToMove + sizeof(i2DataHeader);
			count 					-= amountToMove;

			if (stuffIndex >= OBUF_SIZE) {
				stuffIndex = 0;	// wrap; overflow area guarantees the
						// packet itself was contiguous
			}
			pCh->Obuf_stuff = stuffIndex;

			write_unlock_irqrestore(&pCh->Obuf_spinlock, flags);

			ip2trace (CHANN, ITRC_OUTPUT, 13, 1, stuffIndex );

		} else {

			// Cannot move data
			// becuz we need to stuff a flush
			// or amount to move is <= 0

			ip2trace(CHANN, ITRC_OUTPUT, 14, 3,
				amountToMove,  pB->i2eFifoRemains,
				pB->i2eWaitingForEmptyFifo );

			// Put this channel back on queue
			// this ultimatly gets more data or wakes write output
			i2QueueNeeds(pB, pCh, NEED_INLINE);

			if ( pB->i2eWaitingForEmptyFifo ) {

				ip2trace (CHANN, ITRC_OUTPUT, 16, 0 );

				// or schedule
				if (!in_interrupt()) {

					ip2trace (CHANN, ITRC_OUTPUT, 61, 0 );

					schedule_timeout_interruptible(2);
					if (signal_pending(current)) {
						break;
					}
					continue;
				} else {

					ip2trace (CHANN, ITRC_OUTPUT, 62, 0 );

					// let interrupt in = WAS restore_flags()
					// We hold no lock nor is irq off anymore???

					break;
				}
				break;   // from while(count)	// NOTE: unreachable -
								// both branches above
								// already left the loop
			}
			else if ( pB->i2eFifoRemains < 32 && !pB->i2eTxMailEmpty ( pB ) )
			{

				ip2trace (CHANN, ITRC_OUTPUT, 19, 2,
					pB->i2eFifoRemains,
					pB->i2eTxMailEmpty );

				break;   // from while(count)
			} else if ( pCh->channelNeeds & NEED_CREDIT ) {

				ip2trace (CHANN, ITRC_OUTPUT, 22, 0 );

				break;   // from while(count)
			} else if ( --bailout) {

				// Try to throw more things (maybe not us) in the fifo if we're
				// not already waiting for it.

				ip2trace (CHANN, ITRC_OUTPUT, 20, 0 );

				serviceOutgoingFifo(pB);
				//break;  CONTINUE;
			} else {

				ip2trace (CHANN, ITRC_OUTPUT, 21, 3,
					pB->i2eFifoRemains,
					pB->i2eOutMailWaiting,
					pB->i2eWaitingForEmptyFifo );

				break;   // from while(count)
			}
		}
	} // End of while(count)

	i2QueueNeeds(pB, pCh, NEED_INLINE);

	// We drop through either when the count expires, or when there is some
	// count left, but there was a non-blocking write.
	if (countOriginal > count) {

		ip2trace (CHANN, ITRC_OUTPUT, 17, 2, countOriginal, count );

		serviceOutgoingFifo( pB );
	}

	ip2trace (CHANN, ITRC_OUTPUT, ITRC_RETURN, 2, countOriginal, count );

	return countOriginal - count;
}

//******************************************************************************
// Function:   i2FlushOutput(pCh)
// Parameters: Pointer to a channel structure
// Returns:    Nothing
//
// Description:
// Sends bypass command to start flushing (waiting possibly forever until there
// is room), then sends inline command to stop flushing output, (again waiting
// possibly forever).
//****************************************************************************** static inline void i2FlushOutput(i2ChanStrPtr pCh) { ip2trace (CHANN, ITRC_FLUSH, 1, 1, pCh->flush_flags ); if (pCh->flush_flags) return; if ( 1 != i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_STARTFL) ) { pCh->flush_flags = STARTFL_FLAG; // Failed - flag for later ip2trace (CHANN, ITRC_FLUSH, 2, 0 ); } else if ( 1 != i2QueueCommands(PTYPE_INLINE, pCh, 0, 1, CMD_STOPFL) ) { pCh->flush_flags = STOPFL_FLAG; // Failed - flag for later ip2trace (CHANN, ITRC_FLUSH, 3, 0 ); } } static int i2RetryFlushOutput(i2ChanStrPtr pCh) { int old_flags = pCh->flush_flags; ip2trace (CHANN, ITRC_FLUSH, 14, 1, old_flags ); pCh->flush_flags = 0; // Clear flag so we can avoid recursion // and queue the commands if ( old_flags & STARTFL_FLAG ) { if ( 1 == i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_STARTFL) ) { old_flags = STOPFL_FLAG; //Success - send stop flush } else { old_flags = STARTFL_FLAG; //Failure - Flag for retry later } ip2trace (CHANN, ITRC_FLUSH, 15, 1, old_flags ); } if ( old_flags & STOPFL_FLAG ) { if (1 == i2QueueCommands(PTYPE_INLINE, pCh, 0, 1, CMD_STOPFL)) { old_flags = 0; // Success - clear flags } ip2trace (CHANN, ITRC_FLUSH, 16, 1, old_flags ); } pCh->flush_flags = old_flags; ip2trace (CHANN, ITRC_FLUSH, 17, 1, old_flags ); return old_flags; } //****************************************************************************** // Function: i2DrainOutput(pCh,timeout) // Parameters: Pointer to a channel structure // Maximum period to wait // Returns: ? // // Description: // Uses the bookmark request command to ask the board to send a bookmark back as // soon as all the data is completely sent. 
//****************************************************************************** static void i2DrainWakeup(unsigned long d) { i2ChanStrPtr pCh = (i2ChanStrPtr)d; ip2trace (CHANN, ITRC_DRAIN, 10, 1, pCh->BookmarkTimer.expires ); pCh->BookmarkTimer.expires = 0; wake_up_interruptible( &pCh->pBookmarkWait ); } static void i2DrainOutput(i2ChanStrPtr pCh, int timeout) { wait_queue_t wait; i2eBordStrPtr pB; ip2trace (CHANN, ITRC_DRAIN, ITRC_ENTER, 1, pCh->BookmarkTimer.expires); pB = pCh->pMyBord; // If the board has gone fatal, return bad, // and also hit the trap routine if it exists. if (pB->i2eFatal) { if (pB->i2eFatalTrap) { (*(pB)->i2eFatalTrap)(pB); } return; } if ((timeout > 0) && (pCh->BookmarkTimer.expires == 0 )) { // One per customer (channel) setup_timer(&pCh->BookmarkTimer, i2DrainWakeup, (unsigned long)pCh); ip2trace (CHANN, ITRC_DRAIN, 1, 1, pCh->BookmarkTimer.expires ); mod_timer(&pCh->BookmarkTimer, jiffies + timeout); } i2QueueCommands( PTYPE_INLINE, pCh, -1, 1, CMD_BMARK_REQ ); init_waitqueue_entry(&wait, current); add_wait_queue(&(pCh->pBookmarkWait), &wait); set_current_state( TASK_INTERRUPTIBLE ); serviceOutgoingFifo( pB ); schedule(); // Now we take our interruptible sleep on // Clean up the queue set_current_state( TASK_RUNNING ); remove_wait_queue(&(pCh->pBookmarkWait), &wait); // if expires == 0 then timer poped, then do not need to del_timer if ((timeout > 0) && pCh->BookmarkTimer.expires && time_before(jiffies, pCh->BookmarkTimer.expires)) { del_timer( &(pCh->BookmarkTimer) ); pCh->BookmarkTimer.expires = 0; ip2trace (CHANN, ITRC_DRAIN, 3, 1, pCh->BookmarkTimer.expires ); } ip2trace (CHANN, ITRC_DRAIN, ITRC_RETURN, 1, pCh->BookmarkTimer.expires ); return; } //****************************************************************************** // Function: i2OutputFree(pCh) // Parameters: Pointer to a channel structure // Returns: Space in output buffer // // Description: // Returns -1 if very gross error. 
Otherwise returns the amount of bytes still // free in the output buffer. //****************************************************************************** static int i2OutputFree(i2ChanStrPtr pCh) { int amountToMove; unsigned long flags; // Ensure channel structure seems real if ( !i2Validate ( pCh ) ) { return -1; } read_lock_irqsave(&pCh->Obuf_spinlock, flags); amountToMove = pCh->Obuf_strip - pCh->Obuf_stuff - 1; read_unlock_irqrestore(&pCh->Obuf_spinlock, flags); if (amountToMove < 0) { amountToMove += OBUF_SIZE; } // If this is negative, we will discover later amountToMove -= sizeof(i2DataHeader); return (amountToMove < 0) ? 0 : amountToMove; } static void ip2_owake( PTTY tp) { i2ChanStrPtr pCh; if (tp == NULL) return; pCh = tp->driver_data; ip2trace (CHANN, ITRC_SICMD, 10, 2, tp->flags, (1 << TTY_DO_WRITE_WAKEUP) ); tty_wakeup(tp); } static inline void set_baud_params(i2eBordStrPtr pB) { int i,j; i2ChanStrPtr *pCh; pCh = (i2ChanStrPtr *) pB->i2eChannelPtr; for (i = 0; i < ABS_MAX_BOXES; i++) { if (pB->channelBtypes.bid_value[i]) { if (BID_HAS_654(pB->channelBtypes.bid_value[i])) { for (j = 0; j < ABS_BIGGEST_BOX; j++) { if (pCh[i*16+j] == NULL) break; (pCh[i*16+j])->BaudBase = 921600; // MAX for ST654 (pCh[i*16+j])->BaudDivisor = 96; } } else { // has cirrus cd1400 for (j = 0; j < ABS_BIGGEST_BOX; j++) { if (pCh[i*16+j] == NULL) break; (pCh[i*16+j])->BaudBase = 115200; // MAX for CD1400 (pCh[i*16+j])->BaudDivisor = 12; } } } } } //****************************************************************************** // Function: i2StripFifo(pB) // Parameters: Pointer to a board structure // Returns: ? // // Description: // Strips all the available data from the incoming FIFO, identifies the type of // packet, and either buffers the data or does what needs to be done. // // Note there is no overflow checking here: if the board sends more data than it // ought to, we will not detect it here, but blindly overflow... 
//****************************************************************************** // A buffer for reading in blocks for unknown channels static unsigned char junkBuffer[IBUF_SIZE]; // A buffer to read in a status packet. Because of the size of the count field // for these things, the maximum packet size must be less than MAX_CMD_PACK_SIZE static unsigned char cmdBuffer[MAX_CMD_PACK_SIZE + 4]; // This table changes the bit order from MSR order given by STAT_MODEM packet to // status bits used in our library. static char xlatDss[16] = { 0 | 0 | 0 | 0 , 0 | 0 | 0 | I2_CTS , 0 | 0 | I2_DSR | 0 , 0 | 0 | I2_DSR | I2_CTS , 0 | I2_RI | 0 | 0 , 0 | I2_RI | 0 | I2_CTS , 0 | I2_RI | I2_DSR | 0 , 0 | I2_RI | I2_DSR | I2_CTS , I2_DCD | 0 | 0 | 0 , I2_DCD | 0 | 0 | I2_CTS , I2_DCD | 0 | I2_DSR | 0 , I2_DCD | 0 | I2_DSR | I2_CTS , I2_DCD | I2_RI | 0 | 0 , I2_DCD | I2_RI | 0 | I2_CTS , I2_DCD | I2_RI | I2_DSR | 0 , I2_DCD | I2_RI | I2_DSR | I2_CTS }; static inline void i2StripFifo(i2eBordStrPtr pB) { i2ChanStrPtr pCh; int channel; int count; unsigned short stuffIndex; int amountToRead; unsigned char *pc, *pcLimit; unsigned char uc; unsigned char dss_change; unsigned long bflags,cflags; // ip2trace (ITRC_NO_PORT, ITRC_SFIFO, ITRC_ENTER, 0 ); while (I2_HAS_INPUT(pB)) { // ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 2, 0 ); // Process packet from fifo a one atomic unit write_lock_irqsave(&pB->read_fifo_spinlock, bflags); // The first word (or two bytes) will have channel number and type of // packet, possibly other information pB->i2eLeadoffWord[0] = iiReadWord(pB); switch(PTYPE_OF(pB->i2eLeadoffWord)) { case PTYPE_DATA: pB->got_input = 1; // ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 3, 0 ); channel = CHANNEL_OF(pB->i2eLeadoffWord); /* Store channel */ count = iiReadWord(pB); /* Count is in the next word */ // NEW: Check the count for sanity! Should the hardware fail, our death // is more pleasant. 
While an oversize channel is acceptable (just more // than the driver supports), an over-length count clearly means we are // sick! if ( ((unsigned int)count) > IBUF_SIZE ) { pB->i2eFatal = 2; write_unlock_irqrestore(&pB->read_fifo_spinlock, bflags); return; /* Bail out ASAP */ } // Channel is illegally big ? if ((channel >= pB->i2eChannelCnt) || (NULL==(pCh = ((i2ChanStrPtr*)pB->i2eChannelPtr)[channel]))) { iiReadBuf(pB, junkBuffer, count); write_unlock_irqrestore(&pB->read_fifo_spinlock, bflags); break; /* From switch: ready for next packet */ } // Channel should be valid, then // If this is a hot-key, merely post its receipt for now. These are // always supposed to be 1-byte packets, so we won't even check the // count. Also we will post an acknowledgement to the board so that // more data can be forthcoming. Note that we are not trying to use // these sequences in this driver, merely to robustly ignore them. if(ID_OF(pB->i2eLeadoffWord) == ID_HOT_KEY) { pCh->hotKeyIn = iiReadWord(pB) & 0xff; write_unlock_irqrestore(&pB->read_fifo_spinlock, bflags); i2QueueCommands(PTYPE_BYPASS, pCh, 0, 1, CMD_HOTACK); break; /* From the switch: ready for next packet */ } // Normal data! We crudely assume there is room for the data in our // buffer because the board wouldn't have exceeded his credit limit. write_lock_irqsave(&pCh->Ibuf_spinlock, cflags); // We have 2 locks now stuffIndex = pCh->Ibuf_stuff; amountToRead = IBUF_SIZE - stuffIndex; if (amountToRead > count) amountToRead = count; // stuffIndex would have been already adjusted so there would // always be room for at least one, and count is always at least // one. iiReadBuf(pB, &(pCh->Ibuf[stuffIndex]), amountToRead); pCh->icount.rx += amountToRead; // Update the stuffIndex by the amount of data moved. Note we could // never ask for more data than would just fit. However, we might // have read in one more byte than we wanted because the read // rounds up to even bytes. 
If this byte is on the end of the // packet, and is padding, we ignore it. If the byte is part of // the actual data, we need to move it. stuffIndex += amountToRead; if (stuffIndex >= IBUF_SIZE) { if ((amountToRead & 1) && (count > amountToRead)) { pCh->Ibuf[0] = pCh->Ibuf[IBUF_SIZE]; amountToRead++; stuffIndex = 1; } else { stuffIndex = 0; } } // If there is anything left over, read it as well if (count > amountToRead) { amountToRead = count - amountToRead; iiReadBuf(pB, &(pCh->Ibuf[stuffIndex]), amountToRead); pCh->icount.rx += amountToRead; stuffIndex += amountToRead; } // Update stuff index pCh->Ibuf_stuff = stuffIndex; write_unlock_irqrestore(&pCh->Ibuf_spinlock, cflags); write_unlock_irqrestore(&pB->read_fifo_spinlock, bflags); #ifdef USE_IQ schedule_work(&pCh->tqueue_input); #else do_input(&pCh->tqueue_input); #endif // Note we do not need to maintain any flow-control credits at this // time: if we were to increment .asof and decrement .room, there // would be no net effect. Instead, when we strip data, we will // increment .asof and leave .room unchanged. 
break; // From switch: ready for next packet case PTYPE_STATUS: ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 4, 0 ); count = CMD_COUNT_OF(pB->i2eLeadoffWord); iiReadBuf(pB, cmdBuffer, count); // We can release early with buffer grab write_unlock_irqrestore(&pB->read_fifo_spinlock, bflags); pc = cmdBuffer; pcLimit = &(cmdBuffer[count]); while (pc < pcLimit) { channel = *pc++; ip2trace (channel, ITRC_SFIFO, 7, 2, channel, *pc ); /* check for valid channel */ if (channel < pB->i2eChannelCnt && (pCh = (((i2ChanStrPtr*)pB->i2eChannelPtr)[channel])) != NULL ) { dss_change = 0; switch (uc = *pc++) { /* Breaks and modem signals are easy: just update status */ case STAT_CTS_UP: if ( !(pCh->dataSetIn & I2_CTS) ) { pCh->dataSetIn |= I2_DCTS; pCh->icount.cts++; dss_change = 1; } pCh->dataSetIn |= I2_CTS; break; case STAT_CTS_DN: if ( pCh->dataSetIn & I2_CTS ) { pCh->dataSetIn |= I2_DCTS; pCh->icount.cts++; dss_change = 1; } pCh->dataSetIn &= ~I2_CTS; break; case STAT_DCD_UP: ip2trace (channel, ITRC_MODEM, 1, 1, pCh->dataSetIn ); if ( !(pCh->dataSetIn & I2_DCD) ) { ip2trace (CHANN, ITRC_MODEM, 2, 0 ); pCh->dataSetIn |= I2_DDCD; pCh->icount.dcd++; dss_change = 1; } pCh->dataSetIn |= I2_DCD; ip2trace (channel, ITRC_MODEM, 3, 1, pCh->dataSetIn ); break; case STAT_DCD_DN: ip2trace (channel, ITRC_MODEM, 4, 1, pCh->dataSetIn ); if ( pCh->dataSetIn & I2_DCD ) { ip2trace (channel, ITRC_MODEM, 5, 0 ); pCh->dataSetIn |= I2_DDCD; pCh->icount.dcd++; dss_change = 1; } pCh->dataSetIn &= ~I2_DCD; ip2trace (channel, ITRC_MODEM, 6, 1, pCh->dataSetIn ); break; case STAT_DSR_UP: if ( !(pCh->dataSetIn & I2_DSR) ) { pCh->dataSetIn |= I2_DDSR; pCh->icount.dsr++; dss_change = 1; } pCh->dataSetIn |= I2_DSR; break; case STAT_DSR_DN: if ( pCh->dataSetIn & I2_DSR ) { pCh->dataSetIn |= I2_DDSR; pCh->icount.dsr++; dss_change = 1; } pCh->dataSetIn &= ~I2_DSR; break; case STAT_RI_UP: if ( !(pCh->dataSetIn & I2_RI) ) { pCh->dataSetIn |= I2_DRI; pCh->icount.rng++; dss_change = 1; } pCh->dataSetIn |= I2_RI ; break; case 
STAT_RI_DN: // to be compat with serial.c //if ( pCh->dataSetIn & I2_RI ) //{ // pCh->dataSetIn |= I2_DRI; // pCh->icount.rng++; // dss_change = 1; //} pCh->dataSetIn &= ~I2_RI ; break; case STAT_BRK_DET: pCh->dataSetIn |= I2_BRK; pCh->icount.brk++; dss_change = 1; break; // Bookmarks? one less request we're waiting for case STAT_BMARK: pCh->bookMarks--; if (pCh->bookMarks <= 0 ) { pCh->bookMarks = 0; wake_up_interruptible( &pCh->pBookmarkWait ); ip2trace (channel, ITRC_DRAIN, 20, 1, pCh->BookmarkTimer.expires ); } break; // Flow control packets? Update the new credits, and if // someone was waiting for output, queue him up again. case STAT_FLOW: pCh->outfl.room = ((flowStatPtr)pc)->room - (pCh->outfl.asof - ((flowStatPtr)pc)->asof); ip2trace (channel, ITRC_STFLW, 1, 1, pCh->outfl.room ); if (pCh->channelNeeds & NEED_CREDIT) { ip2trace (channel, ITRC_STFLW, 2, 1, pCh->channelNeeds); pCh->channelNeeds &= ~NEED_CREDIT; i2QueueNeeds(pB, pCh, NEED_INLINE); if ( pCh->pTTY ) ip2_owake(pCh->pTTY); } ip2trace (channel, ITRC_STFLW, 3, 1, pCh->channelNeeds); pc += sizeof(flowStat); break; /* Special packets: */ /* Just copy the information into the channel structure */ case STAT_STATUS: pCh->channelStatus = *((debugStatPtr)pc); pc += sizeof(debugStat); break; case STAT_TXCNT: pCh->channelTcount = *((cntStatPtr)pc); pc += sizeof(cntStat); break; case STAT_RXCNT: pCh->channelRcount = *((cntStatPtr)pc); pc += sizeof(cntStat); break; case STAT_BOXIDS: pB->channelBtypes = *((bidStatPtr)pc); pc += sizeof(bidStat); set_baud_params(pB); break; case STAT_HWFAIL: i2QueueCommands (PTYPE_INLINE, pCh, 0, 1, CMD_HW_TEST); pCh->channelFail = *((failStatPtr)pc); pc += sizeof(failStat); break; /* No explicit match? then * Might be an error packet... 
*/ default: switch (uc & STAT_MOD_ERROR) { case STAT_ERROR: if (uc & STAT_E_PARITY) { pCh->dataSetIn |= I2_PAR; pCh->icount.parity++; } if (uc & STAT_E_FRAMING){ pCh->dataSetIn |= I2_FRA; pCh->icount.frame++; } if (uc & STAT_E_OVERRUN){ pCh->dataSetIn |= I2_OVR; pCh->icount.overrun++; } break; case STAT_MODEM: // the answer to DSS_NOW request (not change) pCh->dataSetIn = (pCh->dataSetIn & ~(I2_RI | I2_CTS | I2_DCD | I2_DSR) ) | xlatDss[uc & 0xf]; wake_up_interruptible ( &pCh->dss_now_wait ); default: break; } } /* End of switch on status type */ if (dss_change) { #ifdef USE_IQ schedule_work(&pCh->tqueue_status); #else do_status(&pCh->tqueue_status); #endif } } else /* Or else, channel is invalid */ { // Even though the channel is invalid, we must test the // status to see how much additional data it has (to be // skipped) switch (*pc++) { case STAT_FLOW: pc += 4; /* Skip the data */ break; default: break; } } } // End of while (there is still some status packet left) break; default: // Neither packet? should be impossible ip2trace (ITRC_NO_PORT, ITRC_SFIFO, 5, 1, PTYPE_OF(pB->i2eLeadoffWord) ); write_unlock_irqrestore(&pB->read_fifo_spinlock, bflags); break; } // End of switch on type of packets } /*while(board I2_HAS_INPUT)*/ ip2trace (ITRC_NO_PORT, ITRC_SFIFO, ITRC_RETURN, 0 ); // Send acknowledgement to the board even if there was no data! 
pB->i2eOutMailWaiting |= MB_IN_STRIPPED; return; } //****************************************************************************** // Function: i2Write2Fifo(pB,address,count) // Parameters: Pointer to a board structure, source address, byte count // Returns: bytes written // // Description: // Writes count bytes to board io address(implied) from source // Adjusts count, leaves reserve for next time around bypass cmds //****************************************************************************** static int i2Write2Fifo(i2eBordStrPtr pB, unsigned char *source, int count,int reserve) { int rc = 0; unsigned long flags; write_lock_irqsave(&pB->write_fifo_spinlock, flags); if (!pB->i2eWaitingForEmptyFifo) { if (pB->i2eFifoRemains > (count+reserve)) { pB->i2eFifoRemains -= count; iiWriteBuf(pB, source, count); pB->i2eOutMailWaiting |= MB_OUT_STUFFED; rc = count; } } write_unlock_irqrestore(&pB->write_fifo_spinlock, flags); return rc; } //****************************************************************************** // Function: i2StuffFifoBypass(pB) // Parameters: Pointer to a board structure // Returns: Nothing // // Description: // Stuffs as many bypass commands into the fifo as possible. This is simpler // than stuffing data or inline commands to fifo, since we do not have // flow-control to deal with. //****************************************************************************** static inline void i2StuffFifoBypass(i2eBordStrPtr pB) { i2ChanStrPtr pCh; unsigned char *pRemove; unsigned short stripIndex; unsigned short packetSize; unsigned short paddedSize; unsigned short notClogged = 1; unsigned long flags; int bailout = 1000; // Continue processing so long as there are entries, or there is room in the // fifo. Each entry represents a channel with something to do. 
while ( --bailout && notClogged && (NULL != (pCh = i2DeQueueNeeds(pB,NEED_BYPASS)))) { write_lock_irqsave(&pCh->Cbuf_spinlock, flags); stripIndex = pCh->Cbuf_strip; // as long as there are packets for this channel... while (stripIndex != pCh->Cbuf_stuff) { pRemove = &(pCh->Cbuf[stripIndex]); packetSize = CMD_COUNT_OF(pRemove) + sizeof(i2CmdHeader); paddedSize = roundup(packetSize, 2); if (paddedSize > 0) { if ( 0 == i2Write2Fifo(pB, pRemove, paddedSize,0)) { notClogged = 0; /* fifo full */ i2QueueNeeds(pB, pCh, NEED_BYPASS); // Put back on queue break; // Break from the channel } } #ifdef DEBUG_FIFO WriteDBGBuf("BYPS", pRemove, paddedSize); #endif /* DEBUG_FIFO */ pB->debugBypassCount++; pRemove += packetSize; stripIndex += packetSize; if (stripIndex >= CBUF_SIZE) { stripIndex = 0; pRemove = pCh->Cbuf; } } // Done with this channel. Move to next, removing this one from // the queue of channels if we cleaned it out (i.e., didn't get clogged. pCh->Cbuf_strip = stripIndex; write_unlock_irqrestore(&pCh->Cbuf_spinlock, flags); } // Either clogged or finished all the work #ifdef IP2DEBUG_TRACE if ( !bailout ) { ip2trace (ITRC_NO_PORT, ITRC_ERROR, 1, 0 ); } #endif } //****************************************************************************** // Function: i2StuffFifoFlow(pB) // Parameters: Pointer to a board structure // Returns: Nothing // // Description: // Stuffs as many flow control packets into the fifo as possible. This is easier // even than doing normal bypass commands, because there is always at most one // packet, already assembled, for each channel. //****************************************************************************** static inline void i2StuffFifoFlow(i2eBordStrPtr pB) { i2ChanStrPtr pCh; unsigned short paddedSize = roundup(sizeof(flowIn), 2); ip2trace (ITRC_NO_PORT, ITRC_SFLOW, ITRC_ENTER, 2, pB->i2eFifoRemains, paddedSize ); // Continue processing so long as there are entries, or there is room in the // fifo. 
Each entry represents a channel with something to do. while ( (NULL != (pCh = i2DeQueueNeeds(pB,NEED_FLOW)))) { pB->debugFlowCount++; // NO Chan LOCK needed ??? if ( 0 == i2Write2Fifo(pB,(unsigned char *)&(pCh->infl),paddedSize,0)) { break; } #ifdef DEBUG_FIFO WriteDBGBuf("FLOW",(unsigned char *) &(pCh->infl), paddedSize); #endif /* DEBUG_FIFO */ } // Either clogged or finished all the work ip2trace (ITRC_NO_PORT, ITRC_SFLOW, ITRC_RETURN, 0 ); } //****************************************************************************** // Function: i2StuffFifoInline(pB) // Parameters: Pointer to a board structure // Returns: Nothing // // Description: // Stuffs as much data and inline commands into the fifo as possible. This is // the most complex fifo-stuffing operation, since there if now the channel // flow-control issue to deal with. //****************************************************************************** static inline void i2StuffFifoInline(i2eBordStrPtr pB) { i2ChanStrPtr pCh; unsigned char *pRemove; unsigned short stripIndex; unsigned short packetSize; unsigned short paddedSize; unsigned short notClogged = 1; unsigned short flowsize; unsigned long flags; int bailout = 1000; int bailout2; ip2trace (ITRC_NO_PORT, ITRC_SICMD, ITRC_ENTER, 3, pB->i2eFifoRemains, pB->i2Dbuf_strip, pB->i2Dbuf_stuff ); // Continue processing so long as there are entries, or there is room in the // fifo. Each entry represents a channel with something to do. while ( --bailout && notClogged && (NULL != (pCh = i2DeQueueNeeds(pB,NEED_INLINE))) ) { write_lock_irqsave(&pCh->Obuf_spinlock, flags); stripIndex = pCh->Obuf_strip; ip2trace (CHANN, ITRC_SICMD, 3, 2, stripIndex, pCh->Obuf_stuff ); // as long as there are packets for this channel... 
bailout2 = 1000; while ( --bailout2 && stripIndex != pCh->Obuf_stuff) { pRemove = &(pCh->Obuf[stripIndex]); // Must determine whether this be a data or command packet to // calculate correctly the header size and the amount of // flow-control credit this type of packet will use. if (PTYPE_OF(pRemove) == PTYPE_DATA) { flowsize = DATA_COUNT_OF(pRemove); packetSize = flowsize + sizeof(i2DataHeader); } else { flowsize = CMD_COUNT_OF(pRemove); packetSize = flowsize + sizeof(i2CmdHeader); } flowsize = CREDIT_USAGE(flowsize); paddedSize = roundup(packetSize, 2); ip2trace (CHANN, ITRC_SICMD, 4, 2, pB->i2eFifoRemains, paddedSize ); // If we don't have enough credits from the board to send the data, // flag the channel that we are waiting for flow control credit, and // break out. This will clean up this channel and remove us from the // queue of hot things to do. ip2trace (CHANN, ITRC_SICMD, 5, 2, pCh->outfl.room, flowsize ); if (pCh->outfl.room <= flowsize) { // Do Not have the credits to send this packet. i2QueueNeeds(pB, pCh, NEED_CREDIT); notClogged = 0; break; // So to do next channel } if ( (paddedSize > 0) && ( 0 == i2Write2Fifo(pB, pRemove, paddedSize, 128))) { // Do Not have room in fifo to send this packet. notClogged = 0; i2QueueNeeds(pB, pCh, NEED_INLINE); break; // Break from the channel } #ifdef DEBUG_FIFO WriteDBGBuf("DATA", pRemove, paddedSize); #endif /* DEBUG_FIFO */ pB->debugInlineCount++; pCh->icount.tx += flowsize; // Update current credits pCh->outfl.room -= flowsize; pCh->outfl.asof += flowsize; if (PTYPE_OF(pRemove) == PTYPE_DATA) { pCh->Obuf_char_count -= DATA_COUNT_OF(pRemove); } pRemove += packetSize; stripIndex += packetSize; ip2trace (CHANN, ITRC_SICMD, 6, 2, stripIndex, pCh->Obuf_strip); if (stripIndex >= OBUF_SIZE) { stripIndex = 0; pRemove = pCh->Obuf; ip2trace (CHANN, ITRC_SICMD, 7, 1, stripIndex ); } } /* while */ if ( !bailout2 ) { ip2trace (CHANN, ITRC_ERROR, 3, 0 ); } // Done with this channel. 
Move to next, removing this one from the // queue of channels if we cleaned it out (i.e., didn't get clogged. pCh->Obuf_strip = stripIndex; write_unlock_irqrestore(&pCh->Obuf_spinlock, flags); if ( notClogged ) { ip2trace (CHANN, ITRC_SICMD, 8, 0 ); if ( pCh->pTTY ) { ip2_owake(pCh->pTTY); } } } // Either clogged or finished all the work if ( !bailout ) { ip2trace (ITRC_NO_PORT, ITRC_ERROR, 4, 0 ); } ip2trace (ITRC_NO_PORT, ITRC_SICMD, ITRC_RETURN, 1,pB->i2Dbuf_strip); } //****************************************************************************** // Function: serviceOutgoingFifo(pB) // Parameters: Pointer to a board structure // Returns: Nothing // // Description: // Helper routine to put data in the outgoing fifo, if we aren't already waiting // for something to be there. If the fifo has only room for a very little data, // go head and hit the board with a mailbox hit immediately. Otherwise, it will // have to happen later in the interrupt processing. Since this routine may be // called both at interrupt and foreground time, we must turn off interrupts // during the entire process. //****************************************************************************** static void serviceOutgoingFifo(i2eBordStrPtr pB) { // If we aren't currently waiting for the board to empty our fifo, service // everything that is pending, in priority order (especially, Bypass before // Inline). if ( ! pB->i2eWaitingForEmptyFifo ) { i2StuffFifoFlow(pB); i2StuffFifoBypass(pB); i2StuffFifoInline(pB); iiSendPendingMail(pB); } } //****************************************************************************** // Function: i2ServiceBoard(pB) // Parameters: Pointer to a board structure // Returns: Nothing // // Description: // Normally this is called from interrupt level, but there is deliberately // nothing in here specific to being called from interrupt level. All the // hardware-specific, interrupt-specific things happen at the outer levels. 
// // For example, a timer interrupt could drive this routine for some sort of // polled operation. The only requirement is that the programmer deal with any // atomiticity/concurrency issues that result. // // This routine responds to the board's having sent mailbox information to the // host (which would normally cause an interrupt). This routine reads the // incoming mailbox. If there is no data in it, this board did not create the // interrupt and/or has nothing to be done to it. (Except, if we have been // waiting to write mailbox data to it, we may do so. // // Based on the value in the mailbox, we may take various actions. // // No checking here of pB validity: after all, it shouldn't have been called by // the handler unless pB were on the list. //****************************************************************************** static inline int i2ServiceBoard ( i2eBordStrPtr pB ) { unsigned inmail; unsigned long flags; /* This should be atomic because of the way we are called... */ if (NO_MAIL_HERE == ( inmail = pB->i2eStartMail ) ) { inmail = iiGetMail(pB); } pB->i2eStartMail = NO_MAIL_HERE; ip2trace (ITRC_NO_PORT, ITRC_INTR, 2, 1, inmail ); if (inmail != NO_MAIL_HERE) { // If the board has gone fatal, nothing to do but hit a bit that will // alert foreground tasks to protest! if ( inmail & MB_FATAL_ERROR ) { pB->i2eFatal = 1; goto exit_i2ServiceBoard; } /* Assuming no fatal condition, we proceed to do work */ if ( inmail & MB_IN_STUFFED ) { pB->i2eFifoInInts++; i2StripFifo(pB); /* There might be incoming packets */ } if (inmail & MB_OUT_STRIPPED) { pB->i2eFifoOutInts++; write_lock_irqsave(&pB->write_fifo_spinlock, flags); pB->i2eFifoRemains = pB->i2eFifoSize; pB->i2eWaitingForEmptyFifo = 0; write_unlock_irqrestore(&pB->write_fifo_spinlock, flags); ip2trace (ITRC_NO_PORT, ITRC_INTR, 30, 1, pB->i2eFifoRemains ); } serviceOutgoingFifo(pB); } ip2trace (ITRC_NO_PORT, ITRC_INTR, 8, 0 ); exit_i2ServiceBoard: return 0; }
gpl-2.0
premaca/android_kernel_cyanogen_msm8916
net/sunrpc/clnt.c
1643
56242
/* * linux/net/sunrpc/clnt.c * * This file contains the high-level RPC interface. * It is modeled as a finite state machine to support both synchronous * and asynchronous requests. * * - RPC header generation and argument serialization. * - Credential refresh. * - TCP connect handling. * - Retry of operation when it is suspected the operation failed because * of uid squashing on the server, or when the credentials were stale * and need to be refreshed, or when a packet was damaged in transit. * This may be have to be moved to the VFS layer. * * Copyright (C) 1992,1993 Rick Sladkey <jrs@world.std.com> * Copyright (C) 1995,1996 Olaf Kirch <okir@monad.swb.de> */ #include <linux/module.h> #include <linux/types.h> #include <linux/kallsyms.h> #include <linux/mm.h> #include <linux/namei.h> #include <linux/mount.h> #include <linux/slab.h> #include <linux/utsname.h> #include <linux/workqueue.h> #include <linux/in.h> #include <linux/in6.h> #include <linux/un.h> #include <linux/rcupdate.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/addr.h> #include <linux/sunrpc/rpc_pipe_fs.h> #include <linux/sunrpc/metrics.h> #include <linux/sunrpc/bc_xprt.h> #include <trace/events/sunrpc.h> #include "sunrpc.h" #include "netns.h" #ifdef RPC_DEBUG # define RPCDBG_FACILITY RPCDBG_CALL #endif #define dprint_status(t) \ dprintk("RPC: %5u %s (status %d)\n", t->tk_pid, \ __func__, t->tk_status) /* * All RPC clients are linked into this list */ static DECLARE_WAIT_QUEUE_HEAD(destroy_wait); static void call_start(struct rpc_task *task); static void call_reserve(struct rpc_task *task); static void call_reserveresult(struct rpc_task *task); static void call_allocate(struct rpc_task *task); static void call_decode(struct rpc_task *task); static void call_bind(struct rpc_task *task); static void call_bind_status(struct rpc_task *task); static void call_transmit(struct rpc_task *task); #if defined(CONFIG_SUNRPC_BACKCHANNEL) static void call_bc_transmit(struct rpc_task *task); #endif /* 
CONFIG_SUNRPC_BACKCHANNEL */ static void call_status(struct rpc_task *task); static void call_transmit_status(struct rpc_task *task); static void call_refresh(struct rpc_task *task); static void call_refreshresult(struct rpc_task *task); static void call_timeout(struct rpc_task *task); static void call_connect(struct rpc_task *task); static void call_connect_status(struct rpc_task *task); static __be32 *rpc_encode_header(struct rpc_task *task); static __be32 *rpc_verify_header(struct rpc_task *task); static int rpc_ping(struct rpc_clnt *clnt); static void rpc_register_client(struct rpc_clnt *clnt) { struct net *net = rpc_net_ns(clnt); struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); spin_lock(&sn->rpc_client_lock); list_add(&clnt->cl_clients, &sn->all_clients); spin_unlock(&sn->rpc_client_lock); } static void rpc_unregister_client(struct rpc_clnt *clnt) { struct net *net = rpc_net_ns(clnt); struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); spin_lock(&sn->rpc_client_lock); list_del(&clnt->cl_clients); spin_unlock(&sn->rpc_client_lock); } static void __rpc_clnt_remove_pipedir(struct rpc_clnt *clnt) { if (clnt->cl_dentry) { if (clnt->cl_auth && clnt->cl_auth->au_ops->pipes_destroy) clnt->cl_auth->au_ops->pipes_destroy(clnt->cl_auth); rpc_remove_client_dir(clnt->cl_dentry); } clnt->cl_dentry = NULL; } static void rpc_clnt_remove_pipedir(struct rpc_clnt *clnt) { struct net *net = rpc_net_ns(clnt); struct super_block *pipefs_sb; pipefs_sb = rpc_get_sb_net(net); if (pipefs_sb) { __rpc_clnt_remove_pipedir(clnt); rpc_put_sb_net(net); } } static struct dentry *rpc_setup_pipedir_sb(struct super_block *sb, struct rpc_clnt *clnt, const char *dir_name) { static uint32_t clntid; char name[15]; struct qstr q = { .name = name }; struct dentry *dir, *dentry; int error; dir = rpc_d_lookup_sb(sb, dir_name); if (dir == NULL) { pr_info("RPC: pipefs directory doesn't exist: %s\n", dir_name); return dir; } for (;;) { q.len = snprintf(name, sizeof(name), "clnt%x", (unsigned 
int)clntid++); name[sizeof(name) - 1] = '\0'; q.hash = full_name_hash(q.name, q.len); dentry = rpc_create_client_dir(dir, &q, clnt); if (!IS_ERR(dentry)) break; error = PTR_ERR(dentry); if (error != -EEXIST) { printk(KERN_INFO "RPC: Couldn't create pipefs entry" " %s/%s, error %d\n", dir_name, name, error); break; } } dput(dir); return dentry; } static int rpc_setup_pipedir(struct rpc_clnt *clnt, const char *dir_name) { struct net *net = rpc_net_ns(clnt); struct super_block *pipefs_sb; struct dentry *dentry; clnt->cl_dentry = NULL; if (dir_name == NULL) return 0; pipefs_sb = rpc_get_sb_net(net); if (!pipefs_sb) return 0; dentry = rpc_setup_pipedir_sb(pipefs_sb, clnt, dir_name); rpc_put_sb_net(net); if (IS_ERR(dentry)) return PTR_ERR(dentry); clnt->cl_dentry = dentry; return 0; } static inline int rpc_clnt_skip_event(struct rpc_clnt *clnt, unsigned long event) { if (((event == RPC_PIPEFS_MOUNT) && clnt->cl_dentry) || ((event == RPC_PIPEFS_UMOUNT) && !clnt->cl_dentry)) return 1; return 0; } static int __rpc_clnt_handle_event(struct rpc_clnt *clnt, unsigned long event, struct super_block *sb) { struct dentry *dentry; int err = 0; switch (event) { case RPC_PIPEFS_MOUNT: dentry = rpc_setup_pipedir_sb(sb, clnt, clnt->cl_program->pipe_dir_name); if (!dentry) return -ENOENT; if (IS_ERR(dentry)) return PTR_ERR(dentry); clnt->cl_dentry = dentry; if (clnt->cl_auth->au_ops->pipes_create) { err = clnt->cl_auth->au_ops->pipes_create(clnt->cl_auth); if (err) __rpc_clnt_remove_pipedir(clnt); } break; case RPC_PIPEFS_UMOUNT: __rpc_clnt_remove_pipedir(clnt); break; default: printk(KERN_ERR "%s: unknown event: %ld\n", __func__, event); return -ENOTSUPP; } return err; } static int __rpc_pipefs_event(struct rpc_clnt *clnt, unsigned long event, struct super_block *sb) { int error = 0; for (;; clnt = clnt->cl_parent) { if (!rpc_clnt_skip_event(clnt, event)) error = __rpc_clnt_handle_event(clnt, event, sb); if (error || clnt == clnt->cl_parent) break; } return error; } static struct 
/* Walk the per-net list of clients and return, with a reference held,
 * the first client that has a pipe directory and does not skip @event.
 * (The start of this definition lies above this chunk.)
 */
rpc_clnt *rpc_get_client_for_event(struct net *net, int event)
{
	struct sunrpc_net *sn = net_generic(net, sunrpc_net_id);
	struct rpc_clnt *clnt;

	spin_lock(&sn->rpc_client_lock);
	list_for_each_entry(clnt, &sn->all_clients, cl_clients) {
		if (clnt->cl_program->pipe_dir_name == NULL)
			continue;
		if (rpc_clnt_skip_event(clnt, event))
			continue;
		/* Skip clients whose refcount already hit zero (being freed) */
		if (atomic_inc_not_zero(&clnt->cl_count) == 0)
			continue;
		spin_unlock(&sn->rpc_client_lock);
		return clnt;
	}
	spin_unlock(&sn->rpc_client_lock);
	return NULL;
}

/* Notifier callback: propagate a pipefs mount/umount event to every
 * interested client; stops at the first error.
 */
static int rpc_pipefs_event(struct notifier_block *nb, unsigned long event,
			    void *ptr)
{
	struct super_block *sb = ptr;
	struct rpc_clnt *clnt;
	int error = 0;

	while ((clnt = rpc_get_client_for_event(sb->s_fs_info, event))) {
		error = __rpc_pipefs_event(clnt, event, sb);
		rpc_release_client(clnt);
		if (error)
			break;
	}
	return error;
}

static struct notifier_block rpc_clients_block = {
	.notifier_call	= rpc_pipefs_event,
	.priority	= SUNRPC_PIPEFS_RPC_PRIO,
};

int rpc_clients_notifier_register(void)
{
	return rpc_pipefs_notifier_register(&rpc_clients_block);
}

void rpc_clients_notifier_unregister(void)
{
	return rpc_pipefs_notifier_unregister(&rpc_clients_block);
}

/* Record the local nodename (truncated to UNX_MAXNODENAME) for use in
 * AUTH_UNIX credentials.  Note: not NUL-terminated; cl_nodelen is
 * authoritative.
 */
static void rpc_clnt_set_nodename(struct rpc_clnt *clnt, const char *nodename)
{
	clnt->cl_nodelen = strlen(nodename);
	if (clnt->cl_nodelen > UNX_MAXNODENAME)
		clnt->cl_nodelen = UNX_MAXNODENAME;
	memcpy(clnt->cl_nodename, nodename, clnt->cl_nodelen);
}

/* Allocate and initialize a struct rpc_clnt over @xprt.  On success the
 * client holds the xprt reference passed in; on failure the xprt
 * reference is dropped and an ERR_PTR is returned.
 */
static struct rpc_clnt * rpc_new_client(const struct rpc_create_args *args,
					struct rpc_xprt *xprt)
{
	const struct rpc_program *program = args->program;
	const struct rpc_version *version;
	struct rpc_clnt *clnt = NULL;
	struct rpc_auth *auth;
	int err;

	/* sanity check the name before trying to print it */
	dprintk("RPC: creating %s client for %s (xprt %p)\n",
			program->name, args->servername, xprt);

	err = rpciod_up();
	if (err)
		goto out_no_rpciod;
	err = -EINVAL;
	if (args->version >= program->nrvers)
		goto out_err;
	version = program->version[args->version];
	if (version == NULL)
		goto out_err;

	err = -ENOMEM;
	clnt = kzalloc(sizeof(*clnt), GFP_KERNEL);
	if (!clnt)
		goto out_err;
	/* cl_parent == clnt marks a "root" (non-cloned) client */
	clnt->cl_parent = clnt;

	rcu_assign_pointer(clnt->cl_xprt, xprt);
	clnt->cl_procinfo = version->procs;
	clnt->cl_maxproc = version->nrprocs;
	clnt->cl_protname = program->name;
	clnt->cl_prog = args->prognumber ? : program->number;
	clnt->cl_vers = version->number;
	clnt->cl_stats = program->stats;
	clnt->cl_metrics = rpc_alloc_iostats(clnt);
	err = -ENOMEM;
	if (clnt->cl_metrics == NULL)
		goto out_no_stats;
	clnt->cl_program = program;
	INIT_LIST_HEAD(&clnt->cl_tasks);
	spin_lock_init(&clnt->cl_lock);

	if (!xprt_bound(xprt))
		clnt->cl_autobind = 1;

	/* Caller-supplied timeout overrides the transport default */
	clnt->cl_timeout = xprt->timeout;
	if (args->timeout != NULL) {
		memcpy(&clnt->cl_timeout_default, args->timeout,
				sizeof(clnt->cl_timeout_default));
		clnt->cl_timeout = &clnt->cl_timeout_default;
	}

	clnt->cl_rtt = &clnt->cl_rtt_default;
	rpc_init_rtt(&clnt->cl_rtt_default, clnt->cl_timeout->to_initval);
	clnt->cl_principal = NULL;
	if (args->client_name) {
		clnt->cl_principal = kstrdup(args->client_name, GFP_KERNEL);
		if (!clnt->cl_principal)
			goto out_no_principal;
	}

	atomic_set(&clnt->cl_count, 1);

	err = rpc_setup_pipedir(clnt, program->pipe_dir_name);
	if (err < 0)
		goto out_no_path;

	auth = rpcauth_create(args->authflavor, clnt);
	if (IS_ERR(auth)) {
		dprintk("RPC: Couldn't create auth handle (flavor %u)\n",
				args->authflavor);
		err = PTR_ERR(auth);
		goto out_no_auth;
	}

	/* save the nodename */
	rpc_clnt_set_nodename(clnt, utsname()->nodename);
	rpc_register_client(clnt);
	return clnt;

	/* Unwind in reverse order of setup */
out_no_auth:
	rpc_clnt_remove_pipedir(clnt);
out_no_path:
	kfree(clnt->cl_principal);
out_no_principal:
	rpc_free_iostats(clnt->cl_metrics);
out_no_stats:
	kfree(clnt);
out_err:
	rpciod_down();
out_no_rpciod:
	xprt_put(xprt);
	return ERR_PTR(err);
}

/**
 * rpc_create - create an RPC client and transport with one call
 * @args: rpc_clnt create argument structure
 *
 * Creates and initializes an RPC transport and an RPC client.
 *
 * It can ping the server in order to determine if it is up, and to see if
 * it supports this program and version. RPC_CLNT_CREATE_NOPING disables
 * this behavior so asynchronous tasks can also use rpc_create.
 */
struct rpc_clnt *rpc_create(struct rpc_create_args *args)
{
	struct rpc_xprt *xprt;
	struct rpc_clnt *clnt;
	struct xprt_create xprtargs = {
		.net = args->net,
		.ident = args->protocol,
		.srcaddr = args->saddress,
		.dstaddr = args->address,
		.addrlen = args->addrsize,
		.servername = args->servername,
		.bc_xprt = args->bc_xprt,
	};
	char servername[48];

	if (args->flags & RPC_CLNT_CREATE_INFINITE_SLOTS)
		xprtargs.flags |= XPRT_CREATE_INFINITE_SLOTS;
	if (args->flags & RPC_CLNT_CREATE_NO_IDLE_TIMEOUT)
		xprtargs.flags |= XPRT_CREATE_NO_IDLE_TIMEOUT;
	/*
	 * If the caller chooses not to specify a hostname, whip
	 * up a string representation of the passed-in address.
	 */
	if (xprtargs.servername == NULL) {
		struct sockaddr_un *sun =
				(struct sockaddr_un *)args->address;
		struct sockaddr_in *sin =
				(struct sockaddr_in *)args->address;
		struct sockaddr_in6 *sin6 =
				(struct sockaddr_in6 *)args->address;

		servername[0] = '\0';
		switch (args->address->sa_family) {
		case AF_LOCAL:
			snprintf(servername, sizeof(servername), "%s",
				 sun->sun_path);
			break;
		case AF_INET:
			snprintf(servername, sizeof(servername), "%pI4",
				 &sin->sin_addr.s_addr);
			break;
		case AF_INET6:
			snprintf(servername, sizeof(servername), "%pI6",
				 &sin6->sin6_addr);
			break;
		default:
			/* caller wants default server name, but
			 * address family isn't recognized. */
			return ERR_PTR(-EINVAL);
		}
		xprtargs.servername = servername;
	}

	xprt = xprt_create_transport(&xprtargs);
	if (IS_ERR(xprt))
		return (struct rpc_clnt *)xprt;

	/*
	 * By default, kernel RPC client connects from a reserved port.
	 * CAP_NET_BIND_SERVICE will not be set for unprivileged requesters,
	 * but it is always enabled for rpciod, which handles the connect
	 * operation.
	 */
	xprt->resvport = 1;
	if (args->flags & RPC_CLNT_CREATE_NONPRIVPORT)
		xprt->resvport = 0;

	clnt = rpc_new_client(args, xprt);
	if (IS_ERR(clnt))
		return clnt;

	/* Optionally verify the server answers before handing back */
	if (!(args->flags & RPC_CLNT_CREATE_NOPING)) {
		int err = rpc_ping(clnt);
		if (err != 0) {
			rpc_shutdown_client(clnt);
			return ERR_PTR(err);
		}
	}

	clnt->cl_softrtry = 1;
	if (args->flags & RPC_CLNT_CREATE_HARDRTRY)
		clnt->cl_softrtry = 0;

	if (args->flags & RPC_CLNT_CREATE_AUTOBIND)
		clnt->cl_autobind = 1;
	if (args->flags & RPC_CLNT_CREATE_DISCRTRY)
		clnt->cl_discrtry = 1;
	if (!(args->flags & RPC_CLNT_CREATE_QUIET))
		clnt->cl_chatty = 1;

	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_create);

/*
 * This function clones the RPC client structure. It allows us to share the
 * same transport while varying parameters such as the authentication
 * flavour.
 */
static struct rpc_clnt *__rpc_clone_client(struct rpc_create_args *args,
					   struct rpc_clnt *clnt)
{
	struct rpc_xprt *xprt;
	struct rpc_clnt *new;
	int err;

	err = -ENOMEM;
	rcu_read_lock();
	xprt = xprt_get(rcu_dereference(clnt->cl_xprt));
	rcu_read_unlock();
	if (xprt == NULL)
		goto out_err;
	args->servername = xprt->servername;

	new = rpc_new_client(args, xprt);
	if (IS_ERR(new)) {
		err = PTR_ERR(new);
		goto out_err;
	}

	/* The clone pins its parent until it is freed */
	atomic_inc(&clnt->cl_count);
	new->cl_parent = clnt;

	/* Turn off autobind on clones */
	new->cl_autobind = 0;
	new->cl_softrtry = clnt->cl_softrtry;
	new->cl_discrtry = clnt->cl_discrtry;
	new->cl_chatty = clnt->cl_chatty;
	return new;

out_err:
	dprintk("RPC: %s: returned error %d\n", __func__, err);
	return ERR_PTR(err);
}

/**
 * rpc_clone_client - Clone an RPC client structure
 *
 * @clnt: RPC client whose parameters are copied
 *
 * Returns a fresh RPC client or an ERR_PTR.
*/ struct rpc_clnt *rpc_clone_client(struct rpc_clnt *clnt) { struct rpc_create_args args = { .program = clnt->cl_program, .prognumber = clnt->cl_prog, .version = clnt->cl_vers, .authflavor = clnt->cl_auth->au_flavor, .client_name = clnt->cl_principal, }; return __rpc_clone_client(&args, clnt); } EXPORT_SYMBOL_GPL(rpc_clone_client); /** * rpc_clone_client_set_auth - Clone an RPC client structure and set its auth * * @clnt: RPC client whose parameters are copied * @flavor: security flavor for new client * * Returns a fresh RPC client or an ERR_PTR. */ struct rpc_clnt * rpc_clone_client_set_auth(struct rpc_clnt *clnt, rpc_authflavor_t flavor) { struct rpc_create_args args = { .program = clnt->cl_program, .prognumber = clnt->cl_prog, .version = clnt->cl_vers, .authflavor = flavor, .client_name = clnt->cl_principal, }; return __rpc_clone_client(&args, clnt); } EXPORT_SYMBOL_GPL(rpc_clone_client_set_auth); /* * Kill all tasks for the given client. * XXX: kill their descendants as well? */ void rpc_killall_tasks(struct rpc_clnt *clnt) { struct rpc_task *rovr; if (list_empty(&clnt->cl_tasks)) return; dprintk("RPC: killing all tasks for client %p\n", clnt); /* * Spin lock all_tasks to prevent changes... */ spin_lock(&clnt->cl_lock); list_for_each_entry(rovr, &clnt->cl_tasks, tk_task) { if (!RPC_IS_ACTIVATED(rovr)) continue; if (!(rovr->tk_flags & RPC_TASK_KILLED)) { rovr->tk_flags |= RPC_TASK_KILLED; rpc_exit(rovr, -EIO); if (RPC_IS_QUEUED(rovr)) rpc_wake_up_queued_task(rovr->tk_waitqueue, rovr); } } spin_unlock(&clnt->cl_lock); } EXPORT_SYMBOL_GPL(rpc_killall_tasks); /* * Properly shut down an RPC client, terminating all outstanding * requests. 
*/ void rpc_shutdown_client(struct rpc_clnt *clnt) { might_sleep(); dprintk_rcu("RPC: shutting down %s client for %s\n", clnt->cl_protname, rcu_dereference(clnt->cl_xprt)->servername); while (!list_empty(&clnt->cl_tasks)) { rpc_killall_tasks(clnt); wait_event_timeout(destroy_wait, list_empty(&clnt->cl_tasks), 1*HZ); } rpc_release_client(clnt); } EXPORT_SYMBOL_GPL(rpc_shutdown_client); /* * Free an RPC client */ static void rpc_free_client(struct rpc_clnt *clnt) { dprintk_rcu("RPC: destroying %s client for %s\n", clnt->cl_protname, rcu_dereference(clnt->cl_xprt)->servername); if (clnt->cl_parent != clnt) rpc_release_client(clnt->cl_parent); rpc_unregister_client(clnt); rpc_clnt_remove_pipedir(clnt); rpc_free_iostats(clnt->cl_metrics); kfree(clnt->cl_principal); clnt->cl_metrics = NULL; xprt_put(rcu_dereference_raw(clnt->cl_xprt)); rpciod_down(); kfree(clnt); } /* * Free an RPC client */ static void rpc_free_auth(struct rpc_clnt *clnt) { if (clnt->cl_auth == NULL) { rpc_free_client(clnt); return; } /* * Note: RPCSEC_GSS may need to send NULL RPC calls in order to * release remaining GSS contexts. This mechanism ensures * that it can do so safely. */ atomic_inc(&clnt->cl_count); rpcauth_release(clnt->cl_auth); clnt->cl_auth = NULL; if (atomic_dec_and_test(&clnt->cl_count)) rpc_free_client(clnt); } /* * Release reference to the RPC client */ void rpc_release_client(struct rpc_clnt *clnt) { dprintk("RPC: rpc_release_client(%p)\n", clnt); if (list_empty(&clnt->cl_tasks)) wake_up(&destroy_wait); if (atomic_dec_and_test(&clnt->cl_count)) rpc_free_auth(clnt); } EXPORT_SYMBOL_GPL(rpc_release_client); /** * rpc_bind_new_program - bind a new RPC program to an existing client * @old: old rpc_client * @program: rpc program to set * @vers: rpc program version * * Clones the rpc client and sets up a new RPC program. This is mainly * of use for enabling different RPC programs to share the same transport. * The Sun NFSv2/v3 ACL protocol can do this. 
 */
struct rpc_clnt *rpc_bind_new_program(struct rpc_clnt *old,
				      const struct rpc_program *program,
				      u32 vers)
{
	struct rpc_create_args args = {
		.program	= program,
		.prognumber	= program->number,
		.version	= vers,
		.authflavor	= old->cl_auth->au_flavor,
		.client_name	= old->cl_principal,
	};
	struct rpc_clnt *clnt;
	int err;

	clnt = __rpc_clone_client(&args, old);
	if (IS_ERR(clnt))
		goto out;
	err = rpc_ping(clnt);
	if (err != 0) {
		rpc_shutdown_client(clnt);
		clnt = ERR_PTR(err);
	}
out:
	return clnt;
}
EXPORT_SYMBOL_GPL(rpc_bind_new_program);

/* Detach @task from its client's task list and drop the client ref */
void rpc_task_release_client(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;

	if (clnt != NULL) {
		/* Remove from client task list */
		spin_lock(&clnt->cl_lock);
		list_del(&task->tk_task);
		spin_unlock(&clnt->cl_lock);
		task->tk_client = NULL;

		rpc_release_client(clnt);
	}
}

/* Attach @task to @clnt: take a client ref, inherit soft/swapper flags,
 * and add the task to the client's task list.
 */
static
void rpc_task_set_client(struct rpc_task *task, struct rpc_clnt *clnt)
{
	if (clnt != NULL) {
		rpc_task_release_client(task);
		task->tk_client = clnt;
		atomic_inc(&clnt->cl_count);
		if (clnt->cl_softrtry)
			task->tk_flags |= RPC_TASK_SOFT;
		if (sk_memalloc_socks()) {
			struct rpc_xprt *xprt;

			rcu_read_lock();
			xprt = rcu_dereference(clnt->cl_xprt);
			if (xprt->swapper)
				task->tk_flags |= RPC_TASK_SWAPPER;
			rcu_read_unlock();
		}
		/* Add to the client's list of all tasks */
		spin_lock(&clnt->cl_lock);
		list_add_tail(&task->tk_task, &clnt->cl_tasks);
		spin_unlock(&clnt->cl_lock);
	}
}

void rpc_task_reset_client(struct rpc_task *task, struct rpc_clnt *clnt)
{
	rpc_task_release_client(task);
	rpc_task_set_client(task, clnt);
}
EXPORT_SYMBOL_GPL(rpc_task_reset_client);

/* Copy the caller's rpc_message into the task, pinning the cred */
static void
rpc_task_set_rpc_message(struct rpc_task *task, const struct rpc_message *msg)
{
	if (msg != NULL) {
		task->tk_msg.rpc_proc = msg->rpc_proc;
		task->tk_msg.rpc_argp = msg->rpc_argp;
		task->tk_msg.rpc_resp = msg->rpc_resp;
		if (msg->rpc_cred != NULL)
			task->tk_msg.rpc_cred = get_rpccred(msg->rpc_cred);
	}
}

/*
 * Default callback for async RPC calls
 */
static void
rpc_default_callback(struct rpc_task *task, void *data)
{
}

static const struct rpc_call_ops rpc_default_ops = {
	.rpc_call_done = rpc_default_callback,
};

/**
 * rpc_run_task - Allocate a new RPC task, then run rpc_execute against it
 * @task_setup_data: pointer to task initialisation data
 */
struct rpc_task *rpc_run_task(const struct rpc_task_setup *task_setup_data)
{
	struct rpc_task *task;

	task = rpc_new_task(task_setup_data);
	if (IS_ERR(task))
		goto out;

	rpc_task_set_client(task, task_setup_data->rpc_client);
	rpc_task_set_rpc_message(task, task_setup_data->rpc_message);

	if (task->tk_action == NULL)
		rpc_call_start(task);

	/* Extra reference for the caller; dropped via rpc_put_task() */
	atomic_inc(&task->tk_count);
	rpc_execute(task);
out:
	return task;
}
EXPORT_SYMBOL_GPL(rpc_run_task);

/**
 * rpc_call_sync - Perform a synchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 */
int rpc_call_sync(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags)
{
	struct rpc_task	*task;
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = &rpc_default_ops,
		.flags = flags,
	};
	int status;

	/* ASYNC makes no sense for a synchronous call */
	WARN_ON_ONCE(flags & RPC_TASK_ASYNC);
	if (flags & RPC_TASK_ASYNC) {
		rpc_release_calldata(task_setup_data.callback_ops,
			task_setup_data.callback_data);
		return -EINVAL;
	}

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	status = task->tk_status;
	rpc_put_task(task);
	return status;
}
EXPORT_SYMBOL_GPL(rpc_call_sync);

/**
 * rpc_call_async - Perform an asynchronous RPC call
 * @clnt: pointer to RPC client
 * @msg: RPC call parameters
 * @flags: RPC call flags
 * @tk_ops: RPC call ops
 * @data: user call data
 */
int
rpc_call_async(struct rpc_clnt *clnt, const struct rpc_message *msg, int flags,
	       const struct rpc_call_ops *tk_ops, void *data)
{
	struct rpc_task	*task;
	struct rpc_task_setup task_setup_data = {
		.rpc_client = clnt,
		.rpc_message = msg,
		.callback_ops = tk_ops,
		.callback_data = data,
		.flags = flags|RPC_TASK_ASYNC,
	};

	task = rpc_run_task(&task_setup_data);
	if (IS_ERR(task))
		return PTR_ERR(task);
	rpc_put_task(task);
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_call_async);

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/**
 * rpc_run_bc_task - Allocate a new RPC task for backchannel use, then run
 * rpc_execute against it
 * @req: RPC request
 * @tk_ops: RPC call ops
 */
struct rpc_task *rpc_run_bc_task(struct rpc_rqst *req,
				const struct rpc_call_ops *tk_ops)
{
	struct rpc_task *task;
	struct xdr_buf *xbufp = &req->rq_snd_buf;
	struct rpc_task_setup task_setup_data = {
		.callback_ops = tk_ops,
	};

	dprintk("RPC: rpc_run_bc_task req= %p\n", req);
	/*
	 * Create an rpc_task to send the data
	 */
	task = rpc_new_task(&task_setup_data);
	if (IS_ERR(task)) {
		xprt_free_bc_request(req);
		goto out;
	}
	task->tk_rqstp = req;

	/*
	 * Set up the xdr_buf length.
	 * This also indicates that the buffer is XDR encoded already.
	 */
	xbufp->len = xbufp->head[0].iov_len + xbufp->page_len +
			xbufp->tail[0].iov_len;

	task->tk_action = call_bc_transmit;
	atomic_inc(&task->tk_count);
	WARN_ON_ONCE(atomic_read(&task->tk_count) != 2);
	rpc_execute(task);

out:
	dprintk("RPC: rpc_run_bc_task: task= %p\n", task);
	return task;
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

void
rpc_call_start(struct rpc_task *task)
{
	task->tk_action = call_start;
}
EXPORT_SYMBOL_GPL(rpc_call_start);

/**
 * rpc_peeraddr - extract remote peer address from clnt's xprt
 * @clnt: RPC client structure
 * @buf: target buffer
 * @bufsize: length of target buffer
 *
 * Returns the number of bytes that are actually in the stored address.
*/ size_t rpc_peeraddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t bufsize) { size_t bytes; struct rpc_xprt *xprt; rcu_read_lock(); xprt = rcu_dereference(clnt->cl_xprt); bytes = xprt->addrlen; if (bytes > bufsize) bytes = bufsize; memcpy(buf, &xprt->addr, bytes); rcu_read_unlock(); return bytes; } EXPORT_SYMBOL_GPL(rpc_peeraddr); /** * rpc_peeraddr2str - return remote peer address in printable format * @clnt: RPC client structure * @format: address format * * NB: the lifetime of the memory referenced by the returned pointer is * the same as the rpc_xprt itself. As long as the caller uses this * pointer, it must hold the RCU read lock. */ const char *rpc_peeraddr2str(struct rpc_clnt *clnt, enum rpc_display_format_t format) { struct rpc_xprt *xprt; xprt = rcu_dereference(clnt->cl_xprt); if (xprt->address_strings[format] != NULL) return xprt->address_strings[format]; else return "unprintable"; } EXPORT_SYMBOL_GPL(rpc_peeraddr2str); static const struct sockaddr_in rpc_inaddr_loopback = { .sin_family = AF_INET, .sin_addr.s_addr = htonl(INADDR_ANY), }; static const struct sockaddr_in6 rpc_in6addr_loopback = { .sin6_family = AF_INET6, .sin6_addr = IN6ADDR_ANY_INIT, }; /* * Try a getsockname() on a connected datagram socket. Using a * connected datagram socket prevents leaving a socket in TIME_WAIT. * This conserves the ephemeral port number space. * * Returns zero and fills in "buf" if successful; otherwise, a * negative errno is returned. 
*/ static int rpc_sockname(struct net *net, struct sockaddr *sap, size_t salen, struct sockaddr *buf, int buflen) { struct socket *sock; int err; err = __sock_create(net, sap->sa_family, SOCK_DGRAM, IPPROTO_UDP, &sock, 1); if (err < 0) { dprintk("RPC: can't create UDP socket (%d)\n", err); goto out; } switch (sap->sa_family) { case AF_INET: err = kernel_bind(sock, (struct sockaddr *)&rpc_inaddr_loopback, sizeof(rpc_inaddr_loopback)); break; case AF_INET6: err = kernel_bind(sock, (struct sockaddr *)&rpc_in6addr_loopback, sizeof(rpc_in6addr_loopback)); break; default: err = -EAFNOSUPPORT; goto out; } if (err < 0) { dprintk("RPC: can't bind UDP socket (%d)\n", err); goto out_release; } err = kernel_connect(sock, sap, salen, 0); if (err < 0) { dprintk("RPC: can't connect UDP socket (%d)\n", err); goto out_release; } err = kernel_getsockname(sock, buf, &buflen); if (err < 0) { dprintk("RPC: getsockname failed (%d)\n", err); goto out_release; } err = 0; if (buf->sa_family == AF_INET6) { struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)buf; sin6->sin6_scope_id = 0; } dprintk("RPC: %s succeeded\n", __func__); out_release: sock_release(sock); out: return err; } /* * Scraping a connected socket failed, so we don't have a useable * local address. Fallback: generate an address that will prevent * the server from calling us back. * * Returns zero and fills in "buf" if successful; otherwise, a * negative errno is returned. 
*/ static int rpc_anyaddr(int family, struct sockaddr *buf, size_t buflen) { switch (family) { case AF_INET: if (buflen < sizeof(rpc_inaddr_loopback)) return -EINVAL; memcpy(buf, &rpc_inaddr_loopback, sizeof(rpc_inaddr_loopback)); break; case AF_INET6: if (buflen < sizeof(rpc_in6addr_loopback)) return -EINVAL; memcpy(buf, &rpc_in6addr_loopback, sizeof(rpc_in6addr_loopback)); default: dprintk("RPC: %s: address family not supported\n", __func__); return -EAFNOSUPPORT; } dprintk("RPC: %s: succeeded\n", __func__); return 0; } /** * rpc_localaddr - discover local endpoint address for an RPC client * @clnt: RPC client structure * @buf: target buffer * @buflen: size of target buffer, in bytes * * Returns zero and fills in "buf" and "buflen" if successful; * otherwise, a negative errno is returned. * * This works even if the underlying transport is not currently connected, * or if the upper layer never previously provided a source address. * * The result of this function call is transient: multiple calls in * succession may give different results, depending on how local * networking configuration changes over time. 
 */
int rpc_localaddr(struct rpc_clnt *clnt, struct sockaddr *buf, size_t buflen)
{
	struct sockaddr_storage address;
	struct sockaddr *sap = (struct sockaddr *)&address;
	struct rpc_xprt *xprt;
	struct net *net;
	size_t salen;
	int err;

	/* Snapshot the peer address and netns under RCU */
	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	salen = xprt->addrlen;
	memcpy(sap, &xprt->addr, salen);
	net = get_net(xprt->xprt_net);
	rcu_read_unlock();

	rpc_set_port(sap, 0);
	err = rpc_sockname(net, sap, salen, buf, buflen);
	put_net(net);
	if (err != 0)
		/* Couldn't discover local address, return ANYADDR */
		return rpc_anyaddr(sap->sa_family, buf, buflen);
	return 0;
}
EXPORT_SYMBOL_GPL(rpc_localaddr);

/* Pass send/receive buffer size hints down to the transport, if the
 * transport supports them.
 */
void rpc_setbufsize(struct rpc_clnt *clnt, unsigned int sndsize, unsigned int rcvsize)
{
	struct rpc_xprt *xprt;

	rcu_read_lock();
	xprt = rcu_dereference(clnt->cl_xprt);
	if (xprt->ops->set_buffer_size)
		xprt->ops->set_buffer_size(xprt, sndsize, rcvsize);
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(rpc_setbufsize);

/**
 * rpc_protocol - Get transport protocol number for an RPC client
 * @clnt: RPC client to query
 *
 */
int rpc_protocol(struct rpc_clnt *clnt)
{
	int protocol;

	rcu_read_lock();
	protocol = rcu_dereference(clnt->cl_xprt)->prot;
	rcu_read_unlock();
	return protocol;
}
EXPORT_SYMBOL_GPL(rpc_protocol);

/**
 * rpc_net_ns - Get the network namespace for this RPC client
 * @clnt: RPC client to query
 *
 */
struct net *rpc_net_ns(struct rpc_clnt *clnt)
{
	struct net *ret;

	rcu_read_lock();
	ret = rcu_dereference(clnt->cl_xprt)->xprt_net;
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_net_ns);

/**
 * rpc_max_payload - Get maximum payload size for a transport, in bytes
 * @clnt: RPC client to query
 *
 * For stream transports, this is one RPC record fragment (see RFC
 * 1831), as we don't support multi-record requests yet. For datagram
 * transports, this is the size of an IP packet minus the IP, UDP, and
 * RPC header sizes.
 */
size_t rpc_max_payload(struct rpc_clnt *clnt)
{
	size_t ret;

	rcu_read_lock();
	ret = rcu_dereference(clnt->cl_xprt)->max_payload;
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_max_payload);

/**
 * rpc_get_timeout - Get timeout for transport in units of HZ
 * @clnt: RPC client to query
 */
unsigned long rpc_get_timeout(struct rpc_clnt *clnt)
{
	unsigned long ret;

	rcu_read_lock();
	ret = rcu_dereference(clnt->cl_xprt)->timeout->to_initval;
	rcu_read_unlock();
	return ret;
}
EXPORT_SYMBOL_GPL(rpc_get_timeout);

/**
 * rpc_force_rebind - force transport to check that remote port is unchanged
 * @clnt: client to rebind
 *
 */
void rpc_force_rebind(struct rpc_clnt *clnt)
{
	if (clnt->cl_autobind) {
		rcu_read_lock();
		xprt_clear_bound(rcu_dereference(clnt->cl_xprt));
		rcu_read_unlock();
	}
}
EXPORT_SYMBOL_GPL(rpc_force_rebind);

/*
 * Restart an (async) RPC call from the call_prepare state.
 * Usually called from within the exit handler.
 */
int
rpc_restart_call_prepare(struct rpc_task *task)
{
	if (RPC_ASSASSINATED(task))
		return 0;
	task->tk_action = call_start;
	if (task->tk_ops->rpc_call_prepare != NULL)
		task->tk_action = rpc_prepare_task;
	return 1;
}
EXPORT_SYMBOL_GPL(rpc_restart_call_prepare);

/*
 * Restart an (async) RPC call. Usually called from within the
 * exit handler.
 */
int
rpc_restart_call(struct rpc_task *task)
{
	if (RPC_ASSASSINATED(task))
		return 0;
	task->tk_action = call_start;
	return 1;
}
EXPORT_SYMBOL_GPL(rpc_restart_call);

#ifdef RPC_DEBUG
/* Debug helper: printable name of the task's procedure */
static const char *rpc_proc_name(const struct rpc_task *task)
{
	const struct rpc_procinfo *proc = task->tk_msg.rpc_proc;

	if (proc) {
		if (proc->p_name)
			return proc->p_name;
		else
			return "NULL";
	} else
		return "no proc";
}
#endif

/*
 * 0.  Initial state
 *
 *     Other FSM states can be visited zero or more times, but
 *     this state is visited exactly once for each RPC.
 */
static void
call_start(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	dprintk("RPC: %5u call_start %s%d proc %s (%s)\n", task->tk_pid,
			clnt->cl_protname, clnt->cl_vers,
			rpc_proc_name(task),
			(RPC_IS_ASYNC(task) ? "async" : "sync"));

	/* Increment call count */
	task->tk_msg.rpc_proc->p_count++;
	clnt->cl_stats->rpccnt++;
	task->tk_action = call_reserve;
}

/*
 * 1.	Reserve an RPC call slot
 */
static void
call_reserve(struct rpc_task *task)
{
	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_reserveresult;
	xprt_reserve(task);
}

static void call_retry_reserve(struct rpc_task *task);

/*
 * 1b.	Grok the result of xprt_reserve()
 */
static void
call_reserveresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprint_status(task);

	/*
	 * After a call to xprt_reserve(), we must have either
	 * a request slot or else an error status.
	 */
	task->tk_status = 0;
	if (status >= 0) {
		if (task->tk_rqstp) {
			task->tk_action = call_refresh;
			return;
		}

		printk(KERN_ERR "%s: status=%d, but no request slot, exiting\n",
				__func__, status);
		rpc_exit(task, -EIO);
		return;
	}

	/*
	 * Even though there was an error, we may have acquired
	 * a request slot somehow. Make sure not to leak it.
	 */
	if (task->tk_rqstp) {
		printk(KERN_ERR "%s: status=%d, request allocated anyway\n",
				__func__, status);
		xprt_release(task);
	}

	switch (status) {
	case -ENOMEM:
		rpc_delay(task, HZ >> 2);
		/* fall through */
	case -EAGAIN:	/* woken up; retry */
		task->tk_action = call_retry_reserve;
		return;
	case -EIO:	/* probably a shutdown */
		break;
	default:
		printk(KERN_ERR "%s: unrecognized error %d, exiting\n",
				__func__, status);
		break;
	}
	rpc_exit(task, status);
}

/*
 * 1c.	Retry reserving an RPC call slot
 */
static void
call_retry_reserve(struct rpc_task *task)
{
	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_reserveresult;
	xprt_retry_reserve(task);
}

/*
 * 2.	Bind and/or refresh the credentials
 */
static void
call_refresh(struct rpc_task *task)
{
	dprint_status(task);

	task->tk_action = call_refreshresult;
	task->tk_status = 0;
	task->tk_client->cl_stats->rpcauthrefresh++;
	rpcauth_refreshcred(task);
}

/*
 * 2a.	Process the results of a credential refresh
 */
static void
call_refreshresult(struct rpc_task *task)
{
	int status = task->tk_status;

	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_refresh;
	switch (status) {
	case 0:
		if (rpcauth_uptodatecred(task)) {
			task->tk_action = call_allocate;
			return;
		}
		/* Use rate-limiting and a max number of retries if refresh
		 * had status 0 but failed to update the cred.
		 */
		/* fall through */
	case -ETIMEDOUT:
		rpc_delay(task, 3*HZ);
		/* fall through */
	case -EAGAIN:
		status = -EACCES;
		/* fall through */
	case -EKEYEXPIRED:
		if (!task->tk_cred_retry)
			break;
		task->tk_cred_retry--;
		dprintk("RPC: %5u %s: retry refresh creds\n",
				task->tk_pid, __func__);
		return;
	}
	dprintk("RPC: %5u %s: refresh creds failed with error %d\n",
				task->tk_pid, __func__, status);
	rpc_exit(task, status);
}

/*
 * 2b.	Allocate the buffer. For details, see sched.c:rpc_malloc.
 *	(Note: buffer memory is freed in xprt_release).
 */
static void
call_allocate(struct rpc_task *task)
{
	unsigned int slack = task->tk_rqstp->rq_cred->cr_auth->au_cslack;
	struct rpc_rqst *req = task->tk_rqstp;
	struct rpc_xprt *xprt = req->rq_xprt;
	struct rpc_procinfo *proc = task->tk_msg.rpc_proc;

	dprint_status(task);

	task->tk_status = 0;
	task->tk_action = call_bind;

	if (req->rq_buffer)
		return;

	if (proc->p_proc != 0) {
		BUG_ON(proc->p_arglen == 0);
		if (proc->p_decode != NULL)
			BUG_ON(proc->p_replen == 0);
	}

	/*
	 * Calculate the size (in quads) of the RPC call
	 * and reply headers, and convert both values
	 * to byte sizes.
	 */
	req->rq_callsize = RPC_CALLHDRSIZE + (slack << 1) + proc->p_arglen;
	req->rq_callsize <<= 2;
	req->rq_rcvsize = RPC_REPHDRSIZE + slack + proc->p_replen;
	req->rq_rcvsize <<= 2;

	req->rq_buffer = xprt->ops->buf_alloc(task,
					req->rq_callsize + req->rq_rcvsize);
	if (req->rq_buffer != NULL)
		return;

	dprintk("RPC: %5u rpc_buffer allocation failed\n", task->tk_pid);

	/* Retry the allocation unless a fatal signal ended the wait */
	if (RPC_IS_ASYNC(task) || !fatal_signal_pending(current)) {
		task->tk_action = call_allocate;
		rpc_delay(task, HZ>>4);
		return;
	}

	rpc_exit(task, -ERESTARTSYS);
}

/* True if the send buffer has not yet been XDR-encoded */
static inline int
rpc_task_need_encode(struct rpc_task *task)
{
	return task->tk_rqstp->rq_snd_buf.len == 0;
}

/* Reset send state so the request is re-encoded on the next transmit */
static inline void
rpc_task_force_reencode(struct rpc_task *task)
{
	task->tk_rqstp->rq_snd_buf.len = 0;
	task->tk_rqstp->rq_bytes_sent = 0;
}

static inline void
rpc_xdr_buf_init(struct xdr_buf *buf, void *start, size_t len)
{
	buf->head[0].iov_base = start;
	buf->head[0].iov_len = len;
	buf->tail[0].iov_len = 0;
	buf->page_len = 0;
	buf->flags = 0;
	buf->len = 0;
	buf->buflen = len;
}

/*
 * 3.	Encode arguments of an RPC call
 */
static void
rpc_xdr_encode(struct rpc_task *task)
{
	struct rpc_rqst	*req = task->tk_rqstp;
	kxdreproc_t	encode;
	__be32		*p;

	dprint_status(task);

	/* Split rq_buffer: call area first, receive area after it */
	rpc_xdr_buf_init(&req->rq_snd_buf,
			 req->rq_buffer,
			 req->rq_callsize);
	rpc_xdr_buf_init(&req->rq_rcv_buf,
			 (char *)req->rq_buffer + req->rq_callsize,
			 req->rq_rcvsize);

	p = rpc_encode_header(task);
	if (p == NULL) {
		printk(KERN_INFO "RPC: couldn't encode RPC header, exit EIO\n");
		rpc_exit(task, -EIO);
		return;
	}

	encode = task->tk_msg.rpc_proc->p_encode;
	if (encode == NULL)
		return;

	task->tk_status = rpcauth_wrap_req(task, encode, req, p,
			task->tk_msg.rpc_argp);
}

/*
 * 4.	Get the server port number if not yet set
 */
static void
call_bind(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	dprint_status(task);

	task->tk_action = call_connect;
	if (!xprt_bound(xprt)) {
		task->tk_action = call_bind_status;
		task->tk_timeout = xprt->bind_timeout;
		xprt->ops->rpcbind(task);
	}
}

/*
 * 4a.
	Sort out bind result
 */
static void
call_bind_status(struct rpc_task *task)
{
	int status = -EIO;

	if (task->tk_status >= 0) {
		dprint_status(task);
		task->tk_status = 0;
		task->tk_action = call_connect;
		return;
	}

	trace_rpc_bind_status(task);
	switch (task->tk_status) {
	case -ENOMEM:
		dprintk("RPC: %5u rpcbind out of memory\n", task->tk_pid);
		rpc_delay(task, HZ >> 2);
		goto retry_timeout;
	case -EACCES:
		dprintk("RPC: %5u remote rpcbind: RPC program/version "
				"unavailable\n", task->tk_pid);
		/* fail immediately if this is an RPC ping */
		if (task->tk_msg.rpc_proc->p_proc == 0) {
			status = -EOPNOTSUPP;
			break;
		}
		if (task->tk_rebind_retry == 0)
			break;
		task->tk_rebind_retry--;
		rpc_delay(task, 3*HZ);
		goto retry_timeout;
	case -ETIMEDOUT:
		dprintk("RPC: %5u rpcbind request timed out\n",
				task->tk_pid);
		goto retry_timeout;
	case -EPFNOSUPPORT:
		/* server doesn't support any rpcbind version we know of */
		dprintk("RPC: %5u unrecognized remote rpcbind service\n",
				task->tk_pid);
		break;
	case -EPROTONOSUPPORT:
		dprintk("RPC: %5u remote rpcbind version unavailable, retrying\n",
				task->tk_pid);
		task->tk_status = 0;
		task->tk_action = call_bind;
		return;
	case -ECONNREFUSED:		/* connection problems */
	case -ECONNRESET:
	case -ENOTCONN:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -EPIPE:
		dprintk("RPC: %5u remote rpcbind unreachable: %d\n",
				task->tk_pid, task->tk_status);
		if (!RPC_IS_SOFTCONN(task)) {
			rpc_delay(task, 5*HZ);
			goto retry_timeout;
		}
		status = task->tk_status;
		break;
	default:
		dprintk("RPC: %5u unrecognized rpcbind error (%d)\n",
				task->tk_pid, -task->tk_status);
	}

	rpc_exit(task, status);
	return;

retry_timeout:
	task->tk_action = call_timeout;
}

/*
 * 4b.	Connect to the RPC server
 */
static void
call_connect(struct rpc_task *task)
{
	struct rpc_xprt *xprt = task->tk_rqstp->rq_xprt;

	dprintk("RPC: %5u call_connect xprt %p %s connected\n",
			task->tk_pid, xprt,
			(xprt_connected(xprt) ? "is" : "is not"));

	task->tk_action = call_transmit;
	if (!xprt_connected(xprt)) {
		task->tk_action = call_connect_status;
		if (task->tk_status < 0)
			return;
		if (task->tk_flags & RPC_TASK_NOCONNECT) {
			rpc_exit(task, -ENOTCONN);
			return;
		}
		xprt_connect(task);
	}
}

/*
 * 4c.	Sort out connect result
 */
static void
call_connect_status(struct rpc_task *task)
{
	struct rpc_clnt *clnt = task->tk_client;
	int status = task->tk_status;

	dprint_status(task);

	trace_rpc_connect_status(task, status);
	switch (status) {
		/* if soft mounted, test if we've timed out */
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		return;
	case -ECONNREFUSED:
	case -ECONNRESET:
	case -ENETUNREACH:
		if (RPC_IS_SOFTCONN(task))
			break;
		/* retry with existing socket, after a delay */
		/* fall through */
	case 0:
	case -EAGAIN:
		task->tk_status = 0;
		clnt->cl_stats->netreconn++;
		task->tk_action = call_transmit;
		return;
	}
	rpc_exit(task, status);
}

/*
 * 5.	Transmit the RPC request, and wait for reply
 */
static void
call_transmit(struct rpc_task *task)
{
	dprint_status(task);

	task->tk_action = call_status;
	if (task->tk_status < 0)
		return;
	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status != 0)
		return;
	task->tk_action = call_transmit_status;
	/* Encode here so that rpcsec_gss can use correct sequence number. */
	if (rpc_task_need_encode(task)) {
		rpc_xdr_encode(task);
		/* Did the encode result in an error condition? */
		if (task->tk_status != 0) {
			/* Was the error nonfatal? */
			if (task->tk_status == -EAGAIN)
				rpc_delay(task, HZ >> 4);
			else
				rpc_exit(task, task->tk_status);
			return;
		}
	}
	xprt_transmit(task);
	if (task->tk_status < 0)
		return;
	/*
	 * On success, ensure that we call xprt_end_transmit() before sleeping
	 * in order to allow access to the socket to other RPC requests.
	 */
	call_transmit_status(task);
	if (rpc_reply_expected(task))
		return;
	task->tk_action = rpc_exit_task;
	rpc_wake_up_queued_task(&task->tk_rqstp->rq_xprt->pending, task);
}

/*
 * 5a.
	Handle cleanup after a transmission
 */
static void
call_transmit_status(struct rpc_task *task)
{
	task->tk_action = call_status;

	/*
	 * Common case: success. Force the compiler to put this
	 * test first.
	 */
	if (task->tk_status == 0) {
		xprt_end_transmit(task);
		rpc_task_force_reencode(task);
		return;
	}

	switch (task->tk_status) {
	case -EAGAIN:
		break;
	default:
		dprint_status(task);
		xprt_end_transmit(task);
		rpc_task_force_reencode(task);
		break;
		/*
		 * Special cases: if we've been waiting on the
		 * socket's write_space() callback, or if the
		 * socket just returned a connection error,
		 * then hold onto the transport lock.
		 */
	case -ECONNREFUSED:
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
		if (RPC_IS_SOFTCONN(task)) {
			xprt_end_transmit(task);
			rpc_exit(task, task->tk_status);
			break;
		}
		/* fall through */
	case -ECONNRESET:
	case -ENOTCONN:
	case -EPIPE:
		rpc_task_force_reencode(task);
	}
}

#if defined(CONFIG_SUNRPC_BACKCHANNEL)
/*
 * 5b.	Send the backchannel RPC reply. On error, drop the reply. In
 * addition, disconnect on connectivity errors.
 */
static void
call_bc_transmit(struct rpc_task *task)
{
	struct rpc_rqst *req = task->tk_rqstp;

	task->tk_status = xprt_prepare_transmit(task);
	if (task->tk_status == -EAGAIN) {
		/*
		 * Could not reserve the transport. Try again after the
		 * transport is released.
		 */
		task->tk_status = 0;
		task->tk_action = call_bc_transmit;
		return;
	}

	task->tk_action = rpc_exit_task;
	if (task->tk_status < 0) {
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		return;
	}

	xprt_transmit(task);
	xprt_end_transmit(task);
	dprint_status(task);
	switch (task->tk_status) {
	case 0:
		/* Success */
		break;
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
	case -ETIMEDOUT:
		/*
		 * Problem reaching the server. Disconnect and let the
		 * forechannel reestablish the connection. The server will
		 * have to retransmit the backchannel request and we'll
		 * reprocess it. Since these ops are idempotent, there's no
		 * need to cache our reply at this time.
		 */
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		xprt_conditional_disconnect(req->rq_xprt,
			req->rq_connect_cookie);
		break;
	default:
		/*
		 * We were unable to reply and will have to drop the
		 * request. The server should reconnect and retransmit.
		 */
		WARN_ON_ONCE(task->tk_status == -EAGAIN);
		printk(KERN_NOTICE "RPC: Could not send backchannel reply "
			"error: %d\n", task->tk_status);
		break;
	}
	rpc_wake_up_queued_task(&req->rq_xprt->pending, task);
}
#endif /* CONFIG_SUNRPC_BACKCHANNEL */

/*
 * 6.	Sort out the RPC call status
 */
static void
call_status(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	int		status;

	/* A reply arrived before the send completed: use its length as
	 * the (positive) status.
	 */
	if (req->rq_reply_bytes_recvd > 0 && !req->rq_bytes_sent)
		task->tk_status = req->rq_reply_bytes_recvd;

	dprint_status(task);

	status = task->tk_status;
	if (status >= 0) {
		task->tk_action = call_decode;
		return;
	}

	trace_rpc_call_status(task);
	task->tk_status = 0;
	switch(status) {
	case -EHOSTDOWN:
	case -EHOSTUNREACH:
	case -ENETUNREACH:
		/*
		 * Delay any retries for 3 seconds, then handle as if it
		 * were a timeout.
		 */
		rpc_delay(task, 3*HZ);
		/* fall through */
	case -ETIMEDOUT:
		task->tk_action = call_timeout;
		if (task->tk_client->cl_discrtry)
			xprt_conditional_disconnect(req->rq_xprt,
					req->rq_connect_cookie);
		break;
	case -ECONNRESET:
	case -ECONNREFUSED:
		rpc_force_rebind(clnt);
		rpc_delay(task, 3*HZ);
		/* fall through */
	case -EPIPE:
	case -ENOTCONN:
		task->tk_action = call_bind;
		break;
	case -EAGAIN:
		task->tk_action = call_transmit;
		break;
	case -EIO:
		/* shutdown or soft timeout */
		rpc_exit(task, status);
		break;
	default:
		if (clnt->cl_chatty)
			printk("%s: RPC call returned error %d\n",
			       clnt->cl_protname, -status);
		rpc_exit(task, status);
	}
}

/*
 * 6a.	Handle RPC timeout
 *	We do not release the request slot, so we keep using the
 *	same XID for all retransmits.
 */
static void
call_timeout(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;

	if (xprt_adjust_timeout(task->tk_rqstp) == 0) {
		dprintk("RPC: %5u call_timeout (minor)\n", task->tk_pid);
		goto retry;
	}

	dprintk("RPC: %5u call_timeout (major)\n", task->tk_pid);
	task->tk_timeouts++;

	if (RPC_IS_SOFTCONN(task)) {
		rpc_exit(task, -ETIMEDOUT);
		return;
	}
	if (RPC_IS_SOFT(task)) {
		if (clnt->cl_chatty) {
			rcu_read_lock();
			printk(KERN_NOTICE "%s: server %s not responding, timed out\n",
				clnt->cl_protname,
				rcu_dereference(clnt->cl_xprt)->servername);
			rcu_read_unlock();
		}
		if (task->tk_flags & RPC_TASK_TIMEOUT)
			rpc_exit(task, -ETIMEDOUT);
		else
			rpc_exit(task, -EIO);
		return;
	}

	if (!(task->tk_flags & RPC_CALL_MAJORSEEN)) {
		task->tk_flags |= RPC_CALL_MAJORSEEN;
		if (clnt->cl_chatty) {
			rcu_read_lock();
			printk(KERN_NOTICE "%s: server %s not responding, still trying\n",
			clnt->cl_protname,
			rcu_dereference(clnt->cl_xprt)->servername);
			rcu_read_unlock();
		}
	}
	rpc_force_rebind(clnt);
	/*
	 * Did our request time out due to an RPCSEC_GSS out-of-sequence
	 * event? RFC2203 requires the server to drop all such requests.
	 */
	rpcauth_invalcred(task);

retry:
	clnt->cl_stats->rpcretrans++;
	task->tk_action = call_bind;
	task->tk_status = 0;
}

/*
 * 7.	Decode the RPC reply
 */
static void
call_decode(struct rpc_task *task)
{
	struct rpc_clnt	*clnt = task->tk_client;
	struct rpc_rqst	*req = task->tk_rqstp;
	kxdrdproc_t	decode = task->tk_msg.rpc_proc->p_decode;
	__be32		*p;

	dprint_status(task);

	if (task->tk_flags & RPC_CALL_MAJORSEEN) {
		if (clnt->cl_chatty) {
			rcu_read_lock();
			printk(KERN_NOTICE "%s: server %s OK\n",
				clnt->cl_protname,
				rcu_dereference(clnt->cl_xprt)->servername);
			rcu_read_unlock();
		}
		task->tk_flags &= ~RPC_CALL_MAJORSEEN;
	}

	/*
	 * Ensure that we see all writes made by xprt_complete_rqst()
	 * before it changed req->rq_reply_bytes_recvd.
*/ smp_rmb(); req->rq_rcv_buf.len = req->rq_private_buf.len; /* Check that the softirq receive buffer is valid */ WARN_ON(memcmp(&req->rq_rcv_buf, &req->rq_private_buf, sizeof(req->rq_rcv_buf)) != 0); if (req->rq_rcv_buf.len < 12) { if (!RPC_IS_SOFT(task)) { task->tk_action = call_bind; clnt->cl_stats->rpcretrans++; goto out_retry; } dprintk("RPC: %s: too small RPC reply size (%d bytes)\n", clnt->cl_protname, task->tk_status); task->tk_action = call_timeout; goto out_retry; } p = rpc_verify_header(task); if (IS_ERR(p)) { if (p == ERR_PTR(-EAGAIN)) goto out_retry; return; } task->tk_action = rpc_exit_task; if (decode) { task->tk_status = rpcauth_unwrap_resp(task, decode, req, p, task->tk_msg.rpc_resp); } dprintk("RPC: %5u call_decode result %d\n", task->tk_pid, task->tk_status); return; out_retry: task->tk_status = 0; /* Note: rpc_verify_header() may have freed the RPC slot */ if (task->tk_rqstp == req) { req->rq_reply_bytes_recvd = req->rq_rcv_buf.len = 0; if (task->tk_client->cl_discrtry) xprt_conditional_disconnect(req->rq_xprt, req->rq_connect_cookie); } } static __be32 * rpc_encode_header(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; struct rpc_rqst *req = task->tk_rqstp; __be32 *p = req->rq_svec[0].iov_base; /* FIXME: check buffer size? 
*/ p = xprt_skip_transport_header(req->rq_xprt, p); *p++ = req->rq_xid; /* XID */ *p++ = htonl(RPC_CALL); /* CALL */ *p++ = htonl(RPC_VERSION); /* RPC version */ *p++ = htonl(clnt->cl_prog); /* program number */ *p++ = htonl(clnt->cl_vers); /* program version */ *p++ = htonl(task->tk_msg.rpc_proc->p_proc); /* procedure */ p = rpcauth_marshcred(task, p); req->rq_slen = xdr_adjust_iovec(&req->rq_svec[0], p); return p; } static __be32 * rpc_verify_header(struct rpc_task *task) { struct rpc_clnt *clnt = task->tk_client; struct kvec *iov = &task->tk_rqstp->rq_rcv_buf.head[0]; int len = task->tk_rqstp->rq_rcv_buf.len >> 2; __be32 *p = iov->iov_base; u32 n; int error = -EACCES; if ((task->tk_rqstp->rq_rcv_buf.len & 3) != 0) { /* RFC-1014 says that the representation of XDR data must be a * multiple of four bytes * - if it isn't pointer subtraction in the NFS client may give * undefined results */ dprintk("RPC: %5u %s: XDR representation not a multiple of" " 4 bytes: 0x%x\n", task->tk_pid, __func__, task->tk_rqstp->rq_rcv_buf.len); goto out_eio; } if ((len -= 3) < 0) goto out_overflow; p += 1; /* skip XID */ if ((n = ntohl(*p++)) != RPC_REPLY) { dprintk("RPC: %5u %s: not an RPC reply: %x\n", task->tk_pid, __func__, n); goto out_garbage; } if ((n = ntohl(*p++)) != RPC_MSG_ACCEPTED) { if (--len < 0) goto out_overflow; switch ((n = ntohl(*p++))) { case RPC_AUTH_ERROR: break; case RPC_MISMATCH: dprintk("RPC: %5u %s: RPC call version mismatch!\n", task->tk_pid, __func__); error = -EPROTONOSUPPORT; goto out_err; default: dprintk("RPC: %5u %s: RPC call rejected, " "unknown error: %x\n", task->tk_pid, __func__, n); goto out_eio; } if (--len < 0) goto out_overflow; switch ((n = ntohl(*p++))) { case RPC_AUTH_REJECTEDCRED: case RPC_AUTH_REJECTEDVERF: case RPCSEC_GSS_CREDPROBLEM: case RPCSEC_GSS_CTXPROBLEM: if (!task->tk_cred_retry) break; task->tk_cred_retry--; dprintk("RPC: %5u %s: retry stale creds\n", task->tk_pid, __func__); rpcauth_invalcred(task); /* Ensure we obtain a new XID! 
*/ xprt_release(task); task->tk_action = call_reserve; goto out_retry; case RPC_AUTH_BADCRED: case RPC_AUTH_BADVERF: /* possibly garbled cred/verf? */ if (!task->tk_garb_retry) break; task->tk_garb_retry--; dprintk("RPC: %5u %s: retry garbled creds\n", task->tk_pid, __func__); task->tk_action = call_bind; goto out_retry; case RPC_AUTH_TOOWEAK: rcu_read_lock(); printk(KERN_NOTICE "RPC: server %s requires stronger " "authentication.\n", rcu_dereference(clnt->cl_xprt)->servername); rcu_read_unlock(); break; default: dprintk("RPC: %5u %s: unknown auth error: %x\n", task->tk_pid, __func__, n); error = -EIO; } dprintk("RPC: %5u %s: call rejected %d\n", task->tk_pid, __func__, n); goto out_err; } if (!(p = rpcauth_checkverf(task, p))) { dprintk("RPC: %5u %s: auth check failed\n", task->tk_pid, __func__); goto out_garbage; /* bad verifier, retry */ } len = p - (__be32 *)iov->iov_base - 1; if (len < 0) goto out_overflow; switch ((n = ntohl(*p++))) { case RPC_SUCCESS: return p; case RPC_PROG_UNAVAIL: dprintk_rcu("RPC: %5u %s: program %u is unsupported " "by server %s\n", task->tk_pid, __func__, (unsigned int)clnt->cl_prog, rcu_dereference(clnt->cl_xprt)->servername); error = -EPFNOSUPPORT; goto out_err; case RPC_PROG_MISMATCH: dprintk_rcu("RPC: %5u %s: program %u, version %u unsupported " "by server %s\n", task->tk_pid, __func__, (unsigned int)clnt->cl_prog, (unsigned int)clnt->cl_vers, rcu_dereference(clnt->cl_xprt)->servername); error = -EPROTONOSUPPORT; goto out_err; case RPC_PROC_UNAVAIL: dprintk_rcu("RPC: %5u %s: proc %s unsupported by program %u, " "version %u on server %s\n", task->tk_pid, __func__, rpc_proc_name(task), clnt->cl_prog, clnt->cl_vers, rcu_dereference(clnt->cl_xprt)->servername); error = -EOPNOTSUPP; goto out_err; case RPC_GARBAGE_ARGS: dprintk("RPC: %5u %s: server saw garbage\n", task->tk_pid, __func__); break; /* retry */ default: dprintk("RPC: %5u %s: server accept status: %x\n", task->tk_pid, __func__, n); /* Also retry */ } out_garbage: 
clnt->cl_stats->rpcgarbage++; if (task->tk_garb_retry) { task->tk_garb_retry--; dprintk("RPC: %5u %s: retrying\n", task->tk_pid, __func__); task->tk_action = call_bind; out_retry: return ERR_PTR(-EAGAIN); } out_eio: error = -EIO; out_err: rpc_exit(task, error); dprintk("RPC: %5u %s: call failed with error %d\n", task->tk_pid, __func__, error); return ERR_PTR(error); out_overflow: dprintk("RPC: %5u %s: server reply was truncated.\n", task->tk_pid, __func__); goto out_garbage; } static void rpcproc_encode_null(void *rqstp, struct xdr_stream *xdr, void *obj) { } static int rpcproc_decode_null(void *rqstp, struct xdr_stream *xdr, void *obj) { return 0; } static struct rpc_procinfo rpcproc_null = { .p_encode = rpcproc_encode_null, .p_decode = rpcproc_decode_null, }; static int rpc_ping(struct rpc_clnt *clnt) { struct rpc_message msg = { .rpc_proc = &rpcproc_null, }; int err; msg.rpc_cred = authnull_ops.lookup_cred(NULL, NULL, 0); err = rpc_call_sync(clnt, &msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN); put_rpccred(msg.rpc_cred); return err; } struct rpc_task *rpc_call_null(struct rpc_clnt *clnt, struct rpc_cred *cred, int flags) { struct rpc_message msg = { .rpc_proc = &rpcproc_null, .rpc_cred = cred, }; struct rpc_task_setup task_setup_data = { .rpc_client = clnt, .rpc_message = &msg, .callback_ops = &rpc_default_ops, .flags = flags, }; return rpc_run_task(&task_setup_data); } EXPORT_SYMBOL_GPL(rpc_call_null); #ifdef RPC_DEBUG static void rpc_show_header(void) { printk(KERN_INFO "-pid- flgs status -client- --rqstp- " "-timeout ---ops--\n"); } static void rpc_show_task(const struct rpc_clnt *clnt, const struct rpc_task *task) { const char *rpc_waitq = "none"; if (RPC_IS_QUEUED(task)) rpc_waitq = rpc_qname(task->tk_waitqueue); printk(KERN_INFO "%5u %04x %6d %8p %8p %8ld %8p %sv%u %s a:%ps q:%s\n", task->tk_pid, task->tk_flags, task->tk_status, clnt, task->tk_rqstp, task->tk_timeout, task->tk_ops, clnt->cl_protname, clnt->cl_vers, rpc_proc_name(task), task->tk_action, 
rpc_waitq); } void rpc_show_tasks(struct net *net) { struct rpc_clnt *clnt; struct rpc_task *task; int header = 0; struct sunrpc_net *sn = net_generic(net, sunrpc_net_id); spin_lock(&sn->rpc_client_lock); list_for_each_entry(clnt, &sn->all_clients, cl_clients) { spin_lock(&clnt->cl_lock); list_for_each_entry(task, &clnt->cl_tasks, tk_task) { if (!header) { rpc_show_header(); header++; } rpc_show_task(clnt, task); } spin_unlock(&clnt->cl_lock); } spin_unlock(&sn->rpc_client_lock); } #endif
gpl-2.0
sandymanu/android_kernel_xiaomi_kenzo
fs/hfsplus/xattr.c
2155
18494
/* * linux/fs/hfsplus/xattr.c * * Vyacheslav Dubeyko <slava@dubeyko.com> * * Logic of processing extended attributes */ #include "hfsplus_fs.h" #include "xattr.h" const struct xattr_handler *hfsplus_xattr_handlers[] = { &hfsplus_xattr_osx_handler, &hfsplus_xattr_user_handler, &hfsplus_xattr_trusted_handler, &hfsplus_xattr_security_handler, NULL }; static int strcmp_xattr_finder_info(const char *name) { if (name) { return strncmp(name, HFSPLUS_XATTR_FINDER_INFO_NAME, sizeof(HFSPLUS_XATTR_FINDER_INFO_NAME)); } return -1; } static int strcmp_xattr_acl(const char *name) { if (name) { return strncmp(name, HFSPLUS_XATTR_ACL_NAME, sizeof(HFSPLUS_XATTR_ACL_NAME)); } return -1; } static inline int is_known_namespace(const char *name) { if (strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN) && strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN) && strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) && strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN)) return false; return true; } static int can_set_xattr(struct inode *inode, const char *name, const void *value, size_t value_len) { if (!strncmp(name, XATTR_SYSTEM_PREFIX, XATTR_SYSTEM_PREFIX_LEN)) return -EOPNOTSUPP; /* TODO: implement ACL support */ if (!strncmp(name, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN)) { /* * This makes sure that we aren't trying to set an * attribute in a different namespace by prefixing it * with "osx." */ if (is_known_namespace(name + XATTR_MAC_OSX_PREFIX_LEN)) return -EOPNOTSUPP; return 0; } /* * Don't allow setting an attribute in an unknown namespace. 
*/ if (strncmp(name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) && strncmp(name, XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN) && strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) return -EOPNOTSUPP; return 0; } int __hfsplus_setxattr(struct inode *inode, const char *name, const void *value, size_t size, int flags) { int err = 0; struct hfs_find_data cat_fd; hfsplus_cat_entry entry; u16 cat_entry_flags, cat_entry_type; u16 folder_finderinfo_len = sizeof(struct DInfo) + sizeof(struct DXInfo); u16 file_finderinfo_len = sizeof(struct FInfo) + sizeof(struct FXInfo); if ((!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) || HFSPLUS_IS_RSRC(inode)) return -EOPNOTSUPP; err = can_set_xattr(inode, name, value, size); if (err) return err; if (strncmp(name, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN) == 0) name += XATTR_MAC_OSX_PREFIX_LEN; if (value == NULL) { value = ""; size = 0; } err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &cat_fd); if (err) { pr_err("can't init xattr find struct\n"); return err; } err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &cat_fd); if (err) { pr_err("catalog searching failed\n"); goto end_setxattr; } if (!strcmp_xattr_finder_info(name)) { if (flags & XATTR_CREATE) { pr_err("xattr exists yet\n"); err = -EOPNOTSUPP; goto end_setxattr; } hfs_bnode_read(cat_fd.bnode, &entry, cat_fd.entryoffset, sizeof(hfsplus_cat_entry)); if (be16_to_cpu(entry.type) == HFSPLUS_FOLDER) { if (size == folder_finderinfo_len) { memcpy(&entry.folder.user_info, value, folder_finderinfo_len); hfs_bnode_write(cat_fd.bnode, &entry, cat_fd.entryoffset, sizeof(struct hfsplus_cat_folder)); hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY); } else { err = -ERANGE; goto end_setxattr; } } else if (be16_to_cpu(entry.type) == HFSPLUS_FILE) { if (size == file_finderinfo_len) { memcpy(&entry.file.user_info, value, file_finderinfo_len); hfs_bnode_write(cat_fd.bnode, &entry, cat_fd.entryoffset, sizeof(struct hfsplus_cat_file)); 
hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY); } else { err = -ERANGE; goto end_setxattr; } } else { err = -EOPNOTSUPP; goto end_setxattr; } goto end_setxattr; } if (!HFSPLUS_SB(inode->i_sb)->attr_tree) { err = -EOPNOTSUPP; goto end_setxattr; } if (hfsplus_attr_exists(inode, name)) { if (flags & XATTR_CREATE) { pr_err("xattr exists yet\n"); err = -EOPNOTSUPP; goto end_setxattr; } err = hfsplus_delete_attr(inode, name); if (err) goto end_setxattr; err = hfsplus_create_attr(inode, name, value, size); if (err) goto end_setxattr; } else { if (flags & XATTR_REPLACE) { pr_err("cannot replace xattr\n"); err = -EOPNOTSUPP; goto end_setxattr; } err = hfsplus_create_attr(inode, name, value, size); if (err) goto end_setxattr; } cat_entry_type = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset); if (cat_entry_type == HFSPLUS_FOLDER) { cat_entry_flags = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset + offsetof(struct hfsplus_cat_folder, flags)); cat_entry_flags |= HFSPLUS_XATTR_EXISTS; if (!strcmp_xattr_acl(name)) cat_entry_flags |= HFSPLUS_ACL_EXISTS; hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset + offsetof(struct hfsplus_cat_folder, flags), cat_entry_flags); hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY); } else if (cat_entry_type == HFSPLUS_FILE) { cat_entry_flags = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset + offsetof(struct hfsplus_cat_file, flags)); cat_entry_flags |= HFSPLUS_XATTR_EXISTS; if (!strcmp_xattr_acl(name)) cat_entry_flags |= HFSPLUS_ACL_EXISTS; hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset + offsetof(struct hfsplus_cat_file, flags), cat_entry_flags); hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY); } else { pr_err("invalid catalog entry type\n"); err = -EIO; goto end_setxattr; } end_setxattr: hfs_find_exit(&cat_fd); return err; } static inline int is_osx_xattr(const char *xattr_name) { return !is_known_namespace(xattr_name); } static int name_len(const char *xattr_name, int xattr_name_len) { int len = 
xattr_name_len + 1; if (is_osx_xattr(xattr_name)) len += XATTR_MAC_OSX_PREFIX_LEN; return len; } static int copy_name(char *buffer, const char *xattr_name, int name_len) { int len = name_len; int offset = 0; if (is_osx_xattr(xattr_name)) { strncpy(buffer, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN); offset += XATTR_MAC_OSX_PREFIX_LEN; len += XATTR_MAC_OSX_PREFIX_LEN; } strncpy(buffer + offset, xattr_name, name_len); memset(buffer + offset + name_len, 0, 1); len += 1; return len; } static ssize_t hfsplus_getxattr_finder_info(struct dentry *dentry, void *value, size_t size) { ssize_t res = 0; struct inode *inode = dentry->d_inode; struct hfs_find_data fd; u16 entry_type; u16 folder_rec_len = sizeof(struct DInfo) + sizeof(struct DXInfo); u16 file_rec_len = sizeof(struct FInfo) + sizeof(struct FXInfo); u16 record_len = max(folder_rec_len, file_rec_len); u8 folder_finder_info[sizeof(struct DInfo) + sizeof(struct DXInfo)]; u8 file_finder_info[sizeof(struct FInfo) + sizeof(struct FXInfo)]; if (size >= record_len) { res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd); if (res) { pr_err("can't init xattr find struct\n"); return res; } res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd); if (res) goto end_getxattr_finder_info; entry_type = hfs_bnode_read_u16(fd.bnode, fd.entryoffset); if (entry_type == HFSPLUS_FOLDER) { hfs_bnode_read(fd.bnode, folder_finder_info, fd.entryoffset + offsetof(struct hfsplus_cat_folder, user_info), folder_rec_len); memcpy(value, folder_finder_info, folder_rec_len); res = folder_rec_len; } else if (entry_type == HFSPLUS_FILE) { hfs_bnode_read(fd.bnode, file_finder_info, fd.entryoffset + offsetof(struct hfsplus_cat_file, user_info), file_rec_len); memcpy(value, file_finder_info, file_rec_len); res = file_rec_len; } else { res = -EOPNOTSUPP; goto end_getxattr_finder_info; } } else res = size ? 
-ERANGE : record_len; end_getxattr_finder_info: if (size >= record_len) hfs_find_exit(&fd); return res; } ssize_t hfsplus_getxattr(struct dentry *dentry, const char *name, void *value, size_t size) { struct inode *inode = dentry->d_inode; struct hfs_find_data fd; hfsplus_attr_entry *entry; __be32 xattr_record_type; u32 record_type; u16 record_length = 0; ssize_t res = 0; if ((!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) || HFSPLUS_IS_RSRC(inode)) return -EOPNOTSUPP; if (strncmp(name, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN) == 0) { /* skip "osx." prefix */ name += XATTR_MAC_OSX_PREFIX_LEN; /* * Don't allow retrieving properly prefixed attributes * by prepending them with "osx." */ if (is_known_namespace(name)) return -EOPNOTSUPP; } if (!strcmp_xattr_finder_info(name)) return hfsplus_getxattr_finder_info(dentry, value, size); if (!HFSPLUS_SB(inode->i_sb)->attr_tree) return -EOPNOTSUPP; entry = hfsplus_alloc_attr_entry(); if (!entry) { pr_err("can't allocate xattr entry\n"); return -ENOMEM; } res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->attr_tree, &fd); if (res) { pr_err("can't init xattr find struct\n"); goto failed_getxattr_init; } res = hfsplus_find_attr(inode->i_sb, inode->i_ino, name, &fd); if (res) { if (res == -ENOENT) res = -ENODATA; else pr_err("xattr searching failed\n"); goto out; } hfs_bnode_read(fd.bnode, &xattr_record_type, fd.entryoffset, sizeof(xattr_record_type)); record_type = be32_to_cpu(xattr_record_type); if (record_type == HFSPLUS_ATTR_INLINE_DATA) { record_length = hfs_bnode_read_u16(fd.bnode, fd.entryoffset + offsetof(struct hfsplus_attr_inline_data, length)); if (record_length > HFSPLUS_MAX_INLINE_DATA_SIZE) { pr_err("invalid xattr record size\n"); res = -EIO; goto out; } } else if (record_type == HFSPLUS_ATTR_FORK_DATA || record_type == HFSPLUS_ATTR_EXTENTS) { pr_err("only inline data xattr are supported\n"); res = -EOPNOTSUPP; goto out; } else { pr_err("invalid xattr record\n"); res = -EIO; goto out; } if (size) { 
hfs_bnode_read(fd.bnode, entry, fd.entryoffset, offsetof(struct hfsplus_attr_inline_data, raw_bytes) + record_length); } if (size >= record_length) { memcpy(value, entry->inline_data.raw_bytes, record_length); res = record_length; } else res = size ? -ERANGE : record_length; out: hfs_find_exit(&fd); failed_getxattr_init: hfsplus_destroy_attr_entry(entry); return res; } static inline int can_list(const char *xattr_name) { if (!xattr_name) return 0; return strncmp(xattr_name, XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN) || capable(CAP_SYS_ADMIN); } static ssize_t hfsplus_listxattr_finder_info(struct dentry *dentry, char *buffer, size_t size) { ssize_t res = 0; struct inode *inode = dentry->d_inode; struct hfs_find_data fd; u16 entry_type; u8 folder_finder_info[sizeof(struct DInfo) + sizeof(struct DXInfo)]; u8 file_finder_info[sizeof(struct FInfo) + sizeof(struct FXInfo)]; unsigned long len, found_bit; int xattr_name_len, symbols_count; res = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &fd); if (res) { pr_err("can't init xattr find struct\n"); return res; } res = hfsplus_find_cat(inode->i_sb, inode->i_ino, &fd); if (res) goto end_listxattr_finder_info; entry_type = hfs_bnode_read_u16(fd.bnode, fd.entryoffset); if (entry_type == HFSPLUS_FOLDER) { len = sizeof(struct DInfo) + sizeof(struct DXInfo); hfs_bnode_read(fd.bnode, folder_finder_info, fd.entryoffset + offsetof(struct hfsplus_cat_folder, user_info), len); found_bit = find_first_bit((void *)folder_finder_info, len*8); } else if (entry_type == HFSPLUS_FILE) { len = sizeof(struct FInfo) + sizeof(struct FXInfo); hfs_bnode_read(fd.bnode, file_finder_info, fd.entryoffset + offsetof(struct hfsplus_cat_file, user_info), len); found_bit = find_first_bit((void *)file_finder_info, len*8); } else { res = -EOPNOTSUPP; goto end_listxattr_finder_info; } if (found_bit >= (len*8)) res = 0; else { symbols_count = sizeof(HFSPLUS_XATTR_FINDER_INFO_NAME) - 1; xattr_name_len = name_len(HFSPLUS_XATTR_FINDER_INFO_NAME, 
symbols_count); if (!buffer || !size) { if (can_list(HFSPLUS_XATTR_FINDER_INFO_NAME)) res = xattr_name_len; } else if (can_list(HFSPLUS_XATTR_FINDER_INFO_NAME)) { if (size < xattr_name_len) res = -ERANGE; else { res = copy_name(buffer, HFSPLUS_XATTR_FINDER_INFO_NAME, symbols_count); } } } end_listxattr_finder_info: hfs_find_exit(&fd); return res; } ssize_t hfsplus_listxattr(struct dentry *dentry, char *buffer, size_t size) { ssize_t err; ssize_t res = 0; struct inode *inode = dentry->d_inode; struct hfs_find_data fd; u16 key_len = 0; struct hfsplus_attr_key attr_key; char strbuf[HFSPLUS_ATTR_MAX_STRLEN + XATTR_MAC_OSX_PREFIX_LEN + 1] = {0}; int xattr_name_len; if ((!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) || HFSPLUS_IS_RSRC(inode)) return -EOPNOTSUPP; res = hfsplus_listxattr_finder_info(dentry, buffer, size); if (res < 0) return res; else if (!HFSPLUS_SB(inode->i_sb)->attr_tree) return (res == 0) ? -EOPNOTSUPP : res; err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->attr_tree, &fd); if (err) { pr_err("can't init xattr find struct\n"); return err; } err = hfsplus_find_attr(inode->i_sb, inode->i_ino, NULL, &fd); if (err) { if (err == -ENOENT) { if (res == 0) res = -ENODATA; goto end_listxattr; } else { res = err; goto end_listxattr; } } for (;;) { key_len = hfs_bnode_read_u16(fd.bnode, fd.keyoffset); if (key_len == 0 || key_len > fd.tree->max_key_len) { pr_err("invalid xattr key length: %d\n", key_len); res = -EIO; goto end_listxattr; } hfs_bnode_read(fd.bnode, &attr_key, fd.keyoffset, key_len + sizeof(key_len)); if (be32_to_cpu(attr_key.cnid) != inode->i_ino) goto end_listxattr; xattr_name_len = HFSPLUS_ATTR_MAX_STRLEN; if (hfsplus_uni2asc(inode->i_sb, (const struct hfsplus_unistr *)&fd.key->attr.key_name, strbuf, &xattr_name_len)) { pr_err("unicode conversion failed\n"); res = -EIO; goto end_listxattr; } if (!buffer || !size) { if (can_list(strbuf)) res += name_len(strbuf, xattr_name_len); } else if (can_list(strbuf)) { if (size < (res + name_len(strbuf, 
xattr_name_len))) { res = -ERANGE; goto end_listxattr; } else res += copy_name(buffer + res, strbuf, xattr_name_len); } if (hfs_brec_goto(&fd, 1)) goto end_listxattr; } end_listxattr: hfs_find_exit(&fd); return res; } int hfsplus_removexattr(struct dentry *dentry, const char *name) { int err = 0; struct inode *inode = dentry->d_inode; struct hfs_find_data cat_fd; u16 flags; u16 cat_entry_type; int is_xattr_acl_deleted = 0; int is_all_xattrs_deleted = 0; if ((!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode)) || HFSPLUS_IS_RSRC(inode)) return -EOPNOTSUPP; if (!HFSPLUS_SB(inode->i_sb)->attr_tree) return -EOPNOTSUPP; err = can_set_xattr(inode, name, NULL, 0); if (err) return err; if (strncmp(name, XATTR_MAC_OSX_PREFIX, XATTR_MAC_OSX_PREFIX_LEN) == 0) name += XATTR_MAC_OSX_PREFIX_LEN; if (!strcmp_xattr_finder_info(name)) return -EOPNOTSUPP; err = hfs_find_init(HFSPLUS_SB(inode->i_sb)->cat_tree, &cat_fd); if (err) { pr_err("can't init xattr find struct\n"); return err; } err = hfsplus_find_cat(inode->i_sb, inode->i_ino, &cat_fd); if (err) { pr_err("catalog searching failed\n"); goto end_removexattr; } err = hfsplus_delete_attr(inode, name); if (err) goto end_removexattr; is_xattr_acl_deleted = !strcmp_xattr_acl(name); is_all_xattrs_deleted = !hfsplus_attr_exists(inode, NULL); if (!is_xattr_acl_deleted && !is_all_xattrs_deleted) goto end_removexattr; cat_entry_type = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset); if (cat_entry_type == HFSPLUS_FOLDER) { flags = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset + offsetof(struct hfsplus_cat_folder, flags)); if (is_xattr_acl_deleted) flags &= ~HFSPLUS_ACL_EXISTS; if (is_all_xattrs_deleted) flags &= ~HFSPLUS_XATTR_EXISTS; hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset + offsetof(struct hfsplus_cat_folder, flags), flags); hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY); } else if (cat_entry_type == HFSPLUS_FILE) { flags = hfs_bnode_read_u16(cat_fd.bnode, cat_fd.entryoffset + offsetof(struct 
hfsplus_cat_file, flags)); if (is_xattr_acl_deleted) flags &= ~HFSPLUS_ACL_EXISTS; if (is_all_xattrs_deleted) flags &= ~HFSPLUS_XATTR_EXISTS; hfs_bnode_write_u16(cat_fd.bnode, cat_fd.entryoffset + offsetof(struct hfsplus_cat_file, flags), flags); hfsplus_mark_inode_dirty(inode, HFSPLUS_I_CAT_DIRTY); } else { pr_err("invalid catalog entry type\n"); err = -EIO; goto end_removexattr; } end_removexattr: hfs_find_exit(&cat_fd); return err; } static int hfsplus_osx_getxattr(struct dentry *dentry, const char *name, void *buffer, size_t size, int type) { char xattr_name[HFSPLUS_ATTR_MAX_STRLEN + XATTR_MAC_OSX_PREFIX_LEN + 1] = {0}; size_t len = strlen(name); if (!strcmp(name, "")) return -EINVAL; if (len > HFSPLUS_ATTR_MAX_STRLEN) return -EOPNOTSUPP; strcpy(xattr_name, XATTR_MAC_OSX_PREFIX); strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name); return hfsplus_getxattr(dentry, xattr_name, buffer, size); } static int hfsplus_osx_setxattr(struct dentry *dentry, const char *name, const void *buffer, size_t size, int flags, int type) { char xattr_name[HFSPLUS_ATTR_MAX_STRLEN + XATTR_MAC_OSX_PREFIX_LEN + 1] = {0}; size_t len = strlen(name); if (!strcmp(name, "")) return -EINVAL; if (len > HFSPLUS_ATTR_MAX_STRLEN) return -EOPNOTSUPP; strcpy(xattr_name, XATTR_MAC_OSX_PREFIX); strcpy(xattr_name + XATTR_MAC_OSX_PREFIX_LEN, name); return hfsplus_setxattr(dentry, xattr_name, buffer, size, flags); } static size_t hfsplus_osx_listxattr(struct dentry *dentry, char *list, size_t list_size, const char *name, size_t name_len, int type) { /* * This method is not used. * It is used hfsplus_listxattr() instead of generic_listxattr(). */ return -EOPNOTSUPP; } const struct xattr_handler hfsplus_xattr_osx_handler = { .prefix = XATTR_MAC_OSX_PREFIX, .list = hfsplus_osx_listxattr, .get = hfsplus_osx_getxattr, .set = hfsplus_osx_setxattr, };
gpl-2.0
ptmr3/i717-7005_ICS_Kernel
drivers/gpu/drm/i915/dvo_tfp410.c
2667
7947
/* * Copyright © 2007 Dave Mueller * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS * IN THE SOFTWARE. 
* * Authors: * Dave Mueller <dave.mueller@gmx.ch> * */ #include "dvo.h" /* register definitions according to the TFP410 data sheet */ #define TFP410_VID 0x014C #define TFP410_DID 0x0410 #define TFP410_VID_LO 0x00 #define TFP410_VID_HI 0x01 #define TFP410_DID_LO 0x02 #define TFP410_DID_HI 0x03 #define TFP410_REV 0x04 #define TFP410_CTL_1 0x08 #define TFP410_CTL_1_TDIS (1<<6) #define TFP410_CTL_1_VEN (1<<5) #define TFP410_CTL_1_HEN (1<<4) #define TFP410_CTL_1_DSEL (1<<3) #define TFP410_CTL_1_BSEL (1<<2) #define TFP410_CTL_1_EDGE (1<<1) #define TFP410_CTL_1_PD (1<<0) #define TFP410_CTL_2 0x09 #define TFP410_CTL_2_VLOW (1<<7) #define TFP410_CTL_2_MSEL_MASK (0x7<<4) #define TFP410_CTL_2_MSEL (1<<4) #define TFP410_CTL_2_TSEL (1<<3) #define TFP410_CTL_2_RSEN (1<<2) #define TFP410_CTL_2_HTPLG (1<<1) #define TFP410_CTL_2_MDI (1<<0) #define TFP410_CTL_3 0x0A #define TFP410_CTL_3_DK_MASK (0x7<<5) #define TFP410_CTL_3_DK (1<<5) #define TFP410_CTL_3_DKEN (1<<4) #define TFP410_CTL_3_CTL_MASK (0x7<<1) #define TFP410_CTL_3_CTL (1<<1) #define TFP410_USERCFG 0x0B #define TFP410_DE_DLY 0x32 #define TFP410_DE_CTL 0x33 #define TFP410_DE_CTL_DEGEN (1<<6) #define TFP410_DE_CTL_VSPOL (1<<5) #define TFP410_DE_CTL_HSPOL (1<<4) #define TFP410_DE_CTL_DEDLY8 (1<<0) #define TFP410_DE_TOP 0x34 #define TFP410_DE_CNT_LO 0x36 #define TFP410_DE_CNT_HI 0x37 #define TFP410_DE_LIN_LO 0x38 #define TFP410_DE_LIN_HI 0x39 #define TFP410_H_RES_LO 0x3A #define TFP410_H_RES_HI 0x3B #define TFP410_V_RES_LO 0x3C #define TFP410_V_RES_HI 0x3D struct tfp410_priv { bool quiet; }; static bool tfp410_readb(struct intel_dvo_device *dvo, int addr, uint8_t *ch) { struct tfp410_priv *tfp = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; u8 out_buf[2]; u8 in_buf[2]; struct i2c_msg msgs[] = { { .addr = dvo->slave_addr, .flags = 0, .len = 1, .buf = out_buf, }, { .addr = dvo->slave_addr, .flags = I2C_M_RD, .len = 1, .buf = in_buf, } }; out_buf[0] = addr; out_buf[1] = 0; if (i2c_transfer(adapter, msgs, 2) == 2) { 
*ch = in_buf[0]; return true; }; if (!tfp->quiet) { DRM_DEBUG_KMS("Unable to read register 0x%02x from %s:%02x.\n", addr, adapter->name, dvo->slave_addr); } return false; } static bool tfp410_writeb(struct intel_dvo_device *dvo, int addr, uint8_t ch) { struct tfp410_priv *tfp = dvo->dev_priv; struct i2c_adapter *adapter = dvo->i2c_bus; uint8_t out_buf[2]; struct i2c_msg msg = { .addr = dvo->slave_addr, .flags = 0, .len = 2, .buf = out_buf, }; out_buf[0] = addr; out_buf[1] = ch; if (i2c_transfer(adapter, &msg, 1) == 1) return true; if (!tfp->quiet) { DRM_DEBUG_KMS("Unable to write register 0x%02x to %s:%d.\n", addr, adapter->name, dvo->slave_addr); } return false; } static int tfp410_getid(struct intel_dvo_device *dvo, int addr) { uint8_t ch1, ch2; if (tfp410_readb(dvo, addr+0, &ch1) && tfp410_readb(dvo, addr+1, &ch2)) return ((ch2 << 8) & 0xFF00) | (ch1 & 0x00FF); return -1; } /* Ti TFP410 driver for chip on i2c bus */ static bool tfp410_init(struct intel_dvo_device *dvo, struct i2c_adapter *adapter) { /* this will detect the tfp410 chip on the specified i2c bus */ struct tfp410_priv *tfp; int id; tfp = kzalloc(sizeof(struct tfp410_priv), GFP_KERNEL); if (tfp == NULL) return false; dvo->i2c_bus = adapter; dvo->dev_priv = tfp; tfp->quiet = true; if ((id = tfp410_getid(dvo, TFP410_VID_LO)) != TFP410_VID) { DRM_DEBUG_KMS("tfp410 not detected got VID %X: from %s " "Slave %d.\n", id, adapter->name, dvo->slave_addr); goto out; } if ((id = tfp410_getid(dvo, TFP410_DID_LO)) != TFP410_DID) { DRM_DEBUG_KMS("tfp410 not detected got DID %X: from %s " "Slave %d.\n", id, adapter->name, dvo->slave_addr); goto out; } tfp->quiet = false; return true; out: kfree(tfp); return false; } static enum drm_connector_status tfp410_detect(struct intel_dvo_device *dvo) { enum drm_connector_status ret = connector_status_disconnected; uint8_t ctl2; if (tfp410_readb(dvo, TFP410_CTL_2, &ctl2)) { if (ctl2 & TFP410_CTL_2_RSEN) ret = connector_status_connected; else ret = 
connector_status_disconnected; } return ret; } static enum drm_mode_status tfp410_mode_valid(struct intel_dvo_device *dvo, struct drm_display_mode *mode) { return MODE_OK; } static void tfp410_mode_set(struct intel_dvo_device *dvo, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode) { /* As long as the basics are set up, since we don't have clock dependencies * in the mode setup, we can just leave the registers alone and everything * will work fine. */ /* don't do much */ return; } /* set the tfp410 power state */ static void tfp410_dpms(struct intel_dvo_device *dvo, int mode) { uint8_t ctl1; if (!tfp410_readb(dvo, TFP410_CTL_1, &ctl1)) return; if (mode == DRM_MODE_DPMS_ON) ctl1 |= TFP410_CTL_1_PD; else ctl1 &= ~TFP410_CTL_1_PD; tfp410_writeb(dvo, TFP410_CTL_1, ctl1); } static void tfp410_dump_regs(struct intel_dvo_device *dvo) { uint8_t val, val2; tfp410_readb(dvo, TFP410_REV, &val); DRM_LOG_KMS("TFP410_REV: 0x%02X\n", val); tfp410_readb(dvo, TFP410_CTL_1, &val); DRM_LOG_KMS("TFP410_CTL1: 0x%02X\n", val); tfp410_readb(dvo, TFP410_CTL_2, &val); DRM_LOG_KMS("TFP410_CTL2: 0x%02X\n", val); tfp410_readb(dvo, TFP410_CTL_3, &val); DRM_LOG_KMS("TFP410_CTL3: 0x%02X\n", val); tfp410_readb(dvo, TFP410_USERCFG, &val); DRM_LOG_KMS("TFP410_USERCFG: 0x%02X\n", val); tfp410_readb(dvo, TFP410_DE_DLY, &val); DRM_LOG_KMS("TFP410_DE_DLY: 0x%02X\n", val); tfp410_readb(dvo, TFP410_DE_CTL, &val); DRM_LOG_KMS("TFP410_DE_CTL: 0x%02X\n", val); tfp410_readb(dvo, TFP410_DE_TOP, &val); DRM_LOG_KMS("TFP410_DE_TOP: 0x%02X\n", val); tfp410_readb(dvo, TFP410_DE_CNT_LO, &val); tfp410_readb(dvo, TFP410_DE_CNT_HI, &val2); DRM_LOG_KMS("TFP410_DE_CNT: 0x%02X%02X\n", val2, val); tfp410_readb(dvo, TFP410_DE_LIN_LO, &val); tfp410_readb(dvo, TFP410_DE_LIN_HI, &val2); DRM_LOG_KMS("TFP410_DE_LIN: 0x%02X%02X\n", val2, val); tfp410_readb(dvo, TFP410_H_RES_LO, &val); tfp410_readb(dvo, TFP410_H_RES_HI, &val2); DRM_LOG_KMS("TFP410_H_RES: 0x%02X%02X\n", val2, val); tfp410_readb(dvo, 
TFP410_V_RES_LO, &val); tfp410_readb(dvo, TFP410_V_RES_HI, &val2); DRM_LOG_KMS("TFP410_V_RES: 0x%02X%02X\n", val2, val); } static void tfp410_destroy(struct intel_dvo_device *dvo) { struct tfp410_priv *tfp = dvo->dev_priv; if (tfp) { kfree(tfp); dvo->dev_priv = NULL; } } struct intel_dvo_dev_ops tfp410_ops = { .init = tfp410_init, .detect = tfp410_detect, .mode_valid = tfp410_mode_valid, .mode_set = tfp410_mode_set, .dpms = tfp410_dpms, .dump_regs = tfp410_dump_regs, .destroy = tfp410_destroy, };
gpl-2.0
prasad-joshi/logfs
drivers/of/device.c
3179
4212
#include <linux/string.h> #include <linux/kernel.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/init.h> #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/slab.h> #include <asm/errno.h> /** * of_match_device - Tell if a struct device matches an of_device_id list * @ids: array of of device match structures to search in * @dev: the of device structure to match against * * Used by a driver to check whether an platform_device present in the * system is in its list of supported devices. */ const struct of_device_id *of_match_device(const struct of_device_id *matches, const struct device *dev) { if ((!matches) || (!dev->of_node)) return NULL; return of_match_node(matches, dev->of_node); } EXPORT_SYMBOL(of_match_device); struct platform_device *of_dev_get(struct platform_device *dev) { struct device *tmp; if (!dev) return NULL; tmp = get_device(&dev->dev); if (tmp) return to_platform_device(tmp); else return NULL; } EXPORT_SYMBOL(of_dev_get); void of_dev_put(struct platform_device *dev) { if (dev) put_device(&dev->dev); } EXPORT_SYMBOL(of_dev_put); int of_device_add(struct platform_device *ofdev) { BUG_ON(ofdev->dev.of_node == NULL); /* name and id have to be set so that the platform bus doesn't get * confused on matching */ ofdev->name = dev_name(&ofdev->dev); ofdev->id = -1; /* device_add will assume that this device is on the same node as * the parent. 
If there is no parent defined, set the node * explicitly */ if (!ofdev->dev.parent) set_dev_node(&ofdev->dev, of_node_to_nid(ofdev->dev.of_node)); return device_add(&ofdev->dev); } int of_device_register(struct platform_device *pdev) { device_initialize(&pdev->dev); return of_device_add(pdev); } EXPORT_SYMBOL(of_device_register); void of_device_unregister(struct platform_device *ofdev) { device_unregister(&ofdev->dev); } EXPORT_SYMBOL(of_device_unregister); ssize_t of_device_get_modalias(struct device *dev, char *str, ssize_t len) { const char *compat; int cplen, i; ssize_t tsize, csize, repend; /* Name & Type */ csize = snprintf(str, len, "of:N%sT%s", dev->of_node->name, dev->of_node->type); /* Get compatible property if any */ compat = of_get_property(dev->of_node, "compatible", &cplen); if (!compat) return csize; /* Find true end (we tolerate multiple \0 at the end */ for (i = (cplen - 1); i >= 0 && !compat[i]; i--) cplen--; if (!cplen) return csize; cplen++; /* Check space (need cplen+1 chars including final \0) */ tsize = csize + cplen; repend = tsize; if (csize >= len) /* @ the limit, all is already filled */ return tsize; if (tsize >= len) { /* limit compat list */ cplen = len - csize - 1; repend = len; } /* Copy and do char replacement */ memcpy(&str[csize + 1], compat, cplen); for (i = csize; i < repend; i++) { char c = str[i]; if (c == '\0') str[i] = 'C'; else if (c == ' ') str[i] = '_'; } return tsize; } /** * of_device_uevent - Display OF related uevent information */ int of_device_uevent(struct device *dev, struct kobj_uevent_env *env) { const char *compat; int seen = 0, cplen, sl; if ((!dev) || (!dev->of_node)) return -ENODEV; if (add_uevent_var(env, "OF_NAME=%s", dev->of_node->name)) return -ENOMEM; if (add_uevent_var(env, "OF_TYPE=%s", dev->of_node->type)) return -ENOMEM; /* Since the compatible field can contain pretty much anything * it's not really legal to split it out with commas. 
We split it * up using a number of environment variables instead. */ compat = of_get_property(dev->of_node, "compatible", &cplen); while (compat && *compat && cplen > 0) { if (add_uevent_var(env, "OF_COMPATIBLE_%d=%s", seen, compat)) return -ENOMEM; sl = strlen(compat) + 1; compat += sl; cplen -= sl; seen++; } if (add_uevent_var(env, "OF_COMPATIBLE_N=%d", seen)) return -ENOMEM; /* modalias is trickier, we add it in 2 steps */ if (add_uevent_var(env, "MODALIAS=")) return -ENOMEM; sl = of_device_get_modalias(dev, &env->buf[env->buflen-1], sizeof(env->buf) - env->buflen); if (sl >= (sizeof(env->buf) - env->buflen)) return -ENOMEM; env->buflen += sl; return 0; }
gpl-2.0
cuteprince/jb_kernel_3.0.16_htc_golfu
drivers/s390/net/ctcm_sysfs.c
3179
5395
/* * drivers/s390/net/ctcm_sysfs.c * * Copyright IBM Corp. 2007, 2007 * Authors: Peter Tiedemann (ptiedem@de.ibm.com) * */ #undef DEBUG #undef DEBUGDATA #undef DEBUGCCW #define KMSG_COMPONENT "ctcm" #define pr_fmt(fmt) KMSG_COMPONENT ": " fmt #include <linux/sysfs.h> #include <linux/slab.h> #include "ctcm_main.h" /* * sysfs attributes */ static ssize_t ctcm_buffer_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ctcm_priv *priv = dev_get_drvdata(dev); if (!priv) return -ENODEV; return sprintf(buf, "%d\n", priv->buffer_size); } static ssize_t ctcm_buffer_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct net_device *ndev; int bs1; struct ctcm_priv *priv = dev_get_drvdata(dev); ndev = priv->channel[CTCM_READ]->netdev; if (!(priv && priv->channel[CTCM_READ] && ndev)) { CTCM_DBF_TEXT(SETUP, CTC_DBF_ERROR, "bfnondev"); return -ENODEV; } sscanf(buf, "%u", &bs1); if (bs1 > CTCM_BUFSIZE_LIMIT) goto einval; if (bs1 < (576 + LL_HEADER_LENGTH + 2)) goto einval; priv->buffer_size = bs1; /* just to overwrite the default */ if ((ndev->flags & IFF_RUNNING) && (bs1 < (ndev->mtu + LL_HEADER_LENGTH + 2))) goto einval; priv->channel[CTCM_READ]->max_bufsize = bs1; priv->channel[CTCM_WRITE]->max_bufsize = bs1; if (!(ndev->flags & IFF_RUNNING)) ndev->mtu = bs1 - LL_HEADER_LENGTH - 2; priv->channel[CTCM_READ]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED; priv->channel[CTCM_WRITE]->flags |= CHANNEL_FLAGS_BUFSIZE_CHANGED; CTCM_DBF_DEV(SETUP, ndev, buf); return count; einval: CTCM_DBF_DEV(SETUP, ndev, "buff_err"); return -EINVAL; } static void ctcm_print_statistics(struct ctcm_priv *priv) { char *sbuf; char *p; if (!priv) return; sbuf = kmalloc(2048, GFP_KERNEL); if (sbuf == NULL) return; p = sbuf; p += sprintf(p, " Device FSM state: %s\n", fsm_getstate_str(priv->fsm)); p += sprintf(p, " RX channel FSM state: %s\n", fsm_getstate_str(priv->channel[CTCM_READ]->fsm)); p += sprintf(p, " TX channel FSM state: %s\n", 
fsm_getstate_str(priv->channel[CTCM_WRITE]->fsm)); p += sprintf(p, " Max. TX buffer used: %ld\n", priv->channel[WRITE]->prof.maxmulti); p += sprintf(p, " Max. chained SKBs: %ld\n", priv->channel[WRITE]->prof.maxcqueue); p += sprintf(p, " TX single write ops: %ld\n", priv->channel[WRITE]->prof.doios_single); p += sprintf(p, " TX multi write ops: %ld\n", priv->channel[WRITE]->prof.doios_multi); p += sprintf(p, " Netto bytes written: %ld\n", priv->channel[WRITE]->prof.txlen); p += sprintf(p, " Max. TX IO-time: %ld\n", priv->channel[WRITE]->prof.tx_time); printk(KERN_INFO "Statistics for %s:\n%s", priv->channel[CTCM_WRITE]->netdev->name, sbuf); kfree(sbuf); return; } static ssize_t stats_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ctcm_priv *priv = dev_get_drvdata(dev); if (!priv) return -ENODEV; ctcm_print_statistics(priv); return sprintf(buf, "0\n"); } static ssize_t stats_write(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct ctcm_priv *priv = dev_get_drvdata(dev); if (!priv) return -ENODEV; /* Reset statistics */ memset(&priv->channel[WRITE]->prof, 0, sizeof(priv->channel[CTCM_WRITE]->prof)); return count; } static ssize_t ctcm_proto_show(struct device *dev, struct device_attribute *attr, char *buf) { struct ctcm_priv *priv = dev_get_drvdata(dev); if (!priv) return -ENODEV; return sprintf(buf, "%d\n", priv->protocol); } static ssize_t ctcm_proto_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int value; struct ctcm_priv *priv = dev_get_drvdata(dev); if (!priv) return -ENODEV; sscanf(buf, "%u", &value); if (!((value == CTCM_PROTO_S390) || (value == CTCM_PROTO_LINUX) || (value == CTCM_PROTO_MPC) || (value == CTCM_PROTO_OS390))) return -EINVAL; priv->protocol = value; CTCM_DBF_DEV(SETUP, dev, buf); return count; } const char *ctcm_type[] = { "not a channel", "CTC/A", "FICON channel", "ESCON channel", "unknown channel type", "unsupported channel type", }; 
/* Show the channel type string for this ccwgroup device, translated
 * from the CU driver_info of the first ccw device. */
static ssize_t ctcm_type_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ccwgroup_device *cgdev;

	cgdev = to_ccwgroupdev(dev);
	if (!cgdev)
		return -ENODEV;

	return sprintf(buf, "%s\n",
			ctcm_type[cgdev->cdev[0]->id.driver_info]);
}

static DEVICE_ATTR(buffer, 0644, ctcm_buffer_show, ctcm_buffer_write);
static DEVICE_ATTR(protocol, 0644, ctcm_proto_show, ctcm_proto_store);
static DEVICE_ATTR(type, 0444, ctcm_type_show, NULL);
static DEVICE_ATTR(stats, 0644, stats_show, stats_write);

/* Attributes published as a group by ctcm_add_files(); the "stats"
 * attribute is handled separately via ctcm_add_attributes(). */
static struct attribute *ctcm_attr[] = {
	&dev_attr_protocol.attr,
	&dev_attr_type.attr,
	&dev_attr_buffer.attr,
	NULL,
};

static struct attribute_group ctcm_attr_group = {
	.attrs = ctcm_attr,
};

/* Create the standalone "stats" sysfs file. */
int ctcm_add_attributes(struct device *dev)
{
	int rc;

	rc = device_create_file(dev, &dev_attr_stats);

	return rc;
}

/* Remove the standalone "stats" sysfs file. */
void ctcm_remove_attributes(struct device *dev)
{
	device_remove_file(dev, &dev_attr_stats);
}

/* Register the default attribute group (protocol/type/buffer). */
int ctcm_add_files(struct device *dev)
{
	return sysfs_create_group(&dev->kobj, &ctcm_attr_group);
}

/* Unregister the default attribute group. */
void ctcm_remove_files(struct device *dev)
{
	sysfs_remove_group(&dev->kobj, &ctcm_attr_group);
}
gpl-2.0
ihadzic/linux-vcrtcm
drivers/video/matrox/matroxfb_crtc2.c
4203
20397
/*
 *
 * Hardware accelerated Matrox Millennium I, II, Mystique, G100, G200, G400 and G450.
 *
 * (c) 1998-2002 Petr Vandrovec <vandrove@vc.cvut.cz>
 *
 * Portions Copyright (c) 2001 Matrox Graphics Inc.
 *
 * Version: 1.65 2002/08/14
 *
 */

#include "matroxfb_maven.h"
#include "matroxfb_crtc2.h"
#include "matroxfb_misc.h"
#include "matroxfb_DAC1064.h"
#include <linux/matroxfb.h>
#include <linux/slab.h>
#include <linux/uaccess.h>

/* **************************************************** */

static int mem = 8192;

module_param(mem, int, 0);
MODULE_PARM_DESC(mem, "Memory size reserved for dualhead (default=8MB)");

/* **************************************************** */

/* Store one pseudo-palette entry (only 16 truecolor entries exist). */
static int matroxfb_dh_setcolreg(unsigned regno, unsigned red, unsigned green,
		unsigned blue, unsigned transp, struct fb_info* info) {
	u_int32_t col;
#define m2info (container_of(info, struct matroxfb_dh_fb_info, fbcon))

	if (regno >= 16)
		return 1;
	if (m2info->fbcon.var.grayscale) {
		/* gray = 0.30*R + 0.59*G + 0.11*B */
		red = green = blue = (red * 77 + green * 151 + blue * 28) >> 8;
	}
	red = CNVT_TOHW(red, m2info->fbcon.var.red.length);
	green = CNVT_TOHW(green, m2info->fbcon.var.green.length);
	blue = CNVT_TOHW(blue, m2info->fbcon.var.blue.length);
	transp = CNVT_TOHW(transp, m2info->fbcon.var.transp.length);

	col = (red << m2info->fbcon.var.red.offset) |
	      (green << m2info->fbcon.var.green.offset) |
	      (blue << m2info->fbcon.var.blue.offset) |
	      (transp << m2info->fbcon.var.transp.offset);

	switch (m2info->fbcon.var.bits_per_pixel) {
		case 16:
			m2info->cmap[regno] = col | (col << 16);
			break;
		case 32:
			m2info->cmap[regno] = col;
			break;
	}
	return 0;
#undef m2info
}

/* Program the CRTC2 timing and control registers for the given mode.
 * @mode is the pixel depth (15/16/32), @pos the framebuffer byte offset. */
static void matroxfb_dh_restore(struct matroxfb_dh_fb_info* m2info,
		struct my_timming* mt,
		int mode,
		unsigned int pos) {
	u_int32_t tmp;
	u_int32_t datactl;
	struct matrox_fb_info *minfo = m2info->primary_dev;

	switch (mode) {
		case 15:
			tmp = 0x00200000;
			break;
		case 16:
			tmp = 0x00400000;
			break;
/*		case 32: */
		default:
			tmp = 0x00800000;
			break;
	}
	tmp |= 0x00000001;	/* enable CRTC2 */
	datactl = 0;
	if (minfo->outputs[1].src == MATROXFB_SRC_CRTC2) {
		if (minfo->devflags.g450dac) {
			tmp |= 0x00000006; /* source from secondary pixel PLL */
			/* no vidrst when in monitor mode */
			if (minfo->outputs[1].mode != MATROXFB_OUTPUT_MODE_MONITOR) {
				tmp |= 0xC0001000; /* Enable H/V vidrst */
			}
		} else {
			tmp |= 0x00000002; /* source from VDOCLK */
			tmp |= 0xC0000000; /* enable vvidrst & hvidrst */
			/* MGA TVO is our clock source */
		}
	} else if (minfo->outputs[0].src == MATROXFB_SRC_CRTC2) {
		tmp |= 0x00000004; /* source from pixclock */
		/* PIXPLL is our clock source */
	}
	if (minfo->outputs[0].src == MATROXFB_SRC_CRTC2) {
		tmp |= 0x00100000;	/* connect CRTC2 to DAC */
	}
	if (mt->interlaced) {
		tmp |= 0x02000000;	/* interlaced, second field is bigger, as G450 apparently ignores it */
		mt->VDisplay >>= 1;
		mt->VSyncStart >>= 1;
		mt->VSyncEnd >>= 1;
		mt->VTotal >>= 1;
	}
	if ((mt->HTotal & 7) == 2) {
		datactl |= 0x00000010;
		mt->HTotal &= ~7;
	}
	tmp |= 0x10000000;	/* 0x10000000 is VIDRST polarity */
	mga_outl(0x3C14, ((mt->HDisplay - 8) << 16) | (mt->HTotal - 8));
	mga_outl(0x3C18, ((mt->HSyncEnd - 8) << 16) | (mt->HSyncStart - 8));
	mga_outl(0x3C1C, ((mt->VDisplay - 1) << 16) | (mt->VTotal - 1));
	mga_outl(0x3C20, ((mt->VSyncEnd - 1) << 16) | (mt->VSyncStart - 1));
	mga_outl(0x3C24, ((mt->VSyncStart) << 16) | (mt->HSyncStart));	/* preload */
	{
		u_int32_t linelen = m2info->fbcon.var.xres_virtual * (m2info->fbcon.var.bits_per_pixel >> 3);
		if (tmp & 0x02000000) {
			/* field #0 is smaller, so... */
			mga_outl(0x3C2C, pos);			/* field #1 vmemory start */
			mga_outl(0x3C28, pos + linelen);	/* field #0 vmemory start */
			linelen <<= 1;
			m2info->interlaced = 1;
		} else {
			mga_outl(0x3C28, pos);		/* vmemory start */
			m2info->interlaced = 0;
		}
		mga_outl(0x3C40, linelen);
	}
	mga_outl(0x3C4C, datactl);	/* data control */
	if (tmp & 0x02000000) {
		int i;

		/* interlace: start with the bit clear, then wait for two
		 * vertical-counter wraparounds before enabling it */
		mga_outl(0x3C10, tmp & ~0x02000000);
		for (i = 0; i < 2; i++) {
			unsigned int nl;
			unsigned int lastl = 0;

			while ((nl = mga_inl(0x3C48) & 0xFFF) >= lastl) {
				lastl = nl;
			}
		}
	}
	mga_outl(0x3C10, tmp);
	minfo->hw.crtc2.ctl = tmp;

	tmp = mt->VDisplay << 16;	/* line compare */
	if (mt->sync & FB_SYNC_HOR_HIGH_ACT)
		tmp |= 0x00000100;
	if (mt->sync & FB_SYNC_VERT_HIGH_ACT)
		tmp |= 0x00000200;
	mga_outl(0x3C44, tmp);
}

/* Disable CRTC2 and route CRTC1 back to DAC1. */
static void matroxfb_dh_disable(struct matroxfb_dh_fb_info* m2info) {
	struct matrox_fb_info *minfo = m2info->primary_dev;

	mga_outl(0x3C10, 0x00000004);	/* disable CRTC2, CRTC1->DAC1, PLL as clock source */
	minfo->hw.crtc2.ctl = 0x00000004;
}

/* Apply a new pan offset to the CRTC2 start-address registers. */
static void matroxfb_dh_pan_var(struct matroxfb_dh_fb_info* m2info,
		struct fb_var_screeninfo* var) {
	unsigned int pos;
	unsigned int linelen;
	unsigned int pixelsize;
	struct matrox_fb_info *minfo = m2info->primary_dev;

	m2info->fbcon.var.xoffset = var->xoffset;
	m2info->fbcon.var.yoffset = var->yoffset;
	pixelsize = m2info->fbcon.var.bits_per_pixel >> 3;
	linelen = m2info->fbcon.var.xres_virtual * pixelsize;
	pos = m2info->fbcon.var.yoffset * linelen + m2info->fbcon.var.xoffset * pixelsize;
	pos += m2info->video.offbase;
	if (m2info->interlaced) {
		mga_outl(0x3C2C, pos);
		mga_outl(0x3C28, pos + linelen);
	} else {
		mga_outl(0x3C28, pos);
	}
}

/* Validate and normalize a var for CRTC2; fills in *visual,
 * *video_cmap_len and *mode (pixel depth: 15/16/32). */
static int matroxfb_dh_decode_var(struct matroxfb_dh_fb_info* m2info,
		struct fb_var_screeninfo* var,
		int *visual,
		int *video_cmap_len,
		int *mode) {
	unsigned int mask;
	unsigned int memlen;
	unsigned int vramlen;

	switch (var->bits_per_pixel) {
		case 16:	mask = 0x1F;
				break;
		case 32:	mask = 0x0F;
				break;
		default:	return -EINVAL;
	}
	vramlen = m2info->video.len_usable;
	if (var->yres_virtual < var->yres)
		var->yres_virtual = var->yres;
	if (var->xres_virtual < var->xres)
		var->xres_virtual = var->xres;
	var->xres_virtual = (var->xres_virtual + mask) & ~mask;
	if (var->yres_virtual > 32767)
		return -EINVAL;
	memlen = var->xres_virtual * var->yres_virtual * (var->bits_per_pixel >> 3);
	if (memlen > vramlen)
		return -EINVAL;
	if (var->xoffset + var->xres > var->xres_virtual)
		var->xoffset = var->xres_virtual - var->xres;
	if (var->yoffset + var->yres > var->yres_virtual)
		var->yoffset = var->yres_virtual - var->yres;

	/* horizontal timing parameters must be multiples of 8 */
	var->xres &= ~7;
	var->left_margin &= ~7;
	var->right_margin &= ~7;
	var->hsync_len &= ~7;

	*mode = var->bits_per_pixel;
	if (var->bits_per_pixel == 16) {
		if (var->green.length == 5) {
			var->red.offset = 10;
			var->red.length = 5;
			var->green.offset = 5;
			var->green.length = 5;
			var->blue.offset = 0;
			var->blue.length = 5;
			var->transp.offset = 15;
			var->transp.length = 1;
			*mode = 15;
		} else {
			var->red.offset = 11;
			var->red.length = 5;
			var->green.offset = 5;
			var->green.length = 6;
			var->blue.offset = 0;
			var->blue.length = 5;
			var->transp.offset = 0;
			var->transp.length = 0;
		}
	} else {
		var->red.offset = 16;
		var->red.length = 8;
		var->green.offset = 8;
		var->green.length = 8;
		var->blue.offset = 0;
		var->blue.length = 8;
		var->transp.offset = 24;
		var->transp.length = 8;
	}
	*visual = FB_VISUAL_TRUECOLOR;
	*video_cmap_len = 16;
	return 0;
}

/* fb open: forward to the primary head so it stays alive with us. */
static int matroxfb_dh_open(struct fb_info* info, int user) {
#define m2info (container_of(info, struct matroxfb_dh_fb_info, fbcon))
	struct matrox_fb_info *minfo = m2info->primary_dev;

	if (minfo) {
		int err;

		if (minfo->dead) {
			return -ENXIO;
		}
		err = minfo->fbops.fb_open(&minfo->fbcon, user);
		if (err) {
			return err;
		}
	}
	return 0;
#undef m2info
}

/* fb release: drop the reference taken on the primary head in open. */
static int matroxfb_dh_release(struct fb_info* info, int user) {
#define m2info (container_of(info, struct matroxfb_dh_fb_info, fbcon))
	int err = 0;
	struct matrox_fb_info *minfo = m2info->primary_dev;

	if (minfo) {
		err = minfo->fbops.fb_release(&minfo->fbcon, user);
	}
	return err;
#undef m2info
}

/*
 * This function is called before the register_framebuffer so
 * no locking is needed.
 */
static void matroxfb_dh_init_fix(struct matroxfb_dh_fb_info *m2info) {
	struct fb_fix_screeninfo *fix = &m2info->fbcon.fix;

	strcpy(fix->id, "MATROX DH");

	fix->smem_start = m2info->video.base;
	fix->smem_len = m2info->video.len_usable;
	fix->ypanstep = 1;
	fix->ywrapstep = 0;
	fix->xpanstep = 8;	/* TBD */
	fix->mmio_start = m2info->mmio.base;
	fix->mmio_len = m2info->mmio.len;
	fix->accel = 0;		/* no accel... */
}

/* fb check_var: validate only, discard the decoded outputs. */
static int matroxfb_dh_check_var(struct fb_var_screeninfo* var, struct fb_info* info) {
#define m2info (container_of(info, struct matroxfb_dh_fb_info, fbcon))
	int visual;
	int cmap_len;
	int mode;

	return matroxfb_dh_decode_var(m2info, var, &visual, &cmap_len, &mode);
#undef m2info
}

/* fb set_par: decode the var, then program CRTC2 and (re)start every
 * output routed to it, under the altout lock. */
static int matroxfb_dh_set_par(struct fb_info* info) {
#define m2info (container_of(info, struct matroxfb_dh_fb_info, fbcon))
	int visual;
	int cmap_len;
	int mode;
	int err;
	struct fb_var_screeninfo* var = &info->var;
	struct matrox_fb_info *minfo = m2info->primary_dev;

	if ((err = matroxfb_dh_decode_var(m2info, var, &visual, &cmap_len, &mode)) != 0)
		return err;
	/* cmap */
	{
		m2info->fbcon.screen_base = vaddr_va(m2info->video.vbase);
		m2info->fbcon.fix.visual = visual;
		m2info->fbcon.fix.type = FB_TYPE_PACKED_PIXELS;
		m2info->fbcon.fix.type_aux = 0;
		m2info->fbcon.fix.line_length = (var->xres_virtual * var->bits_per_pixel) >> 3;
	}
	{
		struct my_timming mt;
		unsigned int pos;
		int out;
		int cnt;

		matroxfb_var2my(&m2info->fbcon.var, &mt);
		mt.crtc = MATROXFB_SRC_CRTC2;
		/* CRTC2 delay */
		mt.delay = 34;
		pos = (m2info->fbcon.var.yoffset * m2info->fbcon.var.xres_virtual + m2info->fbcon.var.xoffset) * m2info->fbcon.var.bits_per_pixel >> 3;
		pos += m2info->video.offbase;
		cnt = 0;
		down_read(&minfo->altout.lock);
		for (out = 0; out < MATROXFB_MAX_OUTPUTS; out++) {
			if (minfo->outputs[out].src == MATROXFB_SRC_CRTC2) {
				cnt++;
				if (minfo->outputs[out].output->compute) {
					minfo->outputs[out].output->compute(minfo->outputs[out].data, &mt);
				}
			}
		}
		minfo->crtc2.pixclock = mt.pixclock;
		minfo->crtc2.mnp = mt.mnp;
		up_read(&minfo->altout.lock);
		if (cnt) {
			matroxfb_dh_restore(m2info, &mt, mode, pos);
		} else {
			/* nothing routed to CRTC2 -> shut it down */
			matroxfb_dh_disable(m2info);
		}
		DAC1064_global_init(minfo);
		DAC1064_global_restore(minfo);
		down_read(&minfo->altout.lock);
		for (out = 0; out < MATROXFB_MAX_OUTPUTS; out++) {
			if (minfo->outputs[out].src == MATROXFB_SRC_CRTC2 &&
			    minfo->outputs[out].output->program) {
				minfo->outputs[out].output->program(minfo->outputs[out].data);
			}
		}
		for (out = 0; out < MATROXFB_MAX_OUTPUTS; out++) {
			if (minfo->outputs[out].src == MATROXFB_SRC_CRTC2 &&
			    minfo->outputs[out].output->start) {
				minfo->outputs[out].output->start(minfo->outputs[out].data);
			}
		}
		up_read(&minfo->altout.lock);
	}
	m2info->initialized = 1;
	return 0;
#undef m2info
}

/* fb pan_display: delegate to the register-level helper. */
static int matroxfb_dh_pan_display(struct fb_var_screeninfo* var, struct fb_info* info) {
#define m2info (container_of(info, struct matroxfb_dh_fb_info, fbcon))
	matroxfb_dh_pan_var(m2info, var);
	return 0;
#undef m2info
}

/* Fill a struct fb_vblank from the CRTC2 vertical counter. */
static int matroxfb_dh_get_vblank(const struct matroxfb_dh_fb_info* m2info, struct fb_vblank* vblank) {
	struct matrox_fb_info *minfo = m2info->primary_dev;

	matroxfb_enable_irq(minfo, 0);
	memset(vblank, 0, sizeof(*vblank));
	vblank->flags = FB_VBLANK_HAVE_VCOUNT | FB_VBLANK_HAVE_VBLANK;
	/* mask out reserved bits + field number (odd/even) */
	vblank->vcount = mga_inl(0x3C48) & 0x000007FF;
	/* compatibility stuff */
	if (vblank->vcount >= m2info->fbcon.var.yres)
		vblank->flags |= FB_VBLANK_VBLANKING;
	if (test_bit(0, &minfo->irq_flags)) {
		vblank->flags |= FB_VBLANK_HAVE_COUNT;
		/* Only one writer, aligned int value...
		   it should work without lock and without atomic_t */
		vblank->count = minfo->crtc2.vsync.cnt;
	}
	return 0;
}

/* fb ioctl: vblank queries plus the matroxfb output-routing ioctls. */
static int matroxfb_dh_ioctl(struct fb_info *info,
		unsigned int cmd,
		unsigned long arg)
{
#define m2info (container_of(info, struct matroxfb_dh_fb_info, fbcon))
	struct matrox_fb_info *minfo = m2info->primary_dev;

	DBG(__func__)

	switch (cmd) {
		case FBIOGET_VBLANK:
			{
				struct fb_vblank vblank;
				int err;

				err = matroxfb_dh_get_vblank(m2info, &vblank);
				if (err)
					return err;
				if (copy_to_user((void __user *)arg, &vblank, sizeof(vblank)))
					return -EFAULT;
				return 0;
			}
		case FBIO_WAITFORVSYNC:
			{
				u_int32_t crt;

				if (get_user(crt, (u_int32_t __user *)arg))
					return -EFAULT;
				if (crt != 0)
					return -ENODEV;
				return matroxfb_wait_for_sync(minfo, 1);
			}
		case MATROXFB_SET_OUTPUT_MODE:
		case MATROXFB_GET_OUTPUT_MODE:
		case MATROXFB_GET_ALL_OUTPUTS:
			{
				/* these operate on outputs globally; forward
				 * them to the primary head's ioctl */
				return minfo->fbcon.fbops->fb_ioctl(&minfo->fbcon, cmd, arg);
			}
		case MATROXFB_SET_OUTPUT_CONNECTION:
			{
				u_int32_t tmp;
				int out;
				int changes;

				if (get_user(tmp, (u_int32_t __user *)arg))
					return -EFAULT;
				/* an output may only be claimed if free or
				 * already owned by CRTC2 */
				for (out = 0; out < 32; out++) {
					if (tmp & (1 << out)) {
						if (out >= MATROXFB_MAX_OUTPUTS)
							return -ENXIO;
						if (!minfo->outputs[out].output)
							return -ENXIO;
						switch (minfo->outputs[out].src) {
							case MATROXFB_SRC_NONE:
							case MATROXFB_SRC_CRTC2:
								break;
							default:
								return -EBUSY;
						}
					}
				}
				if (minfo->devflags.panellink) {
					if (tmp & MATROXFB_OUTPUT_CONN_DFP)
						return -EINVAL;
					if ((minfo->outputs[2].src == MATROXFB_SRC_CRTC1) && tmp)
						return -EBUSY;
				}
				changes = 0;
				for (out = 0; out < MATROXFB_MAX_OUTPUTS; out++) {
					if (tmp & (1 << out)) {
						if (minfo->outputs[out].src != MATROXFB_SRC_CRTC2) {
							changes = 1;
							minfo->outputs[out].src = MATROXFB_SRC_CRTC2;
						}
					} else if (minfo->outputs[out].src == MATROXFB_SRC_CRTC2) {
						changes = 1;
						minfo->outputs[out].src = MATROXFB_SRC_NONE;
					}
				}
				if (!changes)
					return 0;
				matroxfb_dh_set_par(info);
				return 0;
			}
		case MATROXFB_GET_OUTPUT_CONNECTION:
			{
				u_int32_t conn = 0;
				int out;

				for (out = 0; out < MATROXFB_MAX_OUTPUTS; out++) {
					if (minfo->outputs[out].src == MATROXFB_SRC_CRTC2) {
						conn |= 1 << out;
					}
				}
				if (put_user(conn, (u_int32_t __user *)arg))
					return -EFAULT;
				return 0;
			}
		case MATROXFB_GET_AVAILABLE_OUTPUTS:
			{
				u_int32_t tmp = 0;
				int out;

				for (out = 0; out < MATROXFB_MAX_OUTPUTS; out++) {
					if (minfo->outputs[out].output) {
						switch (minfo->outputs[out].src) {
							case MATROXFB_SRC_NONE:
							case MATROXFB_SRC_CRTC2:
								tmp |= 1 << out;
								break;
						}
					}
				}
				if (minfo->devflags.panellink) {
					tmp &= ~MATROXFB_OUTPUT_CONN_DFP;
					if (minfo->outputs[2].src == MATROXFB_SRC_CRTC1) {
						tmp = 0;
					}
				}
				if (put_user(tmp, (u_int32_t __user *)arg))
					return -EFAULT;
				return 0;
			}
	}
	return -ENOTTY;
#undef m2info
}

/* fb blank: CRTC2 has no blanking implementation yet. */
static int matroxfb_dh_blank(int blank, struct fb_info* info) {
#define m2info (container_of(info, struct matroxfb_dh_fb_info, fbcon))
	switch (blank) {
		case 1:
		case 2:
		case 3:
		case 4:
		default:;
	}
	/* do something... */
	return 0;
#undef m2info
}

static struct fb_ops matroxfb_dh_ops = {
	.owner =	THIS_MODULE,
	.fb_open =	matroxfb_dh_open,
	.fb_release =	matroxfb_dh_release,
	.fb_check_var =	matroxfb_dh_check_var,
	.fb_set_par =	matroxfb_dh_set_par,
	.fb_setcolreg =	matroxfb_dh_setcolreg,
	.fb_pan_display =matroxfb_dh_pan_display,
	.fb_blank =	matroxfb_dh_blank,
	.fb_ioctl =	matroxfb_dh_ioctl,
	.fb_fillrect =	cfb_fillrect,
	.fb_copyarea =	cfb_copyarea,
	.fb_imageblit =	cfb_imageblit,
};

/* Default 640x480-32bpp mode used when the head is first registered. */
static struct fb_var_screeninfo matroxfb_dh_defined = {
		640,480,640,480,/* W,H, virtual W,H */
		0,0,		/* offset */
		32,		/* depth */
		0,		/* gray */
		{0,0,0},	/* R */
		{0,0,0},	/* G */
		{0,0,0},	/* B */
		{0,0,0},	/* alpha */
		0,		/* nonstd */
		FB_ACTIVATE_NOW,
		-1,-1,		/* display size */
		0,		/* accel flags */
		39721L,48L,16L,33L,10L,
		96L,2,0,	/* no sync info */
		FB_VMODE_NONINTERLACED,
		0, {0,0,0,0,0}
	};

/* Carve video memory out of the primary head, fill in the secondary
 * fb_info and register it.  NOTE: the 'minfo' parameter is immediately
 * shadowed by the macro below — kept byte-identical on purpose. */
static int matroxfb_dh_regit(const struct matrox_fb_info *minfo,
		struct matroxfb_dh_fb_info *m2info) {
#define minfo (m2info->primary_dev)
	void* oldcrtc2;

	m2info->fbcon.fbops = &matroxfb_dh_ops;
	m2info->fbcon.flags = FBINFO_FLAG_DEFAULT;
	m2info->fbcon.flags |= FBINFO_HWACCEL_XPAN |
			       FBINFO_HWACCEL_YPAN;
	m2info->fbcon.pseudo_palette = m2info->cmap;
	fb_alloc_cmap(&m2info->fbcon.cmap, 256, 1);

	/* 'mem' may be given in MB, KB or bytes; normalize to bytes */
	if (mem < 64)
		mem *= 1024;
	if (mem < 64*1024)
		mem *= 1024;
	mem &= ~0x00000FFF;	/* PAGE_MASK? */
	if (minfo->video.len_usable + mem <= minfo->video.len)
		m2info->video.offbase = minfo->video.len - mem;
	else if (minfo->video.len < mem) {
		return -ENOMEM;
	} else { /* check yres on first head... */
		m2info->video.borrowed = mem;
		minfo->video.len_usable -= mem;
		m2info->video.offbase = minfo->video.len_usable;
	}
	m2info->video.base = minfo->video.base + m2info->video.offbase;
	m2info->video.len = m2info->video.len_usable = m2info->video.len_maximum = mem;
	m2info->video.vbase.vaddr = vaddr_va(minfo->video.vbase) + m2info->video.offbase;
	m2info->mmio.base = minfo->mmio.base;
	m2info->mmio.vbase = minfo->mmio.vbase;
	m2info->mmio.len = minfo->mmio.len;

	matroxfb_dh_init_fix(m2info);
	if (register_framebuffer(&m2info->fbcon)) {
		return -ENXIO;
	}
	if (!m2info->initialized)
		fb_set_var(&m2info->fbcon, &matroxfb_dh_defined);
	down_write(&minfo->crtc2.lock);
	oldcrtc2 = minfo->crtc2.info;
	minfo->crtc2.info = m2info;
	up_write(&minfo->crtc2.lock);
	if (oldcrtc2) {
		printk(KERN_ERR "matroxfb_crtc2: Internal consistency check failed: crtc2 already present: %p\n",
			oldcrtc2);
	}
	return 0;
#undef minfo
}

/* ************************** */

/* Register the secondary head; returns 0 on success, -1 on failure. */
static int matroxfb_dh_registerfb(struct matroxfb_dh_fb_info* m2info) {
#define minfo (m2info->primary_dev)
	if (matroxfb_dh_regit(minfo, m2info)) {
		printk(KERN_ERR "matroxfb_crtc2: secondary head failed to register\n");
		return -1;
	}
	printk(KERN_INFO "matroxfb_crtc2: secondary head of fb%u was registered as fb%u\n",
		minfo->fbcon.node, m2info->fbcon.node);
	m2info->fbcon_registered = 1;
	return 0;
#undef minfo
}

/* Unregister the secondary head and return its memory to the primary. */
static void matroxfb_dh_deregisterfb(struct matroxfb_dh_fb_info* m2info) {
#define minfo (m2info->primary_dev)
	if (m2info->fbcon_registered) {
		int id;
		struct matroxfb_dh_fb_info* crtc2;

		down_write(&minfo->crtc2.lock);
		crtc2 = minfo->crtc2.info;
		if (crtc2 == m2info)
			minfo->crtc2.info = NULL;
		up_write(&minfo->crtc2.lock);
		if (crtc2 != m2info) {
			printk(KERN_ERR "matroxfb_crtc2: Internal consistency check failed: crtc2 mismatch at unload: %p != %p\n",
				crtc2, m2info);
			printk(KERN_ERR "matroxfb_crtc2: Expect kernel crash after module unload.\n");
			return;
		}
		id = m2info->fbcon.node;
		unregister_framebuffer(&m2info->fbcon);
		/* return memory back to primary head */
		minfo->video.len_usable += m2info->video.borrowed;
		printk(KERN_INFO "matroxfb_crtc2: fb%u unregistered\n", id);
		m2info->fbcon_registered = 0;
	}
#undef minfo
}

/* Driver probe hook: allocate and register a CRTC2 head if the hardware
 * supports one; returns the per-head context or NULL. */
static void* matroxfb_crtc2_probe(struct matrox_fb_info* minfo) {
	struct matroxfb_dh_fb_info* m2info;

	/* hardware is CRTC2 incapable... */
	if (!minfo->devflags.crtc2)
		return NULL;
	m2info = kzalloc(sizeof(*m2info), GFP_KERNEL);
	if (!m2info) {
		printk(KERN_ERR "matroxfb_crtc2: Not enough memory for CRTC2 control structs\n");
		return NULL;
	}
	m2info->primary_dev = minfo;
	if (matroxfb_dh_registerfb(m2info)) {
		kfree(m2info);
		printk(KERN_ERR "matroxfb_crtc2: CRTC2 framebuffer failed to register\n");
		return NULL;
	}
	return m2info;
}

/* Driver remove hook: tear down the head created by probe. */
static void matroxfb_crtc2_remove(struct matrox_fb_info* minfo, void* crtc2) {
	matroxfb_dh_deregisterfb(crtc2);
	kfree(crtc2);
}

static struct matroxfb_driver crtc2 = {
		.name =		"Matrox G400 CRTC2",
		.probe =	matroxfb_crtc2_probe,
		.remove =	matroxfb_crtc2_remove };

/* Module entry: hook into the matroxfb core driver list. */
static int matroxfb_crtc2_init(void) {
	if (fb_get_options("matrox_crtc2fb", NULL))
		return -ENODEV;

	matroxfb_register_driver(&crtc2);
	return 0;
}

static void matroxfb_crtc2_exit(void) {
	matroxfb_unregister_driver(&crtc2);
}

MODULE_AUTHOR("(c) 1999-2002 Petr Vandrovec <vandrove@vc.cvut.cz>");
MODULE_DESCRIPTION("Matrox G400 CRTC2 driver");
MODULE_LICENSE("GPL");
module_init(matroxfb_crtc2_init);
module_exit(matroxfb_crtc2_exit);
/* we do not have __setup() yet */
gpl-2.0
spezi77/kernel_msm
arch/arm/mach-imx/mm-imx5.c
4715
7485
/*
 * Copyright 2008-2010 Freescale Semiconductor, Inc. All Rights Reserved.
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 *
 * Create static mapping between physical to virtual memory.
 */

#include <linux/mm.h>
#include <linux/init.h>
#include <linux/clk.h>

#include <asm/system_misc.h>
#include <asm/mach/map.h>

#include <mach/hardware.h>
#include <mach/common.h>
#include <mach/devices-common.h>
#include <mach/iomux-v3.h>

/* Clock handle looked up lazily on first idle; kept for the lifetime of
 * the system (never clk_put), since idle runs forever. */
static struct clk *gpc_dvfs_clk;

/*
 * Low-power idle hook installed as arm_pm_idle on i.MX51 (see
 * imx51_init_early below). Enters the SRPG (state-retention power
 * gating) low-power state and only executes WFI if the TZIC wakeup
 * sources could be armed.
 */
static void imx5_idle(void)
{
	/* gpc clock is needed for SRPG */
	if (gpc_dvfs_clk == NULL) {
		gpc_dvfs_clk = clk_get(NULL, "gpc_dvfs");
		if (IS_ERR(gpc_dvfs_clk))
			return; /* no clock -> skip low-power idle entirely */
	}
	clk_enable(gpc_dvfs_clk);
	mx5_cpu_lp_set(WAIT_UNCLOCKED_POWER_OFF);
	if (!tzic_enable_wake())
		cpu_do_idle();
	clk_disable(gpc_dvfs_clk);
}

/*
 * Define the MX50 memory map.
 */
static struct map_desc mx50_io_desc[] __initdata = {
	imx_map_entry(MX50, TZIC, MT_DEVICE),
	imx_map_entry(MX50, SPBA0, MT_DEVICE),
	imx_map_entry(MX50, AIPS1, MT_DEVICE),
	imx_map_entry(MX50, AIPS2, MT_DEVICE),
};

/*
 * Define the MX51 memory map.
 */
static struct map_desc mx51_io_desc[] __initdata = {
	imx_map_entry(MX51, TZIC, MT_DEVICE),
	imx_map_entry(MX51, IRAM, MT_DEVICE),
	imx_map_entry(MX51, AIPS1, MT_DEVICE),
	imx_map_entry(MX51, SPBA0, MT_DEVICE),
	imx_map_entry(MX51, AIPS2, MT_DEVICE),
};

/*
 * Define the MX53 memory map.
 */
static struct map_desc mx53_io_desc[] __initdata = {
	imx_map_entry(MX53, TZIC, MT_DEVICE),
	imx_map_entry(MX53, AIPS1, MT_DEVICE),
	imx_map_entry(MX53, SPBA0, MT_DEVICE),
	imx_map_entry(MX53, AIPS2, MT_DEVICE),
};

/*
 * This function initializes the memory map. It is called during the
 * system startup to create static physical to virtual memory mappings
 * for the IO modules.
 */
void __init mx50_map_io(void)
{
	iotable_init(mx50_io_desc, ARRAY_SIZE(mx50_io_desc));
}

/* Same as mx50_map_io, for the i.MX51 static IO mappings. */
void __init mx51_map_io(void)
{
	iotable_init(mx51_io_desc, ARRAY_SIZE(mx51_io_desc));
}

/* Same as mx50_map_io, for the i.MX53 static IO mappings. */
void __init mx53_map_io(void)
{
	iotable_init(mx53_io_desc, ARRAY_SIZE(mx53_io_desc));
}

/* Early init: record CPU type, set up IOMUX and the watchdog-based
 * machine reset for i.MX50. */
void __init imx50_init_early(void)
{
	mxc_set_cpu_type(MXC_CPU_MX50);
	mxc_iomux_v3_init(MX50_IO_ADDRESS(MX50_IOMUXC_BASE_ADDR));
	mxc_arch_reset_init(MX50_IO_ADDRESS(MX50_WDOG_BASE_ADDR));
}

/* Early init for i.MX51; additionally installs the SRPG idle hook.
 * Note: only MX51 gets imx5_idle here — MX50/MX53 keep the default. */
void __init imx51_init_early(void)
{
	mxc_set_cpu_type(MXC_CPU_MX51);
	mxc_iomux_v3_init(MX51_IO_ADDRESS(MX51_IOMUXC_BASE_ADDR));
	mxc_arch_reset_init(MX51_IO_ADDRESS(MX51_WDOG1_BASE_ADDR));
	arm_pm_idle = imx5_idle;
}

/* Early init: record CPU type, set up IOMUX and watchdog reset for
 * i.MX53. */
void __init imx53_init_early(void)
{
	mxc_set_cpu_type(MXC_CPU_MX53);
	mxc_iomux_v3_init(MX53_IO_ADDRESS(MX53_IOMUXC_BASE_ADDR));
	mxc_arch_reset_init(MX53_IO_ADDRESS(MX53_WDOG1_BASE_ADDR));
}

/* Interrupt controller (TZIC) bring-up, one per SoC variant. */
void __init mx50_init_irq(void)
{
	tzic_init_irq(MX50_IO_ADDRESS(MX50_TZIC_BASE_ADDR));
}

void __init mx51_init_irq(void)
{
	tzic_init_irq(MX51_IO_ADDRESS(MX51_TZIC_BASE_ADDR));
}

void __init mx53_init_irq(void)
{
	tzic_init_irq(MX53_IO_ADDRESS(MX53_TZIC_BASE_ADDR));
}

/* SDMA firmware script entry points for i.MX51; the addresses are
 * offsets into sdma-imx51.bin (firmware-defined, do not change). */
static struct sdma_script_start_addrs imx51_sdma_script __initdata = {
	.ap_2_ap_addr = 642,
	.uart_2_mcu_addr = 817,
	.mcu_2_app_addr = 747,
	.mcu_2_shp_addr = 961,
	.ata_2_mcu_addr = 1473,
	.mcu_2_ata_addr = 1392,
	.app_2_per_addr = 1033,
	.app_2_mcu_addr = 683,
	.shp_2_per_addr = 1251,
	.shp_2_mcu_addr = 892,
};

static struct sdma_platform_data imx51_sdma_pdata __initdata = {
	.fw_name = "sdma-imx51.bin",
	.script_addrs = &imx51_sdma_script,
};

/* SDMA firmware script entry points for i.MX53 (sdma-imx53.bin). */
static struct sdma_script_start_addrs imx53_sdma_script __initdata = {
	.ap_2_ap_addr = 642,
	.app_2_mcu_addr = 683,
	.mcu_2_app_addr = 747,
	.uart_2_mcu_addr = 817,
	.shp_2_mcu_addr = 891,
	.mcu_2_shp_addr = 960,
	.uartsh_2_mcu_addr = 1032,
	.spdif_2_mcu_addr = 1100,
	.mcu_2_spdif_addr = 1134,
	.firi_2_mcu_addr = 1193,
	.mcu_2_firi_addr = 1290,
};

static struct sdma_platform_data imx53_sdma_pdata __initdata = {
	.fw_name = "sdma-imx53.bin",
	.script_addrs = &imx53_sdma_script,
};

/* AUDMUX MMIO resources, one block per SoC variant. */
static const struct resource imx50_audmux_res[] __initconst = {
	DEFINE_RES_MEM(MX50_AUDMUX_BASE_ADDR, SZ_16K),
};

static const struct resource imx51_audmux_res[] __initconst = {
	DEFINE_RES_MEM(MX51_AUDMUX_BASE_ADDR, SZ_16K),
};

static const struct resource imx53_audmux_res[] __initconst = {
	DEFINE_RES_MEM(MX53_AUDMUX_BASE_ADDR, SZ_16K),
};

/* Register the i.MX50 on-SoC devices (GPIO banks 1-6, audmux). */
void __init imx50_soc_init(void)
{
	/* i.mx50 has the i.mx31 type gpio */
	mxc_register_gpio("imx31-gpio", 0, MX50_GPIO1_BASE_ADDR, SZ_16K, MX50_INT_GPIO1_LOW, MX50_INT_GPIO1_HIGH);
	mxc_register_gpio("imx31-gpio", 1, MX50_GPIO2_BASE_ADDR, SZ_16K, MX50_INT_GPIO2_LOW, MX50_INT_GPIO2_HIGH);
	mxc_register_gpio("imx31-gpio", 2, MX50_GPIO3_BASE_ADDR, SZ_16K, MX50_INT_GPIO3_LOW, MX50_INT_GPIO3_HIGH);
	mxc_register_gpio("imx31-gpio", 3, MX50_GPIO4_BASE_ADDR, SZ_16K, MX50_INT_GPIO4_LOW, MX50_INT_GPIO4_HIGH);
	mxc_register_gpio("imx31-gpio", 4, MX50_GPIO5_BASE_ADDR, SZ_16K, MX50_INT_GPIO5_LOW, MX50_INT_GPIO5_HIGH);
	mxc_register_gpio("imx31-gpio", 5, MX50_GPIO6_BASE_ADDR, SZ_16K, MX50_INT_GPIO6_LOW, MX50_INT_GPIO6_HIGH);

	/* i.mx50 has the i.mx31 type audmux */
	platform_device_register_simple("imx31-audmux", 0, imx50_audmux_res,
					ARRAY_SIZE(imx50_audmux_res));
}

/* Register the i.MX51 on-SoC devices (GPIO banks 1-4, SDMA, audmux)
 * and configure the AIPS bridges. */
void __init imx51_soc_init(void)
{
	/* i.mx51 has the i.mx31 type gpio */
	mxc_register_gpio("imx31-gpio", 0, MX51_GPIO1_BASE_ADDR, SZ_16K, MX51_INT_GPIO1_LOW, MX51_INT_GPIO1_HIGH);
	mxc_register_gpio("imx31-gpio", 1, MX51_GPIO2_BASE_ADDR, SZ_16K, MX51_INT_GPIO2_LOW, MX51_INT_GPIO2_HIGH);
	mxc_register_gpio("imx31-gpio", 2, MX51_GPIO3_BASE_ADDR, SZ_16K, MX51_INT_GPIO3_LOW, MX51_INT_GPIO3_HIGH);
	mxc_register_gpio("imx31-gpio", 3, MX51_GPIO4_BASE_ADDR, SZ_16K, MX51_INT_GPIO4_LOW, MX51_INT_GPIO4_HIGH);

	/* i.mx51 has the i.mx35 type sdma */
	imx_add_imx_sdma("imx35-sdma", MX51_SDMA_BASE_ADDR, MX51_INT_SDMA, &imx51_sdma_pdata);

	/* Setup AIPS registers */
	imx_set_aips(MX51_IO_ADDRESS(MX51_AIPS1_BASE_ADDR));
	imx_set_aips(MX51_IO_ADDRESS(MX51_AIPS2_BASE_ADDR));

	/* i.mx51 has the i.mx31 type audmux */
	platform_device_register_simple("imx31-audmux", 0, imx51_audmux_res,
					ARRAY_SIZE(imx51_audmux_res));
}

/* Register the i.MX53 on-SoC devices (GPIO banks 1-7, SDMA, audmux)
 * and configure the AIPS bridges. */
void __init imx53_soc_init(void)
{
	/* i.mx53 has the i.mx31 type gpio */
	mxc_register_gpio("imx31-gpio", 0, MX53_GPIO1_BASE_ADDR, SZ_16K, MX53_INT_GPIO1_LOW, MX53_INT_GPIO1_HIGH);
	mxc_register_gpio("imx31-gpio", 1, MX53_GPIO2_BASE_ADDR, SZ_16K, MX53_INT_GPIO2_LOW, MX53_INT_GPIO2_HIGH);
	mxc_register_gpio("imx31-gpio", 2, MX53_GPIO3_BASE_ADDR, SZ_16K, MX53_INT_GPIO3_LOW, MX53_INT_GPIO3_HIGH);
	mxc_register_gpio("imx31-gpio", 3, MX53_GPIO4_BASE_ADDR, SZ_16K, MX53_INT_GPIO4_LOW, MX53_INT_GPIO4_HIGH);
	mxc_register_gpio("imx31-gpio", 4, MX53_GPIO5_BASE_ADDR, SZ_16K, MX53_INT_GPIO5_LOW, MX53_INT_GPIO5_HIGH);
	mxc_register_gpio("imx31-gpio", 5, MX53_GPIO6_BASE_ADDR, SZ_16K, MX53_INT_GPIO6_LOW, MX53_INT_GPIO6_HIGH);
	mxc_register_gpio("imx31-gpio", 6, MX53_GPIO7_BASE_ADDR, SZ_16K, MX53_INT_GPIO7_LOW, MX53_INT_GPIO7_HIGH);

	/* i.mx53 has the i.mx35 type sdma */
	imx_add_imx_sdma("imx35-sdma", MX53_SDMA_BASE_ADDR, MX53_INT_SDMA, &imx53_sdma_pdata);

	/* Setup AIPS registers */
	imx_set_aips(MX53_IO_ADDRESS(MX53_AIPS1_BASE_ADDR));
	imx_set_aips(MX53_IO_ADDRESS(MX53_AIPS2_BASE_ADDR));

	/* i.mx53 has the i.mx31 type audmux */
	platform_device_register_simple("imx31-audmux", 0, imx53_audmux_res,
					ARRAY_SIZE(imx53_audmux_res));
}
gpl-2.0
alexax66/CM12.1_kernel_serranodsxx
net/ipv4/xfrm4_tunnel.c
7275
2765
/* xfrm4_tunnel.c: Generic IP tunnel transformer. * * Copyright (C) 2003 David S. Miller (davem@redhat.com) */ #define pr_fmt(fmt) "IPsec: " fmt #include <linux/skbuff.h> #include <linux/module.h> #include <linux/mutex.h> #include <net/xfrm.h> #include <net/ip.h> #include <net/protocol.h> static int ipip_output(struct xfrm_state *x, struct sk_buff *skb) { skb_push(skb, -skb_network_offset(skb)); return 0; } static int ipip_xfrm_rcv(struct xfrm_state *x, struct sk_buff *skb) { return ip_hdr(skb)->protocol; } static int ipip_init_state(struct xfrm_state *x) { if (x->props.mode != XFRM_MODE_TUNNEL) return -EINVAL; if (x->encap) return -EINVAL; x->props.header_len = sizeof(struct iphdr); return 0; } static void ipip_destroy(struct xfrm_state *x) { } static const struct xfrm_type ipip_type = { .description = "IPIP", .owner = THIS_MODULE, .proto = IPPROTO_IPIP, .init_state = ipip_init_state, .destructor = ipip_destroy, .input = ipip_xfrm_rcv, .output = ipip_output }; static int xfrm_tunnel_rcv(struct sk_buff *skb) { return xfrm4_rcv_spi(skb, IPPROTO_IPIP, ip_hdr(skb)->saddr); } static int xfrm_tunnel_err(struct sk_buff *skb, u32 info) { return -ENOENT; } static struct xfrm_tunnel xfrm_tunnel_handler __read_mostly = { .handler = xfrm_tunnel_rcv, .err_handler = xfrm_tunnel_err, .priority = 2, }; #if IS_ENABLED(CONFIG_IPV6) static struct xfrm_tunnel xfrm64_tunnel_handler __read_mostly = { .handler = xfrm_tunnel_rcv, .err_handler = xfrm_tunnel_err, .priority = 2, }; #endif static int __init ipip_init(void) { if (xfrm_register_type(&ipip_type, AF_INET) < 0) { pr_info("%s: can't add xfrm type\n", __func__); return -EAGAIN; } if (xfrm4_tunnel_register(&xfrm_tunnel_handler, AF_INET)) { pr_info("%s: can't add xfrm handler for AF_INET\n", __func__); xfrm_unregister_type(&ipip_type, AF_INET); return -EAGAIN; } #if IS_ENABLED(CONFIG_IPV6) if (xfrm4_tunnel_register(&xfrm64_tunnel_handler, AF_INET6)) { pr_info("%s: can't add xfrm handler for AF_INET6\n", __func__); 
xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET); xfrm_unregister_type(&ipip_type, AF_INET); return -EAGAIN; } #endif return 0; } static void __exit ipip_fini(void) { #if IS_ENABLED(CONFIG_IPV6) if (xfrm4_tunnel_deregister(&xfrm64_tunnel_handler, AF_INET6)) pr_info("%s: can't remove xfrm handler for AF_INET6\n", __func__); #endif if (xfrm4_tunnel_deregister(&xfrm_tunnel_handler, AF_INET)) pr_info("%s: can't remove xfrm handler for AF_INET\n", __func__); if (xfrm_unregister_type(&ipip_type, AF_INET) < 0) pr_info("%s: can't remove xfrm type\n", __func__); } module_init(ipip_init); module_exit(ipip_fini); MODULE_LICENSE("GPL"); MODULE_ALIAS_XFRM_TYPE(AF_INET, XFRM_PROTO_IPIP);
gpl-2.0
mgherzan/linux-arm
drivers/media/video/saa7164/saa7164-api.c
8043
44788
/*
 * Driver for the NXP SAA7164 PCIe bridge
 *
 * Copyright (c) 2010 Steven Toth <stoth@kernellabs.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/wait.h>
#include <linux/slab.h>

#include "saa7164.h"

/* Query firmware CPU load and log it; does nothing unless the
 * DBGLVL_CPU debug flag is enabled. Returns the command status. */
int saa7164_api_get_load_info(struct saa7164_dev *dev, struct tmFwInfoStruct *i)
{
	int ret;

	if (!(saa_debug & DBGLVL_CPU))
		return 0;

	dprintk(DBGLVL_API, "%s()\n", __func__);

	/* Zero the request fields before the GET transaction */
	i->deviceinst = 0;
	i->devicespec = 0;
	i->mode = 0;
	i->status = 0;

	ret = saa7164_cmd_send(dev, 0, GET_CUR,
		GET_FW_STATUS_CONTROL,
		sizeof(struct tmFwInfoStruct), i);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	printk(KERN_INFO "saa7164[%d]-CPU: %d percent", dev->nr, i->CPULoad);

	return ret;
}

/* Drain the firmware debug message queue to the kernel log.
 * Bounded to 255 iterations; stops early when the firmware reports
 * no more data. Always returns 0. */
int saa7164_api_collect_debug(struct saa7164_dev *dev)
{
	struct tmComResDebugGetData d;
	u8 more = 255;
	int ret;

	dprintk(DBGLVL_API, "%s()\n", __func__);

	while (more--) {

		memset(&d, 0, sizeof(d));

		ret = saa7164_cmd_send(dev, 0, GET_CUR, GET_DEBUG_DATA_CONTROL,
			sizeof(d), &d);
		if (ret != SAA_OK)
			printk(KERN_ERR "%s() error, ret = 0x%x\n",
				__func__, ret);

		/* Firmware signals end-of-queue via dwResult */
		if (d.dwResult != SAA_OK)
			break;

		printk(KERN_INFO "saa7164[%d]-FWMSG: %s", dev->nr,
			d.ucDebugData);
	}

	return 0;
}

/* Set the firmware debug verbosity. Reads the current level first
 * (logged at DBGLVL_API), then writes the new one. */
int saa7164_api_set_debug(struct saa7164_dev *dev, u8 level)
{
	struct tmComResDebugSetLevel lvl;
	int ret;

	dprintk(DBGLVL_API, "%s(level=%d)\n", __func__, level);

	/* Retrieve current state */
	ret = saa7164_cmd_send(dev, 0, GET_CUR, SET_DEBUG_LEVEL_CONTROL,
		sizeof(lvl), &lvl);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	dprintk(DBGLVL_API, "%s() Was %d\n", __func__, lvl.dwDebugLevel);

	lvl.dwDebugLevel = level;

	/* set new state */
	ret = saa7164_cmd_send(dev, 0, SET_CUR, SET_DEBUG_LEVEL_CONTROL,
		sizeof(lvl), &lvl);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	return ret;
}

/* Negotiate the VBI format with the firmware using the USB-video-class
 * style PROBE/COMMIT handshake: SET probe, GET probe back, verify the
 * device echoed our request, then SET/GET commit and verify again. */
int saa7164_api_set_vbi_format(struct saa7164_port *port)
{
	struct saa7164_dev *dev = port->dev;
	struct tmComResProbeCommit fmt, rsp;
	int ret;

	dprintk(DBGLVL_API, "%s(nr=%d, unitid=0x%x)\n", __func__,
		port->nr, port->hwcfg.unitid);

	fmt.bmHint = 0;
	fmt.bFormatIndex = 1;
	fmt.bFrameIndex = 1;

	/* Probe, see if it can support this format */
	ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid,
		SET_CUR, SAA_PROBE_CONTROL, sizeof(fmt), &fmt);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() set error, ret = 0x%x\n", __func__, ret);

	/* See of the format change was successful */
	ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid,
		GET_CUR, SAA_PROBE_CONTROL, sizeof(rsp), &rsp);
	if (ret != SAA_OK) {
		printk(KERN_ERR "%s() get error, ret = 0x%x\n", __func__, ret);
	} else {
		/* Compare requested vs received, should be same */
		if (memcmp(&fmt, &rsp, sizeof(rsp)) == 0) {
			dprintk(DBGLVL_API, "SET/PROBE Verified\n");

			/* Ask the device to select the negotiated format */
			ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid,
				SET_CUR, SAA_COMMIT_CONTROL, sizeof(fmt), &fmt);
			if (ret != SAA_OK)
				printk(KERN_ERR
					"%s() commit error, ret = 0x%x\n",
					__func__, ret);

			ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid,
				GET_CUR, SAA_COMMIT_CONTROL, sizeof(rsp), &rsp);
			if (ret != SAA_OK)
				printk(KERN_ERR
					"%s() GET commit error, ret = 0x%x\n",
					__func__, ret);

			if (memcmp(&fmt, &rsp, sizeof(rsp)) != 0) {
				printk(KERN_ERR
					"%s() memcmp error, ret = 0x%x\n",
					__func__, ret);
			} else
				dprintk(DBGLVL_API, "SET/COMMIT Verified\n");

			dprintk(DBGLVL_API, "rsp.bmHint = 0x%x\n", rsp.bmHint);
			dprintk(DBGLVL_API, "rsp.bFormatIndex = 0x%x\n",
				rsp.bFormatIndex);
			dprintk(DBGLVL_API, "rsp.bFrameIndex = 0x%x\n",
				rsp.bFrameIndex);
		} else
			printk(KERN_ERR "%s() compare failed\n", __func__);
	}

	if (ret == SAA_OK)
		dprintk(DBGLVL_API, "%s(nr=%d) Success\n", __func__, port->nr);

	return ret;
}

/* Push the user-configured GOP structure (reference frame distance and
 * GOP size) to the encoder unit. */
int saa7164_api_set_gop_size(struct saa7164_port *port)
{
	struct saa7164_dev *dev = port->dev;
	struct tmComResEncVideoGopStructure gs;
	int ret;

	dprintk(DBGLVL_ENC, "%s()\n", __func__);

	gs.ucRefFrameDist = port->encoder_params.refdist;
	gs.ucGOPSize = port->encoder_params.gop_size;
	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
		EU_VIDEO_GOP_STRUCTURE_CONTROL,
		sizeof(gs), &gs);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	return ret;
}

/* Program the encoder from port->encoder_params: stream profile
 * (PS for DVD vs TS high quality), video bitrate mode/targets, fixed
 * audio bitrate, then aspect ratio and GOP size via the helpers. */
int saa7164_api_set_encoder(struct saa7164_port *port)
{
	struct saa7164_dev *dev = port->dev;
	struct tmComResEncVideoBitRate vb;
	struct tmComResEncAudioBitRate ab;
	int ret;

	dprintk(DBGLVL_ENC, "%s() unitid=0x%x\n", __func__,
		port->hwcfg.sourceid);

	if (port->encoder_params.stream_type == V4L2_MPEG_STREAM_TYPE_MPEG2_PS)
		port->encoder_profile = EU_PROFILE_PS_DVD;
	else
		port->encoder_profile = EU_PROFILE_TS_HQ;

	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
		EU_PROFILE_CONTROL, sizeof(u8), &port->encoder_profile);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	/* Resolution */
	/* NOTE(review): this resends EU_PROFILE_CONTROL rather than a
	 * resolution control despite the comment above — looks like a
	 * copy/paste leftover; confirm against the firmware API before
	 * changing. */
	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
		EU_PROFILE_CONTROL, sizeof(u8), &port->encoder_profile);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	/* Establish video bitrates */
	if (port->encoder_params.bitrate_mode ==
		V4L2_MPEG_VIDEO_BITRATE_MODE_CBR)
		vb.ucVideoBitRateMode = EU_VIDEO_BIT_RATE_MODE_CONSTANT;
	else
		vb.ucVideoBitRateMode = EU_VIDEO_BIT_RATE_MODE_VARIABLE_PEAK;
	vb.dwVideoBitRate = port->encoder_params.bitrate;
	vb.dwVideoBitRatePeak = port->encoder_params.bitrate_peak;
	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
		EU_VIDEO_BIT_RATE_CONTROL,
		sizeof(struct tmComResEncVideoBitRate), &vb);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	/* Establish audio bitrates (fixed 384 kbps, CBR) */
	ab.ucAudioBitRateMode = 0;
	ab.dwAudioBitRate = 384000;
	ab.dwAudioBitRatePeak = ab.dwAudioBitRate;
	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
		EU_AUDIO_BIT_RATE_CONTROL,
		sizeof(struct tmComResEncAudioBitRate), &ab);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	saa7164_api_set_aspect_ratio(port);
	saa7164_api_set_gop_size(port);

	return ret;
}

/* Read back the current encoder configuration (profile, resolution,
 * formats, bitrates, aspect ratio) into the port struct and log it.
 * Returns the status of the last command issued. */
int saa7164_api_get_encoder(struct saa7164_port *port)
{
	struct saa7164_dev *dev = port->dev;
	struct tmComResEncVideoBitRate v;
	struct tmComResEncAudioBitRate a;
	struct tmComResEncVideoInputAspectRatio ar;
	int ret;

	dprintk(DBGLVL_ENC, "%s() unitid=0x%x\n", __func__,
		port->hwcfg.sourceid);

	port->encoder_profile = 0;
	port->video_format = 0;
	port->video_resolution = 0;
	port->audio_format = 0;

	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
		EU_PROFILE_CONTROL, sizeof(u8), &port->encoder_profile);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
		EU_VIDEO_RESOLUTION_CONTROL, sizeof(u8),
		&port->video_resolution);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
		EU_VIDEO_FORMAT_CONTROL, sizeof(u8), &port->video_format);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
		EU_VIDEO_BIT_RATE_CONTROL, sizeof(v), &v);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
		EU_AUDIO_FORMAT_CONTROL, sizeof(u8), &port->audio_format);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
		EU_AUDIO_BIT_RATE_CONTROL, sizeof(a), &a);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	/* Aspect Ratio */
	ar.width = 0;
	ar.height = 0;
	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, GET_CUR,
		EU_VIDEO_INPUT_ASPECT_CONTROL,
		sizeof(struct tmComResEncVideoInputAspectRatio), &ar);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	dprintk(DBGLVL_ENC, "encoder_profile = %d\n", port->encoder_profile);
	dprintk(DBGLVL_ENC, "video_format    = %d\n", port->video_format);
	dprintk(DBGLVL_ENC, "audio_format    = %d\n", port->audio_format);
	dprintk(DBGLVL_ENC, "video_resolution= %d\n", port->video_resolution);
	dprintk(DBGLVL_ENC, "v.ucVideoBitRateMode = %d\n",
		v.ucVideoBitRateMode);
	dprintk(DBGLVL_ENC, "v.dwVideoBitRate     = %d\n",
		v.dwVideoBitRate);
	dprintk(DBGLVL_ENC, "v.dwVideoBitRatePeak = %d\n",
		v.dwVideoBitRatePeak);
	dprintk(DBGLVL_ENC, "a.ucVideoBitRateMode = %d\n",
		a.ucAudioBitRateMode);
	dprintk(DBGLVL_ENC, "a.dwVideoBitRate     = %d\n",
		a.dwAudioBitRate);
	dprintk(DBGLVL_ENC, "a.dwVideoBitRatePeak = %d\n",
		a.dwAudioBitRatePeak);
	dprintk(DBGLVL_ENC, "aspect.width / height = %d:%d\n",
		ar.width, ar.height);

	return ret;
}

/* Translate the V4L2 aspect-ratio control into a width:height pair and
 * send it to the encoder. BUG()s on an unknown enum value, which is
 * unreachable for values coming through the V4L2 control framework. */
int saa7164_api_set_aspect_ratio(struct saa7164_port *port)
{
	struct saa7164_dev *dev = port->dev;
	struct tmComResEncVideoInputAspectRatio ar;
	int ret;

	dprintk(DBGLVL_ENC, "%s(%d)\n", __func__,
		port->encoder_params.ctl_aspect);

	switch (port->encoder_params.ctl_aspect) {
	case V4L2_MPEG_VIDEO_ASPECT_1x1:
		ar.width = 1;
		ar.height = 1;
		break;
	case V4L2_MPEG_VIDEO_ASPECT_4x3:
		ar.width = 4;
		ar.height = 3;
		break;
	case V4L2_MPEG_VIDEO_ASPECT_16x9:
		ar.width = 16;
		ar.height = 9;
		break;
	case V4L2_MPEG_VIDEO_ASPECT_221x100:
		ar.width = 221;
		ar.height = 100;
		break;
	default:
		BUG();
	}

	dprintk(DBGLVL_ENC, "%s(%d) now %d:%d\n", __func__,
		port->encoder_params.ctl_aspect,
		ar.width, ar.height);

	/* Aspect Ratio */
	ret = saa7164_cmd_send(port->dev, port->hwcfg.sourceid, SET_CUR,
		EU_VIDEO_INPUT_ASPECT_CONTROL,
		sizeof(struct tmComResEncVideoInputAspectRatio), &ar);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	return ret;
}

/* Write one cached processing-unit control (brightness, contrast, hue,
 * saturation or sharpness) from the port struct to the hardware.
 * Returns -EINVAL for an unrecognised control id. */
int saa7164_api_set_usercontrol(struct saa7164_port *port, u8 ctl)
{
	struct saa7164_dev *dev = port->dev;
	int ret;
	u16 val;

	if (ctl == PU_BRIGHTNESS_CONTROL)
		val = port->ctl_brightness;
	else if (ctl == PU_CONTRAST_CONTROL)
		val = port->ctl_contrast;
	else if (ctl == PU_HUE_CONTROL)
		val = port->ctl_hue;
	else if (ctl == PU_SATURATION_CONTROL)
		val = port->ctl_saturation;
	else if (ctl == PU_SHARPNESS_CONTROL)
		val = port->ctl_sharpness;
	else
		return -EINVAL;

	dprintk(DBGLVL_ENC, "%s() unitid=0x%x ctl=%d, val=%d\n",
		__func__, port->encunit.vsourceid, ctl, val);

	ret = saa7164_cmd_send(port->dev, port->encunit.vsourceid, SET_CUR,
		ctl, sizeof(u16), &val);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	return ret;
}

/* Read one processing-unit control from the hardware into the matching
 * cached field on the port struct. Unknown ids are silently ignored
 * after the read. */
int saa7164_api_get_usercontrol(struct saa7164_port *port, u8 ctl)
{
	struct saa7164_dev *dev = port->dev;
	int ret;
	u16 val;

	ret = saa7164_cmd_send(port->dev, port->encunit.vsourceid, GET_CUR,
		ctl, sizeof(u16), &val);
	if (ret != SAA_OK) {
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);
		return ret;
	}

	dprintk(DBGLVL_ENC, "%s() ctl=%d, val=%d\n",
		__func__, ctl, val);

	if (ctl == PU_BRIGHTNESS_CONTROL)
		port->ctl_brightness = val;
	else if (ctl == PU_CONTRAST_CONTROL)
		port->ctl_contrast = val;
	else if (ctl == PU_HUE_CONTROL)
		port->ctl_hue = val;
	else if (ctl == PU_SATURATION_CONTROL)
		port->ctl_saturation = val;
	else if (ctl == PU_SHARPNESS_CONTROL)
		port->ctl_sharpness = val;

	return ret;
}

/* Switch the video and audio input selectors to port->mux_input
 * (1-based), muting audio around the switch to avoid pops. The inputs[]
 * table maps each video input to its companion audio input. */
int saa7164_api_set_videomux(struct saa7164_port *port)
{
	struct saa7164_dev *dev = port->dev;
	u8 inputs[] = { 1, 2, 2, 2, 5, 5, 5 };
	int ret;

	dprintk(DBGLVL_ENC, "%s() v_mux=%d a_mux=%d\n",
		__func__, port->mux_input, inputs[port->mux_input - 1]);

	/* Audio Mute */
	ret = saa7164_api_audio_mute(port, 1);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	/* Video Mux */
	ret = saa7164_cmd_send(port->dev, port->vidproc.sourceid, SET_CUR,
		SU_INPUT_SELECT_CONTROL, sizeof(u8), &port->mux_input);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	/* Audio Mux */
	ret = saa7164_cmd_send(port->dev, port->audfeat.sourceid, SET_CUR,
		SU_INPUT_SELECT_CONTROL, sizeof(u8),
		&inputs[port->mux_input - 1]);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	/* Audio UnMute */
	ret = saa7164_api_audio_mute(port, 0);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	return ret;
}

/* Mute (1) or unmute (0) the audio feature unit. */
int saa7164_api_audio_mute(struct saa7164_port *port, int mute)
{
	struct saa7164_dev *dev = port->dev;
	u8 v = mute;
	int ret;

	dprintk(DBGLVL_API, "%s(%d)\n", __func__, mute);

	ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR,
		MUTE_CONTROL, sizeof(u8), &v);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	return ret;
}

/* 0 = silence, 0xff = full */
/* Set left and right channel volume: query the device's min/max range,
 * clamp the requested level into it, write both channels (channel id
 * in the high byte of the control selector), then read back for the
 * debug log. */
int saa7164_api_set_audio_volume(struct saa7164_port *port, s8 level)
{
	struct saa7164_dev *dev = port->dev;
	s16 v, min, max;
	int ret;

	dprintk(DBGLVL_API, "%s(%d)\n", __func__, level);

	/* Obtain the min/max ranges */
	ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_MIN,
		VOLUME_CONTROL, sizeof(u16), &min);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_MAX,
		VOLUME_CONTROL, sizeof(u16), &max);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_CUR,
		(0x01 << 8) | VOLUME_CONTROL, sizeof(u16), &v);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	dprintk(DBGLVL_API, "%s(%d) min=%d max=%d cur=%d\n",
		__func__, level, min, max, v);

	v = level;
	if (v < min)
		v = min;
	if (v > max)
		v = max;

	/* Left */
	ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR,
		(0x01 << 8) | VOLUME_CONTROL, sizeof(s16), &v);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	/* Right */
	ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR,
		(0x02 << 8) | VOLUME_CONTROL, sizeof(s16), &v);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, GET_CUR,
		(0x01 << 8) | VOLUME_CONTROL, sizeof(u16), &v);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	dprintk(DBGLVL_API, "%s(%d) min=%d max=%d cur=%d\n",
		__func__, level, min, max, v);

	return ret;
}

/* Program default audio levels, then select the TV audio standard from
 * the port's configured encoder norm: NTSC-M for NTSC norms, PAL-I
 * otherwise (country codes 1 / 44 respectively). */
int saa7164_api_set_audio_std(struct saa7164_port *port)
{
	struct saa7164_dev *dev = port->dev;
	struct tmComResAudioDefaults lvl;
	struct tmComResTunerStandard tvaudio;
	int ret;

	dprintk(DBGLVL_API, "%s()\n", __func__);

	/* Establish default levels */
	lvl.ucDecoderLevel = TMHW_LEV_ADJ_DECLEV_DEFAULT;
	lvl.ucDecoderFM_Level = TMHW_LEV_ADJ_DECLEV_DEFAULT;
	lvl.ucMonoLevel = TMHW_LEV_ADJ_MONOLEV_DEFAULT;
	lvl.ucNICAM_Level = TMHW_LEV_ADJ_NICLEV_DEFAULT;
	lvl.ucSAP_Level = TMHW_LEV_ADJ_SAPLEV_DEFAULT;
	lvl.ucADC_Level = TMHW_LEV_ADJ_ADCLEV_DEFAULT;
	ret = saa7164_cmd_send(port->dev, port->audfeat.unitid, SET_CUR,
		AUDIO_DEFAULT_CONTROL, sizeof(struct tmComResAudioDefaults),
		&lvl);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	/* Manually select the appropriate TV audio standard */
	if (port->encodernorm.id & V4L2_STD_NTSC) {
		tvaudio.std = TU_STANDARD_NTSC_M;
		tvaudio.country = 1;
	} else {
		tvaudio.std = TU_STANDARD_PAL_I;
		tvaudio.country = 44;
	}

	ret = saa7164_cmd_send(port->dev, port->tunerunit.unitid, SET_CUR,
		TU_STANDARD_CONTROL, sizeof(tvaudio), &tvaudio);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() TU_STANDARD_CONTROL error, ret = 0x%x\n",
			__func__, ret);
	return ret;
}

/* Enable or disable automatic TV audio standard detection on the
 * tuner unit. */
int saa7164_api_set_audio_detection(struct saa7164_port *port, int autodetect)
{
	struct saa7164_dev *dev = port->dev;
	struct tmComResTunerStandardAuto p;
	int ret;

	dprintk(DBGLVL_API, "%s(%d)\n", __func__, autodetect);

	/* Disable TV Audio autodetect if not already set (buggy) */
	if (autodetect)
		p.mode = TU_STANDARD_AUTO;
	else
		p.mode = TU_STANDARD_MANUAL;
	ret = saa7164_cmd_send(port->dev, port->tunerunit.unitid, SET_CUR,
		TU_STANDARD_AUTO_CONTROL, sizeof(p), &p);
	if (ret != SAA_OK)
		printk(KERN_ERR
			"%s() TU_STANDARD_AUTO_CONTROL error, ret = 0x%x\n",
			__func__, ret);

	return ret;
}

/* Read the currently selected video input into port->mux_input. */
int saa7164_api_get_videomux(struct saa7164_port *port)
{
	struct saa7164_dev *dev = port->dev;
	int ret;

	ret = saa7164_cmd_send(port->dev, port->vidproc.sourceid, GET_CUR,
		SU_INPUT_SELECT_CONTROL, sizeof(u8), &port->mux_input);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret);

	dprintk(DBGLVL_ENC, "%s() v_mux=%d\n",
		__func__, port->mux_input);

	return ret;
}

/* Write a single DIF (demodulator IF) register via the extension-unit
 * register access control. The 16-byte request buffer layout (lengths,
 * register address, bus master id 0xd0/0xe0 per port, value) is the
 * firmware's wire format. Returns 0 on success, -EIO on failure. */
int saa7164_api_set_dif(struct saa7164_port *port, u8 reg, u8 val)
{
	struct saa7164_dev *dev = port->dev;
	u16 len = 0;
	u8 buf[256];
	int ret;
	u8 mas;

	dprintk(DBGLVL_API, "%s(nr=%d type=%d val=%x)\n", __func__,
		port->nr, port->type, val);

	if (port->nr == 0)
		mas = 0xd0;
	else
		mas = 0xe0;

	memset(buf, 0, sizeof(buf));

	buf[0x00] = 0x04;
	buf[0x01] = 0x00;
	buf[0x02] = 0x00;
	buf[0x03] = 0x00;

	buf[0x04] = 0x04;
	buf[0x05] = 0x00;
	buf[0x06] = 0x00;
	buf[0x07] = 0x00;

	buf[0x08] = reg;
	buf[0x09] = 0x26;
	buf[0x0a] = mas;
	buf[0x0b] = 0xb0;

	buf[0x0c] = val;
	buf[0x0d] = 0x00;
	buf[0x0e] = 0x00;
	buf[0x0f] = 0x00;

	/* Query the expected transfer length before sending the write */
	ret = saa7164_cmd_send(dev, port->ifunit.unitid, GET_LEN,
		EXU_REGISTER_ACCESS_CONTROL, sizeof(len), &len);
	if (ret != SAA_OK) {
		printk(KERN_ERR "%s() error, ret(1) = 0x%x\n", __func__, ret);
		return -EIO;
	}

	ret = saa7164_cmd_send(dev, port->ifunit.unitid, SET_CUR,
		EXU_REGISTER_ACCESS_CONTROL, len, &buf);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret(2) = 0x%x\n", __func__, ret);
#if 0
	saa7164_dumphex16(dev, buf, 16);
#endif
	return ret == SAA_OK ? 0 : -EIO;
}

/* Disable the IF block AGC controls */
/* Configure the DIF for a given V4L2 video standard: pick the video
 * standard register value (0x80 = undefined, assumed DTV, which also
 * disables the AGC output), program AGC/EQ registers, then toggle the
 * Active register with 100ms settles. */
int saa7164_api_configure_dif(struct saa7164_port *port, u32 std)
{
	struct saa7164_dev *dev = port->dev;
	int ret = 0;
	u8 agc_disable;

	dprintk(DBGLVL_API, "%s(nr=%d, 0x%x)\n", __func__, port->nr, std);

	if (std & V4L2_STD_NTSC) {
		dprintk(DBGLVL_API, " NTSC\n");
		saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */
		agc_disable = 0;
	} else if (std & V4L2_STD_PAL_I) {
		dprintk(DBGLVL_API, " PAL-I\n");
		saa7164_api_set_dif(port, 0x00, 0x08); /* Video Standard */
		agc_disable = 0;
	} else if (std & V4L2_STD_PAL_M) {
		dprintk(DBGLVL_API, " PAL-M\n");
		saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */
		agc_disable = 0;
	} else if (std & V4L2_STD_PAL_N) {
		dprintk(DBGLVL_API, " PAL-N\n");
		saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */
		agc_disable = 0;
	} else if (std & V4L2_STD_PAL_Nc) {
		dprintk(DBGLVL_API, " PAL-Nc\n");
		saa7164_api_set_dif(port, 0x00, 0x01); /* Video Standard */
		agc_disable = 0;
	} else if (std & V4L2_STD_PAL_B) {
		dprintk(DBGLVL_API, " PAL-B\n");
		saa7164_api_set_dif(port, 0x00, 0x02); /* Video Standard */
		agc_disable = 0;
	} else if (std & V4L2_STD_PAL_DK) {
		dprintk(DBGLVL_API, " PAL-DK\n");
		saa7164_api_set_dif(port, 0x00, 0x10); /* Video Standard */
		agc_disable = 0;
	} else if (std & V4L2_STD_SECAM_L) {
		dprintk(DBGLVL_API, " SECAM-L\n");
		saa7164_api_set_dif(port, 0x00, 0x20); /* Video Standard */
		agc_disable = 0;
	} else {
		/* Unknown standard, assume DTV */
		dprintk(DBGLVL_API, " Unknown (assuming DTV)\n");
		/* Undefined Video Standard */
		saa7164_api_set_dif(port, 0x00, 0x80);
		agc_disable = 1;
	}

	saa7164_api_set_dif(port, 0x48, 0xa0); /* AGC Functions 1 */
	saa7164_api_set_dif(port, 0xc0, agc_disable); /* AGC Output Disable */
	saa7164_api_set_dif(port, 0x7c, 0x04); /* CVBS EQ */
	saa7164_api_set_dif(port, 0x04, 0x01); /* Active */
	msleep(100);
	saa7164_api_set_dif(port, 0x04, 0x00); /* Active (again) */
	msleep(100);

	return ret;
}

/* Ensure the dif is in the correct state for the operating mode
 *
(analog / dtv). We only configure the diff through the analog encoder * so when we're in digital mode we need to find the appropriate encoder * and use it to configure the DIF. */ int saa7164_api_initialize_dif(struct saa7164_port *port) { struct saa7164_dev *dev = port->dev; struct saa7164_port *p = NULL; int ret = -EINVAL; u32 std = 0; dprintk(DBGLVL_API, "%s(nr=%d type=%d)\n", __func__, port->nr, port->type); if (port->type == SAA7164_MPEG_ENCODER) { /* Pick any analog standard to init the diff. * we'll come back during encoder_init' * and set the correct standard if requried. */ std = V4L2_STD_NTSC; } else if (port->type == SAA7164_MPEG_DVB) { if (port->nr == SAA7164_PORT_TS1) p = &dev->ports[SAA7164_PORT_ENC1]; else p = &dev->ports[SAA7164_PORT_ENC2]; } else if (port->type == SAA7164_MPEG_VBI) { std = V4L2_STD_NTSC; if (port->nr == SAA7164_PORT_VBI1) p = &dev->ports[SAA7164_PORT_ENC1]; else p = &dev->ports[SAA7164_PORT_ENC2]; } else BUG(); if (p) ret = saa7164_api_configure_dif(p, std); return ret; } int saa7164_api_transition_port(struct saa7164_port *port, u8 mode) { struct saa7164_dev *dev = port->dev; int ret; dprintk(DBGLVL_API, "%s(nr=%d unitid=0x%x,%d)\n", __func__, port->nr, port->hwcfg.unitid, mode); ret = saa7164_cmd_send(port->dev, port->hwcfg.unitid, SET_CUR, SAA_STATE_CONTROL, sizeof(mode), &mode); if (ret != SAA_OK) printk(KERN_ERR "%s(portnr %d unitid 0x%x) error, ret = 0x%x\n", __func__, port->nr, port->hwcfg.unitid, ret); return ret; } int saa7164_api_get_fw_version(struct saa7164_dev *dev, u32 *version) { int ret; ret = saa7164_cmd_send(dev, 0, GET_CUR, GET_FW_VERSION_CONTROL, sizeof(u32), version); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); return ret; } int saa7164_api_read_eeprom(struct saa7164_dev *dev, u8 *buf, int buflen) { u8 reg[] = { 0x0f, 0x00 }; if (buflen < 128) return -ENOMEM; /* Assumption: Hauppauge eeprom is at 0xa0 on on bus 0 */ /* TODO: Pull the details from the boards struct */ return 
saa7164_api_i2c_read(&dev->i2c_bus[0], 0xa0 >> 1, sizeof(reg), &reg[0], 128, buf); } int saa7164_api_configure_port_vbi(struct saa7164_dev *dev, struct saa7164_port *port) { struct tmComResVBIFormatDescrHeader *fmt = &port->vbi_fmt_ntsc; dprintk(DBGLVL_API, " bFormatIndex = 0x%x\n", fmt->bFormatIndex); dprintk(DBGLVL_API, " VideoStandard = 0x%x\n", fmt->VideoStandard); dprintk(DBGLVL_API, " StartLine = %d\n", fmt->StartLine); dprintk(DBGLVL_API, " EndLine = %d\n", fmt->EndLine); dprintk(DBGLVL_API, " FieldRate = %d\n", fmt->FieldRate); dprintk(DBGLVL_API, " bNumLines = %d\n", fmt->bNumLines); /* Cache the hardware configuration in the port */ port->bufcounter = port->hwcfg.BARLocation; port->pitch = port->hwcfg.BARLocation + (2 * sizeof(u32)); port->bufsize = port->hwcfg.BARLocation + (3 * sizeof(u32)); port->bufoffset = port->hwcfg.BARLocation + (4 * sizeof(u32)); port->bufptr32l = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount) + sizeof(u32); port->bufptr32h = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount); port->bufptr64 = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount); dprintk(DBGLVL_API, " = port->hwcfg.BARLocation = 0x%x\n", port->hwcfg.BARLocation); dprintk(DBGLVL_API, " = VS_FORMAT_VBI (becomes dev->en[%d])\n", port->nr); return 0; } int saa7164_api_configure_port_mpeg2ts(struct saa7164_dev *dev, struct saa7164_port *port, struct tmComResTSFormatDescrHeader *tsfmt) { dprintk(DBGLVL_API, " bFormatIndex = 0x%x\n", tsfmt->bFormatIndex); dprintk(DBGLVL_API, " bDataOffset = 0x%x\n", tsfmt->bDataOffset); dprintk(DBGLVL_API, " bPacketLength= 0x%x\n", tsfmt->bPacketLength); dprintk(DBGLVL_API, " bStrideLength= 0x%x\n", tsfmt->bStrideLength); dprintk(DBGLVL_API, " bguid = (....)\n"); /* Cache the hardware configuration in the port */ port->bufcounter = port->hwcfg.BARLocation; port->pitch = port->hwcfg.BARLocation + (2 * sizeof(u32)); port->bufsize = 
port->hwcfg.BARLocation + (3 * sizeof(u32)); port->bufoffset = port->hwcfg.BARLocation + (4 * sizeof(u32)); port->bufptr32l = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount) + sizeof(u32); port->bufptr32h = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount); port->bufptr64 = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount); dprintk(DBGLVL_API, " = port->hwcfg.BARLocation = 0x%x\n", port->hwcfg.BARLocation); dprintk(DBGLVL_API, " = VS_FORMAT_MPEGTS (becomes dev->ts[%d])\n", port->nr); return 0; } int saa7164_api_configure_port_mpeg2ps(struct saa7164_dev *dev, struct saa7164_port *port, struct tmComResPSFormatDescrHeader *fmt) { dprintk(DBGLVL_API, " bFormatIndex = 0x%x\n", fmt->bFormatIndex); dprintk(DBGLVL_API, " wPacketLength= 0x%x\n", fmt->wPacketLength); dprintk(DBGLVL_API, " wPackLength= 0x%x\n", fmt->wPackLength); dprintk(DBGLVL_API, " bPackDataType= 0x%x\n", fmt->bPackDataType); /* Cache the hardware configuration in the port */ /* TODO: CHECK THIS in the port config */ port->bufcounter = port->hwcfg.BARLocation; port->pitch = port->hwcfg.BARLocation + (2 * sizeof(u32)); port->bufsize = port->hwcfg.BARLocation + (3 * sizeof(u32)); port->bufoffset = port->hwcfg.BARLocation + (4 * sizeof(u32)); port->bufptr32l = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount) + sizeof(u32); port->bufptr32h = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount); port->bufptr64 = port->hwcfg.BARLocation + (4 * sizeof(u32)) + (sizeof(u32) * port->hwcfg.buffercount); dprintk(DBGLVL_API, " = port->hwcfg.BARLocation = 0x%x\n", port->hwcfg.BARLocation); dprintk(DBGLVL_API, " = VS_FORMAT_MPEGPS (becomes dev->enc[%d])\n", port->nr); return 0; } int saa7164_api_dump_subdevs(struct saa7164_dev *dev, u8 *buf, int len) { struct saa7164_port *tsport = NULL; struct saa7164_port *encport = NULL; struct saa7164_port 
*vbiport = NULL; u32 idx, next_offset; int i; struct tmComResDescrHeader *hdr, *t; struct tmComResExtDevDescrHeader *exthdr; struct tmComResPathDescrHeader *pathhdr; struct tmComResAntTermDescrHeader *anttermhdr; struct tmComResTunerDescrHeader *tunerunithdr; struct tmComResDMATermDescrHeader *vcoutputtermhdr; struct tmComResTSFormatDescrHeader *tsfmt; struct tmComResPSFormatDescrHeader *psfmt; struct tmComResSelDescrHeader *psel; struct tmComResProcDescrHeader *pdh; struct tmComResAFeatureDescrHeader *afd; struct tmComResEncoderDescrHeader *edh; struct tmComResVBIFormatDescrHeader *vbifmt; u32 currpath = 0; dprintk(DBGLVL_API, "%s(?,?,%d) sizeof(struct tmComResDescrHeader) = %d bytes\n", __func__, len, (u32)sizeof(struct tmComResDescrHeader)); for (idx = 0; idx < (len - sizeof(struct tmComResDescrHeader));) { hdr = (struct tmComResDescrHeader *)(buf + idx); if (hdr->type != CS_INTERFACE) return SAA_ERR_NOT_SUPPORTED; dprintk(DBGLVL_API, "@ 0x%x =\n", idx); switch (hdr->subtype) { case GENERAL_REQUEST: dprintk(DBGLVL_API, " GENERAL_REQUEST\n"); break; case VC_TUNER_PATH: dprintk(DBGLVL_API, " VC_TUNER_PATH\n"); pathhdr = (struct tmComResPathDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " pathid = 0x%x\n", pathhdr->pathid); currpath = pathhdr->pathid; break; case VC_INPUT_TERMINAL: dprintk(DBGLVL_API, " VC_INPUT_TERMINAL\n"); anttermhdr = (struct tmComResAntTermDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " terminalid = 0x%x\n", anttermhdr->terminalid); dprintk(DBGLVL_API, " terminaltype = 0x%x\n", anttermhdr->terminaltype); switch (anttermhdr->terminaltype) { case ITT_ANTENNA: dprintk(DBGLVL_API, " = ITT_ANTENNA\n"); break; case LINE_CONNECTOR: dprintk(DBGLVL_API, " = LINE_CONNECTOR\n"); break; case SPDIF_CONNECTOR: dprintk(DBGLVL_API, " = SPDIF_CONNECTOR\n"); break; case COMPOSITE_CONNECTOR: dprintk(DBGLVL_API, " = COMPOSITE_CONNECTOR\n"); break; case SVIDEO_CONNECTOR: dprintk(DBGLVL_API, " = SVIDEO_CONNECTOR\n"); break; case COMPONENT_CONNECTOR: 
dprintk(DBGLVL_API, " = COMPONENT_CONNECTOR\n"); break; case STANDARD_DMA: dprintk(DBGLVL_API, " = STANDARD_DMA\n"); break; default: dprintk(DBGLVL_API, " = undefined (0x%x)\n", anttermhdr->terminaltype); } dprintk(DBGLVL_API, " assocterminal= 0x%x\n", anttermhdr->assocterminal); dprintk(DBGLVL_API, " iterminal = 0x%x\n", anttermhdr->iterminal); dprintk(DBGLVL_API, " controlsize = 0x%x\n", anttermhdr->controlsize); break; case VC_OUTPUT_TERMINAL: dprintk(DBGLVL_API, " VC_OUTPUT_TERMINAL\n"); vcoutputtermhdr = (struct tmComResDMATermDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " unitid = 0x%x\n", vcoutputtermhdr->unitid); dprintk(DBGLVL_API, " terminaltype = 0x%x\n", vcoutputtermhdr->terminaltype); switch (vcoutputtermhdr->terminaltype) { case ITT_ANTENNA: dprintk(DBGLVL_API, " = ITT_ANTENNA\n"); break; case LINE_CONNECTOR: dprintk(DBGLVL_API, " = LINE_CONNECTOR\n"); break; case SPDIF_CONNECTOR: dprintk(DBGLVL_API, " = SPDIF_CONNECTOR\n"); break; case COMPOSITE_CONNECTOR: dprintk(DBGLVL_API, " = COMPOSITE_CONNECTOR\n"); break; case SVIDEO_CONNECTOR: dprintk(DBGLVL_API, " = SVIDEO_CONNECTOR\n"); break; case COMPONENT_CONNECTOR: dprintk(DBGLVL_API, " = COMPONENT_CONNECTOR\n"); break; case STANDARD_DMA: dprintk(DBGLVL_API, " = STANDARD_DMA\n"); break; default: dprintk(DBGLVL_API, " = undefined (0x%x)\n", vcoutputtermhdr->terminaltype); } dprintk(DBGLVL_API, " assocterminal= 0x%x\n", vcoutputtermhdr->assocterminal); dprintk(DBGLVL_API, " sourceid = 0x%x\n", vcoutputtermhdr->sourceid); dprintk(DBGLVL_API, " iterminal = 0x%x\n", vcoutputtermhdr->iterminal); dprintk(DBGLVL_API, " BARLocation = 0x%x\n", vcoutputtermhdr->BARLocation); dprintk(DBGLVL_API, " flags = 0x%x\n", vcoutputtermhdr->flags); dprintk(DBGLVL_API, " interruptid = 0x%x\n", vcoutputtermhdr->interruptid); dprintk(DBGLVL_API, " buffercount = 0x%x\n", vcoutputtermhdr->buffercount); dprintk(DBGLVL_API, " metadatasize = 0x%x\n", vcoutputtermhdr->metadatasize); dprintk(DBGLVL_API, " controlsize = 0x%x\n", 
vcoutputtermhdr->controlsize); dprintk(DBGLVL_API, " numformats = 0x%x\n", vcoutputtermhdr->numformats); t = (struct tmComResDescrHeader *) ((struct tmComResDMATermDescrHeader *)(buf + idx)); next_offset = idx + (vcoutputtermhdr->len); for (i = 0; i < vcoutputtermhdr->numformats; i++) { t = (struct tmComResDescrHeader *) (buf + next_offset); switch (t->subtype) { case VS_FORMAT_MPEG2TS: tsfmt = (struct tmComResTSFormatDescrHeader *)t; if (currpath == 1) tsport = &dev->ports[SAA7164_PORT_TS1]; else tsport = &dev->ports[SAA7164_PORT_TS2]; memcpy(&tsport->hwcfg, vcoutputtermhdr, sizeof(*vcoutputtermhdr)); saa7164_api_configure_port_mpeg2ts(dev, tsport, tsfmt); break; case VS_FORMAT_MPEG2PS: psfmt = (struct tmComResPSFormatDescrHeader *)t; if (currpath == 1) encport = &dev->ports[SAA7164_PORT_ENC1]; else encport = &dev->ports[SAA7164_PORT_ENC2]; memcpy(&encport->hwcfg, vcoutputtermhdr, sizeof(*vcoutputtermhdr)); saa7164_api_configure_port_mpeg2ps(dev, encport, psfmt); break; case VS_FORMAT_VBI: vbifmt = (struct tmComResVBIFormatDescrHeader *)t; if (currpath == 1) vbiport = &dev->ports[SAA7164_PORT_VBI1]; else vbiport = &dev->ports[SAA7164_PORT_VBI2]; memcpy(&vbiport->hwcfg, vcoutputtermhdr, sizeof(*vcoutputtermhdr)); memcpy(&vbiport->vbi_fmt_ntsc, vbifmt, sizeof(*vbifmt)); saa7164_api_configure_port_vbi(dev, vbiport); break; case VS_FORMAT_RDS: dprintk(DBGLVL_API, " = VS_FORMAT_RDS\n"); break; case VS_FORMAT_UNCOMPRESSED: dprintk(DBGLVL_API, " = VS_FORMAT_UNCOMPRESSED\n"); break; case VS_FORMAT_TYPE: dprintk(DBGLVL_API, " = VS_FORMAT_TYPE\n"); break; default: dprintk(DBGLVL_API, " = undefined (0x%x)\n", t->subtype); } next_offset += t->len; } break; case TUNER_UNIT: dprintk(DBGLVL_API, " TUNER_UNIT\n"); tunerunithdr = (struct tmComResTunerDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " unitid = 0x%x\n", tunerunithdr->unitid); dprintk(DBGLVL_API, " sourceid = 0x%x\n", tunerunithdr->sourceid); dprintk(DBGLVL_API, " iunit = 0x%x\n", tunerunithdr->iunit); 
dprintk(DBGLVL_API, " tuningstandards = 0x%x\n", tunerunithdr->tuningstandards); dprintk(DBGLVL_API, " controlsize = 0x%x\n", tunerunithdr->controlsize); dprintk(DBGLVL_API, " controls = 0x%x\n", tunerunithdr->controls); if (tunerunithdr->unitid == tunerunithdr->iunit) { if (currpath == 1) encport = &dev->ports[SAA7164_PORT_ENC1]; else encport = &dev->ports[SAA7164_PORT_ENC2]; memcpy(&encport->tunerunit, tunerunithdr, sizeof(struct tmComResTunerDescrHeader)); dprintk(DBGLVL_API, " (becomes dev->enc[%d] tuner)\n", encport->nr); } break; case VC_SELECTOR_UNIT: psel = (struct tmComResSelDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " VC_SELECTOR_UNIT\n"); dprintk(DBGLVL_API, " unitid = 0x%x\n", psel->unitid); dprintk(DBGLVL_API, " nrinpins = 0x%x\n", psel->nrinpins); dprintk(DBGLVL_API, " sourceid = 0x%x\n", psel->sourceid); break; case VC_PROCESSING_UNIT: pdh = (struct tmComResProcDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " VC_PROCESSING_UNIT\n"); dprintk(DBGLVL_API, " unitid = 0x%x\n", pdh->unitid); dprintk(DBGLVL_API, " sourceid = 0x%x\n", pdh->sourceid); dprintk(DBGLVL_API, " controlsize = 0x%x\n", pdh->controlsize); if (pdh->controlsize == 0x04) { if (currpath == 1) encport = &dev->ports[SAA7164_PORT_ENC1]; else encport = &dev->ports[SAA7164_PORT_ENC2]; memcpy(&encport->vidproc, pdh, sizeof(struct tmComResProcDescrHeader)); dprintk(DBGLVL_API, " (becomes dev->enc[%d])\n", encport->nr); } break; case FEATURE_UNIT: afd = (struct tmComResAFeatureDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " FEATURE_UNIT\n"); dprintk(DBGLVL_API, " unitid = 0x%x\n", afd->unitid); dprintk(DBGLVL_API, " sourceid = 0x%x\n", afd->sourceid); dprintk(DBGLVL_API, " controlsize = 0x%x\n", afd->controlsize); if (currpath == 1) encport = &dev->ports[SAA7164_PORT_ENC1]; else encport = &dev->ports[SAA7164_PORT_ENC2]; memcpy(&encport->audfeat, afd, sizeof(struct tmComResAFeatureDescrHeader)); dprintk(DBGLVL_API, " (becomes dev->enc[%d])\n", encport->nr); break; case ENCODER_UNIT: edh = 
(struct tmComResEncoderDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " ENCODER_UNIT\n"); dprintk(DBGLVL_API, " subtype = 0x%x\n", edh->subtype); dprintk(DBGLVL_API, " unitid = 0x%x\n", edh->unitid); dprintk(DBGLVL_API, " vsourceid = 0x%x\n", edh->vsourceid); dprintk(DBGLVL_API, " asourceid = 0x%x\n", edh->asourceid); dprintk(DBGLVL_API, " iunit = 0x%x\n", edh->iunit); if (edh->iunit == edh->unitid) { if (currpath == 1) encport = &dev->ports[SAA7164_PORT_ENC1]; else encport = &dev->ports[SAA7164_PORT_ENC2]; memcpy(&encport->encunit, edh, sizeof(struct tmComResEncoderDescrHeader)); dprintk(DBGLVL_API, " (becomes dev->enc[%d])\n", encport->nr); } break; case EXTENSION_UNIT: dprintk(DBGLVL_API, " EXTENSION_UNIT\n"); exthdr = (struct tmComResExtDevDescrHeader *)(buf + idx); dprintk(DBGLVL_API, " unitid = 0x%x\n", exthdr->unitid); dprintk(DBGLVL_API, " deviceid = 0x%x\n", exthdr->deviceid); dprintk(DBGLVL_API, " devicetype = 0x%x\n", exthdr->devicetype); if (exthdr->devicetype & 0x1) dprintk(DBGLVL_API, " = Decoder Device\n"); if (exthdr->devicetype & 0x2) dprintk(DBGLVL_API, " = GPIO Source\n"); if (exthdr->devicetype & 0x4) dprintk(DBGLVL_API, " = Video Decoder\n"); if (exthdr->devicetype & 0x8) dprintk(DBGLVL_API, " = Audio Decoder\n"); if (exthdr->devicetype & 0x20) dprintk(DBGLVL_API, " = Crossbar\n"); if (exthdr->devicetype & 0x40) dprintk(DBGLVL_API, " = Tuner\n"); if (exthdr->devicetype & 0x80) dprintk(DBGLVL_API, " = IF PLL\n"); if (exthdr->devicetype & 0x100) dprintk(DBGLVL_API, " = Demodulator\n"); if (exthdr->devicetype & 0x200) dprintk(DBGLVL_API, " = RDS Decoder\n"); if (exthdr->devicetype & 0x400) dprintk(DBGLVL_API, " = Encoder\n"); if (exthdr->devicetype & 0x800) dprintk(DBGLVL_API, " = IR Decoder\n"); if (exthdr->devicetype & 0x1000) dprintk(DBGLVL_API, " = EEPROM\n"); if (exthdr->devicetype & 0x2000) dprintk(DBGLVL_API, " = VBI Decoder\n"); if (exthdr->devicetype & 0x10000) dprintk(DBGLVL_API, " = Streaming Device\n"); if (exthdr->devicetype & 
0x20000) dprintk(DBGLVL_API, " = DRM Device\n"); if (exthdr->devicetype & 0x40000000) dprintk(DBGLVL_API, " = Generic Device\n"); if (exthdr->devicetype & 0x80000000) dprintk(DBGLVL_API, " = Config Space Device\n"); dprintk(DBGLVL_API, " numgpiopins = 0x%x\n", exthdr->numgpiopins); dprintk(DBGLVL_API, " numgpiogroups = 0x%x\n", exthdr->numgpiogroups); dprintk(DBGLVL_API, " controlsize = 0x%x\n", exthdr->controlsize); if (exthdr->devicetype & 0x80) { if (currpath == 1) encport = &dev->ports[SAA7164_PORT_ENC1]; else encport = &dev->ports[SAA7164_PORT_ENC2]; memcpy(&encport->ifunit, exthdr, sizeof(struct tmComResExtDevDescrHeader)); dprintk(DBGLVL_API, " (becomes dev->enc[%d])\n", encport->nr); } break; case PVC_INFRARED_UNIT: dprintk(DBGLVL_API, " PVC_INFRARED_UNIT\n"); break; case DRM_UNIT: dprintk(DBGLVL_API, " DRM_UNIT\n"); break; default: dprintk(DBGLVL_API, "default %d\n", hdr->subtype); } dprintk(DBGLVL_API, " 1.%x\n", hdr->len); dprintk(DBGLVL_API, " 2.%x\n", hdr->type); dprintk(DBGLVL_API, " 3.%x\n", hdr->subtype); dprintk(DBGLVL_API, " 4.%x\n", hdr->unitid); idx += hdr->len; } return 0; } int saa7164_api_enum_subdevs(struct saa7164_dev *dev) { int ret; u32 buflen = 0; u8 *buf; dprintk(DBGLVL_API, "%s()\n", __func__); /* Get the total descriptor length */ ret = saa7164_cmd_send(dev, 0, GET_LEN, GET_DESCRIPTORS_CONTROL, sizeof(buflen), &buflen); if (ret != SAA_OK) printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); dprintk(DBGLVL_API, "%s() total descriptor size = %d bytes.\n", __func__, buflen); /* Allocate enough storage for all of the descs */ buf = kzalloc(buflen, GFP_KERNEL); if (!buf) return SAA_ERR_NO_RESOURCES; /* Retrieve them */ ret = saa7164_cmd_send(dev, 0, GET_CUR, GET_DESCRIPTORS_CONTROL, buflen, buf); if (ret != SAA_OK) { printk(KERN_ERR "%s() error, ret = 0x%x\n", __func__, ret); goto out; } if (saa_debug & DBGLVL_API) saa7164_dumphex16(dev, buf, (buflen/16)*16); saa7164_api_dump_subdevs(dev, buf, buflen); out: kfree(buf); return ret; } 
/* Read 'datalen' bytes from an i2c device at 8-bit address 'addr', starting
 * at the (up to 4-byte) register address in 'reg'.  The transaction is
 * proxied through the firmware EXU register-access control: the request
 * buffer carries the register length, the byte count to read and the
 * register address, and the reply carries the data after the same header.
 * Returns 0 on success, -EIO on any failure.
 */
int saa7164_api_i2c_read(struct saa7164_i2c *bus, u8 addr, u32 reglen, u8 *reg,
	u32 datalen, u8 *data)
{
	struct saa7164_dev *dev = bus->dev;
	u16 len = 0;
	int unitid;
	u32 regval;
	u8 buf[256];
	int ret;

	dprintk(DBGLVL_API, "%s()\n", __func__);

	/* The firmware message format only supports 1-4 byte register addrs */
	if (reglen > 4)
		return -EIO;

	/* Assemble the big-endian register address for reference/debug.
	 * FIX: the 2-byte case used the logical '||' operator, which collapsed
	 * the value to 0/1; it must be the bitwise '|' like the other cases.
	 * NOTE(review): regval is computed but not consumed below -- the
	 * request buffer copies the raw 'reg' bytes instead; candidate for
	 * removal, kept here to avoid widening this fix.
	 */
	if (reglen == 1)
		regval = *(reg);
	else
	if (reglen == 2)
		regval = ((*(reg) << 8) | *(reg+1));
	else
	if (reglen == 3)
		regval = ((*(reg) << 16) | (*(reg+1) << 8) | *(reg+2));
	else
	if (reglen == 4)
		regval = ((*(reg) << 24) | (*(reg+1) << 16) |
			(*(reg+2) << 8) | *(reg+3));

	/* Prepare the send buffer */
	/* Bytes 00-03 source register length
	 *       04-07 source bytes to read
	 *       08... register address
	 */
	memset(buf, 0, sizeof(buf));
	memcpy((buf + 2 * sizeof(u32) + 0), reg, reglen);
	*((u32 *)(buf + 0 * sizeof(u32))) = reglen;
	*((u32 *)(buf + 1 * sizeof(u32))) = datalen;

	/* Map the i2c address onto the firmware unit that owns that device */
	unitid = saa7164_i2caddr_to_unitid(bus, addr);
	if (unitid < 0) {
		printk(KERN_ERR
			"%s() error, cannot translate regaddr 0x%x to unitid\n",
			__func__, addr);
		return -EIO;
	}

	/* Ask the firmware how large the access-control payload must be */
	ret = saa7164_cmd_send(bus->dev, unitid, GET_LEN,
		EXU_REGISTER_ACCESS_CONTROL, sizeof(len), &len);
	if (ret != SAA_OK) {
		printk(KERN_ERR "%s() error, ret(1) = 0x%x\n", __func__, ret);
		return -EIO;
	}

	dprintk(DBGLVL_API, "%s() len = %d bytes\n", __func__, len);

	if (saa_debug & DBGLVL_I2C)
		saa7164_dumphex16(dev, buf, 2 * 16);

	/* Perform the read; on success the data follows the 8-byte header
	 * and the register address in the reply buffer.
	 */
	ret = saa7164_cmd_send(bus->dev, unitid, GET_CUR,
		EXU_REGISTER_ACCESS_CONTROL, len, &buf);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret(2) = 0x%x\n", __func__, ret);
	else {
		if (saa_debug & DBGLVL_I2C)
			saa7164_dumphex16(dev, buf, sizeof(buf));
		memcpy(data, (buf + 2 * sizeof(u32) + reglen), datalen);
	}

	return ret == SAA_OK ? 0 : -EIO;
}

/* For a given 8 bit i2c address device, write the buffer */
/* The register address is expected as the leading bytes of 'data'; its
 * length is looked up per-device via saa7164_i2caddr_to_reglen().
 * Returns 0 on success, -EIO on any failure.
 */
int saa7164_api_i2c_write(struct saa7164_i2c *bus, u8 addr, u32 datalen,
	u8 *data)
{
	struct saa7164_dev *dev = bus->dev;
	u16 len = 0;
	int unitid;
	int reglen;
	u8 buf[256];
	int ret;

	dprintk(DBGLVL_API, "%s()\n", __func__);

	/* 232 = 256 byte buffer minus message header overhead */
	if ((datalen == 0) || (datalen > 232))
		return -EIO;

	memset(buf, 0, sizeof(buf));

	unitid = saa7164_i2caddr_to_unitid(bus, addr);
	if (unitid < 0) {
		printk(KERN_ERR
			"%s() error, cannot translate regaddr 0x%x to unitid\n",
			__func__, addr);
		return -EIO;
	}

	reglen = saa7164_i2caddr_to_reglen(bus, addr);
	if (reglen < 0) {
		printk(KERN_ERR
			"%s() error, cannot translate regaddr to reglen\n",
			__func__);
		return -EIO;
	}

	ret = saa7164_cmd_send(bus->dev, unitid, GET_LEN,
		EXU_REGISTER_ACCESS_CONTROL, sizeof(len), &len);
	if (ret != SAA_OK) {
		printk(KERN_ERR "%s() error, ret(1) = 0x%x\n", __func__, ret);
		return -EIO;
	}

	dprintk(DBGLVL_API, "%s() len = %d bytes\n", __func__, len);

	/* Prepare the send buffer */
	/* Bytes 00-03 dest register length
	 *       04-07 dest bytes to write
	 *       08... register address
	 */
	*((u32 *)(buf + 0 * sizeof(u32))) = reglen;
	*((u32 *)(buf + 1 * sizeof(u32))) = datalen - reglen;
	memcpy((buf + 2 * sizeof(u32)), data, datalen);

	if (saa_debug & DBGLVL_I2C)
		saa7164_dumphex16(dev, buf, sizeof(buf));

	ret = saa7164_cmd_send(bus->dev, unitid, SET_CUR,
		EXU_REGISTER_ACCESS_CONTROL, len, &buf);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret(2) = 0x%x\n", __func__, ret);

	return ret == SAA_OK ? 0 : -EIO;
}

/* Drive a single GPIO pin on a firmware unit.
 * pin:   0-7
 * state: 0 (clear), 1 (set), 2 -- range-checked only; semantics of '2' are
 *        defined by the firmware (not visible here).
 * Returns the SAA_* status from the command layer.
 */
int saa7164_api_modify_gpio(struct saa7164_dev *dev, u8 unitid,
	u8 pin, u8 state)
{
	int ret;
	struct tmComResGPIO t;

	dprintk(DBGLVL_API, "%s(0x%x, %d, %d)\n",
		__func__, unitid, pin, state);

	if ((pin > 7) || (state > 2))
		return SAA_ERR_BAD_PARAMETER;

	t.pin = pin;
	t.state = state;

	ret = saa7164_cmd_send(dev, unitid, SET_CUR,
		EXU_GPIO_CONTROL, sizeof(t), &t);
	if (ret != SAA_OK)
		printk(KERN_ERR "%s() error, ret = 0x%x\n",
			__func__, ret);

	return ret;
}

/* Convenience wrapper: raise GPIO 'pin' on 'unitid'. */
int saa7164_api_set_gpiobit(struct saa7164_dev *dev, u8 unitid,
	u8 pin)
{
	return saa7164_api_modify_gpio(dev, unitid, pin, 1);
}

/* Convenience wrapper: lower GPIO 'pin' on 'unitid'. */
int saa7164_api_clear_gpiobit(struct saa7164_dev *dev, u8 unitid,
	u8 pin)
{
	return saa7164_api_modify_gpio(dev, unitid, pin, 0);
}
gpl-2.0
DirtyJerz/omap
drivers/block/aoe/aoedev.c
8299
5423
/* Copyright (c) 2007 Coraid, Inc. See COPYING for GPL terms. */ /* * aoedev.c * AoE device utility functions; maintains device list. */ #include <linux/hdreg.h> #include <linux/blkdev.h> #include <linux/netdevice.h> #include <linux/delay.h> #include <linux/slab.h> #include "aoe.h" static void dummy_timer(ulong); static void aoedev_freedev(struct aoedev *); static void freetgt(struct aoedev *d, struct aoetgt *t); static void skbpoolfree(struct aoedev *d); static struct aoedev *devlist; static DEFINE_SPINLOCK(devlist_lock); struct aoedev * aoedev_by_aoeaddr(int maj, int min) { struct aoedev *d; ulong flags; spin_lock_irqsave(&devlist_lock, flags); for (d=devlist; d; d=d->next) if (d->aoemajor == maj && d->aoeminor == min) break; spin_unlock_irqrestore(&devlist_lock, flags); return d; } static void dummy_timer(ulong vp) { struct aoedev *d; d = (struct aoedev *)vp; if (d->flags & DEVFL_TKILL) return; d->timer.expires = jiffies + HZ; add_timer(&d->timer); } void aoedev_downdev(struct aoedev *d) { struct aoetgt **t, **te; struct frame *f, *e; struct buf *buf; struct bio *bio; t = d->targets; te = t + NTARGETS; for (; t < te && *t; t++) { f = (*t)->frames; e = f + (*t)->nframes; for (; f < e; f->tag = FREETAG, f->buf = NULL, f++) { if (f->tag == FREETAG || f->buf == NULL) continue; buf = f->buf; bio = buf->bio; if (--buf->nframesout == 0 && buf != d->inprocess) { mempool_free(buf, d->bufpool); bio_endio(bio, -EIO); } } (*t)->maxout = (*t)->nframes; (*t)->nout = 0; } buf = d->inprocess; if (buf) { bio = buf->bio; mempool_free(buf, d->bufpool); bio_endio(bio, -EIO); } d->inprocess = NULL; d->htgt = NULL; while (!list_empty(&d->bufq)) { buf = container_of(d->bufq.next, struct buf, bufs); list_del(d->bufq.next); bio = buf->bio; mempool_free(buf, d->bufpool); bio_endio(bio, -EIO); } if (d->gd) set_capacity(d->gd, 0); d->flags &= ~DEVFL_UP; } static void aoedev_freedev(struct aoedev *d) { struct aoetgt **t, **e; cancel_work_sync(&d->work); if (d->gd) { aoedisk_rm_sysfs(d); 
del_gendisk(d->gd); put_disk(d->gd); } t = d->targets; e = t + NTARGETS; for (; t < e && *t; t++) freetgt(d, *t); if (d->bufpool) mempool_destroy(d->bufpool); skbpoolfree(d); blk_cleanup_queue(d->blkq); kfree(d); } int aoedev_flush(const char __user *str, size_t cnt) { ulong flags; struct aoedev *d, **dd; struct aoedev *rmd = NULL; char buf[16]; int all = 0; if (cnt >= 3) { if (cnt > sizeof buf) cnt = sizeof buf; if (copy_from_user(buf, str, cnt)) return -EFAULT; all = !strncmp(buf, "all", 3); } spin_lock_irqsave(&devlist_lock, flags); dd = &devlist; while ((d = *dd)) { spin_lock(&d->lock); if ((!all && (d->flags & DEVFL_UP)) || (d->flags & (DEVFL_GDALLOC|DEVFL_NEWSIZE)) || d->nopen) { spin_unlock(&d->lock); dd = &d->next; continue; } *dd = d->next; aoedev_downdev(d); d->flags |= DEVFL_TKILL; spin_unlock(&d->lock); d->next = rmd; rmd = d; } spin_unlock_irqrestore(&devlist_lock, flags); while ((d = rmd)) { rmd = d->next; del_timer_sync(&d->timer); aoedev_freedev(d); /* must be able to sleep */ } return 0; } /* I'm not really sure that this is a realistic problem, but if the network driver goes gonzo let's just leak memory after complaining. */ static void skbfree(struct sk_buff *skb) { enum { Sms = 100, Tms = 3*1000}; int i = Tms / Sms; if (skb == NULL) return; while (atomic_read(&skb_shinfo(skb)->dataref) != 1 && i-- > 0) msleep(Sms); if (i < 0) { printk(KERN_ERR "aoe: %s holds ref: %s\n", skb->dev ? 
skb->dev->name : "netif", "cannot free skb -- memory leaked."); return; } skb_shinfo(skb)->nr_frags = skb->data_len = 0; skb_trim(skb, 0); dev_kfree_skb(skb); } static void skbpoolfree(struct aoedev *d) { struct sk_buff *skb, *tmp; skb_queue_walk_safe(&d->skbpool, skb, tmp) skbfree(skb); __skb_queue_head_init(&d->skbpool); } /* find it or malloc it */ struct aoedev * aoedev_by_sysminor_m(ulong sysminor) { struct aoedev *d; ulong flags; spin_lock_irqsave(&devlist_lock, flags); for (d=devlist; d; d=d->next) if (d->sysminor == sysminor) break; if (d) goto out; d = kcalloc(1, sizeof *d, GFP_ATOMIC); if (!d) goto out; INIT_WORK(&d->work, aoecmd_sleepwork); spin_lock_init(&d->lock); skb_queue_head_init(&d->sendq); skb_queue_head_init(&d->skbpool); init_timer(&d->timer); d->timer.data = (ulong) d; d->timer.function = dummy_timer; d->timer.expires = jiffies + HZ; add_timer(&d->timer); d->bufpool = NULL; /* defer to aoeblk_gdalloc */ d->tgt = d->targets; INIT_LIST_HEAD(&d->bufq); d->sysminor = sysminor; d->aoemajor = AOEMAJOR(sysminor); d->aoeminor = AOEMINOR(sysminor); d->mintimer = MINTIMER; d->next = devlist; devlist = d; out: spin_unlock_irqrestore(&devlist_lock, flags); return d; } static void freetgt(struct aoedev *d, struct aoetgt *t) { struct frame *f, *e; f = t->frames; e = f + t->nframes; for (; f < e; f++) skbfree(f->skb); kfree(t->frames); kfree(t); } void aoedev_exit(void) { struct aoedev *d; ulong flags; while ((d = devlist)) { devlist = d->next; spin_lock_irqsave(&d->lock, flags); aoedev_downdev(d); d->flags |= DEVFL_TKILL; spin_unlock_irqrestore(&d->lock, flags); del_timer_sync(&d->timer); aoedev_freedev(d); } } int __init aoedev_init(void) { return 0; }
gpl-2.0
Ander-Alvarez/ultracm13
samples/tracepoints/tracepoint-probe-sample.c
9323
1346
/* * tracepoint-probe-sample.c * * sample tracepoint probes. */ #include <linux/module.h> #include <linux/file.h> #include <linux/dcache.h> #include "tp-samples-trace.h" /* * Here the caller only guarantees locking for struct file and struct inode. * Locking must therefore be done in the probe to use the dentry. */ static void probe_subsys_event(void *ignore, struct inode *inode, struct file *file) { path_get(&file->f_path); dget(file->f_path.dentry); printk(KERN_INFO "Event is encountered with filename %s\n", file->f_path.dentry->d_name.name); dput(file->f_path.dentry); path_put(&file->f_path); } static void probe_subsys_eventb(void *ignore) { printk(KERN_INFO "Event B is encountered\n"); } static int __init tp_sample_trace_init(void) { int ret; ret = register_trace_subsys_event(probe_subsys_event, NULL); WARN_ON(ret); ret = register_trace_subsys_eventb(probe_subsys_eventb, NULL); WARN_ON(ret); return 0; } module_init(tp_sample_trace_init); static void __exit tp_sample_trace_exit(void) { unregister_trace_subsys_eventb(probe_subsys_eventb, NULL); unregister_trace_subsys_event(probe_subsys_event, NULL); tracepoint_synchronize_unregister(); } module_exit(tp_sample_trace_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mathieu Desnoyers"); MODULE_DESCRIPTION("Tracepoint Probes Samples");
gpl-2.0
060411121/zcl_linux
drivers/video/fbdev/g364fb.c
10091
6919
/* $Id: g364fb.c,v 1.3 1998/08/28 22:43:00 tsbogend Exp $ * * linux/drivers/video/g364fb.c -- Mips Magnum frame buffer device * * (C) 1998 Thomas Bogendoerfer * * This driver is based on tgafb.c * * Copyright (C) 1997 Geert Uytterhoeven * Copyright (C) 1995 Jay Estabrook * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/console.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/fb.h> #include <linux/init.h> #include <asm/io.h> #include <asm/jazz.h> /* * Various defines for the G364 */ #define G364_MEM_BASE 0xe4400000 #define G364_PORT_BASE 0xe4000000 #define ID_REG 0xe4000000 /* Read only */ #define BOOT_REG 0xe4080000 #define TIMING_REG 0xe4080108 /* to 0x080170 - DON'T TOUCH! */ #define DISPLAY_REG 0xe4080118 #define VDISPLAY_REG 0xe4080150 #define MASK_REG 0xe4080200 #define CTLA_REG 0xe4080300 #define CURS_TOGGLE 0x800000 #define BIT_PER_PIX 0x700000 /* bits 22 to 20 of Control A */ #define DELAY_SAMPLE 0x080000 #define PORT_INTER 0x040000 #define PIX_PIPE_DEL 0x030000 /* bits 17 and 16 of Control A */ #define PIX_PIPE_DEL2 0x008000 /* same as above - don't ask me why */ #define TR_CYCLE_TOG 0x004000 #define VRAM_ADR_INC 0x003000 /* bits 13 and 12 of Control A */ #define BLANK_OFF 0x000800 #define FORCE_BLANK 0x000400 #define BLK_FUN_SWTCH 0x000200 #define BLANK_IO 0x000100 #define BLANK_LEVEL 0x000080 #define A_VID_FORM 0x000040 #define D_SYNC_FORM 0x000020 #define FRAME_FLY_PAT 0x000010 #define OP_MODE 0x000008 #define INTL_STAND 0x000004 #define SCRN_FORM 0x000002 #define ENABLE_VTG 0x000001 #define TOP_REG 0xe4080400 #define CURS_PAL_REG 0xe4080508 /* to 0x080518 */ #define CHKSUM_REG 0xe4080600 /* to 0x080610 - unused */ #define CURS_POS_REG 
0xe4080638 #define CLR_PAL_REG 0xe4080800 /* to 0x080ff8 */ #define CURS_PAT_REG 0xe4081000 /* to 0x081ff8 */ #define MON_ID_REG 0xe4100000 /* unused */ #define RESET_REG 0xe4180000 /* Write only */ static struct fb_info fb_info; static struct fb_fix_screeninfo fb_fix __initdata = { .id = "G364 8plane", .smem_start = 0x40000000, /* physical address */ .type = FB_TYPE_PACKED_PIXELS, .visual = FB_VISUAL_PSEUDOCOLOR, .ypanstep = 1, .accel = FB_ACCEL_NONE, }; static struct fb_var_screeninfo fb_var __initdata = { .bits_per_pixel = 8, .red = { 0, 8, 0 }, .green = { 0, 8, 0 }, .blue = { 0, 8, 0 }, .activate = FB_ACTIVATE_NOW, .height = -1, .width = -1, .pixclock = 39722, .left_margin = 40, .right_margin = 24, .upper_margin = 32, .lower_margin = 11, .hsync_len = 96, .vsync_len = 2, .vmode = FB_VMODE_NONINTERLACED, }; /* * Interface used by the world */ int g364fb_init(void); static int g364fb_pan_display(struct fb_var_screeninfo *var, struct fb_info *info); static int g364fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info); static int g364fb_cursor(struct fb_info *info, struct fb_cursor *cursor); static int g364fb_blank(int blank, struct fb_info *info); static struct fb_ops g364fb_ops = { .owner = THIS_MODULE, .fb_setcolreg = g364fb_setcolreg, .fb_pan_display = g364fb_pan_display, .fb_blank = g364fb_blank, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_cursor = g364fb_cursor, }; int g364fb_cursor(struct fb_info *info, struct fb_cursor *cursor) { switch (cursor->enable) { case CM_ERASE: *(unsigned int *) CTLA_REG |= CURS_TOGGLE; break; case CM_MOVE: case CM_DRAW: *(unsigned int *) CTLA_REG &= ~CURS_TOGGLE; *(unsigned int *) CURS_POS_REG = ((x * fontwidth(p)) << 12) | ((y * fontheight(p)) - info->var.yoffset); break; } return 0; } /* * Pan or Wrap the Display * * This call looks only at xoffset, yoffset and the FB_VMODE_YWRAP flag */ static int g364fb_pan_display(struct 
fb_var_screeninfo *var, struct fb_info *info) { if (var->xoffset || var->yoffset + info->var.yres > info->var.yres_virtual) return -EINVAL; *(unsigned int *) TOP_REG = var->yoffset * info->var.xres; return 0; } /* * Blank the display. */ static int g364fb_blank(int blank, struct fb_info *info) { if (blank) *(unsigned int *) CTLA_REG |= FORCE_BLANK; else *(unsigned int *) CTLA_REG &= ~FORCE_BLANK; return 0; } /* * Set a single color register. Return != 0 for invalid regno. */ static int g364fb_setcolreg(u_int regno, u_int red, u_int green, u_int blue, u_int transp, struct fb_info *info) { volatile unsigned int *ptr = (volatile unsigned int *) CLR_PAL_REG; if (regno > 255) return 1; red >>= 8; green >>= 8; blue >>= 8; ptr[regno << 1] = (red << 16) | (green << 8) | blue; return 0; } /* * Initialisation */ int __init g364fb_init(void) { volatile unsigned int *pal_ptr = (volatile unsigned int *) CLR_PAL_REG; volatile unsigned int *curs_pal_ptr = (volatile unsigned int *) CURS_PAL_REG; int mem, i, j; if (fb_get_options("g364fb", NULL)) return -ENODEV; /* TBD: G364 detection */ /* get the resolution set by ARC console */ *(volatile unsigned int *) CTLA_REG &= ~ENABLE_VTG; fb_var.xres = (*((volatile unsigned int *) DISPLAY_REG) & 0x00ffffff) * 4; fb_var.yres = (*((volatile unsigned int *) VDISPLAY_REG) & 0x00ffffff) / 2; *(volatile unsigned int *) CTLA_REG |= ENABLE_VTG; /* setup cursor */ curs_pal_ptr[0] |= 0x00ffffff; curs_pal_ptr[2] |= 0x00ffffff; curs_pal_ptr[4] |= 0x00ffffff; /* * first set the whole cursor to transparent */ for (i = 0; i < 512; i++) *(unsigned short *) (CURS_PAT_REG + i * 8) = 0; /* * switch the last two lines to cursor palette 3 * we assume here, that FONTSIZE_X is 8 */ *(unsigned short *) (CURS_PAT_REG + 14 * 64) = 0xffff; *(unsigned short *) (CURS_PAT_REG + 15 * 64) = 0xffff; fb_var.xres_virtual = fbvar.xres; fb_fix.line_length = (xres / 8) * fb_var.bits_per_pixel; fb_fix.smem_start = 0x40000000; /* physical address */ /* get size of video memory; 
this is special for the JAZZ hardware */ mem = (r4030_read_reg32(JAZZ_R4030_CONFIG) >> 8) & 3; fb_fix.smem_len = (1 << (mem * 2)) * 512 * 1024; fb_var.yres_virtual = fb_fix.smem_len / fb_var.xres; fb_info.fbops = &g364fb_ops; fb_info.screen_base = (char *) G364_MEM_BASE; /* virtual kernel address */ fb_info.var = fb_var; fb_info.fix = fb_fix; fb_info.flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN; fb_alloc_cmap(&fb_info.cmap, 255, 0); if (register_framebuffer(&fb_info) < 0) return -EINVAL; return 0; } module_init(g364fb_init); MODULE_LICENSE("GPL");
gpl-2.0
mehrvarz/msm-kitkat-tm-usbhost-charge
arch/tile/lib/memmove.c
12395
1452
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/types.h> #include <linux/string.h> #include <linux/module.h> void *memmove(void *dest, const void *src, size_t n) { if ((const char *)src >= (char *)dest + n || (char *)dest >= (const char *)src + n) { /* We found no overlap, so let memcpy do all the heavy * lifting (prefetching, etc.) */ return memcpy(dest, src, n); } if (n != 0) { const uint8_t *in; uint8_t x; uint8_t *out; int stride; if (src < dest) { /* copy backwards */ in = (const uint8_t *)src + n - 1; out = (uint8_t *)dest + n - 1; stride = -1; } else { /* copy forwards */ in = (const uint8_t *)src; out = (uint8_t *)dest; stride = 1; } /* Manually software-pipeline this loop. */ x = *in; in += stride; while (--n != 0) { *out = x; out += stride; x = *in; in += stride; } *out = x; } return dest; } EXPORT_SYMBOL(memmove);
gpl-2.0
garwynn/SMN900P_MI5_Kernel
drivers/net/fddi/skfp/hwmtm.c
12651
56623
/****************************************************************************** * * (C)Copyright 1998,1999 SysKonnect, * a business unit of Schneider & Koch & Co. Datensysteme GmbH. * * See the file "skfddi.c" for further information. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * The information in this file is provided "AS IS" without warranty. * ******************************************************************************/ #ifndef lint static char const ID_sccs[] = "@(#)hwmtm.c 1.40 99/05/31 (C) SK" ; #endif #define HWMTM #ifndef FDDI #define FDDI #endif #include "h/types.h" #include "h/fddi.h" #include "h/smc.h" #include "h/supern_2.h" #include "h/skfbiinc.h" /* ------------------------------------------------------------- DOCUMENTATION ------------------------------------------------------------- BEGIN_MANUAL_ENTRY(DOCUMENTATION) T B D END_MANUAL_ENTRY */ /* ------------------------------------------------------------- LOCAL VARIABLES: ------------------------------------------------------------- */ #ifdef COMMON_MB_POOL static SMbuf *mb_start = 0 ; static SMbuf *mb_free = 0 ; static int mb_init = FALSE ; static int call_count = 0 ; #endif /* ------------------------------------------------------------- EXTERNE VARIABLES: ------------------------------------------------------------- */ #ifdef DEBUG #ifndef DEBUG_BRD extern struct smt_debug debug ; #endif #endif #ifdef NDIS_OS2 extern u_char offDepth ; extern u_char force_irq_pending ; #endif /* ------------------------------------------------------------- LOCAL FUNCTIONS: ------------------------------------------------------------- */ static void queue_llc_rx(struct s_smc *smc, SMbuf *mb); static void smt_to_llc(struct s_smc *smc, SMbuf *mb); static void init_txd_ring(struct s_smc *smc); static void 
init_rxd_ring(struct s_smc *smc); static void queue_txd_mb(struct s_smc *smc, SMbuf *mb); static u_long init_descr_ring(struct s_smc *smc, union s_fp_descr volatile *start, int count); static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue); static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue); static SMbuf* get_llc_rx(struct s_smc *smc); static SMbuf* get_txd_mb(struct s_smc *smc); static void mac_drv_clear_txd(struct s_smc *smc); /* ------------------------------------------------------------- EXTERNAL FUNCTIONS: ------------------------------------------------------------- */ /* The external SMT functions are listed in cmtdef.h */ extern void* mac_drv_get_space(struct s_smc *smc, unsigned int size); extern void* mac_drv_get_desc_mem(struct s_smc *smc, unsigned int size); extern void mac_drv_fill_rxd(struct s_smc *smc); extern void mac_drv_tx_complete(struct s_smc *smc, volatile struct s_smt_fp_txd *txd); extern void mac_drv_rx_complete(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd, int frag_count, int len); extern void mac_drv_requeue_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd, int frag_count); extern void mac_drv_clear_rxd(struct s_smc *smc, volatile struct s_smt_fp_rxd *rxd, int frag_count); #ifdef USE_OS_CPY extern void hwm_cpy_rxd2mb(void); extern void hwm_cpy_txd2mb(void); #endif #ifdef ALL_RX_COMPLETE extern void mac_drv_all_receives_complete(void); #endif extern u_long mac_drv_virt2phys(struct s_smc *smc, void *virt); extern u_long dma_master(struct s_smc *smc, void *virt, int len, int flag); #ifdef NDIS_OS2 extern void post_proc(void); #else extern void dma_complete(struct s_smc *smc, volatile union s_fp_descr *descr, int flag); #endif extern int mac_drv_rx_init(struct s_smc *smc, int len, int fc, char *look_ahead, int la_len); /* ------------------------------------------------------------- PUBLIC FUNCTIONS: ------------------------------------------------------------- */ void 
process_receive(struct s_smc *smc); void fddi_isr(struct s_smc *smc); void smt_free_mbuf(struct s_smc *smc, SMbuf *mb); void init_driver_fplus(struct s_smc *smc); void mac_drv_rx_mode(struct s_smc *smc, int mode); void init_fddi_driver(struct s_smc *smc, u_char *mac_addr); void mac_drv_clear_tx_queue(struct s_smc *smc); void mac_drv_clear_rx_queue(struct s_smc *smc); void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len, int frame_status); void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len, int frame_status); int mac_drv_init(struct s_smc *smc); int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len, int frame_status); u_int mac_drv_check_space(void); SMbuf* smt_get_mbuf(struct s_smc *smc); #ifdef DEBUG void mac_drv_debug_lev(void); #endif /* ------------------------------------------------------------- MACROS: ------------------------------------------------------------- */ #ifndef UNUSED #ifdef lint #define UNUSED(x) (x) = (x) #else #define UNUSED(x) #endif #endif #ifdef USE_CAN_ADDR #define MA smc->hw.fddi_canon_addr.a #define GROUP_ADDR_BIT 0x01 #else #define MA smc->hw.fddi_home_addr.a #define GROUP_ADDR_BIT 0x80 #endif #define RXD_TXD_COUNT (HWM_ASYNC_TXD_COUNT+HWM_SYNC_TXD_COUNT+\ SMT_R1_RXD_COUNT+SMT_R2_RXD_COUNT) #ifdef MB_OUTSIDE_SMC #define EXT_VIRT_MEM ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd) +\ MAX_MBUF*sizeof(SMbuf)) #define EXT_VIRT_MEM_2 ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd)) #else #define EXT_VIRT_MEM ((RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd)) #endif /* * define critical read for 16 Bit drivers */ #if defined(NDIS_OS2) || defined(ODI2) #define CR_READ(var) ((var) & 0xffff0000 | ((var) & 0xffff)) #else #define CR_READ(var) (__le32)(var) #endif #define IMASK_SLOW (IS_PLINT1 | IS_PLINT2 | IS_TIMINT | IS_TOKEN | \ IS_MINTR1 | IS_MINTR2 | IS_MINTR3 | IS_R1_P | \ IS_R1_C | IS_XA_C | IS_XS_C) /* ------------------------------------------------------------- INIT- AND SMT 
FUNCTIONS: ------------------------------------------------------------- */ /* * BEGIN_MANUAL_ENTRY(mac_drv_check_space) * u_int mac_drv_check_space() * * function DOWNCALL (drvsr.c) * This function calculates the needed non virtual * memory for MBufs, RxD and TxD descriptors etc. * needed by the driver. * * return u_int memory in bytes * * END_MANUAL_ENTRY */ u_int mac_drv_check_space(void) { #ifdef MB_OUTSIDE_SMC #ifdef COMMON_MB_POOL call_count++ ; if (call_count == 1) { return EXT_VIRT_MEM; } else { return EXT_VIRT_MEM_2; } #else return EXT_VIRT_MEM; #endif #else return 0; #endif } /* * BEGIN_MANUAL_ENTRY(mac_drv_init) * void mac_drv_init(smc) * * function DOWNCALL (drvsr.c) * In this function the hardware module allocates it's * memory. * The operating system dependent module should call * mac_drv_init once, after the adatper is detected. * END_MANUAL_ENTRY */ int mac_drv_init(struct s_smc *smc) { if (sizeof(struct s_smt_fp_rxd) % 16) { SMT_PANIC(smc,HWM_E0001,HWM_E0001_MSG) ; } if (sizeof(struct s_smt_fp_txd) % 16) { SMT_PANIC(smc,HWM_E0002,HWM_E0002_MSG) ; } /* * get the required memory for the RxDs and TxDs */ if (!(smc->os.hwm.descr_p = (union s_fp_descr volatile *) mac_drv_get_desc_mem(smc,(u_int) (RXD_TXD_COUNT+1)*sizeof(struct s_smt_fp_txd)))) { return 1; /* no space the hwm modul can't work */ } /* * get the memory for the SMT MBufs */ #ifndef MB_OUTSIDE_SMC smc->os.hwm.mbuf_pool.mb_start=(SMbuf *)(&smc->os.hwm.mbuf_pool.mb[0]) ; #else #ifndef COMMON_MB_POOL if (!(smc->os.hwm.mbuf_pool.mb_start = (SMbuf *) mac_drv_get_space(smc, MAX_MBUF*sizeof(SMbuf)))) { return 1; /* no space the hwm modul can't work */ } #else if (!mb_start) { if (!(mb_start = (SMbuf *) mac_drv_get_space(smc, MAX_MBUF*sizeof(SMbuf)))) { return 1; /* no space the hwm modul can't work */ } } #endif #endif return 0; } /* * BEGIN_MANUAL_ENTRY(init_driver_fplus) * init_driver_fplus(smc) * * Sets hardware modul specific values for the mode register 2 * (e.g. 
the byte alignment for the received frames, the position of the * least significant byte etc.) * END_MANUAL_ENTRY */ void init_driver_fplus(struct s_smc *smc) { smc->hw.fp.mdr2init = FM_LSB | FM_BMMODE | FM_ENNPRQ | FM_ENHSRQ | 3 ; #ifdef PCI smc->hw.fp.mdr2init |= FM_CHKPAR | FM_PARITY ; #endif smc->hw.fp.mdr3init = FM_MENRQAUNLCK | FM_MENRS ; #ifdef USE_CAN_ADDR /* enable address bit swapping */ smc->hw.fp.frselreg_init = FM_ENXMTADSWAP | FM_ENRCVADSWAP ; #endif } static u_long init_descr_ring(struct s_smc *smc, union s_fp_descr volatile *start, int count) { int i ; union s_fp_descr volatile *d1 ; union s_fp_descr volatile *d2 ; u_long phys ; DB_GEN("descr ring starts at = %x ",(void *)start,0,3) ; for (i=count-1, d1=start; i ; i--) { d2 = d1 ; d1++ ; /* descr is owned by the host */ d2->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ; d2->r.rxd_next = &d1->r ; phys = mac_drv_virt2phys(smc,(void *)d1) ; d2->r.rxd_nrdadr = cpu_to_le32(phys) ; } DB_GEN("descr ring ends at = %x ",(void *)d1,0,3) ; d1->r.rxd_rbctrl = cpu_to_le32(BMU_CHECK) ; d1->r.rxd_next = &start->r ; phys = mac_drv_virt2phys(smc,(void *)start) ; d1->r.rxd_nrdadr = cpu_to_le32(phys) ; for (i=count, d1=start; i ; i--) { DRV_BUF_FLUSH(&d1->r,DDI_DMA_SYNC_FORDEV) ; d1++; } return phys; } static void init_txd_ring(struct s_smc *smc) { struct s_smt_fp_txd volatile *ds ; struct s_smt_tx_queue *queue ; u_long phys ; /* * initialize the transmit descriptors */ ds = (struct s_smt_fp_txd volatile *) ((char *)smc->os.hwm.descr_p + SMT_R1_RXD_COUNT*sizeof(struct s_smt_fp_rxd)) ; queue = smc->hw.fp.tx[QUEUE_A0] ; DB_GEN("Init async TxD ring, %d TxDs ",HWM_ASYNC_TXD_COUNT,0,3) ; (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds, HWM_ASYNC_TXD_COUNT) ; phys = le32_to_cpu(ds->txd_ntdadr) ; ds++ ; queue->tx_curr_put = queue->tx_curr_get = ds ; ds-- ; queue->tx_free = HWM_ASYNC_TXD_COUNT ; queue->tx_used = 0 ; outpd(ADDR(B5_XA_DA),phys) ; ds = (struct s_smt_fp_txd volatile *) ((char *)ds + 
HWM_ASYNC_TXD_COUNT*sizeof(struct s_smt_fp_txd)) ; queue = smc->hw.fp.tx[QUEUE_S] ; DB_GEN("Init sync TxD ring, %d TxDs ",HWM_SYNC_TXD_COUNT,0,3) ; (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds, HWM_SYNC_TXD_COUNT) ; phys = le32_to_cpu(ds->txd_ntdadr) ; ds++ ; queue->tx_curr_put = queue->tx_curr_get = ds ; queue->tx_free = HWM_SYNC_TXD_COUNT ; queue->tx_used = 0 ; outpd(ADDR(B5_XS_DA),phys) ; } static void init_rxd_ring(struct s_smc *smc) { struct s_smt_fp_rxd volatile *ds ; struct s_smt_rx_queue *queue ; u_long phys ; /* * initialize the receive descriptors */ ds = (struct s_smt_fp_rxd volatile *) smc->os.hwm.descr_p ; queue = smc->hw.fp.rx[QUEUE_R1] ; DB_GEN("Init RxD ring, %d RxDs ",SMT_R1_RXD_COUNT,0,3) ; (void)init_descr_ring(smc,(union s_fp_descr volatile *)ds, SMT_R1_RXD_COUNT) ; phys = le32_to_cpu(ds->rxd_nrdadr) ; ds++ ; queue->rx_curr_put = queue->rx_curr_get = ds ; queue->rx_free = SMT_R1_RXD_COUNT ; queue->rx_used = 0 ; outpd(ADDR(B4_R1_DA),phys) ; } /* * BEGIN_MANUAL_ENTRY(init_fddi_driver) * void init_fddi_driver(smc,mac_addr) * * initializes the driver and it's variables * * END_MANUAL_ENTRY */ void init_fddi_driver(struct s_smc *smc, u_char *mac_addr) { SMbuf *mb ; int i ; init_board(smc,mac_addr) ; (void)init_fplus(smc) ; /* * initialize the SMbufs for the SMT */ #ifndef COMMON_MB_POOL mb = smc->os.hwm.mbuf_pool.mb_start ; smc->os.hwm.mbuf_pool.mb_free = (SMbuf *)NULL ; for (i = 0; i < MAX_MBUF; i++) { mb->sm_use_count = 1 ; smt_free_mbuf(smc,mb) ; mb++ ; } #else mb = mb_start ; if (!mb_init) { mb_free = 0 ; for (i = 0; i < MAX_MBUF; i++) { mb->sm_use_count = 1 ; smt_free_mbuf(smc,mb) ; mb++ ; } mb_init = TRUE ; } #endif /* * initialize the other variables */ smc->os.hwm.llc_rx_pipe = smc->os.hwm.llc_rx_tail = (SMbuf *)NULL ; smc->os.hwm.txd_tx_pipe = smc->os.hwm.txd_tx_tail = NULL ; smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = smc->os.hwm.pass_DB = 0 ; smc->os.hwm.pass_llc_promisc = TRUE ; smc->os.hwm.queued_rx_frames = 
smc->os.hwm.queued_txd_mb = 0 ; smc->os.hwm.detec_count = 0 ; smc->os.hwm.rx_break = 0 ; smc->os.hwm.rx_len_error = 0 ; smc->os.hwm.isr_flag = FALSE ; /* * make sure that the start pointer is 16 byte aligned */ i = 16 - ((long)smc->os.hwm.descr_p & 0xf) ; if (i != 16) { DB_GEN("i = %d",i,0,3) ; smc->os.hwm.descr_p = (union s_fp_descr volatile *) ((char *)smc->os.hwm.descr_p+i) ; } DB_GEN("pt to descr area = %x",(void *)smc->os.hwm.descr_p,0,3) ; init_txd_ring(smc) ; init_rxd_ring(smc) ; mac_drv_fill_rxd(smc) ; init_plc(smc) ; } SMbuf *smt_get_mbuf(struct s_smc *smc) { register SMbuf *mb ; #ifndef COMMON_MB_POOL mb = smc->os.hwm.mbuf_pool.mb_free ; #else mb = mb_free ; #endif if (mb) { #ifndef COMMON_MB_POOL smc->os.hwm.mbuf_pool.mb_free = mb->sm_next ; #else mb_free = mb->sm_next ; #endif mb->sm_off = 8 ; mb->sm_use_count = 1 ; } DB_GEN("get SMbuf: mb = %x",(void *)mb,0,3) ; return mb; /* May be NULL */ } void smt_free_mbuf(struct s_smc *smc, SMbuf *mb) { if (mb) { mb->sm_use_count-- ; DB_GEN("free_mbuf: sm_use_count = %d",mb->sm_use_count,0,3) ; /* * If the use_count is != zero the MBuf is queued * more than once and must not queued into the * free MBuf queue */ if (!mb->sm_use_count) { DB_GEN("free SMbuf: mb = %x",(void *)mb,0,3) ; #ifndef COMMON_MB_POOL mb->sm_next = smc->os.hwm.mbuf_pool.mb_free ; smc->os.hwm.mbuf_pool.mb_free = mb ; #else mb->sm_next = mb_free ; mb_free = mb ; #endif } } else SMT_PANIC(smc,HWM_E0003,HWM_E0003_MSG) ; } /* * BEGIN_MANUAL_ENTRY(mac_drv_repair_descr) * void mac_drv_repair_descr(smc) * * function called from SMT (HWM / hwmtm.c) * The BMU is idle when this function is called. * Mac_drv_repair_descr sets up the physical address * for all receive and transmit queues where the BMU * should continue. * It may be that the BMU was reseted during a fragmented * transfer. In this case there are some fragments which will * never completed by the BMU. The OWN bit of this fragments * must be switched to be owned by the host. 
* * Give a start command to the receive BMU. * Start the transmit BMUs if transmit frames pending. * * END_MANUAL_ENTRY */ void mac_drv_repair_descr(struct s_smc *smc) { u_long phys ; if (smc->hw.hw_state != STOPPED) { SK_BREAK() ; SMT_PANIC(smc,HWM_E0013,HWM_E0013_MSG) ; return ; } /* * repair tx queues: don't start */ phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_A0]) ; outpd(ADDR(B5_XA_DA),phys) ; if (smc->hw.fp.tx_q[QUEUE_A0].tx_used) { outpd(ADDR(B0_XA_CSR),CSR_START) ; } phys = repair_txd_ring(smc,smc->hw.fp.tx[QUEUE_S]) ; outpd(ADDR(B5_XS_DA),phys) ; if (smc->hw.fp.tx_q[QUEUE_S].tx_used) { outpd(ADDR(B0_XS_CSR),CSR_START) ; } /* * repair rx queues */ phys = repair_rxd_ring(smc,smc->hw.fp.rx[QUEUE_R1]) ; outpd(ADDR(B4_R1_DA),phys) ; outpd(ADDR(B0_R1_CSR),CSR_START) ; } static u_long repair_txd_ring(struct s_smc *smc, struct s_smt_tx_queue *queue) { int i ; int tx_used ; u_long phys ; u_long tbctrl ; struct s_smt_fp_txd volatile *t ; SK_UNUSED(smc) ; t = queue->tx_curr_get ; tx_used = queue->tx_used ; for (i = tx_used+queue->tx_free-1 ; i ; i-- ) { t = t->txd_next ; } phys = le32_to_cpu(t->txd_ntdadr) ; t = queue->tx_curr_get ; while (tx_used) { DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ; tbctrl = le32_to_cpu(t->txd_tbctrl) ; if (tbctrl & BMU_OWN) { if (tbctrl & BMU_STF) { break ; /* exit the loop */ } else { /* * repair the descriptor */ t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ; } } phys = le32_to_cpu(t->txd_ntdadr) ; DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ; t = t->txd_next ; tx_used-- ; } return phys; } /* * Repairs the receive descriptor ring and returns the physical address * where the BMU should continue working. * * o The physical address where the BMU was stopped has to be * determined. This is the next RxD after rx_curr_get with an OWN * bit set. * o The BMU should start working at beginning of the next frame. * RxDs with an OWN bit set but with a reset STF bit should be * skipped and owned by the driver (OWN = 0). 
*/ static u_long repair_rxd_ring(struct s_smc *smc, struct s_smt_rx_queue *queue) { int i ; int rx_used ; u_long phys ; u_long rbctrl ; struct s_smt_fp_rxd volatile *r ; SK_UNUSED(smc) ; r = queue->rx_curr_get ; rx_used = queue->rx_used ; for (i = SMT_R1_RXD_COUNT-1 ; i ; i-- ) { r = r->rxd_next ; } phys = le32_to_cpu(r->rxd_nrdadr) ; r = queue->rx_curr_get ; while (rx_used) { DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; rbctrl = le32_to_cpu(r->rxd_rbctrl) ; if (rbctrl & BMU_OWN) { if (rbctrl & BMU_STF) { break ; /* exit the loop */ } else { /* * repair the descriptor */ r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ; } } phys = le32_to_cpu(r->rxd_nrdadr) ; DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ; r = r->rxd_next ; rx_used-- ; } return phys; } /* ------------------------------------------------------------- INTERRUPT SERVICE ROUTINE: ------------------------------------------------------------- */ /* * BEGIN_MANUAL_ENTRY(fddi_isr) * void fddi_isr(smc) * * function DOWNCALL (drvsr.c) * interrupt service routine, handles the interrupt requests * generated by the FDDI adapter. * * NOTE: The operating system dependent module must guarantee that the * interrupts of the adapter are disabled when it calls fddi_isr. * * About the USE_BREAK_ISR mechanismn: * * The main requirement of this mechanismn is to force an timer IRQ when * leaving process_receive() with leave_isr set. process_receive() may * be called at any time from anywhere! * To be sure we don't miss such event we set 'force_irq' per default. * We have to force and Timer IRQ if 'smc->os.hwm.leave_isr' AND * 'force_irq' are set. 'force_irq' may be reset if a receive complete * IRQ is pending. 
* * END_MANUAL_ENTRY */ void fddi_isr(struct s_smc *smc) { u_long is ; /* ISR source */ u_short stu, stl ; SMbuf *mb ; #ifdef USE_BREAK_ISR int force_irq ; #endif #ifdef ODI2 if (smc->os.hwm.rx_break) { mac_drv_fill_rxd(smc) ; if (smc->hw.fp.rx_q[QUEUE_R1].rx_used > 0) { smc->os.hwm.rx_break = 0 ; process_receive(smc) ; } else { smc->os.hwm.detec_count = 0 ; smt_force_irq(smc) ; } } #endif smc->os.hwm.isr_flag = TRUE ; #ifdef USE_BREAK_ISR force_irq = TRUE ; if (smc->os.hwm.leave_isr) { smc->os.hwm.leave_isr = FALSE ; process_receive(smc) ; } #endif while ((is = GET_ISR() & ISR_MASK)) { NDD_TRACE("CH0B",is,0,0) ; DB_GEN("ISA = 0x%x",is,0,7) ; if (is & IMASK_SLOW) { NDD_TRACE("CH1b",is,0,0) ; if (is & IS_PLINT1) { /* PLC1 */ plc1_irq(smc) ; } if (is & IS_PLINT2) { /* PLC2 */ plc2_irq(smc) ; } if (is & IS_MINTR1) { /* FORMAC+ STU1(U/L) */ stu = inpw(FM_A(FM_ST1U)) ; stl = inpw(FM_A(FM_ST1L)) ; DB_GEN("Slow transmit complete",0,0,6) ; mac1_irq(smc,stu,stl) ; } if (is & IS_MINTR2) { /* FORMAC+ STU2(U/L) */ stu= inpw(FM_A(FM_ST2U)) ; stl= inpw(FM_A(FM_ST2L)) ; DB_GEN("Slow receive complete",0,0,6) ; DB_GEN("stl = %x : stu = %x",stl,stu,7) ; mac2_irq(smc,stu,stl) ; } if (is & IS_MINTR3) { /* FORMAC+ STU3(U/L) */ stu= inpw(FM_A(FM_ST3U)) ; stl= inpw(FM_A(FM_ST3L)) ; DB_GEN("FORMAC Mode Register 3",0,0,6) ; mac3_irq(smc,stu,stl) ; } if (is & IS_TIMINT) { /* Timer 82C54-2 */ timer_irq(smc) ; #ifdef NDIS_OS2 force_irq_pending = 0 ; #endif /* * out of RxD detection */ if (++smc->os.hwm.detec_count > 4) { /* * check out of RxD condition */ process_receive(smc) ; } } if (is & IS_TOKEN) { /* Restricted Token Monitor */ rtm_irq(smc) ; } if (is & IS_R1_P) { /* Parity error rx queue 1 */ /* clear IRQ */ outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_P) ; SMT_PANIC(smc,HWM_E0004,HWM_E0004_MSG) ; } if (is & IS_R1_C) { /* Encoding error rx queue 1 */ /* clear IRQ */ outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_C) ; SMT_PANIC(smc,HWM_E0005,HWM_E0005_MSG) ; } if (is & IS_XA_C) { /* Encoding error async tx q */ 
/* clear IRQ */ outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_C) ; SMT_PANIC(smc,HWM_E0006,HWM_E0006_MSG) ; } if (is & IS_XS_C) { /* Encoding error sync tx q */ /* clear IRQ */ outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_C) ; SMT_PANIC(smc,HWM_E0007,HWM_E0007_MSG) ; } } /* * Fast Tx complete Async/Sync Queue (BMU service) */ if (is & (IS_XS_F|IS_XA_F)) { DB_GEN("Fast tx complete queue",0,0,6) ; /* * clear IRQ, Note: no IRQ is lost, because * we always service both queues */ outpd(ADDR(B5_XS_CSR),CSR_IRQ_CL_F) ; outpd(ADDR(B5_XA_CSR),CSR_IRQ_CL_F) ; mac_drv_clear_txd(smc) ; llc_restart_tx(smc) ; } /* * Fast Rx Complete (BMU service) */ if (is & IS_R1_F) { DB_GEN("Fast receive complete",0,0,6) ; /* clear IRQ */ #ifndef USE_BREAK_ISR outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ; process_receive(smc) ; #else process_receive(smc) ; if (smc->os.hwm.leave_isr) { force_irq = FALSE ; } else { outpd(ADDR(B4_R1_CSR),CSR_IRQ_CL_F) ; process_receive(smc) ; } #endif } #ifndef NDIS_OS2 while ((mb = get_llc_rx(smc))) { smt_to_llc(smc,mb) ; } #else if (offDepth) post_proc() ; while (!offDepth && (mb = get_llc_rx(smc))) { smt_to_llc(smc,mb) ; } if (!offDepth && smc->os.hwm.rx_break) { process_receive(smc) ; } #endif if (smc->q.ev_get != smc->q.ev_put) { NDD_TRACE("CH2a",0,0,0) ; ev_dispatcher(smc) ; } #ifdef NDIS_OS2 post_proc() ; if (offDepth) { /* leave fddi_isr because */ break ; /* indications not allowed */ } #endif #ifdef USE_BREAK_ISR if (smc->os.hwm.leave_isr) { break ; /* leave fddi_isr */ } #endif /* NOTE: when the isr is left, no rx is pending */ } /* end of interrupt source polling loop */ #ifdef USE_BREAK_ISR if (smc->os.hwm.leave_isr && force_irq) { smt_force_irq(smc) ; } #endif smc->os.hwm.isr_flag = FALSE ; NDD_TRACE("CH0E",0,0,0) ; } /* ------------------------------------------------------------- RECEIVE FUNCTIONS: ------------------------------------------------------------- */ #ifndef NDIS_OS2 /* * BEGIN_MANUAL_ENTRY(mac_drv_rx_mode) * void mac_drv_rx_mode(smc,mode) * * function DOWNCALL 
(fplus.c) * Corresponding to the parameter mode, the operating system * dependent module can activate several receive modes. * * para mode = 1: RX_ENABLE_ALLMULTI enable all multicasts * = 2: RX_DISABLE_ALLMULTI disable "enable all multicasts" * = 3: RX_ENABLE_PROMISC enable promiscuous * = 4: RX_DISABLE_PROMISC disable promiscuous * = 5: RX_ENABLE_NSA enable rec. of all NSA frames * (disabled after 'driver reset' & 'set station address') * = 6: RX_DISABLE_NSA disable rec. of all NSA frames * * = 21: RX_ENABLE_PASS_SMT ( see description ) * = 22: RX_DISABLE_PASS_SMT ( " " ) * = 23: RX_ENABLE_PASS_NSA ( " " ) * = 24: RX_DISABLE_PASS_NSA ( " " ) * = 25: RX_ENABLE_PASS_DB ( " " ) * = 26: RX_DISABLE_PASS_DB ( " " ) * = 27: RX_DISABLE_PASS_ALL ( " " ) * = 28: RX_DISABLE_LLC_PROMISC ( " " ) * = 29: RX_ENABLE_LLC_PROMISC ( " " ) * * * RX_ENABLE_PASS_SMT / RX_DISABLE_PASS_SMT * * If the operating system dependent module activates the * mode RX_ENABLE_PASS_SMT, the hardware module * duplicates all SMT frames with the frame control * FC_SMT_INFO and passes them to the LLC receive channel * by calling mac_drv_rx_init. * The SMT Frames which are sent by the local SMT and the NSA * frames whose A- and C-Indicator is not set are also duplicated * and passed. * The receive mode RX_DISABLE_PASS_SMT disables the passing * of SMT frames. * * RX_ENABLE_PASS_NSA / RX_DISABLE_PASS_NSA * * If the operating system dependent module activates the * mode RX_ENABLE_PASS_NSA, the hardware module * duplicates all NSA frames with frame control FC_SMT_NSA * and a set A-Indicator and passed them to the LLC * receive channel by calling mac_drv_rx_init. * All NSA Frames which are sent by the local SMT * are also duplicated and passed. * The receive mode RX_DISABLE_PASS_NSA disables the passing * of NSA frames with the A- or C-Indicator set. 
* * NOTE: For fear that the hardware module receives NSA frames with * a reset A-Indicator, the operating system dependent module * has to call mac_drv_rx_mode with the mode RX_ENABLE_NSA * before activate the RX_ENABLE_PASS_NSA mode and after every * 'driver reset' and 'set station address'. * * RX_ENABLE_PASS_DB / RX_DISABLE_PASS_DB * * If the operating system dependent module activates the * mode RX_ENABLE_PASS_DB, direct BEACON frames * (FC_BEACON frame control) are passed to the LLC receive * channel by mac_drv_rx_init. * The receive mode RX_DISABLE_PASS_DB disables the passing * of direct BEACON frames. * * RX_DISABLE_PASS_ALL * * Disables all special receives modes. It is equal to * call mac_drv_set_rx_mode successively with the * parameters RX_DISABLE_NSA, RX_DISABLE_PASS_SMT, * RX_DISABLE_PASS_NSA and RX_DISABLE_PASS_DB. * * RX_ENABLE_LLC_PROMISC * * (default) all received LLC frames and all SMT/NSA/DBEACON * frames depending on the attitude of the flags * PASS_SMT/PASS_NSA/PASS_DBEACON will be delivered to the * LLC layer * * RX_DISABLE_LLC_PROMISC * * all received SMT/NSA/DBEACON frames depending on the * attitude of the flags PASS_SMT/PASS_NSA/PASS_DBEACON * will be delivered to the LLC layer. * all received LLC frames with a directed address, Multicast * or Broadcast address will be delivered to the LLC * layer too. 
* * END_MANUAL_ENTRY */ void mac_drv_rx_mode(struct s_smc *smc, int mode) { switch(mode) { case RX_ENABLE_PASS_SMT: smc->os.hwm.pass_SMT = TRUE ; break ; case RX_DISABLE_PASS_SMT: smc->os.hwm.pass_SMT = FALSE ; break ; case RX_ENABLE_PASS_NSA: smc->os.hwm.pass_NSA = TRUE ; break ; case RX_DISABLE_PASS_NSA: smc->os.hwm.pass_NSA = FALSE ; break ; case RX_ENABLE_PASS_DB: smc->os.hwm.pass_DB = TRUE ; break ; case RX_DISABLE_PASS_DB: smc->os.hwm.pass_DB = FALSE ; break ; case RX_DISABLE_PASS_ALL: smc->os.hwm.pass_SMT = smc->os.hwm.pass_NSA = FALSE ; smc->os.hwm.pass_DB = FALSE ; smc->os.hwm.pass_llc_promisc = TRUE ; mac_set_rx_mode(smc,RX_DISABLE_NSA) ; break ; case RX_DISABLE_LLC_PROMISC: smc->os.hwm.pass_llc_promisc = FALSE ; break ; case RX_ENABLE_LLC_PROMISC: smc->os.hwm.pass_llc_promisc = TRUE ; break ; case RX_ENABLE_ALLMULTI: case RX_DISABLE_ALLMULTI: case RX_ENABLE_PROMISC: case RX_DISABLE_PROMISC: case RX_ENABLE_NSA: case RX_DISABLE_NSA: default: mac_set_rx_mode(smc,mode) ; break ; } } #endif /* ifndef NDIS_OS2 */ /* * process receive queue */ void process_receive(struct s_smc *smc) { int i ; int n ; int frag_count ; /* number of RxDs of the curr rx buf */ int used_frags ; /* number of RxDs of the curr frame */ struct s_smt_rx_queue *queue ; /* points to the queue ctl struct */ struct s_smt_fp_rxd volatile *r ; /* rxd pointer */ struct s_smt_fp_rxd volatile *rxd ; /* first rxd of rx frame */ u_long rbctrl ; /* receive buffer control word */ u_long rfsw ; /* receive frame status word */ u_short rx_used ; u_char far *virt ; char far *data ; SMbuf *mb ; u_char fc ; /* Frame control */ int len ; /* Frame length */ smc->os.hwm.detec_count = 0 ; queue = smc->hw.fp.rx[QUEUE_R1] ; NDD_TRACE("RHxB",0,0,0) ; for ( ; ; ) { r = queue->rx_curr_get ; rx_used = queue->rx_used ; frag_count = 0 ; #ifdef USE_BREAK_ISR if (smc->os.hwm.leave_isr) { goto rx_end ; } #endif #ifdef NDIS_OS2 if (offDepth) { smc->os.hwm.rx_break = 1 ; goto rx_end ; } smc->os.hwm.rx_break = 0 ; #endif 
#ifdef ODI2 if (smc->os.hwm.rx_break) { goto rx_end ; } #endif n = 0 ; do { DB_RX("Check RxD %x for OWN and EOF",(void *)r,0,5) ; DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; rbctrl = le32_to_cpu(CR_READ(r->rxd_rbctrl)); if (rbctrl & BMU_OWN) { NDD_TRACE("RHxE",r,rfsw,rbctrl) ; DB_RX("End of RxDs",0,0,4) ; goto rx_end ; } /* * out of RxD detection */ if (!rx_used) { SK_BREAK() ; SMT_PANIC(smc,HWM_E0009,HWM_E0009_MSG) ; /* Either we don't have an RxD or all * RxDs are filled. Therefore it's allowed * for to set the STOPPED flag */ smc->hw.hw_state = STOPPED ; mac_drv_clear_rx_queue(smc) ; smc->hw.hw_state = STARTED ; mac_drv_fill_rxd(smc) ; smc->os.hwm.detec_count = 0 ; goto rx_end ; } rfsw = le32_to_cpu(r->rxd_rfsw) ; if ((rbctrl & BMU_STF) != ((rbctrl & BMU_ST_BUF) <<5)) { /* * The BMU_STF bit is deleted, 1 frame is * placed into more than 1 rx buffer * * skip frame by setting the rx len to 0 * * if fragment count == 0 * The missing STF bit belongs to the * current frame, search for the * EOF bit to complete the frame * else * the fragment belongs to the next frame, * exit the loop and process the frame */ SK_BREAK() ; rfsw = 0 ; if (frag_count) { break ; } } n += rbctrl & 0xffff ; r = r->rxd_next ; frag_count++ ; rx_used-- ; } while (!(rbctrl & BMU_EOF)) ; used_frags = frag_count ; DB_RX("EOF set in RxD, used_frags = %d ",used_frags,0,5) ; /* may be next 2 DRV_BUF_FLUSH() can be skipped, because */ /* BMU_ST_BUF will not be changed by the ASIC */ DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; while (rx_used && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) { DB_RX("Check STF bit in %x",(void *)r,0,5) ; r = r->rxd_next ; DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; frag_count++ ; rx_used-- ; } DB_RX("STF bit found",0,0,5) ; /* * The received frame is finished for the process receive */ rxd = queue->rx_curr_get ; queue->rx_curr_get = r ; queue->rx_free += frag_count ; queue->rx_used = rx_used ; /* * ASIC Errata no. 
7 (STF - Bit Bug) */ rxd->rxd_rbctrl &= cpu_to_le32(~BMU_STF) ; for (r=rxd, i=frag_count ; i ; r=r->rxd_next, i--){ DB_RX("dma_complete for RxD %x",(void *)r,0,5) ; dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR); } smc->hw.fp.err_stats.err_valid++ ; smc->mib.m[MAC0].fddiMACCopied_Ct++ ; /* the length of the data including the FC */ len = (rfsw & RD_LENGTH) - 4 ; DB_RX("frame length = %d",len,0,4) ; /* * check the frame_length and all error flags */ if (rfsw & (RX_MSRABT|RX_FS_E|RX_FS_CRC|RX_FS_IMPL)){ if (rfsw & RD_S_MSRABT) { DB_RX("Frame aborted by the FORMAC",0,0,2) ; smc->hw.fp.err_stats.err_abort++ ; } /* * check frame status */ if (rfsw & RD_S_SEAC2) { DB_RX("E-Indicator set",0,0,2) ; smc->hw.fp.err_stats.err_e_indicator++ ; } if (rfsw & RD_S_SFRMERR) { DB_RX("CRC error",0,0,2) ; smc->hw.fp.err_stats.err_crc++ ; } if (rfsw & RX_FS_IMPL) { DB_RX("Implementer frame",0,0,2) ; smc->hw.fp.err_stats.err_imp_frame++ ; } goto abort_frame ; } if (len > FDDI_RAW_MTU-4) { DB_RX("Frame too long error",0,0,2) ; smc->hw.fp.err_stats.err_too_long++ ; goto abort_frame ; } /* * SUPERNET 3 Bug: FORMAC delivers status words * of aborded frames to the BMU */ if (len <= 4) { DB_RX("Frame length = 0",0,0,2) ; goto abort_frame ; } if (len != (n-4)) { DB_RX("BMU: rx len differs: [%d:%d]",len,n,4); smc->os.hwm.rx_len_error++ ; goto abort_frame ; } /* * Check SA == MA */ virt = (u_char far *) rxd->rxd_virt ; DB_RX("FC = %x",*virt,0,2) ; if (virt[12] == MA[5] && virt[11] == MA[4] && virt[10] == MA[3] && virt[9] == MA[2] && virt[8] == MA[1] && (virt[7] & ~GROUP_ADDR_BIT) == MA[0]) { goto abort_frame ; } /* * test if LLC frame */ if (rfsw & RX_FS_LLC) { /* * if pass_llc_promisc is disable * if DA != Multicast or Broadcast or DA!=MA * abort the frame */ if (!smc->os.hwm.pass_llc_promisc) { if(!(virt[1] & GROUP_ADDR_BIT)) { if (virt[6] != MA[5] || virt[5] != MA[4] || virt[4] != MA[3] || virt[3] != MA[2] || virt[2] != MA[1] || virt[1] != MA[0]) { DB_RX("DA != MA and not multi- or 
broadcast",0,0,2) ; goto abort_frame ; } } } /* * LLC frame received */ DB_RX("LLC - receive",0,0,4) ; mac_drv_rx_complete(smc,rxd,frag_count,len) ; } else { if (!(mb = smt_get_mbuf(smc))) { smc->hw.fp.err_stats.err_no_buf++ ; DB_RX("No SMbuf; receive terminated",0,0,4) ; goto abort_frame ; } data = smtod(mb,char *) - 1 ; /* * copy the frame into a SMT_MBuf */ #ifdef USE_OS_CPY hwm_cpy_rxd2mb(rxd,data,len) ; #else for (r=rxd, i=used_frags ; i ; r=r->rxd_next, i--){ n = le32_to_cpu(r->rxd_rbctrl) & RD_LENGTH ; DB_RX("cp SMT frame to mb: len = %d",n,0,6) ; memcpy(data,r->rxd_virt,n) ; data += n ; } data = smtod(mb,char *) - 1 ; #endif fc = *(char *)mb->sm_data = *data ; mb->sm_len = len - 1 ; /* len - fc */ data++ ; /* * SMT frame received */ switch(fc) { case FC_SMT_INFO : smc->hw.fp.err_stats.err_smt_frame++ ; DB_RX("SMT frame received ",0,0,5) ; if (smc->os.hwm.pass_SMT) { DB_RX("pass SMT frame ",0,0,5) ; mac_drv_rx_complete(smc, rxd, frag_count,len) ; } else { DB_RX("requeue RxD",0,0,5) ; mac_drv_requeue_rxd(smc,rxd,frag_count); } smt_received_pack(smc,mb,(int)(rfsw>>25)) ; break ; case FC_SMT_NSA : smc->hw.fp.err_stats.err_smt_frame++ ; DB_RX("SMT frame received ",0,0,5) ; /* if pass_NSA set pass the NSA frame or */ /* pass_SMT set and the A-Indicator */ /* is not set, pass the NSA frame */ if (smc->os.hwm.pass_NSA || (smc->os.hwm.pass_SMT && !(rfsw & A_INDIC))) { DB_RX("pass SMT frame ",0,0,5) ; mac_drv_rx_complete(smc, rxd, frag_count,len) ; } else { DB_RX("requeue RxD",0,0,5) ; mac_drv_requeue_rxd(smc,rxd,frag_count); } smt_received_pack(smc,mb,(int)(rfsw>>25)) ; break ; case FC_BEACON : if (smc->os.hwm.pass_DB) { DB_RX("pass DB frame ",0,0,5) ; mac_drv_rx_complete(smc, rxd, frag_count,len) ; } else { DB_RX("requeue RxD",0,0,5) ; mac_drv_requeue_rxd(smc,rxd,frag_count); } smt_free_mbuf(smc,mb) ; break ; default : /* * unknown FC abord the frame */ DB_RX("unknown FC error",0,0,2) ; smt_free_mbuf(smc,mb) ; DB_RX("requeue RxD",0,0,5) ; 
mac_drv_requeue_rxd(smc,rxd,frag_count) ; if ((fc & 0xf0) == FC_MAC) smc->hw.fp.err_stats.err_mac_frame++ ; else smc->hw.fp.err_stats.err_imp_frame++ ; break ; } } DB_RX("next RxD is %x ",queue->rx_curr_get,0,3) ; NDD_TRACE("RHx1",queue->rx_curr_get,0,0) ; continue ; /*--------------------------------------------------------------------*/ abort_frame: DB_RX("requeue RxD",0,0,5) ; mac_drv_requeue_rxd(smc,rxd,frag_count) ; DB_RX("next RxD is %x ",queue->rx_curr_get,0,3) ; NDD_TRACE("RHx2",queue->rx_curr_get,0,0) ; } rx_end: #ifdef ALL_RX_COMPLETE mac_drv_all_receives_complete(smc) ; #endif return ; /* lint bug: needs return detect end of function */ } static void smt_to_llc(struct s_smc *smc, SMbuf *mb) { u_char fc ; DB_RX("send a queued frame to the llc layer",0,0,4) ; smc->os.hwm.r.len = mb->sm_len ; smc->os.hwm.r.mb_pos = smtod(mb,char *) ; fc = *smc->os.hwm.r.mb_pos ; (void)mac_drv_rx_init(smc,(int)mb->sm_len,(int)fc, smc->os.hwm.r.mb_pos,(int)mb->sm_len) ; smt_free_mbuf(smc,mb) ; } /* * BEGIN_MANUAL_ENTRY(hwm_rx_frag) * void hwm_rx_frag(smc,virt,phys,len,frame_status) * * function MACRO (hardware module, hwmtm.h) * This function calls dma_master for preparing the * system hardware for the DMA transfer and initializes * the current RxD with the length and the physical and * virtual address of the fragment. Furthermore, it sets the * STF and EOF bits depending on the frame status byte, * switches the OWN flag of the RxD, so that it is owned by the * adapter and issues an rx_start. * * para virt virtual pointer to the fragment * len the length of the fragment * frame_status status of the frame, see design description * * NOTE: It is possible to call this function with a fragment length * of zero. 
* * END_MANUAL_ENTRY */ void hwm_rx_frag(struct s_smc *smc, char far *virt, u_long phys, int len, int frame_status) { struct s_smt_fp_rxd volatile *r ; __le32 rbctrl; NDD_TRACE("RHfB",virt,len,frame_status) ; DB_RX("hwm_rx_frag: len = %d, frame_status = %x\n",len,frame_status,2) ; r = smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put ; r->rxd_virt = virt ; r->rxd_rbadr = cpu_to_le32(phys) ; rbctrl = cpu_to_le32( (((__u32)frame_status & (FIRST_FRAG|LAST_FRAG))<<26) | (((u_long) frame_status & FIRST_FRAG) << 21) | BMU_OWN | BMU_CHECK | BMU_EN_IRQ_EOF | len) ; r->rxd_rbctrl = rbctrl ; DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ; outpd(ADDR(B0_R1_CSR),CSR_START) ; smc->hw.fp.rx_q[QUEUE_R1].rx_free-- ; smc->hw.fp.rx_q[QUEUE_R1].rx_used++ ; smc->hw.fp.rx_q[QUEUE_R1].rx_curr_put = r->rxd_next ; NDD_TRACE("RHfE",r,le32_to_cpu(r->rxd_rbadr),0) ; } /* * BEGINN_MANUAL_ENTRY(mac_drv_clear_rx_queue) * * void mac_drv_clear_rx_queue(smc) * struct s_smc *smc ; * * function DOWNCALL (hardware module, hwmtm.c) * mac_drv_clear_rx_queue is called by the OS-specific module * after it has issued a card_stop. * In this case, the frames in the receive queue are obsolete and * should be removed. For removing mac_drv_clear_rx_queue * calls dma_master for each RxD and mac_drv_clear_rxd for each * receive buffer. * * NOTE: calling sequence card_stop: * CLI_FBI(), card_stop(), * mac_drv_clear_tx_queue(), mac_drv_clear_rx_queue(), * * NOTE: The caller is responsible that the BMUs are idle * when this function is called. 
* * END_MANUAL_ENTRY */ void mac_drv_clear_rx_queue(struct s_smc *smc) { struct s_smt_fp_rxd volatile *r ; struct s_smt_fp_rxd volatile *next_rxd ; struct s_smt_rx_queue *queue ; int frag_count ; int i ; if (smc->hw.hw_state != STOPPED) { SK_BREAK() ; SMT_PANIC(smc,HWM_E0012,HWM_E0012_MSG) ; return ; } queue = smc->hw.fp.rx[QUEUE_R1] ; DB_RX("clear_rx_queue",0,0,5) ; /* * dma_complete and mac_drv_clear_rxd for all RxDs / receive buffers */ r = queue->rx_curr_get ; while (queue->rx_used) { DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; DB_RX("switch OWN bit of RxD 0x%x ",r,0,5) ; r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ; frag_count = 1 ; DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ; r = r->rxd_next ; DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; while (r != queue->rx_curr_put && !(r->rxd_rbctrl & cpu_to_le32(BMU_ST_BUF))) { DB_RX("Check STF bit in %x",(void *)r,0,5) ; r->rxd_rbctrl &= ~cpu_to_le32(BMU_OWN) ; DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORDEV) ; r = r->rxd_next ; DRV_BUF_FLUSH(r,DDI_DMA_SYNC_FORCPU) ; frag_count++ ; } DB_RX("STF bit found",0,0,5) ; next_rxd = r ; for (r=queue->rx_curr_get,i=frag_count; i ; r=r->rxd_next,i--){ DB_RX("dma_complete for RxD %x",(void *)r,0,5) ; dma_complete(smc,(union s_fp_descr volatile *)r,DMA_WR); } DB_RX("mac_drv_clear_rxd: RxD %x frag_count %d ", (void *)queue->rx_curr_get,frag_count,5) ; mac_drv_clear_rxd(smc,queue->rx_curr_get,frag_count) ; queue->rx_curr_get = next_rxd ; queue->rx_used -= frag_count ; queue->rx_free += frag_count ; } } /* ------------------------------------------------------------- SEND FUNCTIONS: ------------------------------------------------------------- */ /* * BEGIN_MANUAL_ENTRY(hwm_tx_init) * int hwm_tx_init(smc,fc,frag_count,frame_len,frame_status) * * function DOWN_CALL (hardware module, hwmtm.c) * hwm_tx_init checks if the frame can be sent through the * corresponding send queue. * * para fc the frame control. To determine through which * send queue the frame should be transmitted. 
* 0x50 - 0x57: asynchronous LLC frame * 0xD0 - 0xD7: synchronous LLC frame * 0x41, 0x4F: SMT frame to the network * 0x42: SMT frame to the network and to the local SMT * 0x43: SMT frame to the local SMT * frag_count count of the fragments for this frame * frame_len length of the frame * frame_status status of the frame, the send queue bit is already * specified * * return frame_status * * END_MANUAL_ENTRY */ int hwm_tx_init(struct s_smc *smc, u_char fc, int frag_count, int frame_len, int frame_status) { NDD_TRACE("THiB",fc,frag_count,frame_len) ; smc->os.hwm.tx_p = smc->hw.fp.tx[frame_status & QUEUE_A0] ; smc->os.hwm.tx_descr = TX_DESCRIPTOR | (((u_long)(frame_len-1)&3)<<27) ; smc->os.hwm.tx_len = frame_len ; DB_TX("hwm_tx_init: fc = %x, len = %d",fc,frame_len,3) ; if ((fc & ~(FC_SYNC_BIT|FC_LLC_PRIOR)) == FC_ASYNC_LLC) { frame_status |= LAN_TX ; } else { switch (fc) { case FC_SMT_INFO : case FC_SMT_NSA : frame_status |= LAN_TX ; break ; case FC_SMT_LOC : frame_status |= LOC_TX ; break ; case FC_SMT_LAN_LOC : frame_status |= LAN_TX | LOC_TX ; break ; default : SMT_PANIC(smc,HWM_E0010,HWM_E0010_MSG) ; } } if (!smc->hw.mac_ring_is_up) { frame_status &= ~LAN_TX ; frame_status |= RING_DOWN ; DB_TX("Ring is down: terminate LAN_TX",0,0,2) ; } if (frag_count > smc->os.hwm.tx_p->tx_free) { #ifndef NDIS_OS2 mac_drv_clear_txd(smc) ; if (frag_count > smc->os.hwm.tx_p->tx_free) { DB_TX("Out of TxDs, terminate LAN_TX",0,0,2) ; frame_status &= ~LAN_TX ; frame_status |= OUT_OF_TXD ; } #else DB_TX("Out of TxDs, terminate LAN_TX",0,0,2) ; frame_status &= ~LAN_TX ; frame_status |= OUT_OF_TXD ; #endif } DB_TX("frame_status = %x",frame_status,0,3) ; NDD_TRACE("THiE",frame_status,smc->os.hwm.tx_p->tx_free,0) ; return frame_status; } /* * BEGIN_MANUAL_ENTRY(hwm_tx_frag) * void hwm_tx_frag(smc,virt,phys,len,frame_status) * * function DOWNCALL (hardware module, hwmtm.c) * If the frame should be sent to the LAN, this function calls * dma_master, fills the current TxD with the virtual and 
the * physical address, sets the STF and EOF bits dependent on * the frame status, and requests the BMU to start the * transmit. * If the frame should be sent to the local SMT, an SMT_MBuf * is allocated if the FIRST_FRAG bit is set in the frame_status. * The fragment of the frame is copied into the SMT MBuf. * The function smt_received_pack is called if the LAST_FRAG * bit is set in the frame_status word. * * para virt virtual pointer to the fragment * len the length of the fragment * frame_status status of the frame, see design description * * return nothing returned, no parameter is modified * * NOTE: It is possible to invoke this macro with a fragment length * of zero. * * END_MANUAL_ENTRY */ void hwm_tx_frag(struct s_smc *smc, char far *virt, u_long phys, int len, int frame_status) { struct s_smt_fp_txd volatile *t ; struct s_smt_tx_queue *queue ; __le32 tbctrl ; queue = smc->os.hwm.tx_p ; NDD_TRACE("THfB",virt,len,frame_status) ; /* Bug fix: AF / May 31 1999 (#missing) * snmpinfo problem reported by IBM is caused by invalid * t-pointer (txd) if LAN_TX is not set but LOC_TX only. * Set: t = queue->tx_curr_put here ! 
*/ t = queue->tx_curr_put ; DB_TX("hwm_tx_frag: len = %d, frame_status = %x ",len,frame_status,2) ; if (frame_status & LAN_TX) { /* '*t' is already defined */ DB_TX("LAN_TX: TxD = %x, virt = %x ",t,virt,3) ; t->txd_virt = virt ; t->txd_txdscr = cpu_to_le32(smc->os.hwm.tx_descr) ; t->txd_tbadr = cpu_to_le32(phys) ; tbctrl = cpu_to_le32((((__u32)frame_status & (FIRST_FRAG|LAST_FRAG|EN_IRQ_EOF))<< 26) | BMU_OWN|BMU_CHECK |len) ; t->txd_tbctrl = tbctrl ; #ifndef AIX DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ; outpd(queue->tx_bmu_ctl,CSR_START) ; #else /* ifndef AIX */ DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ; if (frame_status & QUEUE_A0) { outpd(ADDR(B0_XA_CSR),CSR_START) ; } else { outpd(ADDR(B0_XS_CSR),CSR_START) ; } #endif queue->tx_free-- ; queue->tx_used++ ; queue->tx_curr_put = t->txd_next ; if (frame_status & LAST_FRAG) { smc->mib.m[MAC0].fddiMACTransmit_Ct++ ; } } if (frame_status & LOC_TX) { DB_TX("LOC_TX: ",0,0,3) ; if (frame_status & FIRST_FRAG) { if(!(smc->os.hwm.tx_mb = smt_get_mbuf(smc))) { smc->hw.fp.err_stats.err_no_buf++ ; DB_TX("No SMbuf; transmit terminated",0,0,4) ; } else { smc->os.hwm.tx_data = smtod(smc->os.hwm.tx_mb,char *) - 1 ; #ifdef USE_OS_CPY #ifdef PASS_1ST_TXD_2_TX_COMP hwm_cpy_txd2mb(t,smc->os.hwm.tx_data, smc->os.hwm.tx_len) ; #endif #endif } } if (smc->os.hwm.tx_mb) { #ifndef USE_OS_CPY DB_TX("copy fragment into MBuf ",0,0,3) ; memcpy(smc->os.hwm.tx_data,virt,len) ; smc->os.hwm.tx_data += len ; #endif if (frame_status & LAST_FRAG) { #ifdef USE_OS_CPY #ifndef PASS_1ST_TXD_2_TX_COMP /* * hwm_cpy_txd2mb(txd,data,len) copies 'len' * bytes from the virtual pointer in 'rxd' * to 'data'. The virtual pointer of the * os-specific tx-buffer should be written * in the LAST txd. 
*/ hwm_cpy_txd2mb(t,smc->os.hwm.tx_data, smc->os.hwm.tx_len) ; #endif /* nPASS_1ST_TXD_2_TX_COMP */ #endif /* USE_OS_CPY */ smc->os.hwm.tx_data = smtod(smc->os.hwm.tx_mb,char *) - 1 ; *(char *)smc->os.hwm.tx_mb->sm_data = *smc->os.hwm.tx_data ; smc->os.hwm.tx_data++ ; smc->os.hwm.tx_mb->sm_len = smc->os.hwm.tx_len - 1 ; DB_TX("pass LLC frame to SMT ",0,0,3) ; smt_received_pack(smc,smc->os.hwm.tx_mb, RD_FS_LOCAL) ; } } } NDD_TRACE("THfE",t,queue->tx_free,0) ; } /* * queues a receive for later send */ static void queue_llc_rx(struct s_smc *smc, SMbuf *mb) { DB_GEN("queue_llc_rx: mb = %x",(void *)mb,0,4) ; smc->os.hwm.queued_rx_frames++ ; mb->sm_next = (SMbuf *)NULL ; if (smc->os.hwm.llc_rx_pipe == NULL) { smc->os.hwm.llc_rx_pipe = mb ; } else { smc->os.hwm.llc_rx_tail->sm_next = mb ; } smc->os.hwm.llc_rx_tail = mb ; /* * force an timer IRQ to receive the data */ if (!smc->os.hwm.isr_flag) { smt_force_irq(smc) ; } } /* * get a SMbuf from the llc_rx_queue */ static SMbuf *get_llc_rx(struct s_smc *smc) { SMbuf *mb ; if ((mb = smc->os.hwm.llc_rx_pipe)) { smc->os.hwm.queued_rx_frames-- ; smc->os.hwm.llc_rx_pipe = mb->sm_next ; } DB_GEN("get_llc_rx: mb = 0x%x",(void *)mb,0,4) ; return mb; } /* * queues a transmit SMT MBuf during the time were the MBuf is * queued the TxD ring */ static void queue_txd_mb(struct s_smc *smc, SMbuf *mb) { DB_GEN("_rx: queue_txd_mb = %x",(void *)mb,0,4) ; smc->os.hwm.queued_txd_mb++ ; mb->sm_next = (SMbuf *)NULL ; if (smc->os.hwm.txd_tx_pipe == NULL) { smc->os.hwm.txd_tx_pipe = mb ; } else { smc->os.hwm.txd_tx_tail->sm_next = mb ; } smc->os.hwm.txd_tx_tail = mb ; } /* * get a SMbuf from the txd_tx_queue */ static SMbuf *get_txd_mb(struct s_smc *smc) { SMbuf *mb ; if ((mb = smc->os.hwm.txd_tx_pipe)) { smc->os.hwm.queued_txd_mb-- ; smc->os.hwm.txd_tx_pipe = mb->sm_next ; } DB_GEN("get_txd_mb: mb = 0x%x",(void *)mb,0,4) ; return mb; } /* * SMT Send function */ void smt_send_mbuf(struct s_smc *smc, SMbuf *mb, int fc) { char far *data ; int len ; 
int n ; int i ; int frag_count ; int frame_status ; SK_LOC_DECL(char far,*virt[3]) ; int frag_len[3] ; struct s_smt_tx_queue *queue ; struct s_smt_fp_txd volatile *t ; u_long phys ; __le32 tbctrl; NDD_TRACE("THSB",mb,fc,0) ; DB_TX("smt_send_mbuf: mb = 0x%x, fc = 0x%x",mb,fc,4) ; mb->sm_off-- ; /* set to fc */ mb->sm_len++ ; /* + fc */ data = smtod(mb,char *) ; *data = fc ; if (fc == FC_SMT_LOC) *data = FC_SMT_INFO ; /* * determine the frag count and the virt addresses of the frags */ frag_count = 0 ; len = mb->sm_len ; while (len) { n = SMT_PAGESIZE - ((long)data & (SMT_PAGESIZE-1)) ; if (n >= len) { n = len ; } DB_TX("frag: virt/len = 0x%x/%d ",(void *)data,n,5) ; virt[frag_count] = data ; frag_len[frag_count] = n ; frag_count++ ; len -= n ; data += n ; } /* * determine the frame status */ queue = smc->hw.fp.tx[QUEUE_A0] ; if (fc == FC_BEACON || fc == FC_SMT_LOC) { frame_status = LOC_TX ; } else { frame_status = LAN_TX ; if ((smc->os.hwm.pass_NSA &&(fc == FC_SMT_NSA)) || (smc->os.hwm.pass_SMT &&(fc == FC_SMT_INFO))) frame_status |= LOC_TX ; } if (!smc->hw.mac_ring_is_up || frag_count > queue->tx_free) { frame_status &= ~LAN_TX; if (frame_status) { DB_TX("Ring is down: terminate LAN_TX",0,0,2) ; } else { DB_TX("Ring is down: terminate transmission",0,0,2) ; smt_free_mbuf(smc,mb) ; return ; } } DB_TX("frame_status = 0x%x ",frame_status,0,5) ; if ((frame_status & LAN_TX) && (frame_status & LOC_TX)) { mb->sm_use_count = 2 ; } if (frame_status & LAN_TX) { t = queue->tx_curr_put ; frame_status |= FIRST_FRAG ; for (i = 0; i < frag_count; i++) { DB_TX("init TxD = 0x%x",(void *)t,0,5) ; if (i == frag_count-1) { frame_status |= LAST_FRAG ; t->txd_txdscr = cpu_to_le32(TX_DESCRIPTOR | (((__u32)(mb->sm_len-1)&3) << 27)) ; } t->txd_virt = virt[i] ; phys = dma_master(smc, (void far *)virt[i], frag_len[i], DMA_RD|SMT_BUF) ; t->txd_tbadr = cpu_to_le32(phys) ; tbctrl = cpu_to_le32((((__u32)frame_status & (FIRST_FRAG|LAST_FRAG)) << 26) | BMU_OWN | BMU_CHECK | BMU_SMT_TX 
|frag_len[i]) ; t->txd_tbctrl = tbctrl ; #ifndef AIX DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ; outpd(queue->tx_bmu_ctl,CSR_START) ; #else DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ; outpd(ADDR(B0_XA_CSR),CSR_START) ; #endif frame_status &= ~FIRST_FRAG ; queue->tx_curr_put = t = t->txd_next ; queue->tx_free-- ; queue->tx_used++ ; } smc->mib.m[MAC0].fddiMACTransmit_Ct++ ; queue_txd_mb(smc,mb) ; } if (frame_status & LOC_TX) { DB_TX("pass Mbuf to LLC queue",0,0,5) ; queue_llc_rx(smc,mb) ; } /* * We need to unqueue the free SMT_MBUFs here, because it may * be that the SMT want's to send more than 1 frame for one down call */ mac_drv_clear_txd(smc) ; NDD_TRACE("THSE",t,queue->tx_free,frag_count) ; } /* BEGIN_MANUAL_ENTRY(mac_drv_clear_txd) * void mac_drv_clear_txd(smc) * * function DOWNCALL (hardware module, hwmtm.c) * mac_drv_clear_txd searches in both send queues for TxD's * which were finished by the adapter. It calls dma_complete * for each TxD. If the last fragment of an LLC frame is * reached, it calls mac_drv_tx_complete to release the * send buffer. 
* * return nothing * * END_MANUAL_ENTRY */ static void mac_drv_clear_txd(struct s_smc *smc) { struct s_smt_tx_queue *queue ; struct s_smt_fp_txd volatile *t1 ; struct s_smt_fp_txd volatile *t2 = NULL ; SMbuf *mb ; u_long tbctrl ; int i ; int frag_count ; int n ; NDD_TRACE("THcB",0,0,0) ; for (i = QUEUE_S; i <= QUEUE_A0; i++) { queue = smc->hw.fp.tx[i] ; t1 = queue->tx_curr_get ; DB_TX("clear_txd: QUEUE = %d (0=sync/1=async)",i,0,5) ; for ( ; ; ) { frag_count = 0 ; do { DRV_BUF_FLUSH(t1,DDI_DMA_SYNC_FORCPU) ; DB_TX("check OWN/EOF bit of TxD 0x%x",t1,0,5) ; tbctrl = le32_to_cpu(CR_READ(t1->txd_tbctrl)); if (tbctrl & BMU_OWN || !queue->tx_used){ DB_TX("End of TxDs queue %d",i,0,4) ; goto free_next_queue ; /* next queue */ } t1 = t1->txd_next ; frag_count++ ; } while (!(tbctrl & BMU_EOF)) ; t1 = queue->tx_curr_get ; for (n = frag_count; n; n--) { tbctrl = le32_to_cpu(t1->txd_tbctrl) ; dma_complete(smc, (union s_fp_descr volatile *) t1, (int) (DMA_RD | ((tbctrl & BMU_SMT_TX) >> 18))) ; t2 = t1 ; t1 = t1->txd_next ; } if (tbctrl & BMU_SMT_TX) { mb = get_txd_mb(smc) ; smt_free_mbuf(smc,mb) ; } else { #ifndef PASS_1ST_TXD_2_TX_COMP DB_TX("mac_drv_tx_comp for TxD 0x%x",t2,0,4) ; mac_drv_tx_complete(smc,t2) ; #else DB_TX("mac_drv_tx_comp for TxD 0x%x", queue->tx_curr_get,0,4) ; mac_drv_tx_complete(smc,queue->tx_curr_get) ; #endif } queue->tx_curr_get = t1 ; queue->tx_free += frag_count ; queue->tx_used -= frag_count ; } free_next_queue: ; } NDD_TRACE("THcE",0,0,0) ; } /* * BEGINN_MANUAL_ENTRY(mac_drv_clear_tx_queue) * * void mac_drv_clear_tx_queue(smc) * struct s_smc *smc ; * * function DOWNCALL (hardware module, hwmtm.c) * mac_drv_clear_tx_queue is called from the SMT when * the RMT state machine has entered the ISOLATE state. * This function is also called by the os-specific module * after it has called the function card_stop(). * In this case, the frames in the send queues are obsolete and * should be removed. 
* * note calling sequence: * CLI_FBI(), card_stop(), * mac_drv_clear_tx_queue(), mac_drv_clear_rx_queue(), * * NOTE: The caller is responsible that the BMUs are idle * when this function is called. * * END_MANUAL_ENTRY */ void mac_drv_clear_tx_queue(struct s_smc *smc) { struct s_smt_fp_txd volatile *t ; struct s_smt_tx_queue *queue ; int tx_used ; int i ; if (smc->hw.hw_state != STOPPED) { SK_BREAK() ; SMT_PANIC(smc,HWM_E0011,HWM_E0011_MSG) ; return ; } for (i = QUEUE_S; i <= QUEUE_A0; i++) { queue = smc->hw.fp.tx[i] ; DB_TX("clear_tx_queue: QUEUE = %d (0=sync/1=async)",i,0,5) ; /* * switch the OWN bit of all pending frames to the host */ t = queue->tx_curr_get ; tx_used = queue->tx_used ; while (tx_used) { DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORCPU) ; DB_TX("switch OWN bit of TxD 0x%x ",t,0,5) ; t->txd_tbctrl &= ~cpu_to_le32(BMU_OWN) ; DRV_BUF_FLUSH(t,DDI_DMA_SYNC_FORDEV) ; t = t->txd_next ; tx_used-- ; } } /* * release all TxD's for both send queues */ mac_drv_clear_txd(smc) ; for (i = QUEUE_S; i <= QUEUE_A0; i++) { queue = smc->hw.fp.tx[i] ; t = queue->tx_curr_get ; /* * write the phys pointer of the NEXT descriptor into the * BMU's current address descriptor pointer and set * tx_curr_get and tx_curr_put to this position */ if (i == QUEUE_S) { outpd(ADDR(B5_XS_DA),le32_to_cpu(t->txd_ntdadr)) ; } else { outpd(ADDR(B5_XA_DA),le32_to_cpu(t->txd_ntdadr)) ; } queue->tx_curr_put = queue->tx_curr_get->txd_next ; queue->tx_curr_get = queue->tx_curr_put ; } } /* ------------------------------------------------------------- TEST FUNCTIONS: ------------------------------------------------------------- */ #ifdef DEBUG /* * BEGIN_MANUAL_ENTRY(mac_drv_debug_lev) * void mac_drv_debug_lev(smc,flag,lev) * * function DOWNCALL (drvsr.c) * To get a special debug info the user can assign a debug level * to any debug flag. 
* * para flag debug flag, possible values are: * = 0: reset all debug flags (the defined level is * ignored) * = 1: debug.d_smtf * = 2: debug.d_smt * = 3: debug.d_ecm * = 4: debug.d_rmt * = 5: debug.d_cfm * = 6: debug.d_pcm * * = 10: debug.d_os.hwm_rx (hardware module receive path) * = 11: debug.d_os.hwm_tx(hardware module transmit path) * = 12: debug.d_os.hwm_gen(hardware module general flag) * * lev debug level * * END_MANUAL_ENTRY */ void mac_drv_debug_lev(struct s_smc *smc, int flag, int lev) { switch(flag) { case (int)NULL: DB_P.d_smtf = DB_P.d_smt = DB_P.d_ecm = DB_P.d_rmt = 0 ; DB_P.d_cfm = 0 ; DB_P.d_os.hwm_rx = DB_P.d_os.hwm_tx = DB_P.d_os.hwm_gen = 0 ; #ifdef SBA DB_P.d_sba = 0 ; #endif #ifdef ESS DB_P.d_ess = 0 ; #endif break ; case DEBUG_SMTF: DB_P.d_smtf = lev ; break ; case DEBUG_SMT: DB_P.d_smt = lev ; break ; case DEBUG_ECM: DB_P.d_ecm = lev ; break ; case DEBUG_RMT: DB_P.d_rmt = lev ; break ; case DEBUG_CFM: DB_P.d_cfm = lev ; break ; case DEBUG_PCM: DB_P.d_pcm = lev ; break ; case DEBUG_SBA: #ifdef SBA DB_P.d_sba = lev ; #endif break ; case DEBUG_ESS: #ifdef ESS DB_P.d_ess = lev ; #endif break ; case DB_HWM_RX: DB_P.d_os.hwm_rx = lev ; break ; case DB_HWM_TX: DB_P.d_os.hwm_tx = lev ; break ; case DB_HWM_GEN: DB_P.d_os.hwm_gen = lev ; break ; default: break ; } } #endif
gpl-2.0
vanlamtung/linux-2.6-imx
drivers/acpi/acpica/uttrack.c
364
20630
/****************************************************************************** * * Module Name: uttrack - Memory allocation tracking routines (debug only) * *****************************************************************************/ /* * Copyright (C) 2000 - 2015, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ /* * These procedures are used for tracking memory leaks in the subsystem, and * they get compiled out when the ACPI_DBG_TRACK_ALLOCATIONS is not set. * * Each memory allocation is tracked via a doubly linked list. Each * element contains the caller's component, module name, function name, and * line number. acpi_ut_allocate and acpi_ut_allocate_zeroed call * acpi_ut_track_allocation to add an element to the list; deletion * occurs in the body of acpi_ut_free. */ #include <acpi/acpi.h> #include "accommon.h" #ifdef ACPI_DBG_TRACK_ALLOCATIONS #define _COMPONENT ACPI_UTILITIES ACPI_MODULE_NAME("uttrack") /* Local prototypes */ static struct acpi_debug_mem_block *acpi_ut_find_allocation(struct acpi_debug_mem_block *allocation); static acpi_status acpi_ut_track_allocation(struct acpi_debug_mem_block *address, acpi_size size, u8 alloc_type, u32 component, const char *module, u32 line); static acpi_status acpi_ut_remove_allocation(struct acpi_debug_mem_block *address, u32 component, const char *module, u32 line); /******************************************************************************* * * FUNCTION: acpi_ut_create_list * * PARAMETERS: cache_name - Ascii name for the cache * object_size - Size of each cached object * return_cache - Where the new cache object is returned * * RETURN: Status * * DESCRIPTION: Create a local memory list for tracking purposed * ******************************************************************************/ acpi_status acpi_ut_create_list(char 
*list_name, u16 object_size, struct acpi_memory_list **return_cache) { struct acpi_memory_list *cache; cache = acpi_os_allocate(sizeof(struct acpi_memory_list)); if (!cache) { return (AE_NO_MEMORY); } ACPI_MEMSET(cache, 0, sizeof(struct acpi_memory_list)); cache->list_name = list_name; cache->object_size = object_size; *return_cache = cache; return (AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ut_allocate_and_track * * PARAMETERS: size - Size of the allocation * component - Component type of caller * module - Source file name of caller * line - Line number of caller * * RETURN: Address of the allocated memory on success, NULL on failure. * * DESCRIPTION: The subsystem's equivalent of malloc. * ******************************************************************************/ void *acpi_ut_allocate_and_track(acpi_size size, u32 component, const char *module, u32 line) { struct acpi_debug_mem_block *allocation; acpi_status status; /* Check for an inadvertent size of zero bytes */ if (!size) { ACPI_WARNING((module, line, "Attempt to allocate zero bytes, allocating 1 byte")); size = 1; } allocation = acpi_os_allocate(size + sizeof(struct acpi_debug_mem_header)); if (!allocation) { /* Report allocation error */ ACPI_WARNING((module, line, "Could not allocate size %u", (u32)size)); return (NULL); } status = acpi_ut_track_allocation(allocation, size, ACPI_MEM_MALLOC, component, module, line); if (ACPI_FAILURE(status)) { acpi_os_free(allocation); return (NULL); } acpi_gbl_global_list->total_allocated++; acpi_gbl_global_list->total_size += (u32)size; acpi_gbl_global_list->current_total_size += (u32)size; if (acpi_gbl_global_list->current_total_size > acpi_gbl_global_list->max_occupied) { acpi_gbl_global_list->max_occupied = acpi_gbl_global_list->current_total_size; } return ((void *)&allocation->user_space); } /******************************************************************************* * * FUNCTION: 
acpi_ut_allocate_zeroed_and_track * * PARAMETERS: size - Size of the allocation * component - Component type of caller * module - Source file name of caller * line - Line number of caller * * RETURN: Address of the allocated memory on success, NULL on failure. * * DESCRIPTION: Subsystem equivalent of calloc. * ******************************************************************************/ void *acpi_ut_allocate_zeroed_and_track(acpi_size size, u32 component, const char *module, u32 line) { struct acpi_debug_mem_block *allocation; acpi_status status; /* Check for an inadvertent size of zero bytes */ if (!size) { ACPI_WARNING((module, line, "Attempt to allocate zero bytes, allocating 1 byte")); size = 1; } allocation = acpi_os_allocate_zeroed(size + sizeof(struct acpi_debug_mem_header)); if (!allocation) { /* Report allocation error */ ACPI_ERROR((module, line, "Could not allocate size %u", (u32)size)); return (NULL); } status = acpi_ut_track_allocation(allocation, size, ACPI_MEM_CALLOC, component, module, line); if (ACPI_FAILURE(status)) { acpi_os_free(allocation); return (NULL); } acpi_gbl_global_list->total_allocated++; acpi_gbl_global_list->total_size += (u32)size; acpi_gbl_global_list->current_total_size += (u32)size; if (acpi_gbl_global_list->current_total_size > acpi_gbl_global_list->max_occupied) { acpi_gbl_global_list->max_occupied = acpi_gbl_global_list->current_total_size; } return ((void *)&allocation->user_space); } /******************************************************************************* * * FUNCTION: acpi_ut_free_and_track * * PARAMETERS: allocation - Address of the memory to deallocate * component - Component type of caller * module - Source file name of caller * line - Line number of caller * * RETURN: None * * DESCRIPTION: Frees the memory at Allocation * ******************************************************************************/ void acpi_ut_free_and_track(void *allocation, u32 component, const char *module, u32 line) { struct 
acpi_debug_mem_block *debug_block; acpi_status status; ACPI_FUNCTION_TRACE_PTR(ut_free, allocation); if (NULL == allocation) { ACPI_ERROR((module, line, "Attempt to delete a NULL address")); return_VOID; } debug_block = ACPI_CAST_PTR(struct acpi_debug_mem_block, (((char *)allocation) - sizeof(struct acpi_debug_mem_header))); acpi_gbl_global_list->total_freed++; acpi_gbl_global_list->current_total_size -= debug_block->size; status = acpi_ut_remove_allocation(debug_block, component, module, line); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Could not free memory")); } acpi_os_free(debug_block); ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "%p freed (block %p)\n", allocation, debug_block)); return_VOID; } /******************************************************************************* * * FUNCTION: acpi_ut_find_allocation * * PARAMETERS: allocation - Address of allocated memory * * RETURN: Three cases: * 1) List is empty, NULL is returned. * 2) Element was found. Returns Allocation parameter. * 3) Element was not found. Returns position where it should be * inserted into the list. * * DESCRIPTION: Searches for an element in the global allocation tracking list. * If the element is not found, returns the location within the * list where the element should be inserted. * * Note: The list is ordered by larger-to-smaller addresses. * * This global list is used to detect memory leaks in ACPICA as * well as other issues such as an attempt to release the same * internal object more than once. Although expensive as far * as cpu time, this list is much more helpful for finding these * types of issues than using memory leak detectors outside of * the ACPICA code. 
* ******************************************************************************/ static struct acpi_debug_mem_block *acpi_ut_find_allocation(struct acpi_debug_mem_block *allocation) { struct acpi_debug_mem_block *element; element = acpi_gbl_global_list->list_head; if (!element) { return (NULL); } /* * Search for the address. * * Note: List is ordered by larger-to-smaller addresses, on the * assumption that a new allocation usually has a larger address * than previous allocations. */ while (element > allocation) { /* Check for end-of-list */ if (!element->next) { return (element); } element = element->next; } if (element == allocation) { return (element); } return (element->previous); } /******************************************************************************* * * FUNCTION: acpi_ut_track_allocation * * PARAMETERS: allocation - Address of allocated memory * size - Size of the allocation * alloc_type - MEM_MALLOC or MEM_CALLOC * component - Component type of caller * module - Source file name of caller * line - Line number of caller * * RETURN: Status * * DESCRIPTION: Inserts an element into the global allocation tracking list. * ******************************************************************************/ static acpi_status acpi_ut_track_allocation(struct acpi_debug_mem_block *allocation, acpi_size size, u8 alloc_type, u32 component, const char *module, u32 line) { struct acpi_memory_list *mem_list; struct acpi_debug_mem_block *element; acpi_status status = AE_OK; ACPI_FUNCTION_TRACE_PTR(ut_track_allocation, allocation); if (acpi_gbl_disable_mem_tracking) { return_ACPI_STATUS(AE_OK); } mem_list = acpi_gbl_global_list; status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* * Search the global list for this address to make sure it is not * already present. This will catch several kinds of problems. 
*/ element = acpi_ut_find_allocation(allocation); if (element == allocation) { ACPI_ERROR((AE_INFO, "UtTrackAllocation: Allocation (%p) already present in global list!", allocation)); goto unlock_and_exit; } /* Fill in the instance data */ allocation->size = (u32)size; allocation->alloc_type = alloc_type; allocation->component = component; allocation->line = line; ACPI_STRNCPY(allocation->module, module, ACPI_MAX_MODULE_NAME); allocation->module[ACPI_MAX_MODULE_NAME - 1] = 0; if (!element) { /* Insert at list head */ if (mem_list->list_head) { ((struct acpi_debug_mem_block *)(mem_list->list_head))-> previous = allocation; } allocation->next = mem_list->list_head; allocation->previous = NULL; mem_list->list_head = allocation; } else { /* Insert after element */ allocation->next = element->next; allocation->previous = element; if (element->next) { (element->next)->previous = allocation; } element->next = allocation; } unlock_and_exit: status = acpi_ut_release_mutex(ACPI_MTX_MEMORY); return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ut_remove_allocation * * PARAMETERS: allocation - Address of allocated memory * component - Component type of caller * module - Source file name of caller * line - Line number of caller * * RETURN: Status * * DESCRIPTION: Deletes an element from the global allocation tracking list. * ******************************************************************************/ static acpi_status acpi_ut_remove_allocation(struct acpi_debug_mem_block *allocation, u32 component, const char *module, u32 line) { struct acpi_memory_list *mem_list; acpi_status status; ACPI_FUNCTION_NAME(ut_remove_allocation); if (acpi_gbl_disable_mem_tracking) { return (AE_OK); } mem_list = acpi_gbl_global_list; if (NULL == mem_list->list_head) { /* No allocations! 
*/ ACPI_ERROR((module, line, "Empty allocation list, nothing to free!")); return (AE_OK); } status = acpi_ut_acquire_mutex(ACPI_MTX_MEMORY); if (ACPI_FAILURE(status)) { return (status); } /* Unlink */ if (allocation->previous) { (allocation->previous)->next = allocation->next; } else { mem_list->list_head = allocation->next; } if (allocation->next) { (allocation->next)->previous = allocation->previous; } ACPI_DEBUG_PRINT((ACPI_DB_ALLOCATIONS, "Freeing %p, size 0%X\n", &allocation->user_space, allocation->size)); /* Mark the segment as deleted */ ACPI_MEMSET(&allocation->user_space, 0xEA, allocation->size); status = acpi_ut_release_mutex(ACPI_MTX_MEMORY); return (status); } /******************************************************************************* * * FUNCTION: acpi_ut_dump_allocation_info * * PARAMETERS: None * * RETURN: None * * DESCRIPTION: Print some info about the outstanding allocations. * ******************************************************************************/ void acpi_ut_dump_allocation_info(void) { /* struct acpi_memory_list *mem_list; */ ACPI_FUNCTION_TRACE(ut_dump_allocation_info); /* ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, ("%30s: %4d (%3d Kb)\n", "Current allocations", mem_list->current_count, ROUND_UP_TO_1K (mem_list->current_size))); ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, ("%30s: %4d (%3d Kb)\n", "Max concurrent allocations", mem_list->max_concurrent_count, ROUND_UP_TO_1K (mem_list->max_concurrent_size))); ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, ("%30s: %4d (%3d Kb)\n", "Total (all) internal objects", running_object_count, ROUND_UP_TO_1K (running_object_size))); ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, ("%30s: %4d (%3d Kb)\n", "Total (all) allocations", running_alloc_count, ROUND_UP_TO_1K (running_alloc_size))); ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, ("%30s: %4d (%3d Kb)\n", "Current Nodes", acpi_gbl_current_node_count, ROUND_UP_TO_1K (acpi_gbl_current_node_size))); 
ACPI_DEBUG_PRINT (TRACE_ALLOCATIONS | TRACE_TABLES, ("%30s: %4d (%3d Kb)\n", "Max Nodes", acpi_gbl_max_concurrent_node_count, ROUND_UP_TO_1K ((acpi_gbl_max_concurrent_node_count * sizeof (struct acpi_namespace_node))))); */ return_VOID; } /******************************************************************************* * * FUNCTION: acpi_ut_dump_allocations * * PARAMETERS: component - Component(s) to dump info for. * module - Module to dump info for. NULL means all. * * RETURN: None * * DESCRIPTION: Print a list of all outstanding allocations. * ******************************************************************************/ void acpi_ut_dump_allocations(u32 component, const char *module) { struct acpi_debug_mem_block *element; union acpi_descriptor *descriptor; u32 num_outstanding = 0; u8 descriptor_type; ACPI_FUNCTION_TRACE(ut_dump_allocations); if (acpi_gbl_disable_mem_tracking) { return_VOID; } /* * Walk the allocation list. */ if (ACPI_FAILURE(acpi_ut_acquire_mutex(ACPI_MTX_MEMORY))) { return_VOID; } element = acpi_gbl_global_list->list_head; while (element) { if ((element->component & component) && ((module == NULL) || (0 == ACPI_STRCMP(module, element->module)))) { descriptor = ACPI_CAST_PTR(union acpi_descriptor, &element->user_space); if (element->size < sizeof(struct acpi_common_descriptor)) { acpi_os_printf("%p Length 0x%04X %9.9s-%u " "[Not a Descriptor - too small]\n", descriptor, element->size, element->module, element->line); } else { /* Ignore allocated objects that are in a cache */ if (ACPI_GET_DESCRIPTOR_TYPE(descriptor) != ACPI_DESC_TYPE_CACHED) { acpi_os_printf ("%p Length 0x%04X %9.9s-%u [%s] ", descriptor, element->size, element->module, element->line, acpi_ut_get_descriptor_name (descriptor)); /* Validate the descriptor type using Type field and length */ descriptor_type = 0; /* Not a valid descriptor type */ switch (ACPI_GET_DESCRIPTOR_TYPE (descriptor)) { case ACPI_DESC_TYPE_OPERAND: if (element->size == sizeof(union acpi_operand_object)) { 
descriptor_type = ACPI_DESC_TYPE_OPERAND; } break; case ACPI_DESC_TYPE_PARSER: if (element->size == sizeof(union acpi_parse_object)) { descriptor_type = ACPI_DESC_TYPE_PARSER; } break; case ACPI_DESC_TYPE_NAMED: if (element->size == sizeof(struct acpi_namespace_node)) { descriptor_type = ACPI_DESC_TYPE_NAMED; } break; default: break; } /* Display additional info for the major descriptor types */ switch (descriptor_type) { case ACPI_DESC_TYPE_OPERAND: acpi_os_printf ("%12.12s RefCount 0x%04X\n", acpi_ut_get_type_name (descriptor->object.common. type), descriptor->object.common. reference_count); break; case ACPI_DESC_TYPE_PARSER: acpi_os_printf ("AmlOpcode 0x%04hX\n", descriptor->op.asl. aml_opcode); break; case ACPI_DESC_TYPE_NAMED: acpi_os_printf("%4.4s\n", acpi_ut_get_node_name (&descriptor-> node)); break; default: acpi_os_printf("\n"); break; } } } num_outstanding++; } element = element->next; } (void)acpi_ut_release_mutex(ACPI_MTX_MEMORY); /* Print summary */ if (!num_outstanding) { ACPI_INFO((AE_INFO, "No outstanding allocations")); } else { ACPI_ERROR((AE_INFO, "%u(0x%X) Outstanding allocations", num_outstanding, num_outstanding)); } return_VOID; } #endif /* ACPI_DBG_TRACK_ALLOCATIONS */
gpl-2.0
BoyGau/linux
fs/xfs/xfs_attr_inactive.c
620
11963
/* * Copyright (c) 2000-2005 Silicon Graphics, Inc. * Copyright (c) 2013 Red Hat, Inc. * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include "xfs.h" #include "xfs_fs.h" #include "xfs_shared.h" #include "xfs_format.h" #include "xfs_log_format.h" #include "xfs_trans_resv.h" #include "xfs_bit.h" #include "xfs_mount.h" #include "xfs_da_format.h" #include "xfs_da_btree.h" #include "xfs_inode.h" #include "xfs_alloc.h" #include "xfs_attr_remote.h" #include "xfs_trans.h" #include "xfs_inode_item.h" #include "xfs_bmap.h" #include "xfs_attr.h" #include "xfs_attr_leaf.h" #include "xfs_error.h" #include "xfs_quota.h" #include "xfs_trace.h" #include "xfs_dir2.h" /* * Look at all the extents for this logical region, * invalidate any buffers that are incore/in transactions. */ STATIC int xfs_attr3_leaf_freextent( struct xfs_trans **trans, struct xfs_inode *dp, xfs_dablk_t blkno, int blkcnt) { struct xfs_bmbt_irec map; struct xfs_buf *bp; xfs_dablk_t tblkno; xfs_daddr_t dblkno; int tblkcnt; int dblkcnt; int nmap; int error; /* * Roll through the "value", invalidating the attribute value's * blocks. */ tblkno = blkno; tblkcnt = blkcnt; while (tblkcnt > 0) { /* * Try to remember where we decided to put the value. 
*/ nmap = 1; error = xfs_bmapi_read(dp, (xfs_fileoff_t)tblkno, tblkcnt, &map, &nmap, XFS_BMAPI_ATTRFORK); if (error) { return error; } ASSERT(nmap == 1); ASSERT(map.br_startblock != DELAYSTARTBLOCK); /* * If it's a hole, these are already unmapped * so there's nothing to invalidate. */ if (map.br_startblock != HOLESTARTBLOCK) { dblkno = XFS_FSB_TO_DADDR(dp->i_mount, map.br_startblock); dblkcnt = XFS_FSB_TO_BB(dp->i_mount, map.br_blockcount); bp = xfs_trans_get_buf(*trans, dp->i_mount->m_ddev_targp, dblkno, dblkcnt, 0); if (!bp) return -ENOMEM; xfs_trans_binval(*trans, bp); /* * Roll to next transaction. */ error = xfs_trans_roll(trans, dp); if (error) return error; } tblkno += map.br_blockcount; tblkcnt -= map.br_blockcount; } return 0; } /* * Invalidate all of the "remote" value regions pointed to by a particular * leaf block. * Note that we must release the lock on the buffer so that we are not * caught holding something that the logging code wants to flush to disk. */ STATIC int xfs_attr3_leaf_inactive( struct xfs_trans **trans, struct xfs_inode *dp, struct xfs_buf *bp) { struct xfs_attr_leafblock *leaf; struct xfs_attr3_icleaf_hdr ichdr; struct xfs_attr_leaf_entry *entry; struct xfs_attr_leaf_name_remote *name_rmt; struct xfs_attr_inactive_list *list; struct xfs_attr_inactive_list *lp; int error; int count; int size; int tmp; int i; struct xfs_mount *mp = bp->b_target->bt_mount; leaf = bp->b_addr; xfs_attr3_leaf_hdr_from_disk(mp->m_attr_geo, &ichdr, leaf); /* * Count the number of "remote" value extents. */ count = 0; entry = xfs_attr3_leaf_entryp(leaf); for (i = 0; i < ichdr.count; entry++, i++) { if (be16_to_cpu(entry->nameidx) && ((entry->flags & XFS_ATTR_LOCAL) == 0)) { name_rmt = xfs_attr3_leaf_name_remote(leaf, i); if (name_rmt->valueblk) count++; } } /* * If there are no "remote" values, we're done. */ if (count == 0) { xfs_trans_brelse(*trans, bp); return 0; } /* * Allocate storage for a list of all the "remote" value extents. 
*/ size = count * sizeof(xfs_attr_inactive_list_t); list = kmem_alloc(size, KM_SLEEP); /* * Identify each of the "remote" value extents. */ lp = list; entry = xfs_attr3_leaf_entryp(leaf); for (i = 0; i < ichdr.count; entry++, i++) { if (be16_to_cpu(entry->nameidx) && ((entry->flags & XFS_ATTR_LOCAL) == 0)) { name_rmt = xfs_attr3_leaf_name_remote(leaf, i); if (name_rmt->valueblk) { lp->valueblk = be32_to_cpu(name_rmt->valueblk); lp->valuelen = xfs_attr3_rmt_blocks(dp->i_mount, be32_to_cpu(name_rmt->valuelen)); lp++; } } } xfs_trans_brelse(*trans, bp); /* unlock for trans. in freextent() */ /* * Invalidate each of the "remote" value extents. */ error = 0; for (lp = list, i = 0; i < count; i++, lp++) { tmp = xfs_attr3_leaf_freextent(trans, dp, lp->valueblk, lp->valuelen); if (error == 0) error = tmp; /* save only the 1st errno */ } kmem_free(list); return error; } /* * Recurse (gasp!) through the attribute nodes until we find leaves. * We're doing a depth-first traversal in order to invalidate everything. */ STATIC int xfs_attr3_node_inactive( struct xfs_trans **trans, struct xfs_inode *dp, struct xfs_buf *bp, int level) { xfs_da_blkinfo_t *info; xfs_da_intnode_t *node; xfs_dablk_t child_fsb; xfs_daddr_t parent_blkno, child_blkno; int error, i; struct xfs_buf *child_bp; struct xfs_da_node_entry *btree; struct xfs_da3_icnode_hdr ichdr; /* * Since this code is recursive (gasp!) we must protect ourselves. */ if (level > XFS_DA_NODE_MAXDEPTH) { xfs_trans_brelse(*trans, bp); /* no locks for later trans */ return -EIO; } node = bp->b_addr; dp->d_ops->node_hdr_from_disk(&ichdr, node); parent_blkno = bp->b_bn; if (!ichdr.count) { xfs_trans_brelse(*trans, bp); return 0; } btree = dp->d_ops->node_tree_p(node); child_fsb = be32_to_cpu(btree[0].before); xfs_trans_brelse(*trans, bp); /* no locks for later trans */ /* * If this is the node level just above the leaves, simply loop * over the leaves removing all of them. If this is higher up * in the tree, recurse downward. 
*/ for (i = 0; i < ichdr.count; i++) { /* * Read the subsidiary block to see what we have to work with. * Don't do this in a transaction. This is a depth-first * traversal of the tree so we may deal with many blocks * before we come back to this one. */ error = xfs_da3_node_read(*trans, dp, child_fsb, -2, &child_bp, XFS_ATTR_FORK); if (error) return error; if (child_bp) { /* save for re-read later */ child_blkno = XFS_BUF_ADDR(child_bp); /* * Invalidate the subtree, however we have to. */ info = child_bp->b_addr; switch (info->magic) { case cpu_to_be16(XFS_DA_NODE_MAGIC): case cpu_to_be16(XFS_DA3_NODE_MAGIC): error = xfs_attr3_node_inactive(trans, dp, child_bp, level + 1); break; case cpu_to_be16(XFS_ATTR_LEAF_MAGIC): case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC): error = xfs_attr3_leaf_inactive(trans, dp, child_bp); break; default: error = -EIO; xfs_trans_brelse(*trans, child_bp); break; } if (error) return error; /* * Remove the subsidiary block from the cache * and from the log. */ error = xfs_da_get_buf(*trans, dp, 0, child_blkno, &child_bp, XFS_ATTR_FORK); if (error) return error; xfs_trans_binval(*trans, child_bp); } /* * If we're not done, re-read the parent to get the next * child block number. */ if (i + 1 < ichdr.count) { error = xfs_da3_node_read(*trans, dp, 0, parent_blkno, &bp, XFS_ATTR_FORK); if (error) return error; child_fsb = be32_to_cpu(btree[i + 1].before); xfs_trans_brelse(*trans, bp); } /* * Atomically commit the whole invalidate stuff. */ error = xfs_trans_roll(trans, dp); if (error) return error; } return 0; } /* * Indiscriminately delete the entire attribute fork * * Recurse (gasp!) through the attribute nodes until we find leaves. * We're doing a depth-first traversal in order to invalidate everything. */ int xfs_attr3_root_inactive( struct xfs_trans **trans, struct xfs_inode *dp) { struct xfs_da_blkinfo *info; struct xfs_buf *bp; xfs_daddr_t blkno; int error; /* * Read block 0 to see what we have to work with. 
* We only get here if we have extents, since we remove * the extents in reverse order the extent containing * block 0 must still be there. */ error = xfs_da3_node_read(*trans, dp, 0, -1, &bp, XFS_ATTR_FORK); if (error) return error; blkno = bp->b_bn; /* * Invalidate the tree, even if the "tree" is only a single leaf block. * This is a depth-first traversal! */ info = bp->b_addr; switch (info->magic) { case cpu_to_be16(XFS_DA_NODE_MAGIC): case cpu_to_be16(XFS_DA3_NODE_MAGIC): error = xfs_attr3_node_inactive(trans, dp, bp, 1); break; case cpu_to_be16(XFS_ATTR_LEAF_MAGIC): case cpu_to_be16(XFS_ATTR3_LEAF_MAGIC): error = xfs_attr3_leaf_inactive(trans, dp, bp); break; default: error = -EIO; xfs_trans_brelse(*trans, bp); break; } if (error) return error; /* * Invalidate the incore copy of the root block. */ error = xfs_da_get_buf(*trans, dp, 0, blkno, &bp, XFS_ATTR_FORK); if (error) return error; xfs_trans_binval(*trans, bp); /* remove from cache */ /* * Commit the invalidate and start the next transaction. */ error = xfs_trans_roll(trans, dp); return error; } /* * xfs_attr_inactive kills all traces of an attribute fork on an inode. It * removes both the on-disk and in-memory inode fork. Note that this also has to * handle the condition of inodes without attributes but with an attribute fork * configured, so we can't use xfs_inode_hasattr() here. * * The in-memory attribute fork is removed even on error. */ int xfs_attr_inactive( struct xfs_inode *dp) { struct xfs_trans *trans; struct xfs_mount *mp; int lock_mode = XFS_ILOCK_SHARED; int error = 0; mp = dp->i_mount; ASSERT(! XFS_NOT_DQATTACHED(mp, dp)); xfs_ilock(dp, lock_mode); if (!XFS_IFORK_Q(dp)) goto out_destroy_fork; xfs_iunlock(dp, lock_mode); /* * Start our first transaction of the day. * * All future transactions during this code must be "chained" off * this one via the trans_dup() call. All transactions will contain * the inode, and the inode will always be marked with trans_ihold(). 
* Since the inode will be locked in all transactions, we must log * the inode in every transaction to let it float upward through * the log. */ lock_mode = 0; trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL); error = xfs_trans_reserve(trans, &M_RES(mp)->tr_attrinval, 0, 0); if (error) goto out_cancel; lock_mode = XFS_ILOCK_EXCL; xfs_ilock(dp, lock_mode); if (!XFS_IFORK_Q(dp)) goto out_cancel; /* * No need to make quota reservations here. We expect to release some * blocks, not allocate, in the common case. */ xfs_trans_ijoin(trans, dp, 0); /* * Invalidate and truncate the attribute fork extents. Make sure the * fork actually has attributes as otherwise the invalidation has no * blocks to read and returns an error. In this case, just do the fork * removal below. */ if (xfs_inode_hasattr(dp) && dp->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) { error = xfs_attr3_root_inactive(&trans, dp); if (error) goto out_cancel; error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0); if (error) goto out_cancel; } /* Reset the attribute fork - this also destroys the in-core fork */ xfs_attr_fork_remove(dp, trans); error = xfs_trans_commit(trans); xfs_iunlock(dp, lock_mode); return error; out_cancel: xfs_trans_cancel(trans); out_destroy_fork: /* kill the in-core attr fork before we drop the inode lock */ if (dp->i_afp) xfs_idestroy_fork(dp, XFS_ATTR_FORK); if (lock_mode) xfs_iunlock(dp, lock_mode); return error; }
gpl-2.0
AOSPA-L/android_kernel_oneplus_msm8974
arch/mips/mm/c-r4k.c
620
38690
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 1996 David S. Miller (davem@davemloft.net) * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002 Ralf Baechle (ralf@gnu.org) * Copyright (C) 1999, 2000 Silicon Graphics, Inc. */ #include <linux/hardirq.h> #include <linux/init.h> #include <linux/highmem.h> #include <linux/kernel.h> #include <linux/linkage.h> #include <linux/preempt.h> #include <linux/sched.h> #include <linux/smp.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/bitops.h> #include <asm/bcache.h> #include <asm/bootinfo.h> #include <asm/cache.h> #include <asm/cacheops.h> #include <asm/cpu.h> #include <asm/cpu-features.h> #include <asm/io.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/r4kcache.h> #include <asm/sections.h> #include <asm/mmu_context.h> #include <asm/war.h> #include <asm/cacheflush.h> /* for run_uncached() */ /* * Special Variant of smp_call_function for use by cache functions: * * o No return value * o collapses to normal function call on UP kernels * o collapses to normal function call on systems with a single shared * primary cache. * o doesn't disable interrupts on the local CPU */ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info) { preempt_disable(); #if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) smp_call_function(func, info, 1); #endif func(info); preempt_enable(); } #if defined(CONFIG_MIPS_CMP) #define cpu_has_safe_index_cacheops 0 #else #define cpu_has_safe_index_cacheops 1 #endif /* * Must die. 
*/ static unsigned long icache_size __read_mostly; static unsigned long dcache_size __read_mostly; static unsigned long scache_size __read_mostly; /* * Dummy cache handling routines for machines without boardcaches */ static void cache_noop(void) {} static struct bcache_ops no_sc_ops = { .bc_enable = (void *)cache_noop, .bc_disable = (void *)cache_noop, .bc_wback_inv = (void *)cache_noop, .bc_inv = (void *)cache_noop }; struct bcache_ops *bcops = &no_sc_ops; #define cpu_is_r4600_v1_x() ((read_c0_prid() & 0xfffffff0) == 0x00002010) #define cpu_is_r4600_v2_x() ((read_c0_prid() & 0xfffffff0) == 0x00002020) #define R4600_HIT_CACHEOP_WAR_IMPL \ do { \ if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x()) \ *(volatile unsigned long *)CKSEG1; \ if (R4600_V1_HIT_CACHEOP_WAR) \ __asm__ __volatile__("nop;nop;nop;nop"); \ } while (0) static void (*r4k_blast_dcache_page)(unsigned long addr); static inline void r4k_blast_dcache_page_dc32(unsigned long addr) { R4600_HIT_CACHEOP_WAR_IMPL; blast_dcache32_page(addr); } static inline void r4k_blast_dcache_page_dc64(unsigned long addr) { R4600_HIT_CACHEOP_WAR_IMPL; blast_dcache64_page(addr); } static void __cpuinit r4k_blast_dcache_page_setup(void) { unsigned long dc_lsize = cpu_dcache_line_size(); if (dc_lsize == 0) r4k_blast_dcache_page = (void *)cache_noop; else if (dc_lsize == 16) r4k_blast_dcache_page = blast_dcache16_page; else if (dc_lsize == 32) r4k_blast_dcache_page = r4k_blast_dcache_page_dc32; else if (dc_lsize == 64) r4k_blast_dcache_page = r4k_blast_dcache_page_dc64; } static void (* r4k_blast_dcache_page_indexed)(unsigned long addr); static void __cpuinit r4k_blast_dcache_page_indexed_setup(void) { unsigned long dc_lsize = cpu_dcache_line_size(); if (dc_lsize == 0) r4k_blast_dcache_page_indexed = (void *)cache_noop; else if (dc_lsize == 16) r4k_blast_dcache_page_indexed = blast_dcache16_page_indexed; else if (dc_lsize == 32) r4k_blast_dcache_page_indexed = blast_dcache32_page_indexed; else if (dc_lsize == 64) 
r4k_blast_dcache_page_indexed = blast_dcache64_page_indexed; } static void (* r4k_blast_dcache)(void); static void __cpuinit r4k_blast_dcache_setup(void) { unsigned long dc_lsize = cpu_dcache_line_size(); if (dc_lsize == 0) r4k_blast_dcache = (void *)cache_noop; else if (dc_lsize == 16) r4k_blast_dcache = blast_dcache16; else if (dc_lsize == 32) r4k_blast_dcache = blast_dcache32; else if (dc_lsize == 64) r4k_blast_dcache = blast_dcache64; } /* force code alignment (used for TX49XX_ICACHE_INDEX_INV_WAR) */ #define JUMP_TO_ALIGN(order) \ __asm__ __volatile__( \ "b\t1f\n\t" \ ".align\t" #order "\n\t" \ "1:\n\t" \ ) #define CACHE32_UNROLL32_ALIGN JUMP_TO_ALIGN(10) /* 32 * 32 = 1024 */ #define CACHE32_UNROLL32_ALIGN2 JUMP_TO_ALIGN(11) static inline void blast_r4600_v1_icache32(void) { unsigned long flags; local_irq_save(flags); blast_icache32(); local_irq_restore(flags); } static inline void tx49_blast_icache32(void) { unsigned long start = INDEX_BASE; unsigned long end = start + current_cpu_data.icache.waysize; unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit; unsigned long ws_end = current_cpu_data.icache.ways << current_cpu_data.icache.waybit; unsigned long ws, addr; CACHE32_UNROLL32_ALIGN2; /* I'm in even chunk. blast odd chunks */ for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start + 0x400; addr < end; addr += 0x400 * 2) cache32_unroll32(addr|ws, Index_Invalidate_I); CACHE32_UNROLL32_ALIGN; /* I'm in odd chunk. 
blast even chunks */ for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 0x400 * 2) cache32_unroll32(addr|ws, Index_Invalidate_I); } static inline void blast_icache32_r4600_v1_page_indexed(unsigned long page) { unsigned long flags; local_irq_save(flags); blast_icache32_page_indexed(page); local_irq_restore(flags); } static inline void tx49_blast_icache32_page_indexed(unsigned long page) { unsigned long indexmask = current_cpu_data.icache.waysize - 1; unsigned long start = INDEX_BASE + (page & indexmask); unsigned long end = start + PAGE_SIZE; unsigned long ws_inc = 1UL << current_cpu_data.icache.waybit; unsigned long ws_end = current_cpu_data.icache.ways << current_cpu_data.icache.waybit; unsigned long ws, addr; CACHE32_UNROLL32_ALIGN2; /* I'm in even chunk. blast odd chunks */ for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start + 0x400; addr < end; addr += 0x400 * 2) cache32_unroll32(addr|ws, Index_Invalidate_I); CACHE32_UNROLL32_ALIGN; /* I'm in odd chunk. 
blast even chunks */ for (ws = 0; ws < ws_end; ws += ws_inc) for (addr = start; addr < end; addr += 0x400 * 2) cache32_unroll32(addr|ws, Index_Invalidate_I); } static void (* r4k_blast_icache_page)(unsigned long addr); static void __cpuinit r4k_blast_icache_page_setup(void) { unsigned long ic_lsize = cpu_icache_line_size(); if (ic_lsize == 0) r4k_blast_icache_page = (void *)cache_noop; else if (ic_lsize == 16) r4k_blast_icache_page = blast_icache16_page; else if (ic_lsize == 32) r4k_blast_icache_page = blast_icache32_page; else if (ic_lsize == 64) r4k_blast_icache_page = blast_icache64_page; } static void (* r4k_blast_icache_page_indexed)(unsigned long addr); static void __cpuinit r4k_blast_icache_page_indexed_setup(void) { unsigned long ic_lsize = cpu_icache_line_size(); if (ic_lsize == 0) r4k_blast_icache_page_indexed = (void *)cache_noop; else if (ic_lsize == 16) r4k_blast_icache_page_indexed = blast_icache16_page_indexed; else if (ic_lsize == 32) { if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x()) r4k_blast_icache_page_indexed = blast_icache32_r4600_v1_page_indexed; else if (TX49XX_ICACHE_INDEX_INV_WAR) r4k_blast_icache_page_indexed = tx49_blast_icache32_page_indexed; else r4k_blast_icache_page_indexed = blast_icache32_page_indexed; } else if (ic_lsize == 64) r4k_blast_icache_page_indexed = blast_icache64_page_indexed; } static void (* r4k_blast_icache)(void); static void __cpuinit r4k_blast_icache_setup(void) { unsigned long ic_lsize = cpu_icache_line_size(); if (ic_lsize == 0) r4k_blast_icache = (void *)cache_noop; else if (ic_lsize == 16) r4k_blast_icache = blast_icache16; else if (ic_lsize == 32) { if (R4600_V1_INDEX_ICACHEOP_WAR && cpu_is_r4600_v1_x()) r4k_blast_icache = blast_r4600_v1_icache32; else if (TX49XX_ICACHE_INDEX_INV_WAR) r4k_blast_icache = tx49_blast_icache32; else r4k_blast_icache = blast_icache32; } else if (ic_lsize == 64) r4k_blast_icache = blast_icache64; } static void (* r4k_blast_scache_page)(unsigned long addr); static void 
__cpuinit r4k_blast_scache_page_setup(void) { unsigned long sc_lsize = cpu_scache_line_size(); if (scache_size == 0) r4k_blast_scache_page = (void *)cache_noop; else if (sc_lsize == 16) r4k_blast_scache_page = blast_scache16_page; else if (sc_lsize == 32) r4k_blast_scache_page = blast_scache32_page; else if (sc_lsize == 64) r4k_blast_scache_page = blast_scache64_page; else if (sc_lsize == 128) r4k_blast_scache_page = blast_scache128_page; } static void (* r4k_blast_scache_page_indexed)(unsigned long addr); static void __cpuinit r4k_blast_scache_page_indexed_setup(void) { unsigned long sc_lsize = cpu_scache_line_size(); if (scache_size == 0) r4k_blast_scache_page_indexed = (void *)cache_noop; else if (sc_lsize == 16) r4k_blast_scache_page_indexed = blast_scache16_page_indexed; else if (sc_lsize == 32) r4k_blast_scache_page_indexed = blast_scache32_page_indexed; else if (sc_lsize == 64) r4k_blast_scache_page_indexed = blast_scache64_page_indexed; else if (sc_lsize == 128) r4k_blast_scache_page_indexed = blast_scache128_page_indexed; } static void (* r4k_blast_scache)(void); static void __cpuinit r4k_blast_scache_setup(void) { unsigned long sc_lsize = cpu_scache_line_size(); if (scache_size == 0) r4k_blast_scache = (void *)cache_noop; else if (sc_lsize == 16) r4k_blast_scache = blast_scache16; else if (sc_lsize == 32) r4k_blast_scache = blast_scache32; else if (sc_lsize == 64) r4k_blast_scache = blast_scache64; else if (sc_lsize == 128) r4k_blast_scache = blast_scache128; } static inline void local_r4k___flush_cache_all(void * args) { #if defined(CONFIG_CPU_LOONGSON2) r4k_blast_scache(); return; #endif r4k_blast_dcache(); r4k_blast_icache(); switch (current_cpu_type()) { case CPU_R4000SC: case CPU_R4000MC: case CPU_R4400SC: case CPU_R4400MC: case CPU_R10000: case CPU_R12000: case CPU_R14000: r4k_blast_scache(); } } static void r4k___flush_cache_all(void) { r4k_on_each_cpu(local_r4k___flush_cache_all, NULL); } static inline int has_valid_asid(const struct mm_struct 
*mm) { #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) int i; for_each_online_cpu(i) if (cpu_context(i, mm)) return 1; return 0; #else return cpu_context(smp_processor_id(), mm); #endif } static void r4k__flush_cache_vmap(void) { r4k_blast_dcache(); } static void r4k__flush_cache_vunmap(void) { r4k_blast_dcache(); } static inline void local_r4k_flush_cache_range(void * args) { struct vm_area_struct *vma = args; int exec = vma->vm_flags & VM_EXEC; if (!(has_valid_asid(vma->vm_mm))) return; r4k_blast_dcache(); if (exec) r4k_blast_icache(); } static void r4k_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) { int exec = vma->vm_flags & VM_EXEC; if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) r4k_on_each_cpu(local_r4k_flush_cache_range, vma); } static inline void local_r4k_flush_cache_mm(void * args) { struct mm_struct *mm = args; if (!has_valid_asid(mm)) return; /* * Kludge alert. For obscure reasons R4000SC and R4400SC go nuts if we * only flush the primary caches but R10000 and R12000 behave sane ... * R4000SC and R4400SC indexed S-cache ops also invalidate primary * caches, so we can bail out early. 
*/ if (current_cpu_type() == CPU_R4000SC || current_cpu_type() == CPU_R4000MC || current_cpu_type() == CPU_R4400SC || current_cpu_type() == CPU_R4400MC) { r4k_blast_scache(); return; } r4k_blast_dcache(); } static void r4k_flush_cache_mm(struct mm_struct *mm) { if (!cpu_has_dc_aliases) return; r4k_on_each_cpu(local_r4k_flush_cache_mm, mm); } struct flush_cache_page_args { struct vm_area_struct *vma; unsigned long addr; unsigned long pfn; }; static inline void local_r4k_flush_cache_page(void *args) { struct flush_cache_page_args *fcp_args = args; struct vm_area_struct *vma = fcp_args->vma; unsigned long addr = fcp_args->addr; struct page *page = pfn_to_page(fcp_args->pfn); int exec = vma->vm_flags & VM_EXEC; struct mm_struct *mm = vma->vm_mm; int map_coherent = 0; pgd_t *pgdp; pud_t *pudp; pmd_t *pmdp; pte_t *ptep; void *vaddr; /* * If ownes no valid ASID yet, cannot possibly have gotten * this page into the cache. */ if (!has_valid_asid(mm)) return; addr &= PAGE_MASK; pgdp = pgd_offset(mm, addr); pudp = pud_offset(pgdp, addr); pmdp = pmd_offset(pudp, addr); ptep = pte_offset(pmdp, addr); /* * If the page isn't marked valid, the page cannot possibly be * in the cache. */ if (!(pte_present(*ptep))) return; if ((mm == current->active_mm) && (pte_val(*ptep) & _PAGE_VALID)) vaddr = NULL; else { /* * Use kmap_coherent or kmap_atomic to do flushes for * another ASID than the current one. 
*/ map_coherent = (cpu_has_dc_aliases && page_mapped(page) && !Page_dcache_dirty(page)); if (map_coherent) vaddr = kmap_coherent(page, addr); else vaddr = kmap_atomic(page); addr = (unsigned long)vaddr; } if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) { r4k_blast_dcache_page(addr); if (exec && !cpu_icache_snoops_remote_store) r4k_blast_scache_page(addr); } if (exec) { if (vaddr && cpu_has_vtag_icache && mm == current->active_mm) { int cpu = smp_processor_id(); if (cpu_context(cpu, mm) != 0) drop_mmu_context(mm, cpu); } else r4k_blast_icache_page(addr); } if (vaddr) { if (map_coherent) kunmap_coherent(); else kunmap_atomic(vaddr); } } static void r4k_flush_cache_page(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn) { struct flush_cache_page_args args; args.vma = vma; args.addr = addr; args.pfn = pfn; r4k_on_each_cpu(local_r4k_flush_cache_page, &args); } static inline void local_r4k_flush_data_cache_page(void * addr) { r4k_blast_dcache_page((unsigned long) addr); } static void r4k_flush_data_cache_page(unsigned long addr) { if (in_atomic()) local_r4k_flush_data_cache_page((void *)addr); else r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr); } struct flush_icache_range_args { unsigned long start; unsigned long end; }; static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end) { if (!cpu_has_ic_fills_f_dc) { if (end - start >= dcache_size) { r4k_blast_dcache(); } else { R4600_HIT_CACHEOP_WAR_IMPL; protected_blast_dcache_range(start, end); } } if (end - start > icache_size) r4k_blast_icache(); else protected_blast_icache_range(start, end); } static inline void local_r4k_flush_icache_range_ipi(void *args) { struct flush_icache_range_args *fir_args = args; unsigned long start = fir_args->start; unsigned long end = fir_args->end; local_r4k_flush_icache_range(start, end); } static void r4k_flush_icache_range(unsigned long start, unsigned long end) { struct flush_icache_range_args args; args.start = 
start; args.end = end; r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args); instruction_hazard(); } #ifdef CONFIG_DMA_NONCOHERENT static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size) { /* Catch bad driver code */ BUG_ON(size == 0); preempt_disable(); if (cpu_has_inclusive_pcaches) { if (size >= scache_size) r4k_blast_scache(); else blast_scache_range(addr, addr + size); __sync(); return; } /* * Either no secondary cache or the available caches don't have the * subset property so we have to flush the primary caches * explicitly */ if (cpu_has_safe_index_cacheops && size >= dcache_size) { r4k_blast_dcache(); } else { R4600_HIT_CACHEOP_WAR_IMPL; blast_dcache_range(addr, addr + size); } preempt_enable(); bc_wback_inv(addr, size); __sync(); } static void r4k_dma_cache_inv(unsigned long addr, unsigned long size) { /* Catch bad driver code */ BUG_ON(size == 0); preempt_disable(); if (cpu_has_inclusive_pcaches) { if (size >= scache_size) r4k_blast_scache(); else { unsigned long lsize = cpu_scache_line_size(); unsigned long almask = ~(lsize - 1); /* * There is no clearly documented alignment requirement * for the cache instruction on MIPS processors and * some processors, among them the RM5200 and RM7000 * QED processors will throw an address error for cache * hit ops with insufficient alignment. Solved by * aligning the address to cache line size. 
*/ cache_op(Hit_Writeback_Inv_SD, addr & almask); cache_op(Hit_Writeback_Inv_SD, (addr + size - 1) & almask); blast_inv_scache_range(addr, addr + size); } __sync(); return; } if (cpu_has_safe_index_cacheops && size >= dcache_size) { r4k_blast_dcache(); } else { unsigned long lsize = cpu_dcache_line_size(); unsigned long almask = ~(lsize - 1); R4600_HIT_CACHEOP_WAR_IMPL; cache_op(Hit_Writeback_Inv_D, addr & almask); cache_op(Hit_Writeback_Inv_D, (addr + size - 1) & almask); blast_inv_dcache_range(addr, addr + size); } preempt_enable(); bc_inv(addr, size); __sync(); } #endif /* CONFIG_DMA_NONCOHERENT */ /* * While we're protected against bad userland addresses we don't care * very much about what happens in that case. Usually a segmentation * fault will dump the process later on anyway ... */ static void local_r4k_flush_cache_sigtramp(void * arg) { unsigned long ic_lsize = cpu_icache_line_size(); unsigned long dc_lsize = cpu_dcache_line_size(); unsigned long sc_lsize = cpu_scache_line_size(); unsigned long addr = (unsigned long) arg; R4600_HIT_CACHEOP_WAR_IMPL; if (dc_lsize) protected_writeback_dcache_line(addr & ~(dc_lsize - 1)); if (!cpu_icache_snoops_remote_store && scache_size) protected_writeback_scache_line(addr & ~(sc_lsize - 1)); if (ic_lsize) protected_flush_icache_line(addr & ~(ic_lsize - 1)); if (MIPS4K_ICACHE_REFILL_WAR) { __asm__ __volatile__ ( ".set push\n\t" ".set noat\n\t" ".set mips3\n\t" #ifdef CONFIG_32BIT "la $at,1f\n\t" #endif #ifdef CONFIG_64BIT "dla $at,1f\n\t" #endif "cache %0,($at)\n\t" "nop; nop; nop\n" "1:\n\t" ".set pop" : : "i" (Hit_Invalidate_I)); } if (MIPS_CACHE_SYNC_WAR) __asm__ __volatile__ ("sync"); } static void r4k_flush_cache_sigtramp(unsigned long addr) { r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr); } static void r4k_flush_icache_all(void) { if (cpu_has_vtag_icache) r4k_blast_icache(); } struct flush_kernel_vmap_range_args { unsigned long vaddr; int size; }; static inline void 
local_r4k_flush_kernel_vmap_range(void *args) { struct flush_kernel_vmap_range_args *vmra = args; unsigned long vaddr = vmra->vaddr; int size = vmra->size; /* * Aliases only affect the primary caches so don't bother with * S-caches or T-caches. */ if (cpu_has_safe_index_cacheops && size >= dcache_size) r4k_blast_dcache(); else { R4600_HIT_CACHEOP_WAR_IMPL; blast_dcache_range(vaddr, vaddr + size); } } static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size) { struct flush_kernel_vmap_range_args args; args.vaddr = (unsigned long) vaddr; args.size = size; r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args); } static inline void rm7k_erratum31(void) { const unsigned long ic_lsize = 32; unsigned long addr; /* RM7000 erratum #31. The icache is screwed at startup. */ write_c0_taglo(0); write_c0_taghi(0); for (addr = INDEX_BASE; addr <= INDEX_BASE + 4096; addr += ic_lsize) { __asm__ __volatile__ ( ".set push\n\t" ".set noreorder\n\t" ".set mips3\n\t" "cache\t%1, 0(%0)\n\t" "cache\t%1, 0x1000(%0)\n\t" "cache\t%1, 0x2000(%0)\n\t" "cache\t%1, 0x3000(%0)\n\t" "cache\t%2, 0(%0)\n\t" "cache\t%2, 0x1000(%0)\n\t" "cache\t%2, 0x2000(%0)\n\t" "cache\t%2, 0x3000(%0)\n\t" "cache\t%1, 0(%0)\n\t" "cache\t%1, 0x1000(%0)\n\t" "cache\t%1, 0x2000(%0)\n\t" "cache\t%1, 0x3000(%0)\n\t" ".set pop\n" : : "r" (addr), "i" (Index_Store_Tag_I), "i" (Fill)); } } static char *way_string[] __cpuinitdata = { NULL, "direct mapped", "2-way", "3-way", "4-way", "5-way", "6-way", "7-way", "8-way" }; static void __cpuinit probe_pcache(void) { struct cpuinfo_mips *c = &current_cpu_data; unsigned int config = read_c0_config(); unsigned int prid = read_c0_prid(); unsigned long config1; unsigned int lsize; switch (c->cputype) { case CPU_R4600: /* QED style two way caches? 
*/ case CPU_R4700: case CPU_R5000: case CPU_NEVADA: icache_size = 1 << (12 + ((config & CONF_IC) >> 9)); c->icache.linesz = 16 << ((config & CONF_IB) >> 5); c->icache.ways = 2; c->icache.waybit = __ffs(icache_size/2); dcache_size = 1 << (12 + ((config & CONF_DC) >> 6)); c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); c->dcache.ways = 2; c->dcache.waybit= __ffs(dcache_size/2); c->options |= MIPS_CPU_CACHE_CDEX_P; break; case CPU_R5432: case CPU_R5500: icache_size = 1 << (12 + ((config & CONF_IC) >> 9)); c->icache.linesz = 16 << ((config & CONF_IB) >> 5); c->icache.ways = 2; c->icache.waybit= 0; dcache_size = 1 << (12 + ((config & CONF_DC) >> 6)); c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); c->dcache.ways = 2; c->dcache.waybit = 0; c->options |= MIPS_CPU_CACHE_CDEX_P | MIPS_CPU_PREFETCH; break; case CPU_TX49XX: icache_size = 1 << (12 + ((config & CONF_IC) >> 9)); c->icache.linesz = 16 << ((config & CONF_IB) >> 5); c->icache.ways = 4; c->icache.waybit= 0; dcache_size = 1 << (12 + ((config & CONF_DC) >> 6)); c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); c->dcache.ways = 4; c->dcache.waybit = 0; c->options |= MIPS_CPU_CACHE_CDEX_P; c->options |= MIPS_CPU_PREFETCH; break; case CPU_R4000PC: case CPU_R4000SC: case CPU_R4000MC: case CPU_R4400PC: case CPU_R4400SC: case CPU_R4400MC: case CPU_R4300: icache_size = 1 << (12 + ((config & CONF_IC) >> 9)); c->icache.linesz = 16 << ((config & CONF_IB) >> 5); c->icache.ways = 1; c->icache.waybit = 0; /* doesn't matter */ dcache_size = 1 << (12 + ((config & CONF_DC) >> 6)); c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); c->dcache.ways = 1; c->dcache.waybit = 0; /* does not matter */ c->options |= MIPS_CPU_CACHE_CDEX_P; break; case CPU_R10000: case CPU_R12000: case CPU_R14000: icache_size = 1 << (12 + ((config & R10K_CONF_IC) >> 29)); c->icache.linesz = 64; c->icache.ways = 2; c->icache.waybit = 0; dcache_size = 1 << (12 + ((config & R10K_CONF_DC) >> 26)); c->dcache.linesz = 32; c->dcache.ways = 2; 
c->dcache.waybit = 0; c->options |= MIPS_CPU_PREFETCH; break; case CPU_VR4133: write_c0_config(config & ~VR41_CONF_P4K); case CPU_VR4131: /* Workaround for cache instruction bug of VR4131 */ if (c->processor_id == 0x0c80U || c->processor_id == 0x0c81U || c->processor_id == 0x0c82U) { config |= 0x00400000U; if (c->processor_id == 0x0c80U) config |= VR41_CONF_BP; write_c0_config(config); } else c->options |= MIPS_CPU_CACHE_CDEX_P; icache_size = 1 << (10 + ((config & CONF_IC) >> 9)); c->icache.linesz = 16 << ((config & CONF_IB) >> 5); c->icache.ways = 2; c->icache.waybit = __ffs(icache_size/2); dcache_size = 1 << (10 + ((config & CONF_DC) >> 6)); c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); c->dcache.ways = 2; c->dcache.waybit = __ffs(dcache_size/2); break; case CPU_VR41XX: case CPU_VR4111: case CPU_VR4121: case CPU_VR4122: case CPU_VR4181: case CPU_VR4181A: icache_size = 1 << (10 + ((config & CONF_IC) >> 9)); c->icache.linesz = 16 << ((config & CONF_IB) >> 5); c->icache.ways = 1; c->icache.waybit = 0; /* doesn't matter */ dcache_size = 1 << (10 + ((config & CONF_DC) >> 6)); c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); c->dcache.ways = 1; c->dcache.waybit = 0; /* does not matter */ c->options |= MIPS_CPU_CACHE_CDEX_P; break; case CPU_RM7000: rm7k_erratum31(); case CPU_RM9000: icache_size = 1 << (12 + ((config & CONF_IC) >> 9)); c->icache.linesz = 16 << ((config & CONF_IB) >> 5); c->icache.ways = 4; c->icache.waybit = __ffs(icache_size / c->icache.ways); dcache_size = 1 << (12 + ((config & CONF_DC) >> 6)); c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); c->dcache.ways = 4; c->dcache.waybit = __ffs(dcache_size / c->dcache.ways); #if !defined(CONFIG_SMP) || !defined(RM9000_CDEX_SMP_WAR) c->options |= MIPS_CPU_CACHE_CDEX_P; #endif c->options |= MIPS_CPU_PREFETCH; break; case CPU_LOONGSON2: icache_size = 1 << (12 + ((config & CONF_IC) >> 9)); c->icache.linesz = 16 << ((config & CONF_IB) >> 5); if (prid & 0x3) c->icache.ways = 4; else c->icache.ways = 2; 
c->icache.waybit = 0; dcache_size = 1 << (12 + ((config & CONF_DC) >> 6)); c->dcache.linesz = 16 << ((config & CONF_DB) >> 4); if (prid & 0x3) c->dcache.ways = 4; else c->dcache.ways = 2; c->dcache.waybit = 0; break; default: if (!(config & MIPS_CONF_M)) panic("Don't know how to probe P-caches on this cpu."); /* * So we seem to be a MIPS32 or MIPS64 CPU * So let's probe the I-cache ... */ config1 = read_c0_config1(); if ((lsize = ((config1 >> 19) & 7))) c->icache.linesz = 2 << lsize; else c->icache.linesz = lsize; c->icache.sets = 64 << ((config1 >> 22) & 7); c->icache.ways = 1 + ((config1 >> 16) & 7); icache_size = c->icache.sets * c->icache.ways * c->icache.linesz; c->icache.waybit = __ffs(icache_size/c->icache.ways); if (config & 0x8) /* VI bit */ c->icache.flags |= MIPS_CACHE_VTAG; /* * Now probe the MIPS32 / MIPS64 data cache. */ c->dcache.flags = 0; if ((lsize = ((config1 >> 10) & 7))) c->dcache.linesz = 2 << lsize; else c->dcache.linesz= lsize; c->dcache.sets = 64 << ((config1 >> 13) & 7); c->dcache.ways = 1 + ((config1 >> 7) & 7); dcache_size = c->dcache.sets * c->dcache.ways * c->dcache.linesz; c->dcache.waybit = __ffs(dcache_size/c->dcache.ways); c->options |= MIPS_CPU_PREFETCH; break; } /* * Processor configuration sanity check for the R4000SC erratum * #5. With page sizes larger than 32kB there is no possibility * to get a VCE exception anymore so we don't care about this * misconfiguration. The case is rather theoretical anyway; * presumably no vendor is shipping his hardware in the "bad" * configuration. */ if ((prid & 0xff00) == PRID_IMP_R4000 && (prid & 0xff) < 0x40 && !(config & CONF_SC) && c->icache.linesz != 16 && PAGE_SIZE <= 0x8000) panic("Improper R4000SC processor configuration detected"); /* compute a couple of other cache variables */ c->icache.waysize = icache_size / c->icache.ways; c->dcache.waysize = dcache_size / c->dcache.ways; c->icache.sets = c->icache.linesz ? 
icache_size / (c->icache.linesz * c->icache.ways) : 0; c->dcache.sets = c->dcache.linesz ? dcache_size / (c->dcache.linesz * c->dcache.ways) : 0; /* * R10000 and R12000 P-caches are odd in a positive way. They're 32kB * 2-way virtually indexed so normally would suffer from aliases. So * normally they'd suffer from aliases but magic in the hardware deals * with that for us so we don't need to take care ourselves. */ switch (c->cputype) { case CPU_20KC: case CPU_25KF: case CPU_SB1: case CPU_SB1A: case CPU_XLR: c->dcache.flags |= MIPS_CACHE_PINDEX; break; case CPU_R10000: case CPU_R12000: case CPU_R14000: break; case CPU_24K: case CPU_34K: case CPU_74K: case CPU_1004K: if ((read_c0_config7() & (1 << 16))) { /* effectively physically indexed dcache, thus no virtual aliases. */ c->dcache.flags |= MIPS_CACHE_PINDEX; break; } default: if (c->dcache.waysize > PAGE_SIZE) c->dcache.flags |= MIPS_CACHE_ALIASES; } switch (c->cputype) { case CPU_20KC: /* * Some older 20Kc chips doesn't have the 'VI' bit in * the config register. */ c->icache.flags |= MIPS_CACHE_VTAG; break; case CPU_ALCHEMY: c->icache.flags |= MIPS_CACHE_IC_F_DC; break; } #ifdef CONFIG_CPU_LOONGSON2 /* * LOONGSON2 has 4 way icache, but when using indexed cache op, * one op will act on all 4 ways */ c->icache.ways = 1; #endif printk("Primary instruction cache %ldkB, %s, %s, linesize %d bytes.\n", icache_size >> 10, c->icache.flags & MIPS_CACHE_VTAG ? "VIVT" : "VIPT", way_string[c->icache.ways], c->icache.linesz); printk("Primary data cache %ldkB, %s, %s, %s, linesize %d bytes\n", dcache_size >> 10, way_string[c->dcache.ways], (c->dcache.flags & MIPS_CACHE_PINDEX) ? "PIPT" : "VIPT", (c->dcache.flags & MIPS_CACHE_ALIASES) ? "cache aliases" : "no aliases", c->dcache.linesz); } /* * If you even _breathe_ on this function, look at the gcc output and make sure * it does not pop things on and off the stack for the cache sizing loop that * executes in KSEG1 space or else you will crash and burn badly. 
You have * been warned. */ static int __cpuinit probe_scache(void) { unsigned long flags, addr, begin, end, pow2; unsigned int config = read_c0_config(); struct cpuinfo_mips *c = &current_cpu_data; if (config & CONF_SC) return 0; begin = (unsigned long) &_stext; begin &= ~((4 * 1024 * 1024) - 1); end = begin + (4 * 1024 * 1024); /* * This is such a bitch, you'd think they would make it easy to do * this. Away you daemons of stupidity! */ local_irq_save(flags); /* Fill each size-multiple cache line with a valid tag. */ pow2 = (64 * 1024); for (addr = begin; addr < end; addr = (begin + pow2)) { unsigned long *p = (unsigned long *) addr; __asm__ __volatile__("nop" : : "r" (*p)); /* whee... */ pow2 <<= 1; } /* Load first line with zero (therefore invalid) tag. */ write_c0_taglo(0); write_c0_taghi(0); __asm__ __volatile__("nop; nop; nop; nop;"); /* avoid the hazard */ cache_op(Index_Store_Tag_I, begin); cache_op(Index_Store_Tag_D, begin); cache_op(Index_Store_Tag_SD, begin); /* Now search for the wrap around point. */ pow2 = (128 * 1024); for (addr = begin + (128 * 1024); addr < end; addr = begin + pow2) { cache_op(Index_Load_Tag_SD, addr); __asm__ __volatile__("nop; nop; nop; nop;"); /* hazard... 
*/ if (!read_c0_taglo()) break; pow2 <<= 1; } local_irq_restore(flags); addr -= begin; scache_size = addr; c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22); c->scache.ways = 1; c->dcache.waybit = 0; /* does not matter */ return 1; } #if defined(CONFIG_CPU_LOONGSON2) static void __init loongson2_sc_init(void) { struct cpuinfo_mips *c = &current_cpu_data; scache_size = 512*1024; c->scache.linesz = 32; c->scache.ways = 4; c->scache.waybit = 0; c->scache.waysize = scache_size / (c->scache.ways); c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways); pr_info("Unified secondary cache %ldkB %s, linesize %d bytes.\n", scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); c->options |= MIPS_CPU_INCLUSIVE_CACHES; } #endif extern int r5k_sc_init(void); extern int rm7k_sc_init(void); extern int mips_sc_init(void); static void __cpuinit setup_scache(void) { struct cpuinfo_mips *c = &current_cpu_data; unsigned int config = read_c0_config(); int sc_present = 0; /* * Do the probing thing on R4000SC and R4400SC processors. Other * processors don't have a S-cache that would be relevant to the * Linux memory management. 
*/ switch (c->cputype) { case CPU_R4000SC: case CPU_R4000MC: case CPU_R4400SC: case CPU_R4400MC: sc_present = run_uncached(probe_scache); if (sc_present) c->options |= MIPS_CPU_CACHE_CDEX_S; break; case CPU_R10000: case CPU_R12000: case CPU_R14000: scache_size = 0x80000 << ((config & R10K_CONF_SS) >> 16); c->scache.linesz = 64 << ((config >> 13) & 1); c->scache.ways = 2; c->scache.waybit= 0; sc_present = 1; break; case CPU_R5000: case CPU_NEVADA: #ifdef CONFIG_R5000_CPU_SCACHE r5k_sc_init(); #endif return; case CPU_RM7000: case CPU_RM9000: #ifdef CONFIG_RM7000_CPU_SCACHE rm7k_sc_init(); #endif return; #if defined(CONFIG_CPU_LOONGSON2) case CPU_LOONGSON2: loongson2_sc_init(); return; #endif case CPU_XLP: /* don't need to worry about L2, fully coherent */ return; default: if (c->isa_level == MIPS_CPU_ISA_M32R1 || c->isa_level == MIPS_CPU_ISA_M32R2 || c->isa_level == MIPS_CPU_ISA_M64R1 || c->isa_level == MIPS_CPU_ISA_M64R2) { #ifdef CONFIG_MIPS_CPU_SCACHE if (mips_sc_init ()) { scache_size = c->scache.ways * c->scache.sets * c->scache.linesz; printk("MIPS secondary cache %ldkB, %s, linesize %d bytes.\n", scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); } #else if (!(c->scache.flags & MIPS_CACHE_NOT_PRESENT)) panic("Dunno how to handle MIPS32 / MIPS64 second level cache"); #endif return; } sc_present = 0; } if (!sc_present) return; /* compute a couple of other cache variables */ c->scache.waysize = scache_size / c->scache.ways; c->scache.sets = scache_size / (c->scache.linesz * c->scache.ways); printk("Unified secondary cache %ldkB %s, linesize %d bytes.\n", scache_size >> 10, way_string[c->scache.ways], c->scache.linesz); c->options |= MIPS_CPU_INCLUSIVE_CACHES; } void au1x00_fixup_config_od(void) { /* * c0_config.od (bit 19) was write only (and read as 0) * on the early revisions of Alchemy SOCs. It disables the bus * transaction overlapping and needs to be set to fix various errata. 
*/ switch (read_c0_prid()) { case 0x00030100: /* Au1000 DA */ case 0x00030201: /* Au1000 HA */ case 0x00030202: /* Au1000 HB */ case 0x01030200: /* Au1500 AB */ /* * Au1100 errata actually keeps silence about this bit, so we set it * just in case for those revisions that require it to be set according * to the (now gone) cpu table. */ case 0x02030200: /* Au1100 AB */ case 0x02030201: /* Au1100 BA */ case 0x02030202: /* Au1100 BC */ set_c0_config(1 << 19); break; } } /* CP0 hazard avoidance. */ #define NXP_BARRIER() \ __asm__ __volatile__( \ ".set noreorder\n\t" \ "nop; nop; nop; nop; nop; nop;\n\t" \ ".set reorder\n\t") static void nxp_pr4450_fixup_config(void) { unsigned long config0; config0 = read_c0_config(); /* clear all three cache coherency fields */ config0 &= ~(0x7 | (7 << 25) | (7 << 28)); config0 |= (((_page_cachable_default >> _CACHE_SHIFT) << 0) | ((_page_cachable_default >> _CACHE_SHIFT) << 25) | ((_page_cachable_default >> _CACHE_SHIFT) << 28)); write_c0_config(config0); NXP_BARRIER(); } static int __cpuinitdata cca = -1; static int __init cca_setup(char *str) { get_option(&str, &cca); return 1; } __setup("cca=", cca_setup); static void __cpuinit coherency_setup(void) { if (cca < 0 || cca > 7) cca = read_c0_config() & CONF_CM_CMASK; _page_cachable_default = cca << _CACHE_SHIFT; pr_debug("Using cache attribute %d\n", cca); change_c0_config(CONF_CM_CMASK, cca); /* * c0_status.cu=0 specifies that updates by the sc instruction use * the coherency mode specified by the TLB; 1 means cachable * coherent update on write will be used. Not all processors have * this bit and; some wire it to zero, others like Toshiba had the * silly idea of putting something else there ... 
*/ switch (current_cpu_type()) { case CPU_R4000PC: case CPU_R4000SC: case CPU_R4000MC: case CPU_R4400PC: case CPU_R4400SC: case CPU_R4400MC: clear_c0_config(CONF_CU); break; /* * We need to catch the early Alchemy SOCs with * the write-only co_config.od bit and set it back to one on: * Au1000 rev DA, HA, HB; Au1100 AB, BA, BC, Au1500 AB */ case CPU_ALCHEMY: au1x00_fixup_config_od(); break; case PRID_IMP_PR4450: nxp_pr4450_fixup_config(); break; } } #if defined(CONFIG_DMA_NONCOHERENT) static int __cpuinitdata coherentio; static int __init setcoherentio(char *str) { coherentio = 1; return 1; } __setup("coherentio", setcoherentio); #endif void __cpuinit r4k_cache_init(void) { extern void build_clear_page(void); extern void build_copy_page(void); extern char __weak except_vec2_generic; extern char __weak except_vec2_sb1; struct cpuinfo_mips *c = &current_cpu_data; switch (c->cputype) { case CPU_SB1: case CPU_SB1A: set_uncached_handler(0x100, &except_vec2_sb1, 0x80); break; default: set_uncached_handler(0x100, &except_vec2_generic, 0x80); break; } probe_pcache(); setup_scache(); r4k_blast_dcache_page_setup(); r4k_blast_dcache_page_indexed_setup(); r4k_blast_dcache_setup(); r4k_blast_icache_page_setup(); r4k_blast_icache_page_indexed_setup(); r4k_blast_icache_setup(); r4k_blast_scache_page_setup(); r4k_blast_scache_page_indexed_setup(); r4k_blast_scache_setup(); /* * Some MIPS32 and MIPS64 processors have physically indexed caches. * This code supports virtually indexed processors and will be * unnecessarily inefficient on physically indexed processors. 
*/ if (c->dcache.linesz) shm_align_mask = max_t( unsigned long, c->dcache.sets * c->dcache.linesz - 1, PAGE_SIZE - 1); else shm_align_mask = PAGE_SIZE-1; __flush_cache_vmap = r4k__flush_cache_vmap; __flush_cache_vunmap = r4k__flush_cache_vunmap; flush_cache_all = cache_noop; __flush_cache_all = r4k___flush_cache_all; flush_cache_mm = r4k_flush_cache_mm; flush_cache_page = r4k_flush_cache_page; flush_cache_range = r4k_flush_cache_range; __flush_kernel_vmap_range = r4k_flush_kernel_vmap_range; flush_cache_sigtramp = r4k_flush_cache_sigtramp; flush_icache_all = r4k_flush_icache_all; local_flush_data_cache_page = local_r4k_flush_data_cache_page; flush_data_cache_page = r4k_flush_data_cache_page; flush_icache_range = r4k_flush_icache_range; local_flush_icache_range = local_r4k_flush_icache_range; #if defined(CONFIG_DMA_NONCOHERENT) if (coherentio) { _dma_cache_wback_inv = (void *)cache_noop; _dma_cache_wback = (void *)cache_noop; _dma_cache_inv = (void *)cache_noop; } else { _dma_cache_wback_inv = r4k_dma_cache_wback_inv; _dma_cache_wback = r4k_dma_cache_wback_inv; _dma_cache_inv = r4k_dma_cache_inv; } #endif build_clear_page(); build_copy_page(); #if !defined(CONFIG_MIPS_CMP) local_r4k___flush_cache_all(NULL); #endif coherency_setup(); }
gpl-2.0
JoeyJiao/kernel-2.6.32-V858
drivers/media/video/cx18/cx18-gpio.c
620
9268
/* * cx18 gpio functions * * Derived from ivtv-gpio.c * * Copyright (C) 2007 Hans Verkuil <hverkuil@xs4all.nl> * Copyright (C) 2008 Andy Walls <awalls@radix.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA * 02111-1307 USA */ #include "cx18-driver.h" #include "cx18-io.h" #include "cx18-cards.h" #include "cx18-gpio.h" #include "tuner-xc2028.h" /********************* GPIO stuffs *********************/ /* GPIO registers */ #define CX18_REG_GPIO_IN 0xc72010 #define CX18_REG_GPIO_OUT1 0xc78100 #define CX18_REG_GPIO_DIR1 0xc78108 #define CX18_REG_GPIO_OUT2 0xc78104 #define CX18_REG_GPIO_DIR2 0xc7810c /* * HVR-1600 GPIO pins, courtesy of Hauppauge: * * gpio0: zilog ir process reset pin * gpio1: zilog programming pin (you should never use this) * gpio12: cx24227 reset pin * gpio13: cs5345 reset pin */ /* * File scope utility functions */ static void gpio_write(struct cx18 *cx) { u32 dir_lo = cx->gpio_dir & 0xffff; u32 val_lo = cx->gpio_val & 0xffff; u32 dir_hi = cx->gpio_dir >> 16; u32 val_hi = cx->gpio_val >> 16; cx18_write_reg_expect(cx, dir_lo << 16, CX18_REG_GPIO_DIR1, ~dir_lo, dir_lo); cx18_write_reg_expect(cx, (dir_lo << 16) | val_lo, CX18_REG_GPIO_OUT1, val_lo, dir_lo); cx18_write_reg_expect(cx, dir_hi << 16, CX18_REG_GPIO_DIR2, ~dir_hi, dir_hi); cx18_write_reg_expect(cx, (dir_hi << 16) | val_hi, CX18_REG_GPIO_OUT2, val_hi, dir_hi); } 
static void gpio_update(struct cx18 *cx, u32 mask, u32 data) { if (mask == 0) return; mutex_lock(&cx->gpio_lock); cx->gpio_val = (cx->gpio_val & ~mask) | (data & mask); gpio_write(cx); mutex_unlock(&cx->gpio_lock); } static void gpio_reset_seq(struct cx18 *cx, u32 active_lo, u32 active_hi, unsigned int assert_msecs, unsigned int recovery_msecs) { u32 mask; mask = active_lo | active_hi; if (mask == 0) return; /* * Assuming that active_hi and active_lo are a subsets of the bits in * gpio_dir. Also assumes that active_lo and active_hi don't overlap * in any bit position */ /* Assert */ gpio_update(cx, mask, ~active_lo); schedule_timeout_uninterruptible(msecs_to_jiffies(assert_msecs)); /* Deassert */ gpio_update(cx, mask, ~active_hi); schedule_timeout_uninterruptible(msecs_to_jiffies(recovery_msecs)); } /* * GPIO Multiplexer - logical device */ static int gpiomux_log_status(struct v4l2_subdev *sd) { struct cx18 *cx = v4l2_get_subdevdata(sd); mutex_lock(&cx->gpio_lock); CX18_INFO_DEV(sd, "GPIO: direction 0x%08x, value 0x%08x\n", cx->gpio_dir, cx->gpio_val); mutex_unlock(&cx->gpio_lock); return 0; } static int gpiomux_s_radio(struct v4l2_subdev *sd) { struct cx18 *cx = v4l2_get_subdevdata(sd); /* * FIXME - work out the cx->active/audio_input mess - this is * intended to handle the switch to radio mode and set the * audio routing, but we need to update the state in cx */ gpio_update(cx, cx->card->gpio_audio_input.mask, cx->card->gpio_audio_input.radio); return 0; } static int gpiomux_s_std(struct v4l2_subdev *sd, v4l2_std_id norm) { struct cx18 *cx = v4l2_get_subdevdata(sd); u32 data; switch (cx->card->audio_inputs[cx->audio_input].muxer_input) { case 1: data = cx->card->gpio_audio_input.linein; break; case 0: data = cx->card->gpio_audio_input.tuner; break; default: /* * FIXME - work out the cx->active/audio_input mess - this is * intended to handle the switch from radio mode and set the * audio routing, but we need to update the state in cx */ data = 
cx->card->gpio_audio_input.tuner; break; } gpio_update(cx, cx->card->gpio_audio_input.mask, data); return 0; } static int gpiomux_s_audio_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct cx18 *cx = v4l2_get_subdevdata(sd); u32 data; switch (input) { case 0: data = cx->card->gpio_audio_input.tuner; break; case 1: data = cx->card->gpio_audio_input.linein; break; case 2: data = cx->card->gpio_audio_input.radio; break; default: return -EINVAL; } gpio_update(cx, cx->card->gpio_audio_input.mask, data); return 0; } static const struct v4l2_subdev_core_ops gpiomux_core_ops = { .log_status = gpiomux_log_status, .s_std = gpiomux_s_std, }; static const struct v4l2_subdev_tuner_ops gpiomux_tuner_ops = { .s_radio = gpiomux_s_radio, }; static const struct v4l2_subdev_audio_ops gpiomux_audio_ops = { .s_routing = gpiomux_s_audio_routing, }; static const struct v4l2_subdev_ops gpiomux_ops = { .core = &gpiomux_core_ops, .tuner = &gpiomux_tuner_ops, .audio = &gpiomux_audio_ops, }; /* * GPIO Reset Controller - logical device */ static int resetctrl_log_status(struct v4l2_subdev *sd) { struct cx18 *cx = v4l2_get_subdevdata(sd); mutex_lock(&cx->gpio_lock); CX18_INFO_DEV(sd, "GPIO: direction 0x%08x, value 0x%08x\n", cx->gpio_dir, cx->gpio_val); mutex_unlock(&cx->gpio_lock); return 0; } static int resetctrl_reset(struct v4l2_subdev *sd, u32 val) { struct cx18 *cx = v4l2_get_subdevdata(sd); const struct cx18_gpio_i2c_slave_reset *p; p = &cx->card->gpio_i2c_slave_reset; switch (val) { case CX18_GPIO_RESET_I2C: gpio_reset_seq(cx, p->active_lo_mask, p->active_hi_mask, p->msecs_asserted, p->msecs_recovery); break; case CX18_GPIO_RESET_Z8F0811: /* * Assert timing for the Z8F0811 on HVR-1600 boards: * 1. Assert RESET for min of 4 clock cycles at 18.432 MHz to * initiate * 2. Reset then takes 66 WDT cycles at 10 kHz + 16 xtal clock * cycles (6,601,085 nanoseconds ~= 7 milliseconds) * 3. DBG pin must be high before chip exits reset for normal * operation. 
DBG is open drain and hopefully pulled high * since we don't normally drive it (GPIO 1?) for the * HVR-1600 * 4. Z8F0811 won't exit reset until RESET is deasserted * 5. Zilog comes out of reset, loads reset vector address and * executes from there. Required recovery delay unknown. */ gpio_reset_seq(cx, p->ir_reset_mask, 0, p->msecs_asserted, p->msecs_recovery); break; case CX18_GPIO_RESET_XC2028: if (cx->card->tuners[0].tuner == TUNER_XC2028) gpio_reset_seq(cx, (1 << cx->card->xceive_pin), 0, 1, 1); break; } return 0; } static const struct v4l2_subdev_core_ops resetctrl_core_ops = { .log_status = resetctrl_log_status, .reset = resetctrl_reset, }; static const struct v4l2_subdev_ops resetctrl_ops = { .core = &resetctrl_core_ops, }; /* * External entry points */ void cx18_gpio_init(struct cx18 *cx) { mutex_lock(&cx->gpio_lock); cx->gpio_dir = cx->card->gpio_init.direction; cx->gpio_val = cx->card->gpio_init.initial_value; if (cx->card->tuners[0].tuner == TUNER_XC2028) { cx->gpio_dir |= 1 << cx->card->xceive_pin; cx->gpio_val |= 1 << cx->card->xceive_pin; } if (cx->gpio_dir == 0) { mutex_unlock(&cx->gpio_lock); return; } CX18_DEBUG_INFO("GPIO initial dir: %08x/%08x out: %08x/%08x\n", cx18_read_reg(cx, CX18_REG_GPIO_DIR1), cx18_read_reg(cx, CX18_REG_GPIO_DIR2), cx18_read_reg(cx, CX18_REG_GPIO_OUT1), cx18_read_reg(cx, CX18_REG_GPIO_OUT2)); gpio_write(cx); mutex_unlock(&cx->gpio_lock); } int cx18_gpio_register(struct cx18 *cx, u32 hw) { struct v4l2_subdev *sd; const struct v4l2_subdev_ops *ops; char *str; switch (hw) { case CX18_HW_GPIO_MUX: sd = &cx->sd_gpiomux; ops = &gpiomux_ops; str = "gpio-mux"; break; case CX18_HW_GPIO_RESET_CTRL: sd = &cx->sd_resetctrl; ops = &resetctrl_ops; str = "gpio-reset-ctrl"; break; default: return -EINVAL; } v4l2_subdev_init(sd, ops); v4l2_set_subdevdata(sd, cx); snprintf(sd->name, sizeof(sd->name), "%s %s", cx->v4l2_dev.name, str); sd->grp_id = hw; return v4l2_device_register_subdev(&cx->v4l2_dev, sd); } void cx18_reset_ir_gpio(void 
*data) { struct cx18 *cx = to_cx18((struct v4l2_device *)data); if (cx->card->gpio_i2c_slave_reset.ir_reset_mask == 0) return; CX18_DEBUG_INFO("Resetting IR microcontroller\n"); v4l2_subdev_call(&cx->sd_resetctrl, core, reset, CX18_GPIO_RESET_Z8F0811); } EXPORT_SYMBOL(cx18_reset_ir_gpio); /* This symbol is exported for use by lirc_pvr150 for the IR-blaster */ /* Xceive tuner reset function */ int cx18_reset_tuner_gpio(void *dev, int component, int cmd, int value) { struct i2c_algo_bit_data *algo = dev; struct cx18_i2c_algo_callback_data *cb_data = algo->data; struct cx18 *cx = cb_data->cx; if (cmd != XC2028_TUNER_RESET || cx->card->tuners[0].tuner != TUNER_XC2028) return 0; CX18_DEBUG_INFO("Resetting XCeive tuner\n"); return v4l2_subdev_call(&cx->sd_resetctrl, core, reset, CX18_GPIO_RESET_XC2028); }
gpl-2.0
rcoscali/nethunter-kernel-samsung-tuna
net/can/bcm.c
876
40027
/* * bcm.c - Broadcast Manager to filter/send (cyclic) CAN content * * Copyright (c) 2002-2007 Volkswagen Group Electronic Research * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. Neither the name of Volkswagen nor the names of its contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * Alternatively, provided that this notice is retained in full, this * software may be distributed under the terms of the GNU General * Public License ("GPL") version 2, in which case the provisions of the * GPL apply INSTEAD OF those given above. * * The provided data structures and external interfaces from this code * are not restricted to be used by modules with a GPL compatible license. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. * * Send feedback to <socketcan-users@lists.berlios.de> * */ #include <linux/module.h> #include <linux/init.h> #include <linux/hrtimer.h> #include <linux/list.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/uio.h> #include <linux/net.h> #include <linux/netdevice.h> #include <linux/socket.h> #include <linux/if_arp.h> #include <linux/skbuff.h> #include <linux/can.h> #include <linux/can/core.h> #include <linux/can/bcm.h> #include <linux/slab.h> #include <net/sock.h> #include <net/net_namespace.h> /* * To send multiple CAN frame content within TX_SETUP or to filter * CAN messages with multiplex index within RX_SETUP, the number of * different filters is limited to 256 due to the one byte index value. */ #define MAX_NFRAMES 256 /* use of last_frames[index].can_dlc */ #define RX_RECV 0x40 /* received data for this element */ #define RX_THR 0x80 /* element not been sent due to throttle feature */ #define BCM_CAN_DLC_MASK 0x0F /* clean private flags in can_dlc by masking */ /* get best masking value for can_rx_register() for a given single can_id */ #define REGMASK(id) ((id & CAN_EFF_FLAG) ? 
\ (CAN_EFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG) : \ (CAN_SFF_MASK | CAN_EFF_FLAG | CAN_RTR_FLAG)) #define CAN_BCM_VERSION CAN_VERSION static __initdata const char banner[] = KERN_INFO "can: broadcast manager protocol (rev " CAN_BCM_VERSION " t)\n"; MODULE_DESCRIPTION("PF_CAN broadcast manager protocol"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_AUTHOR("Oliver Hartkopp <oliver.hartkopp@volkswagen.de>"); MODULE_ALIAS("can-proto-2"); /* easy access to can_frame payload */ static inline u64 GET_U64(const struct can_frame *cp) { return *(u64 *)cp->data; } struct bcm_op { struct list_head list; int ifindex; canid_t can_id; u32 flags; unsigned long frames_abs, frames_filtered; struct timeval ival1, ival2; struct hrtimer timer, thrtimer; struct tasklet_struct tsklet, thrtsklet; ktime_t rx_stamp, kt_ival1, kt_ival2, kt_lastmsg; int rx_ifindex; u32 count; u32 nframes; u32 currframe; struct can_frame *frames; struct can_frame *last_frames; struct can_frame sframe; struct can_frame last_sframe; struct sock *sk; struct net_device *rx_reg_dev; }; static struct proc_dir_entry *proc_dir; struct bcm_sock { struct sock sk; int bound; int ifindex; struct notifier_block notifier; struct list_head rx_ops; struct list_head tx_ops; unsigned long dropped_usr_msgs; struct proc_dir_entry *bcm_proc_read; char procname [32]; /* inode number in decimal with \0 */ }; static inline struct bcm_sock *bcm_sk(const struct sock *sk) { return (struct bcm_sock *)sk; } #define CFSIZ sizeof(struct can_frame) #define OPSIZ sizeof(struct bcm_op) #define MHSIZ sizeof(struct bcm_msg_head) /* * procfs functions */ static char *bcm_proc_getifname(char *result, int ifindex) { struct net_device *dev; if (!ifindex) return "any"; rcu_read_lock(); dev = dev_get_by_index_rcu(&init_net, ifindex); if (dev) strcpy(result, dev->name); else strcpy(result, "???"); rcu_read_unlock(); return result; } static int bcm_proc_show(struct seq_file *m, void *v) { char ifname[IFNAMSIZ]; struct sock *sk = (struct sock *)m->private; 
struct bcm_sock *bo = bcm_sk(sk); struct bcm_op *op; seq_printf(m, ">>> socket %pK", sk->sk_socket); seq_printf(m, " / sk %pK", sk); seq_printf(m, " / bo %pK", bo); seq_printf(m, " / dropped %lu", bo->dropped_usr_msgs); seq_printf(m, " / bound %s", bcm_proc_getifname(ifname, bo->ifindex)); seq_printf(m, " <<<\n"); list_for_each_entry(op, &bo->rx_ops, list) { unsigned long reduction; /* print only active entries & prevent division by zero */ if (!op->frames_abs) continue; seq_printf(m, "rx_op: %03X %-5s ", op->can_id, bcm_proc_getifname(ifname, op->ifindex)); seq_printf(m, "[%u]%c ", op->nframes, (op->flags & RX_CHECK_DLC)?'d':' '); if (op->kt_ival1.tv64) seq_printf(m, "timeo=%lld ", (long long) ktime_to_us(op->kt_ival1)); if (op->kt_ival2.tv64) seq_printf(m, "thr=%lld ", (long long) ktime_to_us(op->kt_ival2)); seq_printf(m, "# recv %ld (%ld) => reduction: ", op->frames_filtered, op->frames_abs); reduction = 100 - (op->frames_filtered * 100) / op->frames_abs; seq_printf(m, "%s%ld%%\n", (reduction == 100)?"near ":"", reduction); } list_for_each_entry(op, &bo->tx_ops, list) { seq_printf(m, "tx_op: %03X %s [%u] ", op->can_id, bcm_proc_getifname(ifname, op->ifindex), op->nframes); if (op->kt_ival1.tv64) seq_printf(m, "t1=%lld ", (long long) ktime_to_us(op->kt_ival1)); if (op->kt_ival2.tv64) seq_printf(m, "t2=%lld ", (long long) ktime_to_us(op->kt_ival2)); seq_printf(m, "# sent %ld\n", op->frames_abs); } seq_putc(m, '\n'); return 0; } static int bcm_proc_open(struct inode *inode, struct file *file) { return single_open(file, bcm_proc_show, PDE(inode)->data); } static const struct file_operations bcm_proc_fops = { .owner = THIS_MODULE, .open = bcm_proc_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; /* * bcm_can_tx - send the (next) CAN frame to the appropriate CAN interface * of the given bcm tx op */ static void bcm_can_tx(struct bcm_op *op) { struct sk_buff *skb; struct net_device *dev; struct can_frame *cf = &op->frames[op->currframe]; /* no 
target device? => exit */ if (!op->ifindex) return; dev = dev_get_by_index(&init_net, op->ifindex); if (!dev) { /* RFC: should this bcm_op remove itself here? */ return; } skb = alloc_skb(CFSIZ, gfp_any()); if (!skb) goto out; memcpy(skb_put(skb, CFSIZ), cf, CFSIZ); /* send with loopback */ skb->dev = dev; skb->sk = op->sk; can_send(skb, 1); /* update statistics */ op->currframe++; op->frames_abs++; /* reached last frame? */ if (op->currframe >= op->nframes) op->currframe = 0; out: dev_put(dev); } /* * bcm_send_to_user - send a BCM message to the userspace * (consisting of bcm_msg_head + x CAN frames) */ static void bcm_send_to_user(struct bcm_op *op, struct bcm_msg_head *head, struct can_frame *frames, int has_timestamp) { struct sk_buff *skb; struct can_frame *firstframe; struct sockaddr_can *addr; struct sock *sk = op->sk; unsigned int datalen = head->nframes * CFSIZ; int err; skb = alloc_skb(sizeof(*head) + datalen, gfp_any()); if (!skb) return; memcpy(skb_put(skb, sizeof(*head)), head, sizeof(*head)); if (head->nframes) { /* can_frames starting here */ firstframe = (struct can_frame *)skb_tail_pointer(skb); memcpy(skb_put(skb, datalen), frames, datalen); /* * the BCM uses the can_dlc-element of the can_frame * structure for internal purposes. This is only * relevant for updates that are generated by the * BCM, where nframes is 1 */ if (head->nframes == 1) firstframe->can_dlc &= BCM_CAN_DLC_MASK; } if (has_timestamp) { /* restore rx timestamp */ skb->tstamp = op->rx_stamp; } /* * Put the datagram to the queue so that bcm_recvmsg() can * get it from there. We need to pass the interface index to * bcm_recvmsg(). We pass a whole struct sockaddr_can in skb->cb * containing the interface index. 
*/ BUILD_BUG_ON(sizeof(skb->cb) < sizeof(struct sockaddr_can)); addr = (struct sockaddr_can *)skb->cb; memset(addr, 0, sizeof(*addr)); addr->can_family = AF_CAN; addr->can_ifindex = op->rx_ifindex; err = sock_queue_rcv_skb(sk, skb); if (err < 0) { struct bcm_sock *bo = bcm_sk(sk); kfree_skb(skb); /* don't care about overflows in this statistic */ bo->dropped_usr_msgs++; } } static void bcm_tx_start_timer(struct bcm_op *op) { if (op->kt_ival1.tv64 && op->count) hrtimer_start(&op->timer, ktime_add(ktime_get(), op->kt_ival1), HRTIMER_MODE_ABS); else if (op->kt_ival2.tv64) hrtimer_start(&op->timer, ktime_add(ktime_get(), op->kt_ival2), HRTIMER_MODE_ABS); } static void bcm_tx_timeout_tsklet(unsigned long data) { struct bcm_op *op = (struct bcm_op *)data; struct bcm_msg_head msg_head; if (op->kt_ival1.tv64 && (op->count > 0)) { op->count--; if (!op->count && (op->flags & TX_COUNTEVT)) { /* create notification to user */ msg_head.opcode = TX_EXPIRED; msg_head.flags = op->flags; msg_head.count = op->count; msg_head.ival1 = op->ival1; msg_head.ival2 = op->ival2; msg_head.can_id = op->can_id; msg_head.nframes = 0; bcm_send_to_user(op, &msg_head, NULL, 0); } bcm_can_tx(op); } else if (op->kt_ival2.tv64) bcm_can_tx(op); bcm_tx_start_timer(op); } /* * bcm_tx_timeout_handler - performs cyclic CAN frame transmissions */ static enum hrtimer_restart bcm_tx_timeout_handler(struct hrtimer *hrtimer) { struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer); tasklet_schedule(&op->tsklet); return HRTIMER_NORESTART; } /* * bcm_rx_changed - create a RX_CHANGED notification due to changed content */ static void bcm_rx_changed(struct bcm_op *op, struct can_frame *data) { struct bcm_msg_head head; /* update statistics */ op->frames_filtered++; /* prevent statistics overflow */ if (op->frames_filtered > ULONG_MAX/100) op->frames_filtered = op->frames_abs = 0; /* this element is not throttled anymore */ data->can_dlc &= (BCM_CAN_DLC_MASK|RX_RECV); head.opcode = RX_CHANGED; head.flags = 
op->flags; head.count = op->count; head.ival1 = op->ival1; head.ival2 = op->ival2; head.can_id = op->can_id; head.nframes = 1; bcm_send_to_user(op, &head, data, 1); } /* * bcm_rx_update_and_send - process a detected relevant receive content change * 1. update the last received data * 2. send a notification to the user (if possible) */ static void bcm_rx_update_and_send(struct bcm_op *op, struct can_frame *lastdata, const struct can_frame *rxdata) { memcpy(lastdata, rxdata, CFSIZ); /* mark as used and throttled by default */ lastdata->can_dlc |= (RX_RECV|RX_THR); /* throtteling mode inactive ? */ if (!op->kt_ival2.tv64) { /* send RX_CHANGED to the user immediately */ bcm_rx_changed(op, lastdata); return; } /* with active throttling timer we are just done here */ if (hrtimer_active(&op->thrtimer)) return; /* first receiption with enabled throttling mode */ if (!op->kt_lastmsg.tv64) goto rx_changed_settime; /* got a second frame inside a potential throttle period? */ if (ktime_us_delta(ktime_get(), op->kt_lastmsg) < ktime_to_us(op->kt_ival2)) { /* do not send the saved data - only start throttle timer */ hrtimer_start(&op->thrtimer, ktime_add(op->kt_lastmsg, op->kt_ival2), HRTIMER_MODE_ABS); return; } /* the gap was that big, that throttling was not needed here */ rx_changed_settime: bcm_rx_changed(op, lastdata); op->kt_lastmsg = ktime_get(); } /* * bcm_rx_cmp_to_index - (bit)compares the currently received data to formerly * received data stored in op->last_frames[] */ static void bcm_rx_cmp_to_index(struct bcm_op *op, unsigned int index, const struct can_frame *rxdata) { /* * no one uses the MSBs of can_dlc for comparation, * so we use it here to detect the first time of reception */ if (!(op->last_frames[index].can_dlc & RX_RECV)) { /* received data for the first time => send update to user */ bcm_rx_update_and_send(op, &op->last_frames[index], rxdata); return; } /* do a real check in can_frame data section */ if ((GET_U64(&op->frames[index]) & GET_U64(rxdata)) != 
(GET_U64(&op->frames[index]) & GET_U64(&op->last_frames[index]))) { bcm_rx_update_and_send(op, &op->last_frames[index], rxdata); return; } if (op->flags & RX_CHECK_DLC) { /* do a real check in can_frame dlc */ if (rxdata->can_dlc != (op->last_frames[index].can_dlc & BCM_CAN_DLC_MASK)) { bcm_rx_update_and_send(op, &op->last_frames[index], rxdata); return; } } } /* * bcm_rx_starttimer - enable timeout monitoring for CAN frame receiption */ static void bcm_rx_starttimer(struct bcm_op *op) { if (op->flags & RX_NO_AUTOTIMER) return; if (op->kt_ival1.tv64) hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL); } static void bcm_rx_timeout_tsklet(unsigned long data) { struct bcm_op *op = (struct bcm_op *)data; struct bcm_msg_head msg_head; /* create notification to user */ msg_head.opcode = RX_TIMEOUT; msg_head.flags = op->flags; msg_head.count = op->count; msg_head.ival1 = op->ival1; msg_head.ival2 = op->ival2; msg_head.can_id = op->can_id; msg_head.nframes = 0; bcm_send_to_user(op, &msg_head, NULL, 0); } /* * bcm_rx_timeout_handler - when the (cyclic) CAN frame receiption timed out */ static enum hrtimer_restart bcm_rx_timeout_handler(struct hrtimer *hrtimer) { struct bcm_op *op = container_of(hrtimer, struct bcm_op, timer); /* schedule before NET_RX_SOFTIRQ */ tasklet_hi_schedule(&op->tsklet); /* no restart of the timer is done here! 
*/ /* if user wants to be informed, when cyclic CAN-Messages come back */ if ((op->flags & RX_ANNOUNCE_RESUME) && op->last_frames) { /* clear received can_frames to indicate 'nothing received' */ memset(op->last_frames, 0, op->nframes * CFSIZ); } return HRTIMER_NORESTART; } /* * bcm_rx_do_flush - helper for bcm_rx_thr_flush */ static inline int bcm_rx_do_flush(struct bcm_op *op, int update, unsigned int index) { if ((op->last_frames) && (op->last_frames[index].can_dlc & RX_THR)) { if (update) bcm_rx_changed(op, &op->last_frames[index]); return 1; } return 0; } /* * bcm_rx_thr_flush - Check for throttled data and send it to the userspace * * update == 0 : just check if throttled data is available (any irq context) * update == 1 : check and send throttled data to userspace (soft_irq context) */ static int bcm_rx_thr_flush(struct bcm_op *op, int update) { int updated = 0; if (op->nframes > 1) { unsigned int i; /* for MUX filter we start at index 1 */ for (i = 1; i < op->nframes; i++) updated += bcm_rx_do_flush(op, update, i); } else { /* for RX_FILTER_ID and simple filter */ updated += bcm_rx_do_flush(op, update, 0); } return updated; } static void bcm_rx_thr_tsklet(unsigned long data) { struct bcm_op *op = (struct bcm_op *)data; /* push the changed data to the userspace */ bcm_rx_thr_flush(op, 1); } /* * bcm_rx_thr_handler - the time for blocked content updates is over now: * Check for throttled data and send it to the userspace */ static enum hrtimer_restart bcm_rx_thr_handler(struct hrtimer *hrtimer) { struct bcm_op *op = container_of(hrtimer, struct bcm_op, thrtimer); tasklet_schedule(&op->thrtsklet); if (bcm_rx_thr_flush(op, 0)) { hrtimer_forward(hrtimer, ktime_get(), op->kt_ival2); return HRTIMER_RESTART; } else { /* rearm throttle handling */ op->kt_lastmsg = ktime_set(0, 0); return HRTIMER_NORESTART; } } /* * bcm_rx_handler - handle a CAN frame receiption */ static void bcm_rx_handler(struct sk_buff *skb, void *data) { struct bcm_op *op = (struct bcm_op 
*)data; const struct can_frame *rxframe = (struct can_frame *)skb->data; unsigned int i; /* disable timeout */ hrtimer_cancel(&op->timer); if (op->can_id != rxframe->can_id) return; /* save rx timestamp */ op->rx_stamp = skb->tstamp; /* save originator for recvfrom() */ op->rx_ifindex = skb->dev->ifindex; /* update statistics */ op->frames_abs++; if (op->flags & RX_RTR_FRAME) { /* send reply for RTR-request (placed in op->frames[0]) */ bcm_can_tx(op); return; } if (op->flags & RX_FILTER_ID) { /* the easiest case */ bcm_rx_update_and_send(op, &op->last_frames[0], rxframe); goto rx_starttimer; } if (op->nframes == 1) { /* simple compare with index 0 */ bcm_rx_cmp_to_index(op, 0, rxframe); goto rx_starttimer; } if (op->nframes > 1) { /* * multiplex compare * * find the first multiplex mask that fits. * Remark: The MUX-mask is stored in index 0 */ for (i = 1; i < op->nframes; i++) { if ((GET_U64(&op->frames[0]) & GET_U64(rxframe)) == (GET_U64(&op->frames[0]) & GET_U64(&op->frames[i]))) { bcm_rx_cmp_to_index(op, i, rxframe); break; } } } rx_starttimer: bcm_rx_starttimer(op); } /* * helpers for bcm_op handling: find & delete bcm [rx|tx] op elements */ static struct bcm_op *bcm_find_op(struct list_head *ops, canid_t can_id, int ifindex) { struct bcm_op *op; list_for_each_entry(op, ops, list) { if ((op->can_id == can_id) && (op->ifindex == ifindex)) return op; } return NULL; } static void bcm_remove_op(struct bcm_op *op) { hrtimer_cancel(&op->timer); hrtimer_cancel(&op->thrtimer); if (op->tsklet.func) tasklet_kill(&op->tsklet); if (op->thrtsklet.func) tasklet_kill(&op->thrtsklet); if ((op->frames) && (op->frames != &op->sframe)) kfree(op->frames); if ((op->last_frames) && (op->last_frames != &op->last_sframe)) kfree(op->last_frames); kfree(op); } static void bcm_rx_unreg(struct net_device *dev, struct bcm_op *op) { if (op->rx_reg_dev == dev) { can_rx_unregister(dev, op->can_id, REGMASK(op->can_id), bcm_rx_handler, op); /* mark as removed subscription */ op->rx_reg_dev = 
NULL; } else printk(KERN_ERR "can-bcm: bcm_rx_unreg: registered device " "mismatch %p %p\n", op->rx_reg_dev, dev); } /* * bcm_delete_rx_op - find and remove a rx op (returns number of removed ops) */ static int bcm_delete_rx_op(struct list_head *ops, canid_t can_id, int ifindex) { struct bcm_op *op, *n; list_for_each_entry_safe(op, n, ops, list) { if ((op->can_id == can_id) && (op->ifindex == ifindex)) { /* * Don't care if we're bound or not (due to netdev * problems) can_rx_unregister() is always a save * thing to do here. */ if (op->ifindex) { /* * Only remove subscriptions that had not * been removed due to NETDEV_UNREGISTER * in bcm_notifier() */ if (op->rx_reg_dev) { struct net_device *dev; dev = dev_get_by_index(&init_net, op->ifindex); if (dev) { bcm_rx_unreg(dev, op); dev_put(dev); } } } else can_rx_unregister(NULL, op->can_id, REGMASK(op->can_id), bcm_rx_handler, op); list_del(&op->list); bcm_remove_op(op); return 1; /* done */ } } return 0; /* not found */ } /* * bcm_delete_tx_op - find and remove a tx op (returns number of removed ops) */ static int bcm_delete_tx_op(struct list_head *ops, canid_t can_id, int ifindex) { struct bcm_op *op, *n; list_for_each_entry_safe(op, n, ops, list) { if ((op->can_id == can_id) && (op->ifindex == ifindex)) { list_del(&op->list); bcm_remove_op(op); return 1; /* done */ } } return 0; /* not found */ } /* * bcm_read_op - read out a bcm_op and send it to the user (for bcm_sendmsg) */ static int bcm_read_op(struct list_head *ops, struct bcm_msg_head *msg_head, int ifindex) { struct bcm_op *op = bcm_find_op(ops, msg_head->can_id, ifindex); if (!op) return -EINVAL; /* put current values into msg_head */ msg_head->flags = op->flags; msg_head->count = op->count; msg_head->ival1 = op->ival1; msg_head->ival2 = op->ival2; msg_head->nframes = op->nframes; bcm_send_to_user(op, msg_head, op->frames, 0); return MHSIZ; } /* * bcm_tx_setup - create or update a bcm tx op (for bcm_sendmsg) */ static int bcm_tx_setup(struct bcm_msg_head 
*msg_head, struct msghdr *msg, int ifindex, struct sock *sk) { struct bcm_sock *bo = bcm_sk(sk); struct bcm_op *op; unsigned int i; int err; /* we need a real device to send frames */ if (!ifindex) return -ENODEV; /* check nframes boundaries - we need at least one can_frame */ if (msg_head->nframes < 1 || msg_head->nframes > MAX_NFRAMES) return -EINVAL; /* check the given can_id */ op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex); if (op) { /* update existing BCM operation */ /* * Do we need more space for the can_frames than currently * allocated? -> This is a _really_ unusual use-case and * therefore (complexity / locking) it is not supported. */ if (msg_head->nframes > op->nframes) return -E2BIG; /* update can_frames content */ for (i = 0; i < msg_head->nframes; i++) { err = memcpy_fromiovec((u8 *)&op->frames[i], msg->msg_iov, CFSIZ); if (op->frames[i].can_dlc > 8) err = -EINVAL; if (err < 0) return err; if (msg_head->flags & TX_CP_CAN_ID) { /* copy can_id into frame */ op->frames[i].can_id = msg_head->can_id; } } } else { /* insert new BCM operation for the given can_id */ op = kzalloc(OPSIZ, GFP_KERNEL); if (!op) return -ENOMEM; op->can_id = msg_head->can_id; /* create array for can_frames and copy the data */ if (msg_head->nframes > 1) { op->frames = kmalloc(msg_head->nframes * CFSIZ, GFP_KERNEL); if (!op->frames) { kfree(op); return -ENOMEM; } } else op->frames = &op->sframe; for (i = 0; i < msg_head->nframes; i++) { err = memcpy_fromiovec((u8 *)&op->frames[i], msg->msg_iov, CFSIZ); if (op->frames[i].can_dlc > 8) err = -EINVAL; if (err < 0) { if (op->frames != &op->sframe) kfree(op->frames); kfree(op); return err; } if (msg_head->flags & TX_CP_CAN_ID) { /* copy can_id into frame */ op->frames[i].can_id = msg_head->can_id; } } /* tx_ops never compare with previous received messages */ op->last_frames = NULL; /* bcm_can_tx / bcm_tx_timeout_handler needs this */ op->sk = sk; op->ifindex = ifindex; /* initialize uninitialized (kzalloc) structure */ 
hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); op->timer.function = bcm_tx_timeout_handler; /* initialize tasklet for tx countevent notification */ tasklet_init(&op->tsklet, bcm_tx_timeout_tsklet, (unsigned long) op); /* currently unused in tx_ops */ hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); /* add this bcm_op to the list of the tx_ops */ list_add(&op->list, &bo->tx_ops); } /* if ((op = bcm_find_op(&bo->tx_ops, msg_head->can_id, ifindex))) */ if (op->nframes != msg_head->nframes) { op->nframes = msg_head->nframes; /* start multiple frame transmission with index 0 */ op->currframe = 0; } /* check flags */ op->flags = msg_head->flags; if (op->flags & TX_RESET_MULTI_IDX) { /* start multiple frame transmission with index 0 */ op->currframe = 0; } if (op->flags & SETTIMER) { /* set timer values */ op->count = msg_head->count; op->ival1 = msg_head->ival1; op->ival2 = msg_head->ival2; op->kt_ival1 = timeval_to_ktime(msg_head->ival1); op->kt_ival2 = timeval_to_ktime(msg_head->ival2); /* disable an active timer due to zero values? */ if (!op->kt_ival1.tv64 && !op->kt_ival2.tv64) hrtimer_cancel(&op->timer); } if (op->flags & STARTTIMER) { hrtimer_cancel(&op->timer); /* spec: send can_frame when starting timer */ op->flags |= TX_ANNOUNCE; } if (op->flags & TX_ANNOUNCE) { bcm_can_tx(op); if (op->count) op->count--; } if (op->flags & STARTTIMER) bcm_tx_start_timer(op); return msg_head->nframes * CFSIZ + MHSIZ; } /* * bcm_rx_setup - create or update a bcm rx op (for bcm_sendmsg) */ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, int ifindex, struct sock *sk) { struct bcm_sock *bo = bcm_sk(sk); struct bcm_op *op; int do_rx_register; int err = 0; if ((msg_head->flags & RX_FILTER_ID) || (!(msg_head->nframes))) { /* be robust against wrong usage ... 
*/ msg_head->flags |= RX_FILTER_ID; /* ignore trailing garbage */ msg_head->nframes = 0; } /* the first element contains the mux-mask => MAX_NFRAMES + 1 */ if (msg_head->nframes > MAX_NFRAMES + 1) return -EINVAL; if ((msg_head->flags & RX_RTR_FRAME) && ((msg_head->nframes != 1) || (!(msg_head->can_id & CAN_RTR_FLAG)))) return -EINVAL; /* check the given can_id */ op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex); if (op) { /* update existing BCM operation */ /* * Do we need more space for the can_frames than currently * allocated? -> This is a _really_ unusual use-case and * therefore (complexity / locking) it is not supported. */ if (msg_head->nframes > op->nframes) return -E2BIG; if (msg_head->nframes) { /* update can_frames content */ err = memcpy_fromiovec((u8 *)op->frames, msg->msg_iov, msg_head->nframes * CFSIZ); if (err < 0) return err; /* clear last_frames to indicate 'nothing received' */ memset(op->last_frames, 0, msg_head->nframes * CFSIZ); } op->nframes = msg_head->nframes; /* Only an update -> do not call can_rx_register() */ do_rx_register = 0; } else { /* insert new BCM operation for the given can_id */ op = kzalloc(OPSIZ, GFP_KERNEL); if (!op) return -ENOMEM; op->can_id = msg_head->can_id; op->nframes = msg_head->nframes; if (msg_head->nframes > 1) { /* create array for can_frames and copy the data */ op->frames = kmalloc(msg_head->nframes * CFSIZ, GFP_KERNEL); if (!op->frames) { kfree(op); return -ENOMEM; } /* create and init array for received can_frames */ op->last_frames = kzalloc(msg_head->nframes * CFSIZ, GFP_KERNEL); if (!op->last_frames) { kfree(op->frames); kfree(op); return -ENOMEM; } } else { op->frames = &op->sframe; op->last_frames = &op->last_sframe; } if (msg_head->nframes) { err = memcpy_fromiovec((u8 *)op->frames, msg->msg_iov, msg_head->nframes * CFSIZ); if (err < 0) { if (op->frames != &op->sframe) kfree(op->frames); if (op->last_frames != &op->last_sframe) kfree(op->last_frames); kfree(op); return err; } } /* bcm_can_tx / 
bcm_tx_timeout_handler needs this */ op->sk = sk; op->ifindex = ifindex; /* ifindex for timeout events w/o previous frame reception */ op->rx_ifindex = ifindex; /* initialize uninitialized (kzalloc) structure */ hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); op->timer.function = bcm_rx_timeout_handler; /* initialize tasklet for rx timeout notification */ tasklet_init(&op->tsklet, bcm_rx_timeout_tsklet, (unsigned long) op); hrtimer_init(&op->thrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); op->thrtimer.function = bcm_rx_thr_handler; /* initialize tasklet for rx throttle handling */ tasklet_init(&op->thrtsklet, bcm_rx_thr_tsklet, (unsigned long) op); /* add this bcm_op to the list of the rx_ops */ list_add(&op->list, &bo->rx_ops); /* call can_rx_register() */ do_rx_register = 1; } /* if ((op = bcm_find_op(&bo->rx_ops, msg_head->can_id, ifindex))) */ /* check flags */ op->flags = msg_head->flags; if (op->flags & RX_RTR_FRAME) { /* no timers in RTR-mode */ hrtimer_cancel(&op->thrtimer); hrtimer_cancel(&op->timer); /* * funny feature in RX(!)_SETUP only for RTR-mode: * copy can_id into frame BUT without RTR-flag to * prevent a full-load-loopback-test ... ;-] */ if ((op->flags & TX_CP_CAN_ID) || (op->frames[0].can_id == op->can_id)) op->frames[0].can_id = op->can_id & ~CAN_RTR_FLAG; } else { if (op->flags & SETTIMER) { /* set timer value */ op->ival1 = msg_head->ival1; op->ival2 = msg_head->ival2; op->kt_ival1 = timeval_to_ktime(msg_head->ival1); op->kt_ival2 = timeval_to_ktime(msg_head->ival2); /* disable an active timer due to zero value? 
*/ if (!op->kt_ival1.tv64) hrtimer_cancel(&op->timer); /* * In any case cancel the throttle timer, flush * potentially blocked msgs and reset throttle handling */ op->kt_lastmsg = ktime_set(0, 0); hrtimer_cancel(&op->thrtimer); bcm_rx_thr_flush(op, 1); } if ((op->flags & STARTTIMER) && op->kt_ival1.tv64) hrtimer_start(&op->timer, op->kt_ival1, HRTIMER_MODE_REL); } /* now we can register for can_ids, if we added a new bcm_op */ if (do_rx_register) { if (ifindex) { struct net_device *dev; dev = dev_get_by_index(&init_net, ifindex); if (dev) { err = can_rx_register(dev, op->can_id, REGMASK(op->can_id), bcm_rx_handler, op, "bcm"); op->rx_reg_dev = dev; dev_put(dev); } } else err = can_rx_register(NULL, op->can_id, REGMASK(op->can_id), bcm_rx_handler, op, "bcm"); if (err) { /* this bcm rx op is broken -> remove it */ list_del(&op->list); bcm_remove_op(op); return err; } } return msg_head->nframes * CFSIZ + MHSIZ; } /* * bcm_tx_send - send a single CAN frame to the CAN interface (for bcm_sendmsg) */ static int bcm_tx_send(struct msghdr *msg, int ifindex, struct sock *sk) { struct sk_buff *skb; struct net_device *dev; int err; /* we need a real device to send frames */ if (!ifindex) return -ENODEV; skb = alloc_skb(CFSIZ, GFP_KERNEL); if (!skb) return -ENOMEM; err = memcpy_fromiovec(skb_put(skb, CFSIZ), msg->msg_iov, CFSIZ); if (err < 0) { kfree_skb(skb); return err; } dev = dev_get_by_index(&init_net, ifindex); if (!dev) { kfree_skb(skb); return -ENODEV; } skb->dev = dev; skb->sk = sk; err = can_send(skb, 1); /* send with loopback */ dev_put(dev); if (err) return err; return CFSIZ + MHSIZ; } /* * bcm_sendmsg - process BCM commands (opcodes) from the userspace */ static int bcm_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size) { struct sock *sk = sock->sk; struct bcm_sock *bo = bcm_sk(sk); int ifindex = bo->ifindex; /* default ifindex for this bcm_op */ struct bcm_msg_head msg_head; int ret; /* read bytes or error codes as return value */ if 
(!bo->bound) return -ENOTCONN; /* check for valid message length from userspace */ if (size < MHSIZ || (size - MHSIZ) % CFSIZ) return -EINVAL; /* check for alternative ifindex for this bcm_op */ if (!ifindex && msg->msg_name) { /* no bound device as default => check msg_name */ struct sockaddr_can *addr = (struct sockaddr_can *)msg->msg_name; if (msg->msg_namelen < sizeof(*addr)) return -EINVAL; if (addr->can_family != AF_CAN) return -EINVAL; /* ifindex from sendto() */ ifindex = addr->can_ifindex; if (ifindex) { struct net_device *dev; dev = dev_get_by_index(&init_net, ifindex); if (!dev) return -ENODEV; if (dev->type != ARPHRD_CAN) { dev_put(dev); return -ENODEV; } dev_put(dev); } } /* read message head information */ ret = memcpy_fromiovec((u8 *)&msg_head, msg->msg_iov, MHSIZ); if (ret < 0) return ret; lock_sock(sk); switch (msg_head.opcode) { case TX_SETUP: ret = bcm_tx_setup(&msg_head, msg, ifindex, sk); break; case RX_SETUP: ret = bcm_rx_setup(&msg_head, msg, ifindex, sk); break; case TX_DELETE: if (bcm_delete_tx_op(&bo->tx_ops, msg_head.can_id, ifindex)) ret = MHSIZ; else ret = -EINVAL; break; case RX_DELETE: if (bcm_delete_rx_op(&bo->rx_ops, msg_head.can_id, ifindex)) ret = MHSIZ; else ret = -EINVAL; break; case TX_READ: /* reuse msg_head for the reply to TX_READ */ msg_head.opcode = TX_STATUS; ret = bcm_read_op(&bo->tx_ops, &msg_head, ifindex); break; case RX_READ: /* reuse msg_head for the reply to RX_READ */ msg_head.opcode = RX_STATUS; ret = bcm_read_op(&bo->rx_ops, &msg_head, ifindex); break; case TX_SEND: /* we need exactly one can_frame behind the msg head */ if ((msg_head.nframes != 1) || (size != CFSIZ + MHSIZ)) ret = -EINVAL; else ret = bcm_tx_send(msg, ifindex, sk); break; default: ret = -EINVAL; break; } release_sock(sk); return ret; } /* * notification handler for netdevice status changes */ static int bcm_notifier(struct notifier_block *nb, unsigned long msg, void *data) { struct net_device *dev = (struct net_device *)data; struct bcm_sock *bo 
= container_of(nb, struct bcm_sock, notifier); struct sock *sk = &bo->sk; struct bcm_op *op; int notify_enodev = 0; if (!net_eq(dev_net(dev), &init_net)) return NOTIFY_DONE; if (dev->type != ARPHRD_CAN) return NOTIFY_DONE; switch (msg) { case NETDEV_UNREGISTER: lock_sock(sk); /* remove device specific receive entries */ list_for_each_entry(op, &bo->rx_ops, list) if (op->rx_reg_dev == dev) bcm_rx_unreg(dev, op); /* remove device reference, if this is our bound device */ if (bo->bound && bo->ifindex == dev->ifindex) { bo->bound = 0; bo->ifindex = 0; notify_enodev = 1; } release_sock(sk); if (notify_enodev) { sk->sk_err = ENODEV; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); } break; case NETDEV_DOWN: if (bo->bound && bo->ifindex == dev->ifindex) { sk->sk_err = ENETDOWN; if (!sock_flag(sk, SOCK_DEAD)) sk->sk_error_report(sk); } } return NOTIFY_DONE; } /* * initial settings for all BCM sockets to be set at socket creation time */ static int bcm_init(struct sock *sk) { struct bcm_sock *bo = bcm_sk(sk); bo->bound = 0; bo->ifindex = 0; bo->dropped_usr_msgs = 0; bo->bcm_proc_read = NULL; INIT_LIST_HEAD(&bo->tx_ops); INIT_LIST_HEAD(&bo->rx_ops); /* set notifier */ bo->notifier.notifier_call = bcm_notifier; register_netdevice_notifier(&bo->notifier); return 0; } /* * standard socket functions */ static int bcm_release(struct socket *sock) { struct sock *sk = sock->sk; struct bcm_sock *bo; struct bcm_op *op, *next; if (sk == NULL) return 0; bo = bcm_sk(sk); /* remove bcm_ops, timer, rx_unregister(), etc. */ unregister_netdevice_notifier(&bo->notifier); lock_sock(sk); list_for_each_entry_safe(op, next, &bo->tx_ops, list) bcm_remove_op(op); list_for_each_entry_safe(op, next, &bo->rx_ops, list) { /* * Don't care if we're bound or not (due to netdev problems) * can_rx_unregister() is always a save thing to do here. 
*/ if (op->ifindex) { /* * Only remove subscriptions that had not * been removed due to NETDEV_UNREGISTER * in bcm_notifier() */ if (op->rx_reg_dev) { struct net_device *dev; dev = dev_get_by_index(&init_net, op->ifindex); if (dev) { bcm_rx_unreg(dev, op); dev_put(dev); } } } else can_rx_unregister(NULL, op->can_id, REGMASK(op->can_id), bcm_rx_handler, op); bcm_remove_op(op); } /* remove procfs entry */ if (proc_dir && bo->bcm_proc_read) remove_proc_entry(bo->procname, proc_dir); /* remove device reference */ if (bo->bound) { bo->bound = 0; bo->ifindex = 0; } sock_orphan(sk); sock->sk = NULL; release_sock(sk); sock_put(sk); return 0; } static int bcm_connect(struct socket *sock, struct sockaddr *uaddr, int len, int flags) { struct sockaddr_can *addr = (struct sockaddr_can *)uaddr; struct sock *sk = sock->sk; struct bcm_sock *bo = bcm_sk(sk); if (len < sizeof(*addr)) return -EINVAL; if (bo->bound) return -EISCONN; /* bind a device to this socket */ if (addr->can_ifindex) { struct net_device *dev; dev = dev_get_by_index(&init_net, addr->can_ifindex); if (!dev) return -ENODEV; if (dev->type != ARPHRD_CAN) { dev_put(dev); return -ENODEV; } bo->ifindex = dev->ifindex; dev_put(dev); } else { /* no interface reference for ifindex = 0 ('any' CAN device) */ bo->ifindex = 0; } bo->bound = 1; if (proc_dir) { /* unique socket address as filename */ sprintf(bo->procname, "%lu", sock_i_ino(sk)); bo->bcm_proc_read = proc_create_data(bo->procname, 0644, proc_dir, &bcm_proc_fops, sk); } return 0; } static int bcm_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t size, int flags) { struct sock *sk = sock->sk; struct sk_buff *skb; int error = 0; int noblock; int err; noblock = flags & MSG_DONTWAIT; flags &= ~MSG_DONTWAIT; skb = skb_recv_datagram(sk, flags, noblock, &error); if (!skb) return error; if (skb->len < size) size = skb->len; err = memcpy_toiovec(msg->msg_iov, skb->data, size); if (err < 0) { skb_free_datagram(sk, skb); return err; } 
sock_recv_ts_and_drops(msg, sk, skb); if (msg->msg_name) { msg->msg_namelen = sizeof(struct sockaddr_can); memcpy(msg->msg_name, skb->cb, msg->msg_namelen); } skb_free_datagram(sk, skb); return size; } static const struct proto_ops bcm_ops = { .family = PF_CAN, .release = bcm_release, .bind = sock_no_bind, .connect = bcm_connect, .socketpair = sock_no_socketpair, .accept = sock_no_accept, .getname = sock_no_getname, .poll = datagram_poll, .ioctl = can_ioctl, /* use can_ioctl() from af_can.c */ .listen = sock_no_listen, .shutdown = sock_no_shutdown, .setsockopt = sock_no_setsockopt, .getsockopt = sock_no_getsockopt, .sendmsg = bcm_sendmsg, .recvmsg = bcm_recvmsg, .mmap = sock_no_mmap, .sendpage = sock_no_sendpage, }; static struct proto bcm_proto __read_mostly = { .name = "CAN_BCM", .owner = THIS_MODULE, .obj_size = sizeof(struct bcm_sock), .init = bcm_init, }; static const struct can_proto bcm_can_proto = { .type = SOCK_DGRAM, .protocol = CAN_BCM, .ops = &bcm_ops, .prot = &bcm_proto, }; static int __init bcm_module_init(void) { int err; printk(banner); err = can_proto_register(&bcm_can_proto); if (err < 0) { printk(KERN_ERR "can: registration of bcm protocol failed\n"); return err; } /* create /proc/net/can-bcm directory */ proc_dir = proc_mkdir("can-bcm", init_net.proc_net); return 0; } static void __exit bcm_module_exit(void) { can_proto_unregister(&bcm_can_proto); if (proc_dir) proc_net_remove(&init_net, "can-bcm"); } module_init(bcm_module_init); module_exit(bcm_module_exit);
gpl-2.0
jiangjiali66/linux-xlnx
drivers/gpu/drm/msm/hdmi/hdmi_audio.c
1132
8691
/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/hdmi.h>
#include "hdmi.h"

/* Supported HDMI Audio channels */
#define MSM_HDMI_AUDIO_CHANNEL_2	0
#define MSM_HDMI_AUDIO_CHANNEL_4	1
#define MSM_HDMI_AUDIO_CHANNEL_6	2
#define MSM_HDMI_AUDIO_CHANNEL_8	3

/* maps MSM_HDMI_AUDIO_CHANNEL_n consts used by audio driver to # of channels: */
static int nchannels[] = { 2, 4, 6, 8 };

/* Supported HDMI Audio sample rates */
#define MSM_HDMI_SAMPLE_RATE_32KHZ	0
#define MSM_HDMI_SAMPLE_RATE_44_1KHZ	1
#define MSM_HDMI_SAMPLE_RATE_48KHZ	2
#define MSM_HDMI_SAMPLE_RATE_88_2KHZ	3
#define MSM_HDMI_SAMPLE_RATE_96KHZ	4
#define MSM_HDMI_SAMPLE_RATE_176_4KHZ	5
#define MSM_HDMI_SAMPLE_RATE_192KHZ	6
#define MSM_HDMI_SAMPLE_RATE_MAX	7

struct hdmi_msm_audio_acr {
	uint32_t n;	/* N parameter for clock regeneration */
	uint32_t cts;	/* CTS parameter for clock regeneration */
};

struct hdmi_msm_audio_arcs {
	unsigned long int pixclock;
	struct hdmi_msm_audio_acr lut[MSM_HDMI_SAMPLE_RATE_MAX];
};

#define HDMI_MSM_AUDIO_ARCS(pclk, ...) { (1000 * (pclk)), __VA_ARGS__ }

/* Audio constants lookup table for hdmi_msm_audio_acr_setup */
/* Valid Pixel-Clock rates: 25.2MHz, 27MHz, 27.03MHz, 74.25MHz, 148.5MHz */
static const struct hdmi_msm_audio_arcs acr_lut[] = {
	/*  25.200MHz  */
	HDMI_MSM_AUDIO_ARCS(25200, {
		{4096, 25200}, {6272, 28000}, {6144, 25200}, {12544, 28000},
		{12288, 25200}, {25088, 28000}, {24576, 25200} }),
	/*  27.000MHz  */
	HDMI_MSM_AUDIO_ARCS(27000, {
		{4096, 27000}, {6272, 30000}, {6144, 27000}, {12544, 30000},
		{12288, 27000}, {25088, 30000}, {24576, 27000} }),
	/*  27.027MHz */
	HDMI_MSM_AUDIO_ARCS(27030, {
		{4096, 27027}, {6272, 30030}, {6144, 27027}, {12544, 30030},
		{12288, 27027}, {25088, 30030}, {24576, 27027} }),
	/*  74.250MHz */
	HDMI_MSM_AUDIO_ARCS(74250, {
		{4096, 74250}, {6272, 82500}, {6144, 74250}, {12544, 82500},
		{12288, 74250}, {25088, 82500}, {24576, 74250} }),
	/* 148.500MHz */
	HDMI_MSM_AUDIO_ARCS(148500, {
		{4096, 148500}, {6272, 165000}, {6144, 148500}, {12544, 165000},
		{12288, 148500}, {25088, 165000}, {24576, 148500} }),
};

/* Look up the N/CTS table for the given pixel clock; NULL if unsupported. */
static const struct hdmi_msm_audio_arcs *get_arcs(unsigned long int pixclock)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(acr_lut); i++) {
		const struct hdmi_msm_audio_arcs *arcs = &acr_lut[i];
		if (arcs->pixclock == pixclock)
			return arcs;
	}

	return NULL;
}

/*
 * Reprogram the audio-related HDMI registers from the current state in
 * hdmi->audio.  Audio is force-disabled when there is no active video or
 * the pixel clock has no ACR lookup entry.  Returns 0 (no failure paths
 * are reported to the caller).
 */
int hdmi_audio_update(struct hdmi *hdmi)
{
	struct hdmi_audio *audio = &hdmi->audio;
	struct hdmi_audio_infoframe *info = &audio->infoframe;
	const struct hdmi_msm_audio_arcs *arcs = NULL;
	bool enabled = audio->enabled;
	uint32_t acr_pkt_ctrl, vbi_pkt_ctrl, aud_pkt_ctrl;
	uint32_t infofrm_ctrl, audio_config;

	DBG("audio: enabled=%d, channels=%d, channel_allocation=0x%x, "
		"level_shift_value=%d, downmix_inhibit=%d, rate=%d",
		audio->enabled, info->channels,  info->channel_allocation,
		info->level_shift_value, info->downmix_inhibit, audio->rate);
	DBG("video: power_on=%d, pixclock=%lu", hdmi->power_on, hdmi->pixclock);

	if (enabled && !(hdmi->power_on && hdmi->pixclock)) {
		DBG("disabling audio: no video");
		enabled = false;
	}

	if (enabled) {
		arcs = get_arcs(hdmi->pixclock);
		if (!arcs) {
			DBG("disabling audio: unsupported pixclock: %lu",
					hdmi->pixclock);
			enabled = false;
		}
	}

	/* Read first before writing */
	acr_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_ACR_PKT_CTRL);
	vbi_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
	aud_pkt_ctrl = hdmi_read(hdmi, REG_HDMI_AUDIO_PKT_CTRL1);
	infofrm_ctrl = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0);
	audio_config = hdmi_read(hdmi, REG_HDMI_AUDIO_CFG);

	/* Clear N/CTS selection bits */
	acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_SELECT__MASK;

	if (enabled) {
		uint32_t n, cts, multiplier;
		enum hdmi_acr_cts select;
		uint8_t buf[14];

		n   = arcs->lut[audio->rate].n;
		cts = arcs->lut[audio->rate].cts;

		/*
		 * For the high sample rates the hardware wants a scaled-down
		 * N together with an explicit multiplier instead of the raw
		 * table value.
		 */
		if ((MSM_HDMI_SAMPLE_RATE_192KHZ == audio->rate) ||
				(MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio->rate)) {
			multiplier = 4;
			n >>= 2; /* divide N by 4 and use multiplier */
		} else if ((MSM_HDMI_SAMPLE_RATE_96KHZ == audio->rate) ||
				(MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio->rate)) {
			multiplier = 2;
			n >>= 1; /* divide N by 2 and use multiplier */
		} else {
			multiplier = 1;
		}

		DBG("n=%u, cts=%u, multiplier=%u", n, cts, multiplier);

		acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_SOURCE;
		acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_AUDIO_PRIORITY;
		acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_N_MULTIPLIER(multiplier);

		/* 32k/44.1k/48k families share one N/CTS register pair each */
		if ((MSM_HDMI_SAMPLE_RATE_48KHZ == audio->rate) ||
				(MSM_HDMI_SAMPLE_RATE_96KHZ == audio->rate) ||
				(MSM_HDMI_SAMPLE_RATE_192KHZ == audio->rate))
			select = ACR_48;
		else if ((MSM_HDMI_SAMPLE_RATE_44_1KHZ == audio->rate) ||
				(MSM_HDMI_SAMPLE_RATE_88_2KHZ == audio->rate) ||
				(MSM_HDMI_SAMPLE_RATE_176_4KHZ == audio->rate))
			select = ACR_44;
		else /* default to 32k */
			select = ACR_32;

		acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_SELECT(select);

		hdmi_write(hdmi, REG_HDMI_ACR_0(select - 1),
				HDMI_ACR_0_CTS(cts));
		hdmi_write(hdmi, REG_HDMI_ACR_1(select - 1),
				HDMI_ACR_1_N(n));

		hdmi_write(hdmi, REG_HDMI_AUDIO_PKT_CTRL2,
				COND(info->channels != 2, HDMI_AUDIO_PKT_CTRL2_LAYOUT) |
				HDMI_AUDIO_PKT_CTRL2_OVERRIDE);

		acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_CONT;
		acr_pkt_ctrl |= HDMI_ACR_PKT_CTRL_SEND;

		/* configure infoframe: */
		hdmi_audio_infoframe_pack(info, buf, sizeof(buf));
		/*
		 * Pack the infoframe payload bytes into the 32-bit registers
		 * with bitwise OR.  The original code used logical OR (||),
		 * which reduced each shifted byte to 0/1 and programmed a
		 * bogus infoframe.
		 */
		hdmi_write(hdmi, REG_HDMI_AUDIO_INFO0,
				(buf[3] << 0) | (buf[4] << 8) |
				(buf[5] << 16) | (buf[6] << 24));
		hdmi_write(hdmi, REG_HDMI_AUDIO_INFO1,
				(buf[7] << 0) | (buf[8] << 8));

		hdmi_write(hdmi, REG_HDMI_GC, 0);

		vbi_pkt_ctrl |= HDMI_VBI_PKT_CTRL_GC_ENABLE;
		vbi_pkt_ctrl |= HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME;

		aud_pkt_ctrl |= HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND;

		infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND;
		infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT;
		infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE;
		infofrm_ctrl |= HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;

		audio_config &= ~HDMI_AUDIO_CFG_FIFO_WATERMARK__MASK;
		audio_config |= HDMI_AUDIO_CFG_FIFO_WATERMARK(4);
		audio_config |= HDMI_AUDIO_CFG_ENGINE_ENABLE;
	} else {
		/* mute the output and stop all audio-related packets: */
		hdmi_write(hdmi, REG_HDMI_GC, HDMI_GC_MUTE);
		acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_CONT;
		acr_pkt_ctrl &= ~HDMI_ACR_PKT_CTRL_SEND;
		vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_ENABLE;
		vbi_pkt_ctrl &= ~HDMI_VBI_PKT_CTRL_GC_EVERY_FRAME;
		aud_pkt_ctrl &= ~HDMI_AUDIO_PKT_CTRL1_AUDIO_SAMPLE_SEND;
		infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SEND;
		infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_CONT;
		infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE;
		infofrm_ctrl &= ~HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE;
		audio_config &= ~HDMI_AUDIO_CFG_ENGINE_ENABLE;
	}

	hdmi_write(hdmi, REG_HDMI_ACR_PKT_CTRL, acr_pkt_ctrl);
	hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_ctrl);
	hdmi_write(hdmi, REG_HDMI_AUDIO_PKT_CTRL1, aud_pkt_ctrl);
	hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0, infofrm_ctrl);

	hdmi_write(hdmi, REG_HDMI_AUD_INT,
			COND(enabled, HDMI_AUD_INT_AUD_FIFO_URUN_INT) |
			COND(enabled, HDMI_AUD_INT_AUD_SAM_DROP_INT));

	hdmi_write(hdmi, REG_HDMI_AUDIO_CFG, audio_config);

	DBG("audio %sabled", enabled ? "en" : "dis");

	return 0;
}

/*
 * Entry point used by the audio driver to (re)configure HDMI audio.
 * @num_of_channels is one of the MSM_HDMI_AUDIO_CHANNEL_n constants
 * (an index, not a channel count).  Returns -ENXIO when no hdmi device
 * exists, -EINVAL for an out-of-range channel index, otherwise the
 * result of hdmi_audio_update().
 */
int hdmi_audio_info_setup(struct hdmi *hdmi, bool enabled,
	uint32_t num_of_channels, uint32_t channel_allocation,
	uint32_t level_shift, bool down_mix)
{
	struct hdmi_audio *audio;

	if (!hdmi)
		return -ENXIO;

	audio = &hdmi->audio;

	if (num_of_channels >= ARRAY_SIZE(nchannels))
		return -EINVAL;

	audio->enabled = enabled;
	audio->infoframe.channels = nchannels[num_of_channels];
	audio->infoframe.channel_allocation = channel_allocation;
	audio->infoframe.level_shift_value = level_shift;
	audio->infoframe.downmix_inhibit = down_mix;

	return hdmi_audio_update(hdmi);
}

/*
 * Set the sample rate (one of the MSM_HDMI_SAMPLE_RATE_* indices) and
 * reprogram the hardware.  Out-of-range rates are silently ignored.
 */
void hdmi_audio_set_sample_rate(struct hdmi *hdmi, int rate)
{
	struct hdmi_audio *audio;

	if (!hdmi)
		return;

	audio = &hdmi->audio;

	if ((rate < 0) || (rate >= MSM_HDMI_SAMPLE_RATE_MAX))
		return;

	audio->rate = rate;
	hdmi_audio_update(hdmi);
}
gpl-2.0
FrozenCow/FIRE-ICE
drivers/usb/core/hcd-pci.c
1388
17635
/* * (C) Copyright David Brownell 2000-2002 * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software Foundation, * Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/pci.h> #include <linux/usb.h> #include <linux/usb/hcd.h> #include <asm/io.h> #include <asm/irq.h> #ifdef CONFIG_PPC_PMAC #include <asm/machdep.h> #include <asm/pmac_feature.h> #include <asm/pci-bridge.h> #include <asm/prom.h> #endif #include "usb.h" /* PCI-based HCs are common, but plenty of non-PCI HCs are used too */ /* * Coordinate handoffs between EHCI and companion controllers * during EHCI probing and system resume. */ static DECLARE_RWSEM(companions_rwsem); #define CL_UHCI PCI_CLASS_SERIAL_USB_UHCI #define CL_OHCI PCI_CLASS_SERIAL_USB_OHCI #define CL_EHCI PCI_CLASS_SERIAL_USB_EHCI static inline int is_ohci_or_uhci(struct pci_dev *pdev) { return pdev->class == CL_OHCI || pdev->class == CL_UHCI; } typedef void (*companion_fn)(struct pci_dev *pdev, struct usb_hcd *hcd, struct pci_dev *companion, struct usb_hcd *companion_hcd); /* Iterate over PCI devices in the same slot as pdev and call fn for each */ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd, companion_fn fn) { struct pci_dev *companion; struct usb_hcd *companion_hcd; unsigned int slot = PCI_SLOT(pdev->devfn); /* * Iterate through other PCI functions in the same slot. 
* If the function's drvdata isn't set then it isn't bound to * a USB host controller driver, so skip it. */ companion = NULL; for_each_pci_dev(companion) { if (companion->bus != pdev->bus || PCI_SLOT(companion->devfn) != slot) continue; companion_hcd = pci_get_drvdata(companion); if (!companion_hcd || !companion_hcd->self.root_hub) continue; fn(pdev, hcd, companion, companion_hcd); } } /* * We're about to add an EHCI controller, which will unceremoniously grab * all the port connections away from its companions. To prevent annoying * error messages, lock the companion's root hub and gracefully unconfigure * it beforehand. Leave it locked until the EHCI controller is all set. */ static void ehci_pre_add(struct pci_dev *pdev, struct usb_hcd *hcd, struct pci_dev *companion, struct usb_hcd *companion_hcd) { struct usb_device *udev; if (is_ohci_or_uhci(companion)) { udev = companion_hcd->self.root_hub; usb_lock_device(udev); usb_set_configuration(udev, 0); } } /* * Adding the EHCI controller has either succeeded or failed. Set the * companion pointer accordingly, and in either case, reconfigure and * unlock the root hub. */ static void ehci_post_add(struct pci_dev *pdev, struct usb_hcd *hcd, struct pci_dev *companion, struct usb_hcd *companion_hcd) { struct usb_device *udev; if (is_ohci_or_uhci(companion)) { if (dev_get_drvdata(&pdev->dev)) { /* Succeeded */ dev_dbg(&pdev->dev, "HS companion for %s\n", dev_name(&companion->dev)); companion_hcd->self.hs_companion = &hcd->self; } udev = companion_hcd->self.root_hub; usb_set_configuration(udev, 1); usb_unlock_device(udev); } } /* * We just added a non-EHCI controller. Find the EHCI controller to * which it is a companion, and store a pointer to the bus structure. 
*/ static void non_ehci_add(struct pci_dev *pdev, struct usb_hcd *hcd, struct pci_dev *companion, struct usb_hcd *companion_hcd) { if (is_ohci_or_uhci(pdev) && companion->class == CL_EHCI) { dev_dbg(&pdev->dev, "FS/LS companion for %s\n", dev_name(&companion->dev)); hcd->self.hs_companion = &companion_hcd->self; } } /* We are removing an EHCI controller. Clear the companions' pointers. */ static void ehci_remove(struct pci_dev *pdev, struct usb_hcd *hcd, struct pci_dev *companion, struct usb_hcd *companion_hcd) { if (is_ohci_or_uhci(companion)) companion_hcd->self.hs_companion = NULL; } #ifdef CONFIG_PM /* An EHCI controller must wait for its companions before resuming. */ static void ehci_wait_for_companions(struct pci_dev *pdev, struct usb_hcd *hcd, struct pci_dev *companion, struct usb_hcd *companion_hcd) { if (is_ohci_or_uhci(companion)) device_pm_wait_for_dev(&pdev->dev, &companion->dev); } #endif /* CONFIG_PM */ /*-------------------------------------------------------------------------*/ /* configure so an HC device and id are always provided */ /* always called with process context; sleeping is OK */ /** * usb_hcd_pci_probe - initialize PCI-based HCDs * @dev: USB Host Controller being probed * @id: pci hotplug id connecting controller to HCD framework * Context: !in_interrupt() * * Allocates basic PCI resources for this USB host controller, and * then invokes the start() method for the HCD associated with it * through the hotplug entry's driver_data. * * Store this function in the HCD's struct pci_driver as probe(). 
*/ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct hc_driver *driver; struct usb_hcd *hcd; int retval; int hcd_irq = 0; if (usb_disabled()) return -ENODEV; if (!id) return -EINVAL; driver = (struct hc_driver *)id->driver_data; if (!driver) return -EINVAL; if (pci_enable_device(dev) < 0) return -ENODEV; dev->current_state = PCI_D0; /* * The xHCI driver has its own irq management * make sure irq setup is not touched for xhci in generic hcd code */ if ((driver->flags & HCD_MASK) != HCD_USB3) { if (!dev->irq) { dev_err(&dev->dev, "Found HC with no IRQ. Check BIOS/PCI %s setup!\n", pci_name(dev)); retval = -ENODEV; goto disable_pci; } hcd_irq = dev->irq; } hcd = usb_create_hcd(driver, &dev->dev, pci_name(dev)); if (!hcd) { retval = -ENOMEM; goto disable_pci; } if (driver->flags & HCD_MEMORY) { /* EHCI, OHCI */ hcd->rsrc_start = pci_resource_start(dev, 0); hcd->rsrc_len = pci_resource_len(dev, 0); if (!request_mem_region(hcd->rsrc_start, hcd->rsrc_len, driver->description)) { dev_dbg(&dev->dev, "controller already in use\n"); retval = -EBUSY; goto put_hcd; } hcd->regs = ioremap_nocache(hcd->rsrc_start, hcd->rsrc_len); if (hcd->regs == NULL) { dev_dbg(&dev->dev, "error mapping memory\n"); retval = -EFAULT; goto release_mem_region; } } else { /* UHCI */ int region; for (region = 0; region < PCI_ROM_RESOURCE; region++) { if (!(pci_resource_flags(dev, region) & IORESOURCE_IO)) continue; hcd->rsrc_start = pci_resource_start(dev, region); hcd->rsrc_len = pci_resource_len(dev, region); if (request_region(hcd->rsrc_start, hcd->rsrc_len, driver->description)) break; } if (region == PCI_ROM_RESOURCE) { dev_dbg(&dev->dev, "no i/o regions available\n"); retval = -EBUSY; goto put_hcd; } } pci_set_master(dev); /* Note: dev_set_drvdata must be called while holding the rwsem */ if (dev->class == CL_EHCI) { down_write(&companions_rwsem); dev_set_drvdata(&dev->dev, hcd); for_each_companion(dev, hcd, ehci_pre_add); retval = usb_add_hcd(hcd, hcd_irq, 
IRQF_SHARED); if (retval != 0) dev_set_drvdata(&dev->dev, NULL); for_each_companion(dev, hcd, ehci_post_add); up_write(&companions_rwsem); } else { down_read(&companions_rwsem); dev_set_drvdata(&dev->dev, hcd); retval = usb_add_hcd(hcd, hcd_irq, IRQF_SHARED); if (retval != 0) dev_set_drvdata(&dev->dev, NULL); else for_each_companion(dev, hcd, non_ehci_add); up_read(&companions_rwsem); } if (retval != 0) goto unmap_registers; if (pci_dev_run_wake(dev)) pm_runtime_put_noidle(&dev->dev); return retval; unmap_registers: if (driver->flags & HCD_MEMORY) { iounmap(hcd->regs); release_mem_region: release_mem_region(hcd->rsrc_start, hcd->rsrc_len); } else release_region(hcd->rsrc_start, hcd->rsrc_len); put_hcd: usb_put_hcd(hcd); disable_pci: pci_disable_device(dev); dev_err(&dev->dev, "init %s fail, %d\n", pci_name(dev), retval); return retval; } EXPORT_SYMBOL_GPL(usb_hcd_pci_probe); /* may be called without controller electrically present */ /* may be called with controller, bus, and devices active */ /** * usb_hcd_pci_remove - shutdown processing for PCI-based HCDs * @dev: USB Host Controller being removed * Context: !in_interrupt() * * Reverses the effect of usb_hcd_pci_probe(), first invoking * the HCD's stop() method. It is always called from a thread * context, normally "rmmod", "apmd", or something similar. * * Store this function in the HCD's struct pci_driver as remove(). */ void usb_hcd_pci_remove(struct pci_dev *dev) { struct usb_hcd *hcd; hcd = pci_get_drvdata(dev); if (!hcd) return; if (pci_dev_run_wake(dev)) pm_runtime_get_noresume(&dev->dev); /* Fake an interrupt request in order to give the driver a chance * to test whether the controller hardware has been removed (e.g., * cardbus physical eject). 
*/ local_irq_disable(); usb_hcd_irq(0, hcd); local_irq_enable(); /* Note: dev_set_drvdata must be called while holding the rwsem */ if (dev->class == CL_EHCI) { down_write(&companions_rwsem); for_each_companion(dev, hcd, ehci_remove); usb_remove_hcd(hcd); dev_set_drvdata(&dev->dev, NULL); up_write(&companions_rwsem); } else { /* Not EHCI; just clear the companion pointer */ down_read(&companions_rwsem); hcd->self.hs_companion = NULL; usb_remove_hcd(hcd); dev_set_drvdata(&dev->dev, NULL); up_read(&companions_rwsem); } if (hcd->driver->flags & HCD_MEMORY) { iounmap(hcd->regs); release_mem_region(hcd->rsrc_start, hcd->rsrc_len); } else { release_region(hcd->rsrc_start, hcd->rsrc_len); } usb_put_hcd(hcd); pci_disable_device(dev); } EXPORT_SYMBOL_GPL(usb_hcd_pci_remove); /** * usb_hcd_pci_shutdown - shutdown host controller * @dev: USB Host Controller being shutdown */ void usb_hcd_pci_shutdown(struct pci_dev *dev) { struct usb_hcd *hcd; hcd = pci_get_drvdata(dev); if (!hcd) return; if (test_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags) && hcd->driver->shutdown) { hcd->driver->shutdown(hcd); pci_disable_device(dev); } } EXPORT_SYMBOL_GPL(usb_hcd_pci_shutdown); #ifdef CONFIG_PM #ifdef CONFIG_PPC_PMAC static void powermac_set_asic(struct pci_dev *pci_dev, int enable) { /* Enanble or disable ASIC clocks for USB */ if (machine_is(powermac)) { struct device_node *of_node; of_node = pci_device_to_OF_node(pci_dev); if (of_node) pmac_call_feature(PMAC_FTR_USB_ENABLE, of_node, 0, enable); } } #else static inline void powermac_set_asic(struct pci_dev *pci_dev, int enable) {} #endif /* CONFIG_PPC_PMAC */ static int check_root_hub_suspended(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct usb_hcd *hcd = pci_get_drvdata(pci_dev); if (HCD_RH_RUNNING(hcd)) { dev_warn(dev, "Root hub is not suspended\n"); return -EBUSY; } if (hcd->shared_hcd) { hcd = hcd->shared_hcd; if (HCD_RH_RUNNING(hcd)) { dev_warn(dev, "Secondary root hub is not suspended\n"); return -EBUSY; } } 
return 0; } #if defined(CONFIG_PM_SLEEP) || defined(CONFIG_PM_RUNTIME) static int suspend_common(struct device *dev, bool do_wakeup) { struct pci_dev *pci_dev = to_pci_dev(dev); struct usb_hcd *hcd = pci_get_drvdata(pci_dev); int retval; /* Root hub suspend should have stopped all downstream traffic, * and all bus master traffic. And done so for both the interface * and the stub usb_device (which we check here). But maybe it * didn't; writing sysfs power/state files ignores such rules... */ retval = check_root_hub_suspended(dev); if (retval) return retval; if (hcd->driver->pci_suspend && !HCD_DEAD(hcd)) { /* Optimization: Don't suspend if a root-hub wakeup is * pending and it would cause the HCD to wake up anyway. */ if (do_wakeup && HCD_WAKEUP_PENDING(hcd)) return -EBUSY; if (do_wakeup && hcd->shared_hcd && HCD_WAKEUP_PENDING(hcd->shared_hcd)) return -EBUSY; retval = hcd->driver->pci_suspend(hcd, do_wakeup); suspend_report_result(hcd->driver->pci_suspend, retval); /* Check again in case wakeup raced with pci_suspend */ if ((retval == 0 && do_wakeup && HCD_WAKEUP_PENDING(hcd)) || (retval == 0 && do_wakeup && hcd->shared_hcd && HCD_WAKEUP_PENDING(hcd->shared_hcd))) { if (hcd->driver->pci_resume) hcd->driver->pci_resume(hcd, false); retval = -EBUSY; } if (retval) return retval; } /* If MSI-X is enabled, the driver will have synchronized all vectors * in pci_suspend(). If MSI or legacy PCI is enabled, that will be * synchronized here. */ if (!hcd->msix_enabled) synchronize_irq(pci_dev->irq); /* Downstream ports from this root hub should already be quiesced, so * there will be no DMA activity. Now we can shut down the upstream * link (except maybe for PME# resume signaling). We'll enter a * low power state during suspend_noirq, if the hardware allows. 
*/ pci_disable_device(pci_dev); return retval; } static int resume_common(struct device *dev, int event) { struct pci_dev *pci_dev = to_pci_dev(dev); struct usb_hcd *hcd = pci_get_drvdata(pci_dev); int retval; if (HCD_RH_RUNNING(hcd) || (hcd->shared_hcd && HCD_RH_RUNNING(hcd->shared_hcd))) { dev_dbg(dev, "can't resume, not suspended!\n"); return 0; } retval = pci_enable_device(pci_dev); if (retval < 0) { dev_err(dev, "can't re-enable after resume, %d!\n", retval); return retval; } pci_set_master(pci_dev); if (hcd->driver->pci_resume && !HCD_DEAD(hcd)) { /* * Only EHCI controllers have to wait for their companions. * No locking is needed because PCI controller drivers do not * get unbound during system resume. */ if (pci_dev->class == CL_EHCI && event != PM_EVENT_AUTO_RESUME) for_each_companion(pci_dev, hcd, ehci_wait_for_companions); retval = hcd->driver->pci_resume(hcd, event == PM_EVENT_RESTORE); if (retval) { dev_err(dev, "PCI post-resume error %d!\n", retval); if (hcd->shared_hcd) usb_hc_died(hcd->shared_hcd); usb_hc_died(hcd); } } return retval; } #endif /* SLEEP || RUNTIME */ #ifdef CONFIG_PM_SLEEP static int hcd_pci_suspend(struct device *dev) { return suspend_common(dev, device_may_wakeup(dev)); } static int hcd_pci_suspend_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); struct usb_hcd *hcd = pci_get_drvdata(pci_dev); int retval; retval = check_root_hub_suspended(dev); if (retval) return retval; pci_save_state(pci_dev); /* If the root hub is dead rather than suspended, disallow remote * wakeup. usb_hc_died() should ensure that both hosts are marked as * dying, so we only need to check the primary roothub. */ if (HCD_DEAD(hcd)) device_set_wakeup_enable(dev, 0); dev_dbg(dev, "wakeup: %d\n", device_may_wakeup(dev)); /* Possibly enable remote wakeup, * choose the appropriate low-power state, and go to that state. 
*/ retval = pci_prepare_to_sleep(pci_dev); if (retval == -EIO) { /* Low-power not supported */ dev_dbg(dev, "--> PCI D0 legacy\n"); retval = 0; } else if (retval == 0) { dev_dbg(dev, "--> PCI %s\n", pci_power_name(pci_dev->current_state)); } else { suspend_report_result(pci_prepare_to_sleep, retval); return retval; } powermac_set_asic(pci_dev, 0); return retval; } static int hcd_pci_resume_noirq(struct device *dev) { struct pci_dev *pci_dev = to_pci_dev(dev); powermac_set_asic(pci_dev, 1); /* Go back to D0 and disable remote wakeup */ pci_back_from_sleep(pci_dev); return 0; } static int hcd_pci_resume(struct device *dev) { return resume_common(dev, PM_EVENT_RESUME); } static int hcd_pci_restore(struct device *dev) { return resume_common(dev, PM_EVENT_RESTORE); } #else #define hcd_pci_suspend NULL #define hcd_pci_suspend_noirq NULL #define hcd_pci_resume_noirq NULL #define hcd_pci_resume NULL #define hcd_pci_restore NULL #endif /* CONFIG_PM_SLEEP */ #ifdef CONFIG_PM_RUNTIME static int hcd_pci_runtime_suspend(struct device *dev) { int retval; retval = suspend_common(dev, true); if (retval == 0) powermac_set_asic(to_pci_dev(dev), 0); dev_dbg(dev, "hcd_pci_runtime_suspend: %d\n", retval); return retval; } static int hcd_pci_runtime_resume(struct device *dev) { int retval; powermac_set_asic(to_pci_dev(dev), 1); retval = resume_common(dev, PM_EVENT_AUTO_RESUME); dev_dbg(dev, "hcd_pci_runtime_resume: %d\n", retval); return retval; } #else #define hcd_pci_runtime_suspend NULL #define hcd_pci_runtime_resume NULL #endif /* CONFIG_PM_RUNTIME */ const struct dev_pm_ops usb_hcd_pci_pm_ops = { .suspend = hcd_pci_suspend, .suspend_noirq = hcd_pci_suspend_noirq, .resume_noirq = hcd_pci_resume_noirq, .resume = hcd_pci_resume, .freeze = check_root_hub_suspended, .freeze_noirq = check_root_hub_suspended, .thaw_noirq = NULL, .thaw = NULL, .poweroff = hcd_pci_suspend, .poweroff_noirq = hcd_pci_suspend_noirq, .restore_noirq = hcd_pci_resume_noirq, .restore = hcd_pci_restore, 
.runtime_suspend = hcd_pci_runtime_suspend, .runtime_resume = hcd_pci_runtime_resume, }; EXPORT_SYMBOL_GPL(usb_hcd_pci_pm_ops); #endif /* CONFIG_PM */
gpl-2.0
dbones/linux
drivers/mfd/ssbi.c
1388
8090
/* Copyright (c) 2009-2013, The Linux Foundation. All rights reserved. * Copyright (c) 2010, Google Inc. * * Original authors: Code Aurora Forum * * Author: Dima Zavin <dima@android.com> * - Largely rewritten from original to not be an i2c driver. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/delay.h> #include <linux/err.h> #include <linux/io.h> #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/ssbi.h> #include <linux/module.h> #include <linux/of.h> #include <linux/of_device.h> /* SSBI 2.0 controller registers */ #define SSBI2_CMD 0x0008 #define SSBI2_RD 0x0010 #define SSBI2_STATUS 0x0014 #define SSBI2_MODE2 0x001C /* SSBI_CMD fields */ #define SSBI_CMD_RDWRN (1 << 24) /* SSBI_STATUS fields */ #define SSBI_STATUS_RD_READY (1 << 2) #define SSBI_STATUS_READY (1 << 1) #define SSBI_STATUS_MCHN_BUSY (1 << 0) /* SSBI_MODE2 fields */ #define SSBI_MODE2_REG_ADDR_15_8_SHFT 0x04 #define SSBI_MODE2_REG_ADDR_15_8_MASK (0x7f << SSBI_MODE2_REG_ADDR_15_8_SHFT) #define SET_SSBI_MODE2_REG_ADDR_15_8(MD, AD) \ (((MD) & 0x0F) | ((((AD) >> 8) << SSBI_MODE2_REG_ADDR_15_8_SHFT) & \ SSBI_MODE2_REG_ADDR_15_8_MASK)) /* SSBI PMIC Arbiter command registers */ #define SSBI_PA_CMD 0x0000 #define SSBI_PA_RD_STATUS 0x0004 /* SSBI_PA_CMD fields */ #define SSBI_PA_CMD_RDWRN (1 << 24) #define SSBI_PA_CMD_ADDR_MASK 0x7fff /* REG_ADDR_7_0, REG_ADDR_8_14*/ /* SSBI_PA_RD_STATUS fields */ #define SSBI_PA_RD_STATUS_TRANS_DONE (1 << 27) #define SSBI_PA_RD_STATUS_TRANS_DENIED (1 << 26) #define 
SSBI_TIMEOUT_US 100 enum ssbi_controller_type { MSM_SBI_CTRL_SSBI = 0, MSM_SBI_CTRL_SSBI2, MSM_SBI_CTRL_PMIC_ARBITER, }; struct ssbi { struct device *slave; void __iomem *base; spinlock_t lock; enum ssbi_controller_type controller_type; int (*read)(struct ssbi *, u16 addr, u8 *buf, int len); int (*write)(struct ssbi *, u16 addr, const u8 *buf, int len); }; #define to_ssbi(dev) platform_get_drvdata(to_platform_device(dev)) static inline u32 ssbi_readl(struct ssbi *ssbi, u32 reg) { return readl(ssbi->base + reg); } static inline void ssbi_writel(struct ssbi *ssbi, u32 val, u32 reg) { writel(val, ssbi->base + reg); } /* * Via private exchange with one of the original authors, the hardware * should generally finish a transaction in about 5us. The worst * case, is when using the arbiter and both other CPUs have just * started trying to use the SSBI bus will result in a time of about * 20us. It should never take longer than this. * * As such, this wait merely spins, with a udelay. */ static int ssbi_wait_mask(struct ssbi *ssbi, u32 set_mask, u32 clr_mask) { u32 timeout = SSBI_TIMEOUT_US; u32 val; while (timeout--) { val = ssbi_readl(ssbi, SSBI2_STATUS); if (((val & set_mask) == set_mask) && ((val & clr_mask) == 0)) return 0; udelay(1); } return -ETIMEDOUT; } static int ssbi_read_bytes(struct ssbi *ssbi, u16 addr, u8 *buf, int len) { u32 cmd = SSBI_CMD_RDWRN | ((addr & 0xff) << 16); int ret = 0; if (ssbi->controller_type == MSM_SBI_CTRL_SSBI2) { u32 mode2 = ssbi_readl(ssbi, SSBI2_MODE2); mode2 = SET_SSBI_MODE2_REG_ADDR_15_8(mode2, addr); ssbi_writel(ssbi, mode2, SSBI2_MODE2); } while (len) { ret = ssbi_wait_mask(ssbi, SSBI_STATUS_READY, 0); if (ret) goto err; ssbi_writel(ssbi, cmd, SSBI2_CMD); ret = ssbi_wait_mask(ssbi, SSBI_STATUS_RD_READY, 0); if (ret) goto err; *buf++ = ssbi_readl(ssbi, SSBI2_RD) & 0xff; len--; } err: return ret; } static int ssbi_write_bytes(struct ssbi *ssbi, u16 addr, const u8 *buf, int len) { int ret = 0; if (ssbi->controller_type == 
MSM_SBI_CTRL_SSBI2) { u32 mode2 = ssbi_readl(ssbi, SSBI2_MODE2); mode2 = SET_SSBI_MODE2_REG_ADDR_15_8(mode2, addr); ssbi_writel(ssbi, mode2, SSBI2_MODE2); } while (len) { ret = ssbi_wait_mask(ssbi, SSBI_STATUS_READY, 0); if (ret) goto err; ssbi_writel(ssbi, ((addr & 0xff) << 16) | *buf, SSBI2_CMD); ret = ssbi_wait_mask(ssbi, 0, SSBI_STATUS_MCHN_BUSY); if (ret) goto err; buf++; len--; } err: return ret; } /* * See ssbi_wait_mask for an explanation of the time and the * busywait. */ static inline int ssbi_pa_transfer(struct ssbi *ssbi, u32 cmd, u8 *data) { u32 timeout = SSBI_TIMEOUT_US; u32 rd_status = 0; ssbi_writel(ssbi, cmd, SSBI_PA_CMD); while (timeout--) { rd_status = ssbi_readl(ssbi, SSBI_PA_RD_STATUS); if (rd_status & SSBI_PA_RD_STATUS_TRANS_DENIED) return -EPERM; if (rd_status & SSBI_PA_RD_STATUS_TRANS_DONE) { if (data) *data = rd_status & 0xff; return 0; } udelay(1); } return -ETIMEDOUT; } static int ssbi_pa_read_bytes(struct ssbi *ssbi, u16 addr, u8 *buf, int len) { u32 cmd; int ret = 0; cmd = SSBI_PA_CMD_RDWRN | (addr & SSBI_PA_CMD_ADDR_MASK) << 8; while (len) { ret = ssbi_pa_transfer(ssbi, cmd, buf); if (ret) goto err; buf++; len--; } err: return ret; } static int ssbi_pa_write_bytes(struct ssbi *ssbi, u16 addr, const u8 *buf, int len) { u32 cmd; int ret = 0; while (len) { cmd = (addr & SSBI_PA_CMD_ADDR_MASK) << 8 | *buf; ret = ssbi_pa_transfer(ssbi, cmd, NULL); if (ret) goto err; buf++; len--; } err: return ret; } int ssbi_read(struct device *dev, u16 addr, u8 *buf, int len) { struct ssbi *ssbi = to_ssbi(dev); unsigned long flags; int ret; spin_lock_irqsave(&ssbi->lock, flags); ret = ssbi->read(ssbi, addr, buf, len); spin_unlock_irqrestore(&ssbi->lock, flags); return ret; } EXPORT_SYMBOL_GPL(ssbi_read); int ssbi_write(struct device *dev, u16 addr, const u8 *buf, int len) { struct ssbi *ssbi = to_ssbi(dev); unsigned long flags; int ret; spin_lock_irqsave(&ssbi->lock, flags); ret = ssbi->write(ssbi, addr, buf, len); spin_unlock_irqrestore(&ssbi->lock, 
flags); return ret; } EXPORT_SYMBOL_GPL(ssbi_write); static int ssbi_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; struct resource *mem_res; struct ssbi *ssbi; const char *type; ssbi = devm_kzalloc(&pdev->dev, sizeof(*ssbi), GFP_KERNEL); if (!ssbi) return -ENOMEM; mem_res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ssbi->base = devm_ioremap_resource(&pdev->dev, mem_res); if (IS_ERR(ssbi->base)) return PTR_ERR(ssbi->base); platform_set_drvdata(pdev, ssbi); type = of_get_property(np, "qcom,controller-type", NULL); if (type == NULL) { dev_err(&pdev->dev, "Missing qcom,controller-type property\n"); return -EINVAL; } dev_info(&pdev->dev, "SSBI controller type: '%s'\n", type); if (strcmp(type, "ssbi") == 0) ssbi->controller_type = MSM_SBI_CTRL_SSBI; else if (strcmp(type, "ssbi2") == 0) ssbi->controller_type = MSM_SBI_CTRL_SSBI2; else if (strcmp(type, "pmic-arbiter") == 0) ssbi->controller_type = MSM_SBI_CTRL_PMIC_ARBITER; else { dev_err(&pdev->dev, "Unknown qcom,controller-type\n"); return -EINVAL; } if (ssbi->controller_type == MSM_SBI_CTRL_PMIC_ARBITER) { ssbi->read = ssbi_pa_read_bytes; ssbi->write = ssbi_pa_write_bytes; } else { ssbi->read = ssbi_read_bytes; ssbi->write = ssbi_write_bytes; } spin_lock_init(&ssbi->lock); return of_platform_populate(np, NULL, NULL, &pdev->dev); } static const struct of_device_id ssbi_match_table[] = { { .compatible = "qcom,ssbi" }, {} }; MODULE_DEVICE_TABLE(of, ssbi_match_table); static struct platform_driver ssbi_driver = { .probe = ssbi_probe, .driver = { .name = "ssbi", .of_match_table = ssbi_match_table, }, }; module_platform_driver(ssbi_driver); MODULE_LICENSE("GPL v2"); MODULE_VERSION("1.0"); MODULE_ALIAS("platform:ssbi"); MODULE_AUTHOR("Dima Zavin <dima@android.com>");
gpl-2.0
ea4862/boeffla41
arch/arm/mach-msm/gpio-v2.c
1644
12453
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/bitmap.h> #include <linux/bitops.h> #include <linux/gpio.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #include <asm/mach/irq.h> #include <mach/msm_iomap.h> #include "gpiomux.h" /* Bits of interest in the GPIO_IN_OUT register. */ enum { GPIO_IN = 0, GPIO_OUT = 1 }; /* Bits of interest in the GPIO_INTR_STATUS register. */ enum { INTR_STATUS = 0, }; /* Bits of interest in the GPIO_CFG register. */ enum { GPIO_OE = 9, }; /* Bits of interest in the GPIO_INTR_CFG register. * When a GPIO triggers, two separate decisions are made, controlled * by two separate flags. * * - First, INTR_RAW_STATUS_EN controls whether or not the GPIO_INTR_STATUS * register for that GPIO will be updated to reflect the triggering of that * gpio. If this bit is 0, this register will not be updated. * - Second, INTR_ENABLE controls whether an interrupt is triggered. * * If INTR_ENABLE is set and INTR_RAW_STATUS_EN is NOT set, an interrupt * can be triggered but the status register will not reflect it. 
*/ enum { INTR_ENABLE = 0, INTR_POL_CTL = 1, INTR_DECT_CTL = 2, INTR_RAW_STATUS_EN = 3, }; /* Codes of interest in GPIO_INTR_CFG_SU. */ enum { TARGET_PROC_SCORPION = 4, TARGET_PROC_NONE = 7, }; #define GPIO_INTR_CFG_SU(gpio) (MSM_TLMM_BASE + 0x0400 + (0x04 * (gpio))) #define GPIO_CONFIG(gpio) (MSM_TLMM_BASE + 0x1000 + (0x10 * (gpio))) #define GPIO_IN_OUT(gpio) (MSM_TLMM_BASE + 0x1004 + (0x10 * (gpio))) #define GPIO_INTR_CFG(gpio) (MSM_TLMM_BASE + 0x1008 + (0x10 * (gpio))) #define GPIO_INTR_STATUS(gpio) (MSM_TLMM_BASE + 0x100c + (0x10 * (gpio))) /** * struct msm_gpio_dev: the MSM8660 SoC GPIO device structure * * @enabled_irqs: a bitmap used to optimize the summary-irq handler. By * keeping track of which gpios are unmasked as irq sources, we avoid * having to do readl calls on hundreds of iomapped registers each time * the summary interrupt fires in order to locate the active interrupts. * * @wake_irqs: a bitmap for tracking which interrupt lines are enabled * as wakeup sources. When the device is suspended, interrupts which are * not wakeup sources are disabled. * * @dual_edge_irqs: a bitmap used to track which irqs are configured * as dual-edge, as this is not supported by the hardware and requires * some special handling in the driver. 
*/ struct msm_gpio_dev { struct gpio_chip gpio_chip; DECLARE_BITMAP(enabled_irqs, NR_GPIO_IRQS); DECLARE_BITMAP(wake_irqs, NR_GPIO_IRQS); DECLARE_BITMAP(dual_edge_irqs, NR_GPIO_IRQS); }; static DEFINE_SPINLOCK(tlmm_lock); static inline struct msm_gpio_dev *to_msm_gpio_dev(struct gpio_chip *chip) { return container_of(chip, struct msm_gpio_dev, gpio_chip); } static inline void set_gpio_bits(unsigned n, void __iomem *reg) { writel(readl(reg) | n, reg); } static inline void clear_gpio_bits(unsigned n, void __iomem *reg) { writel(readl(reg) & ~n, reg); } static int msm_gpio_get(struct gpio_chip *chip, unsigned offset) { return readl(GPIO_IN_OUT(offset)) & BIT(GPIO_IN); } static void msm_gpio_set(struct gpio_chip *chip, unsigned offset, int val) { writel(val ? BIT(GPIO_OUT) : 0, GPIO_IN_OUT(offset)); } static int msm_gpio_direction_input(struct gpio_chip *chip, unsigned offset) { unsigned long irq_flags; spin_lock_irqsave(&tlmm_lock, irq_flags); clear_gpio_bits(BIT(GPIO_OE), GPIO_CONFIG(offset)); spin_unlock_irqrestore(&tlmm_lock, irq_flags); return 0; } static int msm_gpio_direction_output(struct gpio_chip *chip, unsigned offset, int val) { unsigned long irq_flags; spin_lock_irqsave(&tlmm_lock, irq_flags); msm_gpio_set(chip, offset, val); set_gpio_bits(BIT(GPIO_OE), GPIO_CONFIG(offset)); spin_unlock_irqrestore(&tlmm_lock, irq_flags); return 0; } static int msm_gpio_request(struct gpio_chip *chip, unsigned offset) { return msm_gpiomux_get(chip->base + offset); } static void msm_gpio_free(struct gpio_chip *chip, unsigned offset) { msm_gpiomux_put(chip->base + offset); } static int msm_gpio_to_irq(struct gpio_chip *chip, unsigned offset) { return MSM_GPIO_TO_INT(chip->base + offset); } static inline int msm_irq_to_gpio(struct gpio_chip *chip, unsigned irq) { return irq - MSM_GPIO_TO_INT(chip->base); } static struct msm_gpio_dev msm_gpio = { .gpio_chip = { .base = 0, .ngpio = NR_GPIO_IRQS, .direction_input = msm_gpio_direction_input, .direction_output = 
msm_gpio_direction_output, .get = msm_gpio_get, .set = msm_gpio_set, .to_irq = msm_gpio_to_irq, .request = msm_gpio_request, .free = msm_gpio_free, }, }; /* For dual-edge interrupts in software, since the hardware has no * such support: * * At appropriate moments, this function may be called to flip the polarity * settings of both-edge irq lines to try and catch the next edge. * * The attempt is considered successful if: * - the status bit goes high, indicating that an edge was caught, or * - the input value of the gpio doesn't change during the attempt. * If the value changes twice during the process, that would cause the first * test to fail but would force the second, as two opposite * transitions would cause a detection no matter the polarity setting. * * The do-loop tries to sledge-hammer closed the timing hole between * the initial value-read and the polarity-write - if the line value changes * during that window, an interrupt is lost, the new polarity setting is * incorrect, and the first success test will fail, causing a retry. * * Algorithm comes from Google's msmgpio driver, see mach-msm/gpio.c. */ static void msm_gpio_update_dual_edge_pos(unsigned gpio) { int loop_limit = 100; unsigned val, val2, intstat; do { val = readl(GPIO_IN_OUT(gpio)) & BIT(GPIO_IN); if (val) clear_gpio_bits(BIT(INTR_POL_CTL), GPIO_INTR_CFG(gpio)); else set_gpio_bits(BIT(INTR_POL_CTL), GPIO_INTR_CFG(gpio)); val2 = readl(GPIO_IN_OUT(gpio)) & BIT(GPIO_IN); intstat = readl(GPIO_INTR_STATUS(gpio)) & BIT(INTR_STATUS); if (intstat || val == val2) return; } while (loop_limit-- > 0); pr_err("dual-edge irq failed to stabilize, " "interrupts dropped. 
%#08x != %#08x\n", val, val2); } static void msm_gpio_irq_ack(struct irq_data *d) { int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); writel(BIT(INTR_STATUS), GPIO_INTR_STATUS(gpio)); if (test_bit(gpio, msm_gpio.dual_edge_irqs)) msm_gpio_update_dual_edge_pos(gpio); } static void msm_gpio_irq_mask(struct irq_data *d) { int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); unsigned long irq_flags; spin_lock_irqsave(&tlmm_lock, irq_flags); writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio)); clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio)); __clear_bit(gpio, msm_gpio.enabled_irqs); spin_unlock_irqrestore(&tlmm_lock, irq_flags); } static void msm_gpio_irq_unmask(struct irq_data *d) { int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); unsigned long irq_flags; spin_lock_irqsave(&tlmm_lock, irq_flags); __set_bit(gpio, msm_gpio.enabled_irqs); set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio)); writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio)); spin_unlock_irqrestore(&tlmm_lock, irq_flags); } static int msm_gpio_irq_set_type(struct irq_data *d, unsigned int flow_type) { int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); unsigned long irq_flags; uint32_t bits; spin_lock_irqsave(&tlmm_lock, irq_flags); bits = readl(GPIO_INTR_CFG(gpio)); if (flow_type & IRQ_TYPE_EDGE_BOTH) { bits |= BIT(INTR_DECT_CTL); __irq_set_handler_locked(d->irq, handle_edge_irq); if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) __set_bit(gpio, msm_gpio.dual_edge_irqs); else __clear_bit(gpio, msm_gpio.dual_edge_irqs); } else { bits &= ~BIT(INTR_DECT_CTL); __irq_set_handler_locked(d->irq, handle_level_irq); __clear_bit(gpio, msm_gpio.dual_edge_irqs); } if (flow_type & (IRQ_TYPE_EDGE_RISING | IRQ_TYPE_LEVEL_HIGH)) bits |= BIT(INTR_POL_CTL); else bits &= ~BIT(INTR_POL_CTL); writel(bits, GPIO_INTR_CFG(gpio)); if ((flow_type & IRQ_TYPE_EDGE_BOTH) == IRQ_TYPE_EDGE_BOTH) msm_gpio_update_dual_edge_pos(gpio); 
spin_unlock_irqrestore(&tlmm_lock, irq_flags); return 0; } /* * When the summary IRQ is raised, any number of GPIO lines may be high. * It is the job of the summary handler to find all those GPIO lines * which have been set as summary IRQ lines and which are triggered, * and to call their interrupt handlers. */ static void msm_summary_irq_handler(unsigned int irq, struct irq_desc *desc) { unsigned long i; struct irq_chip *chip = irq_desc_get_chip(desc); chained_irq_enter(chip, desc); for (i = find_first_bit(msm_gpio.enabled_irqs, NR_GPIO_IRQS); i < NR_GPIO_IRQS; i = find_next_bit(msm_gpio.enabled_irqs, NR_GPIO_IRQS, i + 1)) { if (readl(GPIO_INTR_STATUS(i)) & BIT(INTR_STATUS)) generic_handle_irq(msm_gpio_to_irq(&msm_gpio.gpio_chip, i)); } chained_irq_exit(chip, desc); } static int msm_gpio_irq_set_wake(struct irq_data *d, unsigned int on) { int gpio = msm_irq_to_gpio(&msm_gpio.gpio_chip, d->irq); if (on) { if (bitmap_empty(msm_gpio.wake_irqs, NR_GPIO_IRQS)) irq_set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 1); set_bit(gpio, msm_gpio.wake_irqs); } else { clear_bit(gpio, msm_gpio.wake_irqs); if (bitmap_empty(msm_gpio.wake_irqs, NR_GPIO_IRQS)) irq_set_irq_wake(TLMM_SCSS_SUMMARY_IRQ, 0); } return 0; } static struct irq_chip msm_gpio_irq_chip = { .name = "msmgpio", .irq_mask = msm_gpio_irq_mask, .irq_unmask = msm_gpio_irq_unmask, .irq_ack = msm_gpio_irq_ack, .irq_set_type = msm_gpio_irq_set_type, .irq_set_wake = msm_gpio_irq_set_wake, }; static int __devinit msm_gpio_probe(struct platform_device *dev) { int i, irq, ret; bitmap_zero(msm_gpio.enabled_irqs, NR_GPIO_IRQS); bitmap_zero(msm_gpio.wake_irqs, NR_GPIO_IRQS); bitmap_zero(msm_gpio.dual_edge_irqs, NR_GPIO_IRQS); msm_gpio.gpio_chip.label = dev->name; ret = gpiochip_add(&msm_gpio.gpio_chip); if (ret < 0) return ret; for (i = 0; i < msm_gpio.gpio_chip.ngpio; ++i) { irq = msm_gpio_to_irq(&msm_gpio.gpio_chip, i); irq_set_chip_and_handler(irq, &msm_gpio_irq_chip, handle_level_irq); set_irq_flags(irq, IRQF_VALID); } 
irq_set_chained_handler(TLMM_SCSS_SUMMARY_IRQ, msm_summary_irq_handler); return 0; } static int __devexit msm_gpio_remove(struct platform_device *dev) { int ret = gpiochip_remove(&msm_gpio.gpio_chip); if (ret < 0) return ret; irq_set_handler(TLMM_SCSS_SUMMARY_IRQ, NULL); return 0; } static struct platform_driver msm_gpio_driver = { .probe = msm_gpio_probe, .remove = __devexit_p(msm_gpio_remove), .driver = { .name = "msmgpio", .owner = THIS_MODULE, }, }; static struct platform_device msm_device_gpio = { .name = "msmgpio", .id = -1, }; static int __init msm_gpio_init(void) { int rc; rc = platform_driver_register(&msm_gpio_driver); if (!rc) { rc = platform_device_register(&msm_device_gpio); if (rc) platform_driver_unregister(&msm_gpio_driver); } return rc; } static void __exit msm_gpio_exit(void) { platform_device_unregister(&msm_device_gpio); platform_driver_unregister(&msm_gpio_driver); } postcore_initcall(msm_gpio_init); module_exit(msm_gpio_exit); MODULE_AUTHOR("Gregory Bean <gbean@codeaurora.org>"); MODULE_DESCRIPTION("Driver for Qualcomm MSM TLMMv2 SoC GPIOs"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("platform:msmgpio");
gpl-2.0
lzh6710/ubuntu-trusty
arch/arm/mach-footbridge/dc21285.c
1900
9033
/* * linux/arch/arm/kernel/dec21285.c: PCI functions for DC21285 * * Copyright (C) 1998-2001 Russell King * Copyright (C) 1998-2000 Phil Blundell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/ioport.h> #include <linux/irq.h> #include <linux/io.h> #include <linux/spinlock.h> #include <asm/irq.h> #include <asm/mach/pci.h> #include <asm/hardware/dec21285.h> #define MAX_SLOTS 21 #define PCICMD_ABORT ((PCI_STATUS_REC_MASTER_ABORT| \ PCI_STATUS_REC_TARGET_ABORT)<<16) #define PCICMD_ERROR_BITS ((PCI_STATUS_DETECTED_PARITY | \ PCI_STATUS_REC_MASTER_ABORT | \ PCI_STATUS_REC_TARGET_ABORT | \ PCI_STATUS_PARITY) << 16) extern int setup_arm_irq(int, struct irqaction *); extern void pcibios_report_status(u_int status_mask, int warn); static unsigned long dc21285_base_address(struct pci_bus *bus, unsigned int devfn) { unsigned long addr = 0; if (bus->number == 0) { if (PCI_SLOT(devfn) == 0) /* * For devfn 0, point at the 21285 */ addr = ARMCSR_BASE; else { devfn -= 1 << 3; if (devfn < PCI_DEVFN(MAX_SLOTS, 0)) addr = PCICFG0_BASE | 0xc00000 | (devfn << 8); } } else addr = PCICFG1_BASE | (bus->number << 16) | (devfn << 8); return addr; } static int dc21285_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *value) { unsigned long addr = dc21285_base_address(bus, devfn); u32 v = 0xffffffff; if (addr) switch (size) { case 1: asm("ldrb %0, [%1, %2]" : "=r" (v) : "r" (addr), "r" (where) : "cc"); break; case 2: asm("ldrh %0, [%1, %2]" : "=r" (v) : "r" (addr), "r" (where) : "cc"); break; case 4: asm("ldr %0, [%1, %2]" : "=r" (v) : "r" (addr), "r" (where) : "cc"); break; } *value = v; v = *CSR_PCICMD; if (v & PCICMD_ABORT) { *CSR_PCICMD = v & 
(0xffff|PCICMD_ABORT); return -1; } return PCIBIOS_SUCCESSFUL; } static int dc21285_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 value) { unsigned long addr = dc21285_base_address(bus, devfn); u32 v; if (addr) switch (size) { case 1: asm("strb %0, [%1, %2]" : : "r" (value), "r" (addr), "r" (where) : "cc"); break; case 2: asm("strh %0, [%1, %2]" : : "r" (value), "r" (addr), "r" (where) : "cc"); break; case 4: asm("str %0, [%1, %2]" : : "r" (value), "r" (addr), "r" (where) : "cc"); break; } v = *CSR_PCICMD; if (v & PCICMD_ABORT) { *CSR_PCICMD = v & (0xffff|PCICMD_ABORT); return -1; } return PCIBIOS_SUCCESSFUL; } struct pci_ops dc21285_ops = { .read = dc21285_read_config, .write = dc21285_write_config, }; static struct timer_list serr_timer; static struct timer_list perr_timer; static void dc21285_enable_error(unsigned long __data) { switch (__data) { case IRQ_PCI_SERR: del_timer(&serr_timer); break; case IRQ_PCI_PERR: del_timer(&perr_timer); break; } enable_irq(__data); } /* * Warn on PCI errors. 
*/ static irqreturn_t dc21285_abort_irq(int irq, void *dev_id) { unsigned int cmd; unsigned int status; cmd = *CSR_PCICMD; status = cmd >> 16; cmd = cmd & 0xffff; if (status & PCI_STATUS_REC_MASTER_ABORT) { printk(KERN_DEBUG "PCI: master abort, pc=0x%08lx\n", instruction_pointer(get_irq_regs())); cmd |= PCI_STATUS_REC_MASTER_ABORT << 16; } if (status & PCI_STATUS_REC_TARGET_ABORT) { printk(KERN_DEBUG "PCI: target abort: "); pcibios_report_status(PCI_STATUS_REC_MASTER_ABORT | PCI_STATUS_SIG_TARGET_ABORT | PCI_STATUS_REC_TARGET_ABORT, 1); printk("\n"); cmd |= PCI_STATUS_REC_TARGET_ABORT << 16; } *CSR_PCICMD = cmd; return IRQ_HANDLED; } static irqreturn_t dc21285_serr_irq(int irq, void *dev_id) { struct timer_list *timer = dev_id; unsigned int cntl; printk(KERN_DEBUG "PCI: system error received: "); pcibios_report_status(PCI_STATUS_SIG_SYSTEM_ERROR, 1); printk("\n"); cntl = *CSR_SA110_CNTL & 0xffffdf07; *CSR_SA110_CNTL = cntl | SA110_CNTL_RXSERR; /* * back off this interrupt */ disable_irq(irq); timer->expires = jiffies + HZ; add_timer(timer); return IRQ_HANDLED; } static irqreturn_t dc21285_discard_irq(int irq, void *dev_id) { printk(KERN_DEBUG "PCI: discard timer expired\n"); *CSR_SA110_CNTL &= 0xffffde07; return IRQ_HANDLED; } static irqreturn_t dc21285_dparity_irq(int irq, void *dev_id) { unsigned int cmd; printk(KERN_DEBUG "PCI: data parity error detected: "); pcibios_report_status(PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY, 1); printk("\n"); cmd = *CSR_PCICMD & 0xffff; *CSR_PCICMD = cmd | 1 << 24; return IRQ_HANDLED; } static irqreturn_t dc21285_parity_irq(int irq, void *dev_id) { struct timer_list *timer = dev_id; unsigned int cmd; printk(KERN_DEBUG "PCI: parity error detected: "); pcibios_report_status(PCI_STATUS_PARITY | PCI_STATUS_DETECTED_PARITY, 1); printk("\n"); cmd = *CSR_PCICMD & 0xffff; *CSR_PCICMD = cmd | 1 << 31; /* * back off this interrupt */ disable_irq(irq); timer->expires = jiffies + HZ; add_timer(timer); return IRQ_HANDLED; } int __init 
dc21285_setup(int nr, struct pci_sys_data *sys) { struct resource *res; if (nr || !footbridge_cfn_mode()) return 0; res = kzalloc(sizeof(struct resource) * 2, GFP_KERNEL); if (!res) { printk("out of memory for root bus resources"); return 0; } res[0].flags = IORESOURCE_MEM; res[0].name = "Footbridge non-prefetch"; res[1].flags = IORESOURCE_MEM | IORESOURCE_PREFETCH; res[1].name = "Footbridge prefetch"; allocate_resource(&iomem_resource, &res[1], 0x20000000, 0xa0000000, 0xffffffff, 0x20000000, NULL, NULL); allocate_resource(&iomem_resource, &res[0], 0x40000000, 0x80000000, 0xffffffff, 0x40000000, NULL, NULL); sys->mem_offset = DC21285_PCI_MEM; pci_add_resource_offset(&sys->resources, &res[0], sys->mem_offset); pci_add_resource_offset(&sys->resources, &res[1], sys->mem_offset); return 1; } #define dc21285_request_irq(_a, _b, _c, _d, _e) \ WARN_ON(request_irq(_a, _b, _c, _d, _e) < 0) void __init dc21285_preinit(void) { unsigned int mem_size, mem_mask; int cfn_mode; pcibios_min_mem = 0x81000000; mem_size = (unsigned int)high_memory - PAGE_OFFSET; for (mem_mask = 0x00100000; mem_mask < 0x10000000; mem_mask <<= 1) if (mem_mask >= mem_size) break; /* * These registers need to be set up whether we're the * central function or not. */ *CSR_SDRAMBASEMASK = (mem_mask - 1) & 0x0ffc0000; *CSR_SDRAMBASEOFFSET = 0; *CSR_ROMBASEMASK = 0x80000000; *CSR_CSRBASEMASK = 0; *CSR_CSRBASEOFFSET = 0; *CSR_PCIADDR_EXTN = 0; cfn_mode = __footbridge_cfn_mode(); printk(KERN_INFO "PCI: DC21285 footbridge, revision %02lX, in " "%s mode\n", *CSR_CLASSREV & 0xff, cfn_mode ? "central function" : "addin"); if (footbridge_cfn_mode()) { /* * Clear any existing errors - we aren't * interested in historical data... 
*/ *CSR_SA110_CNTL = (*CSR_SA110_CNTL & 0xffffde07) | SA110_CNTL_RXSERR; *CSR_PCICMD = (*CSR_PCICMD & 0xffff) | PCICMD_ERROR_BITS; } init_timer(&serr_timer); init_timer(&perr_timer); serr_timer.data = IRQ_PCI_SERR; serr_timer.function = dc21285_enable_error; perr_timer.data = IRQ_PCI_PERR; perr_timer.function = dc21285_enable_error; /* * We don't care if these fail. */ dc21285_request_irq(IRQ_PCI_SERR, dc21285_serr_irq, 0, "PCI system error", &serr_timer); dc21285_request_irq(IRQ_PCI_PERR, dc21285_parity_irq, 0, "PCI parity error", &perr_timer); dc21285_request_irq(IRQ_PCI_ABORT, dc21285_abort_irq, 0, "PCI abort", NULL); dc21285_request_irq(IRQ_DISCARD_TIMER, dc21285_discard_irq, 0, "Discard timer", NULL); dc21285_request_irq(IRQ_PCI_DPERR, dc21285_dparity_irq, 0, "PCI data parity", NULL); if (cfn_mode) { /* * Map our SDRAM at a known address in PCI space, just in case * the firmware had other ideas. Using a nonzero base is * necessary, since some VGA cards forcefully use PCI addresses * in the range 0x000a0000 to 0x000c0000. (eg, S3 cards). */ *CSR_PCICSRBASE = 0xf4000000; *CSR_PCICSRIOBASE = 0; *CSR_PCISDRAMBASE = __virt_to_bus(PAGE_OFFSET); *CSR_PCIROMBASE = 0; *CSR_PCICMD = PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER | PCI_COMMAND_INVALIDATE | PCICMD_ERROR_BITS; } else if (footbridge_cfn_mode() != 0) { /* * If we are not compiled to accept "add-in" mode, then * we are using a constant virt_to_bus translation which * can not hope to cater for the way the host BIOS has * set up the machine. */ panic("PCI: this kernel is compiled for central " "function mode only"); } } void __init dc21285_postinit(void) { register_isa_ports(DC21285_PCI_MEM, DC21285_PCI_IO, 0); }
gpl-2.0
idl3r/P8000-Kernel
arch/powerpc/kernel/legacy_serial.c
2156
17729
#include <linux/kernel.h>
#include <linux/serial.h>
#include <linux/serial_8250.h>
#include <linux/serial_core.h>
#include <linux/console.h>
#include <linux/pci.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/serial_reg.h>
#include <asm/io.h>
#include <asm/mmu.h>
#include <asm/prom.h>
#include <asm/serial.h>
#include <asm/udbg.h>
#include <asm/pci-bridge.h>
#include <asm/ppc-pci.h>

#undef DEBUG

#ifdef DEBUG
#define DBG(fmt...) do { printk(fmt); } while(0)
#else
#define DBG(fmt...) do { } while(0)
#endif

#define MAX_LEGACY_SERIAL_PORTS 8

/* Platform data handed to the 8250 driver; one extra zeroed terminator. */
static struct plat_serial8250_port
legacy_serial_ports[MAX_LEGACY_SERIAL_PORTS+1];

/* Book-keeping the 8250 driver does not need: DT node, probed speed/clock,
 * whether the parent node should be consulted for the IRQ, and the
 * translated address used for the early udbg console. */
static struct legacy_serial_info {
	struct device_node *np;
	unsigned int speed;
	unsigned int clock;
	int irq_check_parent;
	phys_addr_t taddr;
} legacy_serial_infos[MAX_LEGACY_SERIAL_PORTS];

/* Bus/bridge nodes under which a ns16550 is considered a "SoC" port.
 * NOTE(review): __initdata placed between 'struct' and the tag is unusual;
 * later kernels use __initconst after the initializer -- confirm this
 * compiles cleanly on this tree's toolchain. */
static struct __initdata of_device_id legacy_serial_parents[] = {
	{.type = "soc",},
	{.type = "tsi-bridge",},
	{.type = "opb", },
	{.compatible = "ibm,opb",},
	{.compatible = "simple-bus",},
	{.compatible = "wrs,epld-localbus",},
	{},
};

static unsigned int legacy_serial_count;
/* Index into legacy_serial_ports[] of the firmware stdout, or -1. */
static int legacy_serial_console = -1;

/* Register read accessor for Tsi108/109 bridges, whose UART_IIR register
 * must be read 32-bit wide at an aligned offset. */
static unsigned int tsi_serial_in(struct uart_port *p, int offset)
{
	unsigned int tmp;
	offset = offset << p->regshift;
	if (offset == UART_IIR) {
		tmp = readl(p->membase + (UART_IIR & ~3));
		return (tmp >> 16) & 0xff; /* UART_IIR % 4 == 2 */
	} else
		return readb(p->membase + offset);
}

/* Register write accessor for Tsi bridges; suppresses writes that would
 * set UART_IER_UUE in the IER register. */
static void tsi_serial_out(struct uart_port *p, int offset, int value)
{
	offset = offset << p->regshift;
	if (!((offset == UART_IER) && (value & UART_IER_UUE)))
		writeb(value, p->membase + offset);
}

/*
 * Record one discovered port in legacy_serial_ports/_infos.
 * want_index requests a fixed slot (e.g. from ibm,aix-loc ordering);
 * -1 means "next free".  Returns the slot used, or -1 if the table is
 * full.  An occupied requested slot is moved to the end if room remains,
 * otherwise overridden.
 */
static int __init add_legacy_port(struct device_node *np, int want_index,
				  int iotype, phys_addr_t base,
				  phys_addr_t taddr, unsigned long irq,
				  upf_t flags, int irq_check_parent)
{
	const __be32 *clk, *spd;
	u32 clock = BASE_BAUD * 16;
	int index;

	/* get clock freq. if present */
	clk = of_get_property(np, "clock-frequency", NULL);
	if (clk && *clk)
		clock = be32_to_cpup(clk);

	/* get default speed if present */
	spd = of_get_property(np, "current-speed", NULL);

	/* If we have a location index, then try to use it */
	if (want_index >= 0 && want_index < MAX_LEGACY_SERIAL_PORTS)
		index = want_index;
	else
		index = legacy_serial_count;

	/* if our index is still out of range, that mean that
	 * array is full, we could scan for a free slot but that
	 * make little sense to bother, just skip the port
	 */
	if (index >= MAX_LEGACY_SERIAL_PORTS)
		return -1;
	if (index >= legacy_serial_count)
		legacy_serial_count = index + 1;

	/* Check if there is a port who already claimed our slot */
	if (legacy_serial_infos[index].np != 0) {
		/* if we still have some room, move it, else override */
		if (legacy_serial_count < MAX_LEGACY_SERIAL_PORTS) {
			printk(KERN_DEBUG "Moved legacy port %d -> %d\n",
			       index, legacy_serial_count);
			legacy_serial_ports[legacy_serial_count] =
				legacy_serial_ports[index];
			legacy_serial_infos[legacy_serial_count] =
				legacy_serial_infos[index];
			legacy_serial_count++;
		} else {
			printk(KERN_DEBUG "Replacing legacy port %d\n", index);
		}
	}

	/* Now fill the entry */
	memset(&legacy_serial_ports[index], 0,
	       sizeof(struct plat_serial8250_port));
	if (iotype == UPIO_PORT)
		legacy_serial_ports[index].iobase = base;
	else
		legacy_serial_ports[index].mapbase = base;
	legacy_serial_ports[index].iotype = iotype;
	legacy_serial_ports[index].uartclk = clock;
	legacy_serial_ports[index].irq = irq;
	legacy_serial_ports[index].flags = flags;
	legacy_serial_infos[index].taddr = taddr;
	/* Hold a reference to the node for the lifetime of the slot. */
	legacy_serial_infos[index].np = of_node_get(np);
	legacy_serial_infos[index].clock = clock;
	legacy_serial_infos[index].speed = spd ? be32_to_cpup(spd) : 0;
	legacy_serial_infos[index].irq_check_parent = irq_check_parent;

	if (iotype == UPIO_TSI) {
		legacy_serial_ports[index].serial_in = tsi_serial_in;
		legacy_serial_ports[index].serial_out = tsi_serial_out;
	}

	printk(KERN_DEBUG "Found legacy serial port %d for %s\n",
	       index, np->full_name);
	printk(KERN_DEBUG " %s=%llx, taddr=%llx, irq=%lx, clk=%d, speed=%d\n",
	       (iotype == UPIO_PORT) ? "port" : "mem",
	       (unsigned long long)base, (unsigned long long)taddr, irq,
	       legacy_serial_ports[index].uartclk,
	       legacy_serial_infos[index].speed);

	return index;
}

/*
 * Add a memory-mapped ns16550 found under a known SoC/bridge parent.
 * Ports under a tsi-bridge get the TSI accessors.  Returns the slot used
 * or -1 on rejection.
 * NOTE(review): 'tsi' from of_get_parent() is never of_node_put() on any
 * path here -- looks like a leaked reference; confirm against upstream.
 */
static int __init add_legacy_soc_port(struct device_node *np,
				      struct device_node *soc_dev)
{
	u64 addr;
	const u32 *addrp;
	upf_t flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST | UPF_SHARE_IRQ
		| UPF_FIXED_PORT;
	struct device_node *tsi = of_get_parent(np);

	/* We only support ports that have a clock frequency properly
	 * encoded in the device-tree.
	 */
	if (of_get_property(np, "clock-frequency", NULL) == NULL)
		return -1;

	/* if reg-shift or offset, don't try to use it */
	if ((of_get_property(np, "reg-shift", NULL) != NULL) ||
	    (of_get_property(np, "reg-offset", NULL) != NULL))
		return -1;

	/* if rtas uses this device, don't try to use it as well */
	if (of_get_property(np, "used-by-rtas", NULL) != NULL)
		return -1;

	/* Get the address */
	addrp = of_get_address(soc_dev, 0, NULL, NULL);
	if (addrp == NULL)
		return -1;

	addr = of_translate_address(soc_dev, addrp);
	if (addr == OF_BAD_ADDR)
		return -1;

	/* Add port, irq will be dealt with later. We passed a translated
	 * IO port value. It will be fixed up later along with the irq
	 */
	if (tsi && !strcmp(tsi->type, "tsi-bridge"))
		return add_legacy_port(np, -1, UPIO_TSI, addr, addr,
				       NO_IRQ, flags, 0);
	else
		return add_legacy_port(np, -1, UPIO_MEM, addr, addr,
				       NO_IRQ, flags, 0);
}

/*
 * Add an ISA serial port.  Ordering comes from an optional "ibm,aix-loc"
 * property ("S<n>" -> slot n-1); the ISA IO port number is taken from the
 * "reg" property and the translated address kept for the early console.
 */
static int __init add_legacy_isa_port(struct device_node *np,
				      struct device_node *isa_brg)
{
	const __be32 *reg;
	const char *typep;
	int index = -1;
	u64 taddr;

	DBG(" -> add_legacy_isa_port(%s)\n", np->full_name);

	/* Get the ISA port number */
	reg = of_get_property(np, "reg", NULL);
	if (reg == NULL)
		return -1;

	/* Verify it's an IO port, we don't support anything else */
	if (!(be32_to_cpu(reg[0]) & 0x00000001))
		return -1;

	/* Now look for an "ibm,aix-loc" property that gives us ordering
	 * if any...
	 */
	typep = of_get_property(np, "ibm,aix-loc", NULL);

	/* If we have a location index, then use it */
	if (typep && *typep == 'S')
		index = simple_strtol(typep+1, NULL, 0) - 1;

	/* Translate ISA address. If it fails, we still register the port
	 * with no translated address so that it can be picked up as an IO
	 * port later by the serial driver
	 */
	taddr = of_translate_address(np, reg);
	if (taddr == OF_BAD_ADDR)
		taddr = 0;

	/* Add port, irq will be dealt with later */
	return add_legacy_port(np, index, UPIO_PORT, be32_to_cpu(reg[1]),
			       taddr, NO_IRQ, UPF_BOOT_AUTOCONF, 0);
}

#ifdef CONFIG_PCI
/*
 * Add a PCI UART.  Only BAR 0 is considered; per-port offsets within
 * multi-port chips are guessed from the child "reg" property (EXAR chips
 * use a 0x200 stride, everything else defaults to 8 bytes).
 */
static int __init add_legacy_pci_port(struct device_node *np,
				      struct device_node *pci_dev)
{
	u64 addr, base;
	const u32 *addrp;
	unsigned int flags;
	int iotype, index = -1, lindex = 0;

	DBG(" -> add_legacy_pci_port(%s)\n", np->full_name);

	/* We only support ports that have a clock frequency properly
	 * encoded in the device-tree (that is have an fcode). Anything
	 * else can't be used that early and will be normally probed by
	 * the generic 8250_pci driver later on. The reason is that 8250
	 * compatible UARTs on PCI need all sort of quirks (port offsets
	 * etc...) that this code doesn't know about
	 */
	if (of_get_property(np, "clock-frequency", NULL) == NULL)
		return -1;

	/* Get the PCI address. Assume BAR 0 */
	addrp = of_get_pci_address(pci_dev, 0, NULL, &flags);
	if (addrp == NULL)
		return -1;

	/* We only support BAR 0 for now */
	iotype = (flags & IORESOURCE_MEM) ? UPIO_MEM : UPIO_PORT;
	addr = of_translate_address(pci_dev, addrp);
	if (addr == OF_BAD_ADDR)
		return -1;

	/* Set the IO base to the same as the translated address for MMIO,
	 * or to the domain local IO base for PIO (it will be fixed up later)
	 */
	if (iotype == UPIO_MEM)
		base = addr;
	else
		base = addrp[2];

	/* Try to guess an index... If we have subdevices of the pci dev,
	 * we get to their "reg" property
	 */
	if (np != pci_dev) {
		const __be32 *reg = of_get_property(np, "reg", NULL);
		if (reg && (be32_to_cpup(reg) < 4))
			index = lindex = be32_to_cpup(reg);
	}

	/* Local index means it's the Nth port in the PCI chip. Unfortunately
	 * the offset to add here is device specific. We know about those
	 * EXAR ports and we default to the most common case. If your UART
	 * doesn't work for these settings, you'll have to add your own special
	 * cases here
	 */
	if (of_device_is_compatible(pci_dev, "pci13a8,152") ||
	    of_device_is_compatible(pci_dev, "pci13a8,154") ||
	    of_device_is_compatible(pci_dev, "pci13a8,158")) {
		addr += 0x200 * lindex;
		base += 0x200 * lindex;
	} else {
		addr += 8 * lindex;
		base += 8 * lindex;
	}

	/* Add port, irq will be dealt with later. We passed a translated
	 * IO port value. It will be fixed up later along with the irq
	 */
	return add_legacy_port(np, index, iotype, base, addr, NO_IRQ,
			       UPF_BOOT_AUTOCONF, np != pci_dev);
}
#endif

/*
 * Initialise the early udbg console on the chosen port: map its
 * registers, probe the baud rate if the device-tree didn't supply one,
 * and point udbg at it.
 */
static void __init setup_legacy_serial_console(int console)
{
	struct legacy_serial_info *info = &legacy_serial_infos[console];
	void __iomem *addr;

	/* taddr == 0 means the address could not be translated earlier. */
	if (info->taddr == 0)
		return;
	addr = ioremap(info->taddr, 0x1000);
	if (addr == NULL)
		return;
	if (info->speed == 0)
		info->speed = udbg_probe_uart_speed(addr, info->clock);

	DBG("default console speed = %d\n", info->speed);
	udbg_init_uart(addr, info->speed, info->clock);
}

/*
 * This is called very early, as part of setup_system() or eventually
 * setup_arch(), basically before anything else in this file. This function
 * will try to build a list of all the available 8250-compatible serial ports
 * in the machine using the Open Firmware device-tree. It currently only deals
 * with ISA and PCI busses but could be extended. It allows a very early boot
 * console to be initialized, that list is also used later to provide 8250 with
 * the machine non-PCI ports and to properly pick the default console port
 */
void __init find_legacy_serial_ports(void)
{
	struct device_node *np, *stdout = NULL;
	const char *path;
	int index;

	DBG(" -> find_legacy_serial_port()\n");

	/* Now find out if one of these is out firmware console */
	path = of_get_property(of_chosen, "linux,stdout-path", NULL);
	if (path != NULL) {
		stdout = of_find_node_by_path(path);
		if (stdout)
			DBG("stdout is %s\n", stdout->full_name);
	} else {
		DBG(" no linux,stdout-path !\n");
	}

	/* Iterate over all the 16550 ports, looking for known parents */
	for_each_compatible_node(np, "serial", "ns16550") {
		struct device_node *parent = of_get_parent(np);
		if (!parent)
			continue;
		if (of_match_node(legacy_serial_parents, parent) != NULL) {
			if (of_device_is_available(np)) {
				index = add_legacy_soc_port(np, np);
				if (index >= 0 && np == stdout)
					legacy_serial_console = index;
			}
		}
		of_node_put(parent);
	}

	/* Next, fill our array with ISA ports */
	for_each_node_by_type(np, "serial") {
		struct device_node *isa = of_get_parent(np);
		if (isa && !strcmp(isa->name, "isa")) {
			index = add_legacy_isa_port(np, isa);
			if (index >= 0 && np == stdout)
				legacy_serial_console = index;
		}
		of_node_put(isa);
	}

#ifdef CONFIG_PCI
	/* Next, try to locate PCI ports */
	for (np = NULL; (np = of_find_all_nodes(np));) {
		struct device_node *pci, *parent = of_get_parent(np);
		/* ISA ports were already handled above. */
		if (parent && !strcmp(parent->name, "isa")) {
			of_node_put(parent);
			continue;
		}
		if (strcmp(np->name, "serial") &&
		    strcmp(np->type, "serial")) {
			of_node_put(parent);
			continue;
		}
		/* Check for known pciclass, and also check whether we have
		 * a device with child nodes for ports or not
		 */
		if (of_device_is_compatible(np, "pciclass,0700") ||
		    of_device_is_compatible(np, "pciclass,070002"))
			pci = np;
		else if (of_device_is_compatible(parent, "pciclass,0700") ||
			 of_device_is_compatible(parent, "pciclass,070002"))
			pci = parent;
		else {
			of_node_put(parent);
			continue;
		}
		index = add_legacy_pci_port(np, pci);
		if (index >= 0 && np == stdout)
			legacy_serial_console = index;
		of_node_put(parent);
	}
#endif

	DBG("legacy_serial_console = %d\n", legacy_serial_console);
	if (legacy_serial_console >= 0)
		setup_legacy_serial_console(legacy_serial_console);

	DBG(" <- find_legacy_serial_port()\n");
}

/* Platform device wrapping the whole port table for the 8250 driver. */
static struct platform_device serial_device = {
	.name	= "serial8250",
	.id	= PLAT8250_DEV_PLATFORM,
	.dev	= {
		.platform_data = legacy_serial_ports,
	},
};

/*
 * Resolve a port's interrupt now that the interrupt maps are parsed,
 * optionally falling back to the parent node's interrupt.
 * NOTE(review): if the parent fallback is taken, 'np' points at the
 * parent (already of_node_put) when the fsl,ns16550 check below runs --
 * verify against upstream which checks the original node.
 */
static void __init fixup_port_irq(int index,
				  struct device_node *np,
				  struct plat_serial8250_port *port)
{
	unsigned int virq;

	DBG("fixup_port_irq(%d)\n", index);

	virq = irq_of_parse_and_map(np, 0);
	if (virq == NO_IRQ && legacy_serial_infos[index].irq_check_parent) {
		np = of_get_parent(np);
		if (np == NULL)
			return;
		virq = irq_of_parse_and_map(np, 0);
		of_node_put(np);
	}
	if (virq == NO_IRQ)
		return;

	port->irq = virq;

#ifdef CONFIG_SERIAL_8250_FSL
	if (of_device_is_compatible(np, "fsl,ns16550"))
		port->handle_irq = fsl8250_handle_irq;
#endif
}

/* Rebase a PIO port's iobase onto the owning PHB's virtual IO window. */
static void __init fixup_port_pio(int index,
				  struct device_node *np,
				  struct plat_serial8250_port *port)
{
#ifdef CONFIG_PCI
	struct pci_controller *hose;

	DBG("fixup_port_pio(%d)\n", index);

	hose = pci_find_hose_for_OF_device(np);
	if (hose) {
		unsigned long offset = (unsigned long)hose->io_base_virt -
#ifdef CONFIG_PPC64
			pci_io_base;
#else
			isa_io_base;
#endif
		DBG("port %d, IO %lx -> %lx\n",
		    index, port->iobase, port->iobase + offset);
		port->iobase += offset;
	}
#endif
}

/* Map an MMIO port's registers so the 8250 driver can use membase. */
static void __init fixup_port_mmio(int index,
				   struct device_node *np,
				   struct plat_serial8250_port *port)
{
	DBG("fixup_port_mmio(%d)\n", index);

	port->membase = ioremap(port->mapbase, 0x100);
}

/*
 * This is called as an arch initcall, hopefully before the PCI bus is
 * probed and/or the 8250 driver loaded since we need to register our
 * platform devices before 8250 PCI ones are detected as some of them
 * must properly "override" the platform ones.
 *
 * This function fixes up the interrupt value for platform ports as it
 * couldn't be done earlier before interrupt maps have been parsed. It
 * also "corrects" the IO address for PIO ports for the same reason,
 * since earlier, the PHBs virtual IO space wasn't assigned yet. It then
 * registers all those platform ports for use by the 8250 driver when it
 * finally loads.
 */
static int __init serial_dev_init(void)
{
	int i;

	if (legacy_serial_count == 0)
		return -ENODEV;

	/*
	 * Before we register the platform serial devices, we need
	 * to fixup their interrupts and their IO ports.
	 */
	DBG("Fixing serial ports interrupts and IO ports ...\n");

	for (i = 0; i < legacy_serial_count; i++) {
		struct plat_serial8250_port *port = &legacy_serial_ports[i];
		struct device_node *np = legacy_serial_infos[i].np;

		if (port->irq == NO_IRQ)
			fixup_port_irq(i, np, port);
		if (port->iotype == UPIO_PORT)
			fixup_port_pio(i, np, port);
		if ((port->iotype == UPIO_MEM) || (port->iotype == UPIO_TSI))
			fixup_port_mmio(i, np, port);
	}

	DBG("Registering platform serial ports\n");

	return platform_device_register(&serial_device);
}
device_initcall(serial_dev_init);

#ifdef CONFIG_SERIAL_8250_CONSOLE
/*
 * This is called very early, as part of console_init() (typically just after
 * time_init()). This function is respondible for trying to find a good
 * default console on serial ports. It tries to match the open firmware
 * default output with one of the available serial console drivers that have
 * been probed earlier by find_legacy_serial_ports()
 */
static int __init check_legacy_serial_console(void)
{
	struct device_node *prom_stdout = NULL;
	int i, speed = 0, offset = 0;
	const char *name;
	const __be32 *spd;

	DBG(" -> check_legacy_serial_console()\n");

	/* The user has requested a console so this is already set up. */
	if (strstr(boot_command_line, "console=")) {
		DBG(" console was specified !\n");
		return -EBUSY;
	}

	if (!of_chosen) {
		DBG(" of_chosen is NULL !\n");
		return -ENODEV;
	}

	if (legacy_serial_console < 0) {
		DBG(" legacy_serial_console not found !\n");
		return -ENODEV;
	}
	/* We are getting a weird phandle from OF ... */
	/* ... So use the full path instead */
	name = of_get_property(of_chosen, "linux,stdout-path", NULL);
	if (name == NULL) {
		DBG(" no linux,stdout-path !\n");
		return -ENODEV;
	}
	prom_stdout = of_find_node_by_path(name);
	if (!prom_stdout) {
		DBG(" can't find stdout package %s !\n", name);
		return -ENODEV;
	}
	DBG("stdout is %s\n", prom_stdout->full_name);

	name = of_get_property(prom_stdout, "name", NULL);
	if (!name) {
		DBG(" stdout package has no name !\n");
		goto not_found;
	}
	spd = of_get_property(prom_stdout, "current-speed", NULL);
	if (spd)
		speed = be32_to_cpup(spd);

	if (strcmp(name, "serial") != 0)
		goto not_found;

	/* Look for it in probed array */
	for (i = 0; i < legacy_serial_count; i++) {
		if (prom_stdout != legacy_serial_infos[i].np)
			continue;
		offset = i;
		speed = legacy_serial_infos[i].speed;
		break;
	}
	if (i >= legacy_serial_count)
		goto not_found;
	of_node_put(prom_stdout);

	DBG("Found serial console at ttyS%d\n", offset);

	if (speed) {
		static char __initdata opt[16];
		sprintf(opt, "%d", speed);
		return add_preferred_console("ttyS", offset, opt);
	} else
		return add_preferred_console("ttyS", offset, NULL);

not_found:
	DBG("No preferred console found !\n");
	of_node_put(prom_stdout);
	return -ENODEV;
}
console_initcall(check_legacy_serial_console);

#endif /* CONFIG_SERIAL_8250_CONSOLE */
gpl-2.0
wjb/mx-common
drivers/misc/lis3lv02d/lis3lv02d_i2c.c
2924
7322
/*
 * drivers/hwmon/lis3lv02d_i2c.c
 *
 * Implements I2C interface for lis3lv02d (STMicroelectronics) accelerometer.
 * Driver is based on corresponding SPI driver written by Daniel Mack
 * (lis3lv02d_spi.c (C) 2009 Daniel Mack <daniel@caiaq.de> ).
 *
 * Copyright (C) 2009 Nokia Corporation and/or its subsidiary(-ies).
 *
 * Contact: Samu Onkalo <samu.p.onkalo@nokia.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/i2c.h>
#include <linux/pm_runtime.h>
#include <linux/delay.h>
#include "lis3lv02d.h"

#define DRV_NAME "lis3lv02d_i2c"

/* Regulator supply names for regulator_bulk_get(). */
static const char reg_vdd[] = "Vdd";
static const char reg_vdd_io[] = "Vdd_IO";

/* Switch both supply regulators on or off; on power-up, wait for the
 * chip to come out of reset before touching it. */
static int lis3_reg_ctrl(struct lis3lv02d *lis3, bool state)
{
	int ret;
	if (state == LIS3_REG_OFF) {
		ret = regulator_bulk_disable(ARRAY_SIZE(lis3->regulators),
					lis3->regulators);
	} else {
		ret = regulator_bulk_enable(ARRAY_SIZE(lis3->regulators),
					lis3->regulators);
		/* Chip needs time to wakeup. Not mentioned in datasheet */
		usleep_range(10000, 20000);
	}
	return ret;
}

/* Single-register write over SMBus. */
static inline s32 lis3_i2c_write(struct lis3lv02d *lis3, int reg, u8 value)
{
	struct i2c_client *c = lis3->bus_priv;
	return i2c_smbus_write_byte_data(c, reg, value);
}

/* Single-register read over SMBus.
 * NOTE(review): the SMBus error code is discarded; *v gets the raw
 * (possibly negative, truncated) return and 0 is always returned. */
static inline s32 lis3_i2c_read(struct lis3lv02d *lis3, int reg, u8 *v)
{
	struct i2c_client *c = lis3->bus_priv;
	*v = i2c_smbus_read_byte_data(c, reg);
	return 0;
}

/* Multi-register read; bit 7 of the register address enables the chip's
 * address auto-increment. */
static inline s32 lis3_i2c_blockread(struct lis3lv02d *lis3, int reg, int len,
				u8 *v)
{
	struct i2c_client *c = lis3->bus_priv;
	reg |= (1 << 7); /* 7th bit enables address auto incrementation */
	return i2c_smbus_read_i2c_block_data(c, reg, len, v);
}

/* Power the chip on (if regulator control is used), verify WHO_AM_I,
 * then enable the device and all three axes via CTRL_REG1. */
static int lis3_i2c_init(struct lis3lv02d *lis3)
{
	u8 reg;
	int ret;

	if (lis3->reg_ctrl)
		lis3_reg_ctrl(lis3, LIS3_REG_ON);

	lis3->read(lis3, WHO_AM_I, &reg);
	if (reg != lis3->whoami)
		printk(KERN_ERR "lis3: power on failure\n");

	/* power up the device */
	ret = lis3->read(lis3, CTRL_REG1, &reg);
	if (ret < 0)
		return ret;

	reg |= CTRL1_PD0 | CTRL1_Xen | CTRL1_Yen | CTRL1_Zen;
	return lis3->write(lis3, CTRL_REG1, reg);
}

/* Default axis mapping but it can be overwritten by platform data */
static union axis_conversion lis3lv02d_axis_map =
	{ .as_array = { LIS3_DEV_X, LIS3_DEV_Y, LIS3_DEV_Z } };

/* Probe: apply platform data overrides (regulators, block read, axis
 * remap, board resources), wire the global lis3_dev to this client, and
 * run the common init with the supplies held on. */
static int __devinit lis3lv02d_i2c_probe(struct i2c_client *client,
					const struct i2c_device_id *id)
{
	int ret = 0;
	struct lis3lv02d_platform_data *pdata = client->dev.platform_data;

	if (pdata) {
		/* Regulator control is optional */
		if (pdata->driver_features & LIS3_USE_REGULATOR_CTRL)
			lis3_dev.reg_ctrl = lis3_reg_ctrl;

		if ((pdata->driver_features & LIS3_USE_BLOCK_READ) &&
			(i2c_check_functionality(client->adapter,
						I2C_FUNC_SMBUS_I2C_BLOCK)))
			lis3_dev.blkread = lis3_i2c_blockread;

		if (pdata->axis_x)
			lis3lv02d_axis_map.x = pdata->axis_x;

		if (pdata->axis_y)
			lis3lv02d_axis_map.y = pdata->axis_y;

		if (pdata->axis_z)
			lis3lv02d_axis_map.z = pdata->axis_z;

		if (pdata->setup_resources)
			ret = pdata->setup_resources();

		if (ret)
			goto fail;
	}

	if (lis3_dev.reg_ctrl) {
		lis3_dev.regulators[0].supply = reg_vdd;
		lis3_dev.regulators[1].supply = reg_vdd_io;
		ret = regulator_bulk_get(&client->dev,
					 ARRAY_SIZE(lis3_dev.regulators),
					 lis3_dev.regulators);
		if (ret < 0)
			goto fail;
	}

	lis3_dev.pdata = pdata;
	lis3_dev.bus_priv = client;
	lis3_dev.init = lis3_i2c_init;
	lis3_dev.read = lis3_i2c_read;
	lis3_dev.write = lis3_i2c_write;
	lis3_dev.irq = client->irq;
	lis3_dev.ac = lis3lv02d_axis_map;
	lis3_dev.pm_dev = &client->dev;

	i2c_set_clientdata(client, &lis3_dev);

	/* Provide power over the init call */
	if (lis3_dev.reg_ctrl)
		lis3_reg_ctrl(&lis3_dev, LIS3_REG_ON);

	ret = lis3lv02d_init_device(&lis3_dev);

	if (lis3_dev.reg_ctrl)
		lis3_reg_ctrl(&lis3_dev, LIS3_REG_OFF);

	if (ret == 0)
		return 0;
fail:
	if (pdata && pdata->release_resources)
		pdata->release_resources();
	return ret;
}

/* Remove: release board resources, tear down sysfs/joystick, free the
 * regulators.  NOTE(review): mixes 'lis3->' and the global 'lis3_dev' --
 * equivalent here since probe stores &lis3_dev as clientdata, but worth
 * confirming/unifying. */
static int __devexit lis3lv02d_i2c_remove(struct i2c_client *client)
{
	struct lis3lv02d *lis3 = i2c_get_clientdata(client);
	struct lis3lv02d_platform_data *pdata = client->dev.platform_data;

	if (pdata && pdata->release_resources)
		pdata->release_resources();

	lis3lv02d_joystick_disable();
	lis3lv02d_remove_fs(&lis3_dev);

	if (lis3_dev.reg_ctrl)
		regulator_bulk_free(ARRAY_SIZE(lis3->regulators),
				lis3_dev.regulators);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
/* System suspend: power off unless the board uses the chip for wakeup. */
static int lis3lv02d_i2c_suspend(struct device *dev)
{
	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
	struct lis3lv02d *lis3 = i2c_get_clientdata(client);

	if (!lis3->pdata || !lis3->pdata->wakeup_flags)
		lis3lv02d_poweroff(lis3);
	return 0;
}

/* System resume: power back on; runtime PM will power down again later
 * if the device is unused. */
static int lis3lv02d_i2c_resume(struct device *dev)
{
	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
	struct lis3lv02d *lis3 = i2c_get_clientdata(client);

	/*
	 * pm_runtime documentation says that devices should always
	 * be powered on at resume. Pm_runtime turns them off after system
	 * wide resume is complete.
	 */
	if (!lis3->pdata || !lis3->pdata->wakeup_flags ||
		pm_runtime_suspended(dev))
		lis3lv02d_poweron(lis3);

	return 0;
}
#endif /* CONFIG_PM_SLEEP */

#ifdef CONFIG_PM_RUNTIME
/* Runtime PM: simple power off/on around idle periods. */
static int lis3_i2c_runtime_suspend(struct device *dev)
{
	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
	struct lis3lv02d *lis3 = i2c_get_clientdata(client);

	lis3lv02d_poweroff(lis3);
	return 0;
}

static int lis3_i2c_runtime_resume(struct device *dev)
{
	struct i2c_client *client = container_of(dev, struct i2c_client, dev);
	struct lis3lv02d *lis3 = i2c_get_clientdata(client);

	lis3lv02d_poweron(lis3);
	return 0;
}
#endif /* CONFIG_PM_RUNTIME */

static const struct i2c_device_id lis3lv02d_id[] = {
	{"lis3lv02d", 0 },
	{}
};

MODULE_DEVICE_TABLE(i2c, lis3lv02d_id);

static const struct dev_pm_ops lis3_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(lis3lv02d_i2c_suspend,
				lis3lv02d_i2c_resume)
	SET_RUNTIME_PM_OPS(lis3_i2c_runtime_suspend,
			   lis3_i2c_runtime_resume,
			   NULL)
};

static struct i2c_driver lis3lv02d_i2c_driver = {
	.driver	 = {
		.name   = DRV_NAME,
		.owner  = THIS_MODULE,
		.pm     = &lis3_pm_ops,
	},
	.probe	= lis3lv02d_i2c_probe,
	.remove	= __devexit_p(lis3lv02d_i2c_remove),
	.id_table = lis3lv02d_id,
};

static int __init lis3lv02d_init(void)
{
	return i2c_add_driver(&lis3lv02d_i2c_driver);
}

static void __exit lis3lv02d_exit(void)
{
	i2c_del_driver(&lis3lv02d_i2c_driver);
}

MODULE_AUTHOR("Nokia Corporation");
MODULE_DESCRIPTION("lis3lv02d I2C interface");
MODULE_LICENSE("GPL");

module_init(lis3lv02d_init);
module_exit(lis3lv02d_exit);
gpl-2.0
bourne015/kernel-3.0-s5pv210
drivers/staging/rtl8192e/r8180_93cx6.c
2924
3420
/*
 * Serial EEPROM (93c46 / 93c56) bit-bang read routines for the rtl8180
 * OpenSource driver.  The EEPROM is addressed in 16-bit words.
 *
 * Copyright (C) Andrea Merello 2004 <andreamrl@tiscali.it>
 * Released under the terms of GPL (General Public Licence)
 *
 * Parts of this driver are based on the GPL part of the official realtek
 * driver, on the rtl8180 driver skeleton from Patric Schenke & Andres
 * Salomon, and on the Intel Pro Wireless 2100 GPL driver.
 *
 * We want to tanks the Authors of those projects and the Ndiswrapper
 * project Authors.
 */

#include "r8180_93cx6.h"

/* Drive the EEPROM chip-select line high (select) or low (deselect). */
static void eprom_cs(struct r8192_priv *priv, short bit)
{
	u8 cmdreg = read_nic_byte(priv, EPROM_CMD);

	if (bit)
		cmdreg |= 1 << EPROM_CS_SHIFT;	/* enable EPROM */
	else
		cmdreg &= ~(1 << EPROM_CS_SHIFT); /* disable EPROM */

	write_nic_byte(priv, EPROM_CMD, cmdreg);
	udelay(EPROM_DELAY);
}

/* Generate one full clock pulse on the EEPROM clock line. */
static void eprom_ck_cycle(struct r8192_priv *priv)
{
	write_nic_byte(priv, EPROM_CMD,
		       read_nic_byte(priv, EPROM_CMD) | (1 << EPROM_CK_SHIFT));
	udelay(EPROM_DELAY);
	write_nic_byte(priv, EPROM_CMD,
		       read_nic_byte(priv, EPROM_CMD) & ~(1 << EPROM_CK_SHIFT));
	udelay(EPROM_DELAY);
}

/* Present one data bit on the EEPROM write (DI) line. */
static void eprom_w(struct r8192_priv *priv, short bit)
{
	u8 cmdreg = read_nic_byte(priv, EPROM_CMD);

	if (bit)
		cmdreg |= 1 << EPROM_W_SHIFT;
	else
		cmdreg &= ~(1 << EPROM_W_SHIFT);

	write_nic_byte(priv, EPROM_CMD, cmdreg);
	udelay(EPROM_DELAY);
}

/* Sample the EEPROM read (DO) line; returns 1 or 0. */
static short eprom_r(struct r8192_priv *priv)
{
	short sampled = read_nic_byte(priv, EPROM_CMD) & (1 << EPROM_R_SHIFT);

	udelay(EPROM_DELAY);
	return sampled ? 1 : 0;
}

/* Clock out 'len' bits from b[], each value treated as a boolean bit. */
static void eprom_send_bits_string(struct r8192_priv *priv, short b[], int len)
{
	int pos;

	for (pos = 0; pos < len; pos++) {
		eprom_w(priv, b[pos]);
		eprom_ck_cycle(priv);
	}
}

/*
 * Read one 16-bit word from the EEPROM at word address 'addr'.
 * The 93c56 takes an 8-bit address, the 93c46 a 6-bit one.
 */
u32 eprom_read(struct r8192_priv *priv, u32 addr)
{
	short read_cmd[] = {1, 1, 0};	/* 93cXX READ opcode */
	short addr_bits[8];
	int addr_len;
	int i;
	u32 word = 0;

	/* enable EPROM programming mode */
	write_nic_byte(priv, EPROM_CMD,
		       (EPROM_CMD_PROGRAM << EPROM_CMD_OPERATING_MODE_SHIFT));
	udelay(EPROM_DELAY);

	/* serialise the word address, most-significant bit first */
	addr_len = (priv->epromtype == EPROM_93c56) ? 8 : 6;
	for (i = 0; i < addr_len; i++)
		addr_bits[i] = addr & (1 << (addr_len - 1 - i));

	eprom_cs(priv, 1);
	eprom_ck_cycle(priv);
	eprom_send_bits_string(priv, read_cmd, 3);
	eprom_send_bits_string(priv, addr_bits, addr_len);

	/* keep chip pin D low while reading; unsure if it is necessary,
	 * but anyway it shouldn't hurt */
	eprom_w(priv, 0);

	for (i = 0; i < 16; i++) {
		/* the eeprom needs a clk cycle between writing opcode &
		 * address and reading data (it outputs a dummy 0 first) */
		eprom_ck_cycle(priv);
		word |= eprom_r(priv) << (15 - i);
	}

	eprom_cs(priv, 0);
	eprom_ck_cycle(priv);

	/* back to normal (non-programming) mode */
	write_nic_byte(priv, EPROM_CMD,
		       (EPROM_CMD_NORMAL << EPROM_CMD_OPERATING_MODE_SHIFT));
	return word;
}
gpl-2.0
dark-falcon/android_kernel_motorola_msm8916
net/sunrpc/bc_svc.c
3692
2030
/****************************************************************************** (c) 2007 Network Appliance, Inc. All Rights Reserved. (c) 2009 NetApp. All Rights Reserved. NetApp provides this source code under the GPL v2 License. The GPL v2 license is available at http://opensource.org/licenses/gpl-license.php. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************/ /* * The NFSv4.1 callback service helper routines. * They implement the transport level processing required to send the * reply over an existing open connection previously established by the client. */ #include <linux/module.h> #include <linux/sunrpc/xprt.h> #include <linux/sunrpc/sched.h> #include <linux/sunrpc/bc_xprt.h> #define RPCDBG_FACILITY RPCDBG_SVCDSP /* Empty callback ops */ static const struct rpc_call_ops nfs41_callback_ops = { }; /* * Send the callback reply */ int bc_send(struct rpc_rqst *req) { struct rpc_task *task; int ret; dprintk("RPC: bc_send req= %p\n", req); task = rpc_run_bc_task(req, &nfs41_callback_ops); if (IS_ERR(task)) ret = PTR_ERR(task); else { WARN_ON_ONCE(atomic_read(&task->tk_count) != 1); ret = task->tk_status; rpc_put_task(task); } dprintk("RPC: bc_send ret= %d\n", ret); return ret; }
gpl-2.0
trlsmax/rk3188_kernel_tinyastro
drivers/usb/gadget/config.c
3948
5783
/* * usb/gadget/config.c -- simplify building config descriptors * * Copyright (C) 2003 David Brownell * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/errno.h> #include <linux/slab.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/string.h> #include <linux/device.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> /** * usb_descriptor_fillbuf - fill buffer with descriptors * @buf: Buffer to be filled * @buflen: Size of buf * @src: Array of descriptor pointers, terminated by null pointer. * * Copies descriptors into the buffer, returning the length or a * negative error code if they can't all be copied. Useful when * assembling descriptors for an associated set of interfaces used * as part of configuring a composite device; or in other cases where * sets of descriptors need to be marshaled. 
*/ int usb_descriptor_fillbuf(void *buf, unsigned buflen, const struct usb_descriptor_header **src) { u8 *dest = buf; if (!src) return -EINVAL; /* fill buffer from src[] until null descriptor ptr */ for (; NULL != *src; src++) { unsigned len = (*src)->bLength; if (len > buflen) return -EINVAL; memcpy(dest, *src, len); buflen -= len; dest += len; } return dest - (u8 *)buf; } /** * usb_gadget_config_buf - builts a complete configuration descriptor * @config: Header for the descriptor, including characteristics such * as power requirements and number of interfaces. * @desc: Null-terminated vector of pointers to the descriptors (interface, * endpoint, etc) defining all functions in this device configuration. * @buf: Buffer for the resulting configuration descriptor. * @length: Length of buffer. If this is not big enough to hold the * entire configuration descriptor, an error code will be returned. * * This copies descriptors into the response buffer, building a descriptor * for that configuration. It returns the buffer length or a negative * status code. The config.wTotalLength field is set to match the length * of the result, but other descriptor fields (including power usage and * interface count) must be set by the caller. * * Gadget drivers could use this when constructing a config descriptor * in response to USB_REQ_GET_DESCRIPTOR. They will need to patch the * resulting bDescriptorType value if USB_DT_OTHER_SPEED_CONFIG is needed. */ int usb_gadget_config_buf( const struct usb_config_descriptor *config, void *buf, unsigned length, const struct usb_descriptor_header **desc ) { struct usb_config_descriptor *cp = buf; int len; /* config descriptor first */ if (length < USB_DT_CONFIG_SIZE || !desc) return -EINVAL; *cp = *config; /* then interface/endpoint/class/vendor/... 
*/ len = usb_descriptor_fillbuf(USB_DT_CONFIG_SIZE + (u8*)buf, length - USB_DT_CONFIG_SIZE, desc); if (len < 0) return len; len += USB_DT_CONFIG_SIZE; if (len > 0xffff) return -EINVAL; /* patch up the config descriptor */ cp->bLength = USB_DT_CONFIG_SIZE; cp->bDescriptorType = USB_DT_CONFIG; cp->wTotalLength = cpu_to_le16(len); cp->bmAttributes |= USB_CONFIG_ATT_ONE; return len; } /** * usb_copy_descriptors - copy a vector of USB descriptors * @src: null-terminated vector to copy * Context: initialization code, which may sleep * * This makes a copy of a vector of USB descriptors. Its primary use * is to support usb_function objects which can have multiple copies, * each needing different descriptors. Functions may have static * tables of descriptors, which are used as templates and customized * with identifiers (for interfaces, strings, endpoints, and more) * as needed by a given function instance. */ struct usb_descriptor_header ** usb_copy_descriptors(struct usb_descriptor_header **src) { struct usb_descriptor_header **tmp; unsigned bytes; unsigned n_desc; void *mem; struct usb_descriptor_header **ret; /* count descriptors and their sizes; then add vector size */ for (bytes = 0, n_desc = 0, tmp = src; *tmp; tmp++, n_desc++) bytes += (*tmp)->bLength; bytes += (n_desc + 1) * sizeof(*tmp); mem = kmalloc(bytes, GFP_KERNEL); if (!mem) return NULL; /* fill in pointers starting at "tmp", * to descriptors copied starting at "mem"; * and return "ret" */ tmp = mem; ret = mem; mem += (n_desc + 1) * sizeof(*tmp); while (*src) { memcpy(mem, *src, (*src)->bLength); *tmp = mem; tmp++; mem += (*src)->bLength; src++; } *tmp = NULL; return ret; } /** * usb_find_endpoint - find a copy of an endpoint descriptor * @src: original vector of descriptors * @copy: copy of @src * @match: endpoint descriptor found in @src * * This returns the copy of the @match descriptor made for @copy. 
Its * intended use is to help remembering the endpoint descriptor to use * when enabling a given endpoint. */ struct usb_endpoint_descriptor * usb_find_endpoint( struct usb_descriptor_header **src, struct usb_descriptor_header **copy, struct usb_endpoint_descriptor *match ) { while (*src) { if (*src == (void *) match) return (void *)*copy; src++; copy++; } return NULL; }
gpl-2.0
DrGrip/tiamat-2.6.38-LEO-Dr_Grip
drivers/mfd/ucb1x00-core.c
4204
19086
/* * linux/drivers/mfd/ucb1x00-core.c * * Copyright (C) 2001 Russell King, All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License. * * The UCB1x00 core driver provides basic services for handling IO, * the ADC, interrupts, and accessing registers. It is designed * such that everything goes through this layer, thereby providing * a consistent locking methodology, as well as allowing the drivers * to be used on other non-MCP-enabled hardware platforms. * * Note that all locks are private to this file. Nothing else may * touch them. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/mutex.h> #include <linux/mfd/ucb1x00.h> #include <linux/gpio.h> #include <linux/semaphore.h> #include <mach/dma.h> #include <mach/hardware.h> static DEFINE_MUTEX(ucb1x00_mutex); static LIST_HEAD(ucb1x00_drivers); static LIST_HEAD(ucb1x00_devices); /** * ucb1x00_io_set_dir - set IO direction * @ucb: UCB1x00 structure describing chip * @in: bitfield of IO pins to be set as inputs * @out: bitfield of IO pins to be set as outputs * * Set the IO direction of the ten general purpose IO pins on * the UCB1x00 chip. The @in bitfield has priority over the * @out bitfield, in that if you specify a pin as both input * and output, it will end up as an input. * * ucb1x00_enable must have been called to enable the comms * before using this function. * * This function takes a spinlock, disabling interrupts. 
*/ void ucb1x00_io_set_dir(struct ucb1x00 *ucb, unsigned int in, unsigned int out) { unsigned long flags; spin_lock_irqsave(&ucb->io_lock, flags); ucb->io_dir |= out; ucb->io_dir &= ~in; ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); spin_unlock_irqrestore(&ucb->io_lock, flags); } /** * ucb1x00_io_write - set or clear IO outputs * @ucb: UCB1x00 structure describing chip * @set: bitfield of IO pins to set to logic '1' * @clear: bitfield of IO pins to set to logic '0' * * Set the IO output state of the specified IO pins. The value * is retained if the pins are subsequently configured as inputs. * The @clear bitfield has priority over the @set bitfield - * outputs will be cleared. * * ucb1x00_enable must have been called to enable the comms * before using this function. * * This function takes a spinlock, disabling interrupts. */ void ucb1x00_io_write(struct ucb1x00 *ucb, unsigned int set, unsigned int clear) { unsigned long flags; spin_lock_irqsave(&ucb->io_lock, flags); ucb->io_out |= set; ucb->io_out &= ~clear; ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out); spin_unlock_irqrestore(&ucb->io_lock, flags); } /** * ucb1x00_io_read - read the current state of the IO pins * @ucb: UCB1x00 structure describing chip * * Return a bitfield describing the logic state of the ten * general purpose IO pins. * * ucb1x00_enable must have been called to enable the comms * before using this function. * * This function does not take any semaphores or spinlocks. 
*/ unsigned int ucb1x00_io_read(struct ucb1x00 *ucb) { return ucb1x00_reg_read(ucb, UCB_IO_DATA); } static void ucb1x00_gpio_set(struct gpio_chip *chip, unsigned offset, int value) { struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio); unsigned long flags; spin_lock_irqsave(&ucb->io_lock, flags); if (value) ucb->io_out |= 1 << offset; else ucb->io_out &= ~(1 << offset); ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out); spin_unlock_irqrestore(&ucb->io_lock, flags); } static int ucb1x00_gpio_get(struct gpio_chip *chip, unsigned offset) { struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio); return ucb1x00_reg_read(ucb, UCB_IO_DATA) & (1 << offset); } static int ucb1x00_gpio_direction_input(struct gpio_chip *chip, unsigned offset) { struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio); unsigned long flags; spin_lock_irqsave(&ucb->io_lock, flags); ucb->io_dir &= ~(1 << offset); ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); spin_unlock_irqrestore(&ucb->io_lock, flags); return 0; } static int ucb1x00_gpio_direction_output(struct gpio_chip *chip, unsigned offset , int value) { struct ucb1x00 *ucb = container_of(chip, struct ucb1x00, gpio); unsigned long flags; spin_lock_irqsave(&ucb->io_lock, flags); ucb->io_dir |= (1 << offset); ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); if (value) ucb->io_out |= 1 << offset; else ucb->io_out &= ~(1 << offset); ucb1x00_reg_write(ucb, UCB_IO_DATA, ucb->io_out); spin_unlock_irqrestore(&ucb->io_lock, flags); return 0; } /* * UCB1300 data sheet says we must: * 1. enable ADC => 5us (including reference startup time) * 2. select input => 51*tsibclk => 4.3us * 3. start conversion => 102*tsibclk => 8.5us * (tsibclk = 1/11981000) * Period between SIB 128-bit frames = 10.7us */ /** * ucb1x00_adc_enable - enable the ADC converter * @ucb: UCB1x00 structure describing chip * * Enable the ucb1x00 and ADC converter on the UCB1x00 for use. 
* Any code wishing to use the ADC converter must call this * function prior to using it. * * This function takes the ADC semaphore to prevent two or more * concurrent uses, and therefore may sleep. As a result, it * can only be called from process context, not interrupt * context. * * You should release the ADC as soon as possible using * ucb1x00_adc_disable. */ void ucb1x00_adc_enable(struct ucb1x00 *ucb) { down(&ucb->adc_sem); ucb->adc_cr |= UCB_ADC_ENA; ucb1x00_enable(ucb); ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr); } /** * ucb1x00_adc_read - read the specified ADC channel * @ucb: UCB1x00 structure describing chip * @adc_channel: ADC channel mask * @sync: wait for syncronisation pulse. * * Start an ADC conversion and wait for the result. Note that * synchronised ADC conversions (via the ADCSYNC pin) must wait * until the trigger is asserted and the conversion is finished. * * This function currently spins waiting for the conversion to * complete (2 frames max without sync). * * If called for a synchronised ADC conversion, it may sleep * with the ADC semaphore held. */ unsigned int ucb1x00_adc_read(struct ucb1x00 *ucb, int adc_channel, int sync) { unsigned int val; if (sync) adc_channel |= UCB_ADC_SYNC_ENA; ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel); ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr | adc_channel | UCB_ADC_START); for (;;) { val = ucb1x00_reg_read(ucb, UCB_ADC_DATA); if (val & UCB_ADC_DAT_VAL) break; /* yield to other processes */ set_current_state(TASK_INTERRUPTIBLE); schedule_timeout(1); } return UCB_ADC_DAT(val); } /** * ucb1x00_adc_disable - disable the ADC converter * @ucb: UCB1x00 structure describing chip * * Disable the ADC converter and release the ADC semaphore. */ void ucb1x00_adc_disable(struct ucb1x00 *ucb) { ucb->adc_cr &= ~UCB_ADC_ENA; ucb1x00_reg_write(ucb, UCB_ADC_CR, ucb->adc_cr); ucb1x00_disable(ucb); up(&ucb->adc_sem); } /* * UCB1x00 Interrupt handling. 
* * The UCB1x00 can generate interrupts when the SIBCLK is stopped. * Since we need to read an internal register, we must re-enable * SIBCLK to talk to the chip. We leave the clock running until * we have finished processing all interrupts from the chip. */ static irqreturn_t ucb1x00_irq(int irqnr, void *devid) { struct ucb1x00 *ucb = devid; struct ucb1x00_irq *irq; unsigned int isr, i; ucb1x00_enable(ucb); isr = ucb1x00_reg_read(ucb, UCB_IE_STATUS); ucb1x00_reg_write(ucb, UCB_IE_CLEAR, isr); ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0); for (i = 0, irq = ucb->irq_handler; i < 16 && isr; i++, isr >>= 1, irq++) if (isr & 1 && irq->fn) irq->fn(i, irq->devid); ucb1x00_disable(ucb); return IRQ_HANDLED; } /** * ucb1x00_hook_irq - hook a UCB1x00 interrupt * @ucb: UCB1x00 structure describing chip * @idx: interrupt index * @fn: function to call when interrupt is triggered * @devid: device id to pass to interrupt handler * * Hook the specified interrupt. You can only register one handler * for each interrupt source. The interrupt source is not enabled * by this function; use ucb1x00_enable_irq instead. * * Interrupt handlers will be called with other interrupts enabled. * * Returns zero on success, or one of the following errors: * -EINVAL if the interrupt index is invalid * -EBUSY if the interrupt has already been hooked */ int ucb1x00_hook_irq(struct ucb1x00 *ucb, unsigned int idx, void (*fn)(int, void *), void *devid) { struct ucb1x00_irq *irq; int ret = -EINVAL; if (idx < 16) { irq = ucb->irq_handler + idx; ret = -EBUSY; spin_lock_irq(&ucb->lock); if (irq->fn == NULL) { irq->devid = devid; irq->fn = fn; ret = 0; } spin_unlock_irq(&ucb->lock); } return ret; } /** * ucb1x00_enable_irq - enable an UCB1x00 interrupt source * @ucb: UCB1x00 structure describing chip * @idx: interrupt index * @edges: interrupt edges to enable * * Enable the specified interrupt to trigger on %UCB_RISING, * %UCB_FALLING or both edges. The interrupt should have been * hooked by ucb1x00_hook_irq. 
*/ void ucb1x00_enable_irq(struct ucb1x00 *ucb, unsigned int idx, int edges) { unsigned long flags; if (idx < 16) { spin_lock_irqsave(&ucb->lock, flags); ucb1x00_enable(ucb); if (edges & UCB_RISING) { ucb->irq_ris_enbl |= 1 << idx; ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl); } if (edges & UCB_FALLING) { ucb->irq_fal_enbl |= 1 << idx; ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl); } ucb1x00_disable(ucb); spin_unlock_irqrestore(&ucb->lock, flags); } } /** * ucb1x00_disable_irq - disable an UCB1x00 interrupt source * @ucb: UCB1x00 structure describing chip * @edges: interrupt edges to disable * * Disable the specified interrupt triggering on the specified * (%UCB_RISING, %UCB_FALLING or both) edges. */ void ucb1x00_disable_irq(struct ucb1x00 *ucb, unsigned int idx, int edges) { unsigned long flags; if (idx < 16) { spin_lock_irqsave(&ucb->lock, flags); ucb1x00_enable(ucb); if (edges & UCB_RISING) { ucb->irq_ris_enbl &= ~(1 << idx); ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl); } if (edges & UCB_FALLING) { ucb->irq_fal_enbl &= ~(1 << idx); ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl); } ucb1x00_disable(ucb); spin_unlock_irqrestore(&ucb->lock, flags); } } /** * ucb1x00_free_irq - disable and free the specified UCB1x00 interrupt * @ucb: UCB1x00 structure describing chip * @idx: interrupt index * @devid: device id. * * Disable the interrupt source and remove the handler. devid must * match the devid passed when hooking the interrupt. 
* * Returns zero on success, or one of the following errors: * -EINVAL if the interrupt index is invalid * -ENOENT if devid does not match */ int ucb1x00_free_irq(struct ucb1x00 *ucb, unsigned int idx, void *devid) { struct ucb1x00_irq *irq; int ret; if (idx >= 16) goto bad; irq = ucb->irq_handler + idx; ret = -ENOENT; spin_lock_irq(&ucb->lock); if (irq->devid == devid) { ucb->irq_ris_enbl &= ~(1 << idx); ucb->irq_fal_enbl &= ~(1 << idx); ucb1x00_enable(ucb); ucb1x00_reg_write(ucb, UCB_IE_RIS, ucb->irq_ris_enbl); ucb1x00_reg_write(ucb, UCB_IE_FAL, ucb->irq_fal_enbl); ucb1x00_disable(ucb); irq->fn = NULL; irq->devid = NULL; ret = 0; } spin_unlock_irq(&ucb->lock); return ret; bad: printk(KERN_ERR "Freeing bad UCB1x00 irq %d\n", idx); return -EINVAL; } static int ucb1x00_add_dev(struct ucb1x00 *ucb, struct ucb1x00_driver *drv) { struct ucb1x00_dev *dev; int ret = -ENOMEM; dev = kmalloc(sizeof(struct ucb1x00_dev), GFP_KERNEL); if (dev) { dev->ucb = ucb; dev->drv = drv; ret = drv->add(dev); if (ret == 0) { list_add(&dev->dev_node, &ucb->devs); list_add(&dev->drv_node, &drv->devs); } else { kfree(dev); } } return ret; } static void ucb1x00_remove_dev(struct ucb1x00_dev *dev) { dev->drv->remove(dev); list_del(&dev->dev_node); list_del(&dev->drv_node); kfree(dev); } /* * Try to probe our interrupt, rather than relying on lots of * hard-coded machine dependencies. For reference, the expected * IRQ mappings are: * * Machine Default IRQ * adsbitsy IRQ_GPCIN4 * cerf IRQ_GPIO_UCB1200_IRQ * flexanet IRQ_GPIO_GUI * freebird IRQ_GPIO_FREEBIRD_UCB1300_IRQ * graphicsclient ADS_EXT_IRQ(8) * graphicsmaster ADS_EXT_IRQ(8) * lart LART_IRQ_UCB1200 * omnimeter IRQ_GPIO23 * pfs168 IRQ_GPIO_UCB1300_IRQ * simpad IRQ_GPIO_UCB1300_IRQ * shannon SHANNON_IRQ_GPIO_IRQ_CODEC * yopy IRQ_GPIO_UCB1200_IRQ */ static int ucb1x00_detect_irq(struct ucb1x00 *ucb) { unsigned long mask; mask = probe_irq_on(); if (!mask) { probe_irq_off(mask); return NO_IRQ; } /* * Enable the ADC interrupt. 
*/ ucb1x00_reg_write(ucb, UCB_IE_RIS, UCB_IE_ADC); ucb1x00_reg_write(ucb, UCB_IE_FAL, UCB_IE_ADC); ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0xffff); ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0); /* * Cause an ADC interrupt. */ ucb1x00_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA); ucb1x00_reg_write(ucb, UCB_ADC_CR, UCB_ADC_ENA | UCB_ADC_START); /* * Wait for the conversion to complete. */ while ((ucb1x00_reg_read(ucb, UCB_ADC_DATA) & UCB_ADC_DAT_VAL) == 0); ucb1x00_reg_write(ucb, UCB_ADC_CR, 0); /* * Disable and clear interrupt. */ ucb1x00_reg_write(ucb, UCB_IE_RIS, 0); ucb1x00_reg_write(ucb, UCB_IE_FAL, 0); ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0xffff); ucb1x00_reg_write(ucb, UCB_IE_CLEAR, 0); /* * Read triggered interrupt. */ return probe_irq_off(mask); } static void ucb1x00_release(struct device *dev) { struct ucb1x00 *ucb = classdev_to_ucb1x00(dev); kfree(ucb); } static struct class ucb1x00_class = { .name = "ucb1x00", .dev_release = ucb1x00_release, }; static int ucb1x00_probe(struct mcp *mcp) { struct ucb1x00 *ucb; struct ucb1x00_driver *drv; unsigned int id; int ret = -ENODEV; int temp; mcp_enable(mcp); id = mcp_reg_read(mcp, UCB_ID); if (id != UCB_ID_1200 && id != UCB_ID_1300 && id != UCB_ID_TC35143) { printk(KERN_WARNING "UCB1x00 ID not found: %04x\n", id); goto err_disable; } ucb = kzalloc(sizeof(struct ucb1x00), GFP_KERNEL); ret = -ENOMEM; if (!ucb) goto err_disable; ucb->dev.class = &ucb1x00_class; ucb->dev.parent = &mcp->attached_device; dev_set_name(&ucb->dev, "ucb1x00"); spin_lock_init(&ucb->lock); spin_lock_init(&ucb->io_lock); sema_init(&ucb->adc_sem, 1); ucb->id = id; ucb->mcp = mcp; ucb->irq = ucb1x00_detect_irq(ucb); if (ucb->irq == NO_IRQ) { printk(KERN_ERR "UCB1x00: IRQ probe failed\n"); ret = -ENODEV; goto err_free; } ucb->gpio.base = -1; if (mcp->gpio_base != 0) { ucb->gpio.label = dev_name(&ucb->dev); ucb->gpio.base = mcp->gpio_base; ucb->gpio.ngpio = 10; ucb->gpio.set = ucb1x00_gpio_set; ucb->gpio.get = ucb1x00_gpio_get; ucb->gpio.direction_input = 
ucb1x00_gpio_direction_input; ucb->gpio.direction_output = ucb1x00_gpio_direction_output; ret = gpiochip_add(&ucb->gpio); if (ret) goto err_free; } else dev_info(&ucb->dev, "gpio_base not set so no gpiolib support"); ret = request_irq(ucb->irq, ucb1x00_irq, IRQF_TRIGGER_RISING, "UCB1x00", ucb); if (ret) { printk(KERN_ERR "ucb1x00: unable to grab irq%d: %d\n", ucb->irq, ret); goto err_gpio; } mcp_set_drvdata(mcp, ucb); ret = device_register(&ucb->dev); if (ret) goto err_irq; INIT_LIST_HEAD(&ucb->devs); mutex_lock(&ucb1x00_mutex); list_add(&ucb->node, &ucb1x00_devices); list_for_each_entry(drv, &ucb1x00_drivers, node) { ucb1x00_add_dev(ucb, drv); } mutex_unlock(&ucb1x00_mutex); goto out; err_irq: free_irq(ucb->irq, ucb); err_gpio: if (ucb->gpio.base != -1) temp = gpiochip_remove(&ucb->gpio); err_free: kfree(ucb); err_disable: mcp_disable(mcp); out: return ret; } static void ucb1x00_remove(struct mcp *mcp) { struct ucb1x00 *ucb = mcp_get_drvdata(mcp); struct list_head *l, *n; int ret; mutex_lock(&ucb1x00_mutex); list_del(&ucb->node); list_for_each_safe(l, n, &ucb->devs) { struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, dev_node); ucb1x00_remove_dev(dev); } mutex_unlock(&ucb1x00_mutex); if (ucb->gpio.base != -1) { ret = gpiochip_remove(&ucb->gpio); if (ret) dev_err(&ucb->dev, "Can't remove gpio chip: %d\n", ret); } free_irq(ucb->irq, ucb); device_unregister(&ucb->dev); } int ucb1x00_register_driver(struct ucb1x00_driver *drv) { struct ucb1x00 *ucb; INIT_LIST_HEAD(&drv->devs); mutex_lock(&ucb1x00_mutex); list_add(&drv->node, &ucb1x00_drivers); list_for_each_entry(ucb, &ucb1x00_devices, node) { ucb1x00_add_dev(ucb, drv); } mutex_unlock(&ucb1x00_mutex); return 0; } void ucb1x00_unregister_driver(struct ucb1x00_driver *drv) { struct list_head *n, *l; mutex_lock(&ucb1x00_mutex); list_del(&drv->node); list_for_each_safe(l, n, &drv->devs) { struct ucb1x00_dev *dev = list_entry(l, struct ucb1x00_dev, drv_node); ucb1x00_remove_dev(dev); } 
mutex_unlock(&ucb1x00_mutex); } static int ucb1x00_suspend(struct mcp *mcp, pm_message_t state) { struct ucb1x00 *ucb = mcp_get_drvdata(mcp); struct ucb1x00_dev *dev; mutex_lock(&ucb1x00_mutex); list_for_each_entry(dev, &ucb->devs, dev_node) { if (dev->drv->suspend) dev->drv->suspend(dev, state); } mutex_unlock(&ucb1x00_mutex); return 0; } static int ucb1x00_resume(struct mcp *mcp) { struct ucb1x00 *ucb = mcp_get_drvdata(mcp); struct ucb1x00_dev *dev; ucb1x00_reg_write(ucb, UCB_IO_DIR, ucb->io_dir); mutex_lock(&ucb1x00_mutex); list_for_each_entry(dev, &ucb->devs, dev_node) { if (dev->drv->resume) dev->drv->resume(dev); } mutex_unlock(&ucb1x00_mutex); return 0; } static struct mcp_driver ucb1x00_driver = { .drv = { .name = "ucb1x00", }, .probe = ucb1x00_probe, .remove = ucb1x00_remove, .suspend = ucb1x00_suspend, .resume = ucb1x00_resume, }; static int __init ucb1x00_init(void) { int ret = class_register(&ucb1x00_class); if (ret == 0) { ret = mcp_driver_register(&ucb1x00_driver); if (ret) class_unregister(&ucb1x00_class); } return ret; } static void __exit ucb1x00_exit(void) { mcp_driver_unregister(&ucb1x00_driver); class_unregister(&ucb1x00_class); } module_init(ucb1x00_init); module_exit(ucb1x00_exit); EXPORT_SYMBOL(ucb1x00_io_set_dir); EXPORT_SYMBOL(ucb1x00_io_write); EXPORT_SYMBOL(ucb1x00_io_read); EXPORT_SYMBOL(ucb1x00_adc_enable); EXPORT_SYMBOL(ucb1x00_adc_read); EXPORT_SYMBOL(ucb1x00_adc_disable); EXPORT_SYMBOL(ucb1x00_hook_irq); EXPORT_SYMBOL(ucb1x00_free_irq); EXPORT_SYMBOL(ucb1x00_enable_irq); EXPORT_SYMBOL(ucb1x00_disable_irq); EXPORT_SYMBOL(ucb1x00_register_driver); EXPORT_SYMBOL(ucb1x00_unregister_driver); MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>"); MODULE_DESCRIPTION("UCB1x00 core driver"); MODULE_LICENSE("GPL");
gpl-2.0
MichaelQQ/Linux_PE
arch/x86/realmode/rm/wakemain.c
4716
1541
#include "wakeup.h"
#include "boot.h"

/* Busy-wait roughly @loops microseconds using port-I/O delays. */
static void udelay(int loops)
{
	while (loops--)
		io_delay();	/* Approximately 1 us */
}

/*
 * Drive the PC speaker at @hz via PIT channel 2; @hz == 0 silences it.
 */
static void beep(unsigned int hz)
{
	u8 enable = 0x00;		/* Speaker off unless a tone is set */

	if (hz) {
		u16 div = 1193181/hz;	/* PIT input clock / desired tone */

		outb(0xb6, 0x43);	/* Ctr 2, squarewave, load, binary */
		io_delay();
		outb(div, 0x42);	/* LSB of counter */
		io_delay();
		outb(div >> 8, 0x42);	/* MSB of counter */
		io_delay();

		enable = 0x03;		/* Turn on speaker */
	}
	inb(0x61);		/* Dummy read of System Control Port B */
	io_delay();
	outb(enable, 0x61);	/* Enable timer 2 output to speaker */
	io_delay();
}

#define DOT_HZ		880
#define DASH_HZ		587
#define US_PER_DOT	125000

/* Okay, this is totally silly, but it's kind of fun. */
static void send_morse(const char *pattern)
{
	char s;

	while ((s = *pattern++)) {
		if (s == '.') {
			beep(DOT_HZ);
			udelay(US_PER_DOT);
			beep(0);
			udelay(US_PER_DOT);
		} else if (s == '-') {
			beep(DASH_HZ);
			udelay(US_PER_DOT * 3);
			beep(0);
			udelay(US_PER_DOT);
		} else {
			/* Assume it's a space */
			udelay(US_PER_DOT * 3);
		}
	}
}

void main(void)
{
	/* Kill machine if structures are wrong */
	if (wakeup_header.real_magic != 0x12345678)
		while (1)
			;

	if (wakeup_header.realmode_flags & 4)
		send_morse("...-");

	if (wakeup_header.realmode_flags & 1)
		asm volatile("lcallw $0xc000,$3");

	if (wakeup_header.realmode_flags & 2) {
		/* Need to call BIOS */
		probe_cards(0);
		set_mode(wakeup_header.video_mode);
	}
}
gpl-2.0
wulsic/Hyper_CM11
arch/m68k/mm/cache.c
4716
2915
/*
 *  linux/arch/m68k/mm/cache.c
 *
 *  Instruction cache handling
 *
 *  Copyright (C) 1995  Hamish Macdonald
 */

#include <linux/module.h>
#include <asm/pgalloc.h>
#include <asm/traps.h>


/*
 * Translate a kernel virtual address to a physical address by probing
 * the MMU directly, selecting the probe instruction per CPU model.
 * Returns 0 when no valid translation exists.
 */
static unsigned long virt_to_phys_slow(unsigned long vaddr)
{
	if (CPU_IS_060) {
		unsigned long paddr;

		/* The PLPAR instruction causes an access error if the translation
		 * is not possible. To catch this we use the same exception mechanism
		 * as for user space accesses in <asm/uaccess.h>. */
		/* On fault the fixup at label 3 zeroes %0, so the function
		 * falls back to returning 0 (no translation). */
		asm volatile (".chip 68060\n"
			      "1: plpar (%0)\n"
			      ".chip 68k\n"
			      "2:\n"
			      ".section .fixup,\"ax\"\n"
			      "   .even\n"
			      "3: sub.l %0,%0\n"
			      "   jra 2b\n"
			      ".previous\n"
			      ".section __ex_table,\"a\"\n"
			      "   .align 4\n"
			      "   .long 1b,3b\n"
			      ".previous"
			      : "=a" (paddr)
			      : "0" (vaddr));
		return paddr;
	} else if (CPU_IS_040) {
		unsigned long mmusr;

		/* '040: PTESTR loads the translation result into MMUSR. */
		asm volatile (".chip 68040\n\t"
			      "ptestr (%1)\n\t"
			      "movec %%mmusr, %0\n\t"
			      ".chip 68k"
			      : "=r" (mmusr)
			      : "a" (vaddr));

		/* Combine the page frame from MMUSR with the offset bits
		 * of vaddr; falls through to "return 0" when the resident
		 * bit is clear. */
		if (mmusr & MMU_R_040)
			return (mmusr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
	} else {
		unsigned short mmusr;
		unsigned long *descaddr;

		/* '020/'030: PTESTR walks the translation tree (7 levels)
		 * and leaves the descriptor address in %0 and the status
		 * in the MMU PSR, stored to mmusr. */
		asm volatile ("ptestr %3,%2@,#7,%0\n\t"
			      "pmove %%psr,%1@"
			      : "=a&" (descaddr)
			      : "a" (&mmusr), "a" (vaddr), "d" (get_fs().seg));

		/* Invalid, bus-error or limit-violation status: no mapping. */
		if (mmusr & (MMU_I|MMU_B|MMU_L))
			return 0;
		descaddr = phys_to_virt((unsigned long)descaddr);
		/* The table level reached (MMU_NUM) decides how many low
		 * bits of vaddr are page/segment offset vs. frame number.
		 * NOTE(review): the 0xfe000000/0xfffc0000 masks presumably
		 * correspond to the early-termination descriptor sizes at
		 * those levels -- confirm against the 68030 MMU manual. */
		switch (mmusr & MMU_NUM) {
		case 1:
			return (*descaddr & 0xfe000000) | (vaddr & 0x01ffffff);
		case 2:
			return (*descaddr & 0xfffc0000) | (vaddr & 0x0003ffff);
		case 3:
			return (*descaddr & PAGE_MASK) | (vaddr & ~PAGE_MASK);
		}
	}
	return 0;
}

/* Push n pages at kernel virtual address and clear the icache */
/* RZ: use cpush %bc instead of cpush %dc, cinv %ic */
/*
 * Flush the instruction cache for the kernel virtual range
 * [address, endaddr).  On '040/'060 this pushes/invalidates both
 * caches page by page using physical addresses; on older CPUs it
 * sets the FLUSH_I bit in CACR, invalidating the whole icache.
 */
void flush_icache_range(unsigned long address, unsigned long endaddr)
{
	if (CPU_IS_040_OR_060) {
		address &= PAGE_MASK;

		do {
			/* cpushp needs a physical page address. */
			asm volatile ("nop\n\t"
				      ".chip 68040\n\t"
				      "cpushp %%bc,(%0)\n\t"
				      ".chip 68k"
				      : : "a" (virt_to_phys_slow(address)));
			address += PAGE_SIZE;
		} while (address < endaddr);
	} else {
		unsigned long tmp;
		asm volatile ("movec %%cacr,%0\n\t"
			      "orw %1,%0\n\t"
			      "movec %0,%%cacr"
			      : "=&d" (tmp)
			      : "di" (FLUSH_I));
	}
}
EXPORT_SYMBOL(flush_icache_range);

/*
 * Flush the instruction cache for a single user page.  The page's
 * physical address is known directly, so no MMU probe is needed.
 * NOTE(review): vma, addr and len are unused here -- the whole page
 * (or on older CPUs the whole icache) is flushed regardless.
 */
void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
			     unsigned long addr, int len)
{
	if (CPU_IS_040_OR_060) {
		asm volatile ("nop\n\t"
			      ".chip 68040\n\t"
			      "cpushp %%bc,(%0)\n\t"
			      ".chip 68k"
			      : : "a" (page_to_phys(page)));
	} else {
		unsigned long tmp;
		asm volatile ("movec %%cacr,%0\n\t"
			      "orw %1,%0\n\t"
			      "movec %0,%%cacr"
			      : "=&d" (tmp)
			      : "di" (FLUSH_I));
	}
}
gpl-2.0
Grace5921/untouched
drivers/gpio/gpio-mpc5200.c
5228
9826
/* * MPC52xx gpio driver * * Copyright (c) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/of.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/of_gpio.h> #include <linux/io.h> #include <linux/of_platform.h> #include <linux/module.h> #include <asm/gpio.h> #include <asm/mpc52xx.h> #include <sysdev/fsl_soc.h> static DEFINE_SPINLOCK(gpio_lock); struct mpc52xx_gpiochip { struct of_mm_gpio_chip mmchip; unsigned int shadow_dvo; unsigned int shadow_gpioe; unsigned int shadow_ddr; }; /* * GPIO LIB API implementation for wakeup GPIOs. * * There's a maximum of 8 wakeup GPIOs. Which of these are available * for use depends on your board setup. 
* * 0 -> GPIO_WKUP_7 * 1 -> GPIO_WKUP_6 * 2 -> PSC6_1 * 3 -> PSC6_0 * 4 -> ETH_17 * 5 -> PSC3_9 * 6 -> PSC2_4 * 7 -> PSC1_4 * */ static int mpc52xx_wkup_gpio_get(struct gpio_chip *gc, unsigned int gpio) { struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs; unsigned int ret; ret = (in_8(&regs->wkup_ival) >> (7 - gpio)) & 1; pr_debug("%s: gpio: %d ret: %d\n", __func__, gpio, ret); return ret; } static inline void __mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); struct mpc52xx_gpiochip *chip = container_of(mm_gc, struct mpc52xx_gpiochip, mmchip); struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs; if (val) chip->shadow_dvo |= 1 << (7 - gpio); else chip->shadow_dvo &= ~(1 << (7 - gpio)); out_8(&regs->wkup_dvo, chip->shadow_dvo); } static void mpc52xx_wkup_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { unsigned long flags; spin_lock_irqsave(&gpio_lock, flags); __mpc52xx_wkup_gpio_set(gc, gpio, val); spin_unlock_irqrestore(&gpio_lock, flags); pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); } static int mpc52xx_wkup_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) { struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); struct mpc52xx_gpiochip *chip = container_of(mm_gc, struct mpc52xx_gpiochip, mmchip); struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs; unsigned long flags; spin_lock_irqsave(&gpio_lock, flags); /* set the direction */ chip->shadow_ddr &= ~(1 << (7 - gpio)); out_8(&regs->wkup_ddr, chip->shadow_ddr); /* and enable the pin */ chip->shadow_gpioe |= 1 << (7 - gpio); out_8(&regs->wkup_gpioe, chip->shadow_gpioe); spin_unlock_irqrestore(&gpio_lock, flags); return 0; } static int mpc52xx_wkup_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); struct mpc52xx_gpio_wkup __iomem *regs = mm_gc->regs; struct mpc52xx_gpiochip 
*chip = container_of(mm_gc, struct mpc52xx_gpiochip, mmchip); unsigned long flags; spin_lock_irqsave(&gpio_lock, flags); __mpc52xx_wkup_gpio_set(gc, gpio, val); /* Then set direction */ chip->shadow_ddr |= 1 << (7 - gpio); out_8(&regs->wkup_ddr, chip->shadow_ddr); /* Finally enable the pin */ chip->shadow_gpioe |= 1 << (7 - gpio); out_8(&regs->wkup_gpioe, chip->shadow_gpioe); spin_unlock_irqrestore(&gpio_lock, flags); pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); return 0; } static int __devinit mpc52xx_wkup_gpiochip_probe(struct platform_device *ofdev) { struct mpc52xx_gpiochip *chip; struct mpc52xx_gpio_wkup __iomem *regs; struct gpio_chip *gc; int ret; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; gc = &chip->mmchip.gc; gc->ngpio = 8; gc->direction_input = mpc52xx_wkup_gpio_dir_in; gc->direction_output = mpc52xx_wkup_gpio_dir_out; gc->get = mpc52xx_wkup_gpio_get; gc->set = mpc52xx_wkup_gpio_set; ret = of_mm_gpiochip_add(ofdev->dev.of_node, &chip->mmchip); if (ret) return ret; regs = chip->mmchip.regs; chip->shadow_gpioe = in_8(&regs->wkup_gpioe); chip->shadow_ddr = in_8(&regs->wkup_ddr); chip->shadow_dvo = in_8(&regs->wkup_dvo); return 0; } static int mpc52xx_gpiochip_remove(struct platform_device *ofdev) { return -EBUSY; } static const struct of_device_id mpc52xx_wkup_gpiochip_match[] = { { .compatible = "fsl,mpc5200-gpio-wkup", }, {} }; static struct platform_driver mpc52xx_wkup_gpiochip_driver = { .driver = { .name = "mpc5200-gpio-wkup", .owner = THIS_MODULE, .of_match_table = mpc52xx_wkup_gpiochip_match, }, .probe = mpc52xx_wkup_gpiochip_probe, .remove = mpc52xx_gpiochip_remove, }; /* * GPIO LIB API implementation for simple GPIOs * * There's a maximum of 32 simple GPIOs. Which of these are available * for use depends on your board setup. 
* The numbering reflects the bit numbering in the port registers: * * 0..1 > reserved * 2..3 > IRDA * 4..7 > ETHR * 8..11 > reserved * 12..15 > USB * 16..17 > reserved * 18..23 > PSC3 * 24..27 > PSC2 * 28..31 > PSC1 */ static int mpc52xx_simple_gpio_get(struct gpio_chip *gc, unsigned int gpio) { struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); struct mpc52xx_gpio __iomem *regs = mm_gc->regs; unsigned int ret; ret = (in_be32(&regs->simple_ival) >> (31 - gpio)) & 1; return ret; } static inline void __mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); struct mpc52xx_gpiochip *chip = container_of(mm_gc, struct mpc52xx_gpiochip, mmchip); struct mpc52xx_gpio __iomem *regs = mm_gc->regs; if (val) chip->shadow_dvo |= 1 << (31 - gpio); else chip->shadow_dvo &= ~(1 << (31 - gpio)); out_be32(&regs->simple_dvo, chip->shadow_dvo); } static void mpc52xx_simple_gpio_set(struct gpio_chip *gc, unsigned int gpio, int val) { unsigned long flags; spin_lock_irqsave(&gpio_lock, flags); __mpc52xx_simple_gpio_set(gc, gpio, val); spin_unlock_irqrestore(&gpio_lock, flags); pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); } static int mpc52xx_simple_gpio_dir_in(struct gpio_chip *gc, unsigned int gpio) { struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); struct mpc52xx_gpiochip *chip = container_of(mm_gc, struct mpc52xx_gpiochip, mmchip); struct mpc52xx_gpio __iomem *regs = mm_gc->regs; unsigned long flags; spin_lock_irqsave(&gpio_lock, flags); /* set the direction */ chip->shadow_ddr &= ~(1 << (31 - gpio)); out_be32(&regs->simple_ddr, chip->shadow_ddr); /* and enable the pin */ chip->shadow_gpioe |= 1 << (31 - gpio); out_be32(&regs->simple_gpioe, chip->shadow_gpioe); spin_unlock_irqrestore(&gpio_lock, flags); return 0; } static int mpc52xx_simple_gpio_dir_out(struct gpio_chip *gc, unsigned int gpio, int val) { struct of_mm_gpio_chip *mm_gc = to_of_mm_gpio_chip(gc); struct mpc52xx_gpiochip 
*chip = container_of(mm_gc, struct mpc52xx_gpiochip, mmchip); struct mpc52xx_gpio __iomem *regs = mm_gc->regs; unsigned long flags; spin_lock_irqsave(&gpio_lock, flags); /* First set initial value */ __mpc52xx_simple_gpio_set(gc, gpio, val); /* Then set direction */ chip->shadow_ddr |= 1 << (31 - gpio); out_be32(&regs->simple_ddr, chip->shadow_ddr); /* Finally enable the pin */ chip->shadow_gpioe |= 1 << (31 - gpio); out_be32(&regs->simple_gpioe, chip->shadow_gpioe); spin_unlock_irqrestore(&gpio_lock, flags); pr_debug("%s: gpio: %d val: %d\n", __func__, gpio, val); return 0; } static int __devinit mpc52xx_simple_gpiochip_probe(struct platform_device *ofdev) { struct mpc52xx_gpiochip *chip; struct gpio_chip *gc; struct mpc52xx_gpio __iomem *regs; int ret; chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; gc = &chip->mmchip.gc; gc->ngpio = 32; gc->direction_input = mpc52xx_simple_gpio_dir_in; gc->direction_output = mpc52xx_simple_gpio_dir_out; gc->get = mpc52xx_simple_gpio_get; gc->set = mpc52xx_simple_gpio_set; ret = of_mm_gpiochip_add(ofdev->dev.of_node, &chip->mmchip); if (ret) return ret; regs = chip->mmchip.regs; chip->shadow_gpioe = in_be32(&regs->simple_gpioe); chip->shadow_ddr = in_be32(&regs->simple_ddr); chip->shadow_dvo = in_be32(&regs->simple_dvo); return 0; } static const struct of_device_id mpc52xx_simple_gpiochip_match[] = { { .compatible = "fsl,mpc5200-gpio", }, {} }; static struct platform_driver mpc52xx_simple_gpiochip_driver = { .driver = { .name = "mpc5200-gpio", .owner = THIS_MODULE, .of_match_table = mpc52xx_simple_gpiochip_match, }, .probe = mpc52xx_simple_gpiochip_probe, .remove = mpc52xx_gpiochip_remove, }; static int __init mpc52xx_gpio_init(void) { if (platform_driver_register(&mpc52xx_wkup_gpiochip_driver)) printk(KERN_ERR "Unable to register wakeup GPIO driver\n"); if (platform_driver_register(&mpc52xx_simple_gpiochip_driver)) printk(KERN_ERR "Unable to register simple GPIO driver\n"); return 0; } /* Make sure we get 
initialised before anyone else tries to use us */ subsys_initcall(mpc52xx_gpio_init); /* No exit call at the moment as we cannot unregister of gpio chips */ MODULE_DESCRIPTION("Freescale MPC52xx gpio driver"); MODULE_AUTHOR("Sascha Hauer <s.hauer@pengutronix.de"); MODULE_LICENSE("GPL v2");
gpl-2.0
ochiman/e405-kernel
drivers/staging/rtl8192e/r819xE_phy.c
109
91655
#include "r8192E.h" #include "r8192E_hw.h" #include "r819xE_phyreg.h" #include "r8190_rtl8256.h" #include "r819xE_phy.h" #include "r8192E_dm.h" #ifdef ENABLE_DOT11D #include "ieee80211/dot11d.h" #endif static const u32 RF_CHANNEL_TABLE_ZEBRA[] = { 0, 0x085c, //2412 1 0x08dc, //2417 2 0x095c, //2422 3 0x09dc, //2427 4 0x0a5c, //2432 5 0x0adc, //2437 6 0x0b5c, //2442 7 0x0bdc, //2447 8 0x0c5c, //2452 9 0x0cdc, //2457 10 0x0d5c, //2462 11 0x0ddc, //2467 12 0x0e5c, //2472 13 0x0f72, //2484 }; #ifdef RTL8190P u32 Rtl8190PciMACPHY_Array[] = { 0x03c,0xffff0000,0x00000f0f, 0x340,0xffffffff,0x161a1a1a, 0x344,0xffffffff,0x12121416, 0x348,0x0000ffff,0x00001818, 0x12c,0xffffffff,0x04000802, 0x318,0x00000fff,0x00000800, }; u32 Rtl8190PciMACPHY_Array_PG[] = { 0x03c,0xffff0000,0x00000f0f, 0x340,0xffffffff,0x0a0c0d0f, 0x344,0xffffffff,0x06070809, 0x344,0xffffffff,0x06070809, 0x348,0x0000ffff,0x00000000, 0x12c,0xffffffff,0x04000802, 0x318,0x00000fff,0x00000800, }; u32 Rtl8190PciAGCTAB_Array[AGCTAB_ArrayLength] = { 0xc78,0x7d000001, 0xc78,0x7d010001, 0xc78,0x7d020001, 0xc78,0x7d030001, 0xc78,0x7c040001, 0xc78,0x7b050001, 0xc78,0x7a060001, 0xc78,0x79070001, 0xc78,0x78080001, 0xc78,0x77090001, 0xc78,0x760a0001, 0xc78,0x750b0001, 0xc78,0x740c0001, 0xc78,0x730d0001, 0xc78,0x720e0001, 0xc78,0x710f0001, 0xc78,0x70100001, 0xc78,0x6f110001, 0xc78,0x6e120001, 0xc78,0x6d130001, 0xc78,0x6c140001, 0xc78,0x6b150001, 0xc78,0x6a160001, 0xc78,0x69170001, 0xc78,0x68180001, 0xc78,0x67190001, 0xc78,0x661a0001, 0xc78,0x651b0001, 0xc78,0x641c0001, 0xc78,0x491d0001, 0xc78,0x481e0001, 0xc78,0x471f0001, 0xc78,0x46200001, 0xc78,0x45210001, 0xc78,0x44220001, 0xc78,0x43230001, 0xc78,0x28240001, 0xc78,0x27250001, 0xc78,0x26260001, 0xc78,0x25270001, 0xc78,0x24280001, 0xc78,0x23290001, 0xc78,0x222a0001, 0xc78,0x212b0001, 0xc78,0x202c0001, 0xc78,0x0a2d0001, 0xc78,0x082e0001, 0xc78,0x062f0001, 0xc78,0x05300001, 0xc78,0x04310001, 0xc78,0x03320001, 0xc78,0x02330001, 0xc78,0x01340001, 0xc78,0x00350001, 
0xc78,0x00360001, 0xc78,0x00370001, 0xc78,0x00380001, 0xc78,0x00390001, 0xc78,0x003a0001, 0xc78,0x003b0001, 0xc78,0x003c0001, 0xc78,0x003d0001, 0xc78,0x003e0001, 0xc78,0x003f0001, 0xc78,0x7d400001, 0xc78,0x7d410001, 0xc78,0x7d420001, 0xc78,0x7d430001, 0xc78,0x7c440001, 0xc78,0x7b450001, 0xc78,0x7a460001, 0xc78,0x79470001, 0xc78,0x78480001, 0xc78,0x77490001, 0xc78,0x764a0001, 0xc78,0x754b0001, 0xc78,0x744c0001, 0xc78,0x734d0001, 0xc78,0x724e0001, 0xc78,0x714f0001, 0xc78,0x70500001, 0xc78,0x6f510001, 0xc78,0x6e520001, 0xc78,0x6d530001, 0xc78,0x6c540001, 0xc78,0x6b550001, 0xc78,0x6a560001, 0xc78,0x69570001, 0xc78,0x68580001, 0xc78,0x67590001, 0xc78,0x665a0001, 0xc78,0x655b0001, 0xc78,0x645c0001, 0xc78,0x495d0001, 0xc78,0x485e0001, 0xc78,0x475f0001, 0xc78,0x46600001, 0xc78,0x45610001, 0xc78,0x44620001, 0xc78,0x43630001, 0xc78,0x28640001, 0xc78,0x27650001, 0xc78,0x26660001, 0xc78,0x25670001, 0xc78,0x24680001, 0xc78,0x23690001, 0xc78,0x226a0001, 0xc78,0x216b0001, 0xc78,0x206c0001, 0xc78,0x0a6d0001, 0xc78,0x086e0001, 0xc78,0x066f0001, 0xc78,0x05700001, 0xc78,0x04710001, 0xc78,0x03720001, 0xc78,0x02730001, 0xc78,0x01740001, 0xc78,0x00750001, 0xc78,0x00760001, 0xc78,0x00770001, 0xc78,0x00780001, 0xc78,0x00790001, 0xc78,0x007a0001, 0xc78,0x007b0001, 0xc78,0x007c0001, 0xc78,0x007d0001, 0xc78,0x007e0001, 0xc78,0x007f0001, 0xc78,0x3600001e, 0xc78,0x3601001e, 0xc78,0x3602001e, 0xc78,0x3603001e, 0xc78,0x3604001e, 0xc78,0x3605001e, 0xc78,0x3a06001e, 0xc78,0x3c07001e, 0xc78,0x3e08001e, 0xc78,0x4209001e, 0xc78,0x430a001e, 0xc78,0x450b001e, 0xc78,0x470c001e, 0xc78,0x480d001e, 0xc78,0x490e001e, 0xc78,0x4b0f001e, 0xc78,0x4c10001e, 0xc78,0x4d11001e, 0xc78,0x4d12001e, 0xc78,0x4e13001e, 0xc78,0x4f14001e, 0xc78,0x5015001e, 0xc78,0x5116001e, 0xc78,0x5117001e, 0xc78,0x5218001e, 0xc78,0x5219001e, 0xc78,0x531a001e, 0xc78,0x541b001e, 0xc78,0x541c001e, 0xc78,0x551d001e, 0xc78,0x561e001e, 0xc78,0x561f001e, 0xc78,0x5720001e, 0xc78,0x5821001e, 0xc78,0x5822001e, 0xc78,0x5923001e, 0xc78,0x5924001e, 
0xc78,0x5a25001e, 0xc78,0x5b26001e, 0xc78,0x5b27001e, 0xc78,0x5c28001e, 0xc78,0x5c29001e, 0xc78,0x5d2a001e, 0xc78,0x5d2b001e, 0xc78,0x5e2c001e, 0xc78,0x5e2d001e, 0xc78,0x5f2e001e, 0xc78,0x602f001e, 0xc78,0x6030001e, 0xc78,0x6131001e, 0xc78,0x6132001e, 0xc78,0x6233001e, 0xc78,0x6234001e, 0xc78,0x6335001e, 0xc78,0x6336001e, 0xc78,0x6437001e, 0xc78,0x6538001e, 0xc78,0x6639001e, 0xc78,0x663a001e, 0xc78,0x673b001e, 0xc78,0x683c001e, 0xc78,0x693d001e, 0xc78,0x6a3e001e, 0xc78,0x6b3f001e, }; u32 Rtl8190PciPHY_REGArray[PHY_REGArrayLength] = { 0x800,0x00050060, 0x804,0x00000005, 0x808,0x0000fc00, 0x80c,0x0000001c, 0x810,0x801010aa, 0x814,0x000908c0, 0x818,0x00000000, 0x81c,0x00000000, 0x820,0x00000004, 0x824,0x00690000, 0x828,0x00000004, 0x82c,0x00e90000, 0x830,0x00000004, 0x834,0x00690000, 0x838,0x00000004, 0x83c,0x00e90000, 0x840,0x00000000, 0x844,0x00000000, 0x848,0x00000000, 0x84c,0x00000000, 0x850,0x00000000, 0x854,0x00000000, 0x858,0x65a965a9, 0x85c,0x65a965a9, 0x860,0x001f0010, 0x864,0x007f0010, 0x868,0x001f0010, 0x86c,0x007f0010, 0x870,0x0f100f70, 0x874,0x0f100f70, 0x878,0x00000000, 0x87c,0x00000000, 0x880,0x5c385eb8, 0x884,0x6357060d, 0x888,0x0460c341, 0x88c,0x0000ff00, 0x890,0x00000000, 0x894,0xfffffffe, 0x898,0x4c42382f, 0x89c,0x00656056, 0x8b0,0x00000000, 0x8e0,0x00000000, 0x8e4,0x00000000, 0x900,0x00000000, 0x904,0x00000023, 0x908,0x00000000, 0x90c,0x35541545, 0xa00,0x00d0c7d8, 0xa04,0xab1f0008, 0xa08,0x80cd8300, 0xa0c,0x2e62740f, 0xa10,0x95009b78, 0xa14,0x11145008, 0xa18,0x00881117, 0xa1c,0x89140fa0, 0xa20,0x1a1b0000, 0xa24,0x090e1317, 0xa28,0x00000204, 0xa2c,0x00000000, 0xc00,0x00000040, 0xc04,0x0000500f, 0xc08,0x000000e4, 0xc0c,0x6c6c6c6c, 0xc10,0x08000000, 0xc14,0x40000100, 0xc18,0x08000000, 0xc1c,0x40000100, 0xc20,0x08000000, 0xc24,0x40000100, 0xc28,0x08000000, 0xc2c,0x40000100, 0xc30,0x6de9ac44, 0xc34,0x164052cd, 0xc38,0x00070a14, 0xc3c,0x0a969764, 0xc40,0x1f7c403f, 0xc44,0x000100b7, 0xc48,0xec020000, 0xc4c,0x00000300, 0xc50,0x69543420, 0xc54,0x433c0094, 
0xc58,0x69543420, 0xc5c,0x433c0094, 0xc60,0x69543420, 0xc64,0x433c0094, 0xc68,0x69543420, 0xc6c,0x433c0094, 0xc70,0x2c7f000d, 0xc74,0x0186175b, 0xc78,0x0000001f, 0xc7c,0x00b91612, 0xc80,0x40000100, 0xc84,0x00000000, 0xc88,0x40000100, 0xc8c,0x08000000, 0xc90,0x40000100, 0xc94,0x00000000, 0xc98,0x40000100, 0xc9c,0x00000000, 0xca0,0x00492492, 0xca4,0x00000000, 0xca8,0x00000000, 0xcac,0x00000000, 0xcb0,0x00000000, 0xcb4,0x00000000, 0xcb8,0x00000000, 0xcbc,0x00492492, 0xcc0,0x00000000, 0xcc4,0x00000000, 0xcc8,0x00000000, 0xccc,0x00000000, 0xcd0,0x00000000, 0xcd4,0x00000000, 0xcd8,0x64b22427, 0xcdc,0x00766932, 0xce0,0x00222222, 0xd00,0x00000740, 0xd04,0x0000040f, 0xd08,0x0000803f, 0xd0c,0x00000001, 0xd10,0xa0633333, 0xd14,0x33333c63, 0xd18,0x6a8f5b6b, 0xd1c,0x00000000, 0xd20,0x00000000, 0xd24,0x00000000, 0xd28,0x00000000, 0xd2c,0xcc979975, 0xd30,0x00000000, 0xd34,0x00000000, 0xd38,0x00000000, 0xd3c,0x00027293, 0xd40,0x00000000, 0xd44,0x00000000, 0xd48,0x00000000, 0xd4c,0x00000000, 0xd50,0x6437140a, 0xd54,0x024dbd02, 0xd58,0x00000000, 0xd5c,0x14032064, }; u32 Rtl8190PciPHY_REG_1T2RArray[PHY_REG_1T2RArrayLength] = { 0x800,0x00050060, 0x804,0x00000004, 0x808,0x0000fc00, 0x80c,0x0000001c, 0x810,0x801010aa, 0x814,0x000908c0, 0x818,0x00000000, 0x81c,0x00000000, 0x820,0x00000004, 0x824,0x00690000, 0x828,0x00000004, 0x82c,0x00e90000, 0x830,0x00000004, 0x834,0x00690000, 0x838,0x00000004, 0x83c,0x00e90000, 0x840,0x00000000, 0x844,0x00000000, 0x848,0x00000000, 0x84c,0x00000000, 0x850,0x00000000, 0x854,0x00000000, 0x858,0x65a965a9, 0x85c,0x65a965a9, 0x860,0x001f0000, 0x864,0x007f0000, 0x868,0x001f0010, 0x86c,0x007f0010, 0x870,0x0f100f70, 0x874,0x0f100f70, 0x878,0x00000000, 0x87c,0x00000000, 0x880,0x5c385898, 0x884,0x6357060d, 0x888,0x0460c341, 0x88c,0x0000fc00, 0x890,0x00000000, 0x894,0xfffffffe, 0x898,0x4c42382f, 0x89c,0x00656056, 0x8b0,0x00000000, 0x8e0,0x00000000, 0x8e4,0x00000000, 0x900,0x00000000, 0x904,0x00000023, 0x908,0x00000000, 0x90c,0x34441444, 0xa00,0x00d0c7d8, 
0xa04,0x2b1f0008, 0xa08,0x80cd8300, 0xa0c,0x2e62740f, 0xa10,0x95009b78, 0xa14,0x11145008, 0xa18,0x00881117, 0xa1c,0x89140fa0, 0xa20,0x1a1b0000, 0xa24,0x090e1317, 0xa28,0x00000204, 0xa2c,0x00000000, 0xc00,0x00000040, 0xc04,0x0000500c, 0xc08,0x000000e4, 0xc0c,0x6c6c6c6c, 0xc10,0x08000000, 0xc14,0x40000100, 0xc18,0x08000000, 0xc1c,0x40000100, 0xc20,0x08000000, 0xc24,0x40000100, 0xc28,0x08000000, 0xc2c,0x40000100, 0xc30,0x6de9ac44, 0xc34,0x164052cd, 0xc38,0x00070a14, 0xc3c,0x0a969764, 0xc40,0x1f7c403f, 0xc44,0x000100b7, 0xc48,0xec020000, 0xc4c,0x00000300, 0xc50,0x69543420, 0xc54,0x433c0094, 0xc58,0x69543420, 0xc5c,0x433c0094, 0xc60,0x69543420, 0xc64,0x433c0094, 0xc68,0x69543420, 0xc6c,0x433c0094, 0xc70,0x2c7f000d, 0xc74,0x0186175b, 0xc78,0x0000001f, 0xc7c,0x00b91612, 0xc80,0x40000100, 0xc84,0x00000000, 0xc88,0x40000100, 0xc8c,0x08000000, 0xc90,0x40000100, 0xc94,0x00000000, 0xc98,0x40000100, 0xc9c,0x00000000, 0xca0,0x00492492, 0xca4,0x00000000, 0xca8,0x00000000, 0xcac,0x00000000, 0xcb0,0x00000000, 0xcb4,0x00000000, 0xcb8,0x00000000, 0xcbc,0x00492492, 0xcc0,0x00000000, 0xcc4,0x00000000, 0xcc8,0x00000000, 0xccc,0x00000000, 0xcd0,0x00000000, 0xcd4,0x00000000, 0xcd8,0x64b22427, 0xcdc,0x00766932, 0xce0,0x00222222, 0xd00,0x00000740, 0xd04,0x0000040c, 0xd08,0x0000803f, 0xd0c,0x00000001, 0xd10,0xa0633333, 0xd14,0x33333c63, 0xd18,0x6a8f5b6b, 0xd1c,0x00000000, 0xd20,0x00000000, 0xd24,0x00000000, 0xd28,0x00000000, 0xd2c,0xcc979975, 0xd30,0x00000000, 0xd34,0x00000000, 0xd38,0x00000000, 0xd3c,0x00027293, 0xd40,0x00000000, 0xd44,0x00000000, 0xd48,0x00000000, 0xd4c,0x00000000, 0xd50,0x6437140a, 0xd54,0x024dbd02, 0xd58,0x00000000, 0xd5c,0x14032064, }; u32 Rtl8190PciRadioA_Array[RadioA_ArrayLength] = { 0x019,0x00000003, 0x000,0x000000bf, 0x001,0x00000ee0, 0x002,0x0000004c, 0x003,0x000007f1, 0x004,0x00000975, 0x005,0x00000c58, 0x006,0x00000ae6, 0x007,0x000000ca, 0x008,0x00000e1c, 0x009,0x000007f0, 0x00a,0x000009d0, 0x00b,0x000001ba, 0x00c,0x00000240, 0x00e,0x00000020, 0x00f,0x00000990, 
0x012,0x00000806, 0x014,0x000005ab, 0x015,0x00000f80, 0x016,0x00000020, 0x017,0x00000597, 0x018,0x0000050a, 0x01a,0x00000f80, 0x01b,0x00000f5e, 0x01c,0x00000008, 0x01d,0x00000607, 0x01e,0x000006cc, 0x01f,0x00000000, 0x020,0x000001a5, 0x01f,0x00000001, 0x020,0x00000165, 0x01f,0x00000002, 0x020,0x000000c6, 0x01f,0x00000003, 0x020,0x00000086, 0x01f,0x00000004, 0x020,0x00000046, 0x01f,0x00000005, 0x020,0x000001e6, 0x01f,0x00000006, 0x020,0x000001a6, 0x01f,0x00000007, 0x020,0x00000166, 0x01f,0x00000008, 0x020,0x000000c7, 0x01f,0x00000009, 0x020,0x00000087, 0x01f,0x0000000a, 0x020,0x000000f7, 0x01f,0x0000000b, 0x020,0x000000d7, 0x01f,0x0000000c, 0x020,0x000000b7, 0x01f,0x0000000d, 0x020,0x00000097, 0x01f,0x0000000e, 0x020,0x00000077, 0x01f,0x0000000f, 0x020,0x00000057, 0x01f,0x00000010, 0x020,0x00000037, 0x01f,0x00000011, 0x020,0x000000fb, 0x01f,0x00000012, 0x020,0x000000db, 0x01f,0x00000013, 0x020,0x000000bb, 0x01f,0x00000014, 0x020,0x000000ff, 0x01f,0x00000015, 0x020,0x000000e3, 0x01f,0x00000016, 0x020,0x000000c3, 0x01f,0x00000017, 0x020,0x000000a3, 0x01f,0x00000018, 0x020,0x00000083, 0x01f,0x00000019, 0x020,0x00000063, 0x01f,0x0000001a, 0x020,0x00000043, 0x01f,0x0000001b, 0x020,0x00000023, 0x01f,0x0000001c, 0x020,0x00000003, 0x01f,0x0000001d, 0x020,0x000001e3, 0x01f,0x0000001e, 0x020,0x000001c3, 0x01f,0x0000001f, 0x020,0x000001a3, 0x01f,0x00000020, 0x020,0x00000183, 0x01f,0x00000021, 0x020,0x00000163, 0x01f,0x00000022, 0x020,0x00000143, 0x01f,0x00000023, 0x020,0x00000123, 0x01f,0x00000024, 0x020,0x00000103, 0x023,0x00000203, 0x024,0x00000200, 0x00b,0x000001ba, 0x02c,0x000003d7, 0x02d,0x00000ff0, 0x000,0x00000037, 0x004,0x00000160, 0x007,0x00000080, 0x002,0x0000088d, 0x0fe,0x00000000, 0x0fe,0x00000000, 0x016,0x00000200, 0x016,0x00000380, 0x016,0x00000020, 0x016,0x000001a0, 0x000,0x000000bf, 0x00d,0x0000001f, 0x00d,0x00000c9f, 0x002,0x0000004d, 0x000,0x00000cbf, 0x004,0x00000975, 0x007,0x00000700, }; u32 Rtl8190PciRadioB_Array[RadioB_ArrayLength] = { 0x019,0x00000003, 
0x000,0x000000bf, 0x001,0x000006e0, 0x002,0x0000004c, 0x003,0x000007f1, 0x004,0x00000975, 0x005,0x00000c58, 0x006,0x00000ae6, 0x007,0x000000ca, 0x008,0x00000e1c, 0x000,0x000000b7, 0x00a,0x00000850, 0x000,0x000000bf, 0x00b,0x000001ba, 0x00c,0x00000240, 0x00e,0x00000020, 0x015,0x00000f80, 0x016,0x00000020, 0x017,0x00000597, 0x018,0x0000050a, 0x01a,0x00000e00, 0x01b,0x00000f5e, 0x01d,0x00000607, 0x01e,0x000006cc, 0x00b,0x000001ba, 0x023,0x00000203, 0x024,0x00000200, 0x000,0x00000037, 0x004,0x00000160, 0x016,0x00000200, 0x016,0x00000380, 0x016,0x00000020, 0x016,0x000001a0, 0x00d,0x00000ccc, 0x000,0x000000bf, 0x002,0x0000004d, 0x000,0x00000cbf, 0x004,0x00000975, 0x007,0x00000700, }; u32 Rtl8190PciRadioC_Array[RadioC_ArrayLength] = { 0x019,0x00000003, 0x000,0x000000bf, 0x001,0x00000ee0, 0x002,0x0000004c, 0x003,0x000007f1, 0x004,0x00000975, 0x005,0x00000c58, 0x006,0x00000ae6, 0x007,0x000000ca, 0x008,0x00000e1c, 0x009,0x000007f0, 0x00a,0x000009d0, 0x00b,0x000001ba, 0x00c,0x00000240, 0x00e,0x00000020, 0x00f,0x00000990, 0x012,0x00000806, 0x014,0x000005ab, 0x015,0x00000f80, 0x016,0x00000020, 0x017,0x00000597, 0x018,0x0000050a, 0x01a,0x00000f80, 0x01b,0x00000f5e, 0x01c,0x00000008, 0x01d,0x00000607, 0x01e,0x000006cc, 0x01f,0x00000000, 0x020,0x000001a5, 0x01f,0x00000001, 0x020,0x00000165, 0x01f,0x00000002, 0x020,0x000000c6, 0x01f,0x00000003, 0x020,0x00000086, 0x01f,0x00000004, 0x020,0x00000046, 0x01f,0x00000005, 0x020,0x000001e6, 0x01f,0x00000006, 0x020,0x000001a6, 0x01f,0x00000007, 0x020,0x00000166, 0x01f,0x00000008, 0x020,0x000000c7, 0x01f,0x00000009, 0x020,0x00000087, 0x01f,0x0000000a, 0x020,0x000000f7, 0x01f,0x0000000b, 0x020,0x000000d7, 0x01f,0x0000000c, 0x020,0x000000b7, 0x01f,0x0000000d, 0x020,0x00000097, 0x01f,0x0000000e, 0x020,0x00000077, 0x01f,0x0000000f, 0x020,0x00000057, 0x01f,0x00000010, 0x020,0x00000037, 0x01f,0x00000011, 0x020,0x000000fb, 0x01f,0x00000012, 0x020,0x000000db, 0x01f,0x00000013, 0x020,0x000000bb, 0x01f,0x00000014, 0x020,0x000000ff, 0x01f,0x00000015, 
0x020,0x000000e3, 0x01f,0x00000016, 0x020,0x000000c3, 0x01f,0x00000017, 0x020,0x000000a3, 0x01f,0x00000018, 0x020,0x00000083, 0x01f,0x00000019, 0x020,0x00000063, 0x01f,0x0000001a, 0x020,0x00000043, 0x01f,0x0000001b, 0x020,0x00000023, 0x01f,0x0000001c, 0x020,0x00000003, 0x01f,0x0000001d, 0x020,0x000001e3, 0x01f,0x0000001e, 0x020,0x000001c3, 0x01f,0x0000001f, 0x020,0x000001a3, 0x01f,0x00000020, 0x020,0x00000183, 0x01f,0x00000021, 0x020,0x00000163, 0x01f,0x00000022, 0x020,0x00000143, 0x01f,0x00000023, 0x020,0x00000123, 0x01f,0x00000024, 0x020,0x00000103, 0x023,0x00000203, 0x024,0x00000200, 0x00b,0x000001ba, 0x02c,0x000003d7, 0x02d,0x00000ff0, 0x000,0x00000037, 0x004,0x00000160, 0x007,0x00000080, 0x002,0x0000088d, 0x0fe,0x00000000, 0x0fe,0x00000000, 0x016,0x00000200, 0x016,0x00000380, 0x016,0x00000020, 0x016,0x000001a0, 0x000,0x000000bf, 0x00d,0x0000001f, 0x00d,0x00000c9f, 0x002,0x0000004d, 0x000,0x00000cbf, 0x004,0x00000975, 0x007,0x00000700, }; u32 Rtl8190PciRadioD_Array[RadioD_ArrayLength] = { 0x019,0x00000003, 0x000,0x000000bf, 0x001,0x000006e0, 0x002,0x0000004c, 0x003,0x000007f1, 0x004,0x00000975, 0x005,0x00000c58, 0x006,0x00000ae6, 0x007,0x000000ca, 0x008,0x00000e1c, 0x000,0x000000b7, 0x00a,0x00000850, 0x000,0x000000bf, 0x00b,0x000001ba, 0x00c,0x00000240, 0x00e,0x00000020, 0x015,0x00000f80, 0x016,0x00000020, 0x017,0x00000597, 0x018,0x0000050a, 0x01a,0x00000e00, 0x01b,0x00000f5e, 0x01d,0x00000607, 0x01e,0x000006cc, 0x00b,0x000001ba, 0x023,0x00000203, 0x024,0x00000200, 0x000,0x00000037, 0x004,0x00000160, 0x016,0x00000200, 0x016,0x00000380, 0x016,0x00000020, 0x016,0x000001a0, 0x00d,0x00000ccc, 0x000,0x000000bf, 0x002,0x0000004d, 0x000,0x00000cbf, 0x004,0x00000975, 0x007,0x00000700, }; #endif #ifdef RTL8192E static u32 Rtl8192PciEMACPHY_Array[] = { 0x03c,0xffff0000,0x00000f0f, 0x340,0xffffffff,0x161a1a1a, 0x344,0xffffffff,0x12121416, 0x348,0x0000ffff,0x00001818, 0x12c,0xffffffff,0x04000802, 0x318,0x00000fff,0x00000100, }; static u32 Rtl8192PciEMACPHY_Array_PG[] = { 
0x03c,0xffff0000,0x00000f0f, 0xe00,0xffffffff,0x06090909, 0xe04,0xffffffff,0x00030306, 0xe08,0x0000ff00,0x00000000, 0xe10,0xffffffff,0x0a0c0d0f, 0xe14,0xffffffff,0x06070809, 0xe18,0xffffffff,0x0a0c0d0f, 0xe1c,0xffffffff,0x06070809, 0x12c,0xffffffff,0x04000802, 0x318,0x00000fff,0x00000800, }; static u32 Rtl8192PciEAGCTAB_Array[AGCTAB_ArrayLength] = { 0xc78,0x7d000001, 0xc78,0x7d010001, 0xc78,0x7d020001, 0xc78,0x7d030001, 0xc78,0x7d040001, 0xc78,0x7d050001, 0xc78,0x7c060001, 0xc78,0x7b070001, 0xc78,0x7a080001, 0xc78,0x79090001, 0xc78,0x780a0001, 0xc78,0x770b0001, 0xc78,0x760c0001, 0xc78,0x750d0001, 0xc78,0x740e0001, 0xc78,0x730f0001, 0xc78,0x72100001, 0xc78,0x71110001, 0xc78,0x70120001, 0xc78,0x6f130001, 0xc78,0x6e140001, 0xc78,0x6d150001, 0xc78,0x6c160001, 0xc78,0x6b170001, 0xc78,0x6a180001, 0xc78,0x69190001, 0xc78,0x681a0001, 0xc78,0x671b0001, 0xc78,0x661c0001, 0xc78,0x651d0001, 0xc78,0x641e0001, 0xc78,0x491f0001, 0xc78,0x48200001, 0xc78,0x47210001, 0xc78,0x46220001, 0xc78,0x45230001, 0xc78,0x44240001, 0xc78,0x43250001, 0xc78,0x28260001, 0xc78,0x27270001, 0xc78,0x26280001, 0xc78,0x25290001, 0xc78,0x242a0001, 0xc78,0x232b0001, 0xc78,0x222c0001, 0xc78,0x212d0001, 0xc78,0x202e0001, 0xc78,0x0a2f0001, 0xc78,0x08300001, 0xc78,0x06310001, 0xc78,0x05320001, 0xc78,0x04330001, 0xc78,0x03340001, 0xc78,0x02350001, 0xc78,0x01360001, 0xc78,0x00370001, 0xc78,0x00380001, 0xc78,0x00390001, 0xc78,0x003a0001, 0xc78,0x003b0001, 0xc78,0x003c0001, 0xc78,0x003d0001, 0xc78,0x003e0001, 0xc78,0x003f0001, 0xc78,0x7d400001, 0xc78,0x7d410001, 0xc78,0x7d420001, 0xc78,0x7d430001, 0xc78,0x7d440001, 0xc78,0x7d450001, 0xc78,0x7c460001, 0xc78,0x7b470001, 0xc78,0x7a480001, 0xc78,0x79490001, 0xc78,0x784a0001, 0xc78,0x774b0001, 0xc78,0x764c0001, 0xc78,0x754d0001, 0xc78,0x744e0001, 0xc78,0x734f0001, 0xc78,0x72500001, 0xc78,0x71510001, 0xc78,0x70520001, 0xc78,0x6f530001, 0xc78,0x6e540001, 0xc78,0x6d550001, 0xc78,0x6c560001, 0xc78,0x6b570001, 0xc78,0x6a580001, 0xc78,0x69590001, 0xc78,0x685a0001, 
0xc78,0x675b0001, 0xc78,0x665c0001, 0xc78,0x655d0001, 0xc78,0x645e0001, 0xc78,0x495f0001, 0xc78,0x48600001, 0xc78,0x47610001, 0xc78,0x46620001, 0xc78,0x45630001, 0xc78,0x44640001, 0xc78,0x43650001, 0xc78,0x28660001, 0xc78,0x27670001, 0xc78,0x26680001, 0xc78,0x25690001, 0xc78,0x246a0001, 0xc78,0x236b0001, 0xc78,0x226c0001, 0xc78,0x216d0001, 0xc78,0x206e0001, 0xc78,0x0a6f0001, 0xc78,0x08700001, 0xc78,0x06710001, 0xc78,0x05720001, 0xc78,0x04730001, 0xc78,0x03740001, 0xc78,0x02750001, 0xc78,0x01760001, 0xc78,0x00770001, 0xc78,0x00780001, 0xc78,0x00790001, 0xc78,0x007a0001, 0xc78,0x007b0001, 0xc78,0x007c0001, 0xc78,0x007d0001, 0xc78,0x007e0001, 0xc78,0x007f0001, 0xc78,0x2e00001e, 0xc78,0x2e01001e, 0xc78,0x2e02001e, 0xc78,0x2e03001e, 0xc78,0x2e04001e, 0xc78,0x2e05001e, 0xc78,0x3006001e, 0xc78,0x3407001e, 0xc78,0x3908001e, 0xc78,0x3c09001e, 0xc78,0x3f0a001e, 0xc78,0x420b001e, 0xc78,0x440c001e, 0xc78,0x450d001e, 0xc78,0x460e001e, 0xc78,0x460f001e, 0xc78,0x4710001e, 0xc78,0x4811001e, 0xc78,0x4912001e, 0xc78,0x4a13001e, 0xc78,0x4b14001e, 0xc78,0x4b15001e, 0xc78,0x4c16001e, 0xc78,0x4d17001e, 0xc78,0x4e18001e, 0xc78,0x4f19001e, 0xc78,0x4f1a001e, 0xc78,0x501b001e, 0xc78,0x511c001e, 0xc78,0x521d001e, 0xc78,0x521e001e, 0xc78,0x531f001e, 0xc78,0x5320001e, 0xc78,0x5421001e, 0xc78,0x5522001e, 0xc78,0x5523001e, 0xc78,0x5624001e, 0xc78,0x5725001e, 0xc78,0x5726001e, 0xc78,0x5827001e, 0xc78,0x5828001e, 0xc78,0x5929001e, 0xc78,0x592a001e, 0xc78,0x5a2b001e, 0xc78,0x5b2c001e, 0xc78,0x5c2d001e, 0xc78,0x5c2e001e, 0xc78,0x5d2f001e, 0xc78,0x5e30001e, 0xc78,0x5f31001e, 0xc78,0x6032001e, 0xc78,0x6033001e, 0xc78,0x6134001e, 0xc78,0x6235001e, 0xc78,0x6336001e, 0xc78,0x6437001e, 0xc78,0x6438001e, 0xc78,0x6539001e, 0xc78,0x663a001e, 0xc78,0x673b001e, 0xc78,0x673c001e, 0xc78,0x683d001e, 0xc78,0x693e001e, 0xc78,0x6a3f001e, }; static u32 Rtl8192PciEPHY_REGArray[PHY_REGArrayLength] = { 0x0, }; static u32 Rtl8192PciEPHY_REG_1T2RArray[PHY_REG_1T2RArrayLength] = { 0x800,0x00000000, 0x804,0x00000001, 
0x808,0x0000fc00, 0x80c,0x0000001c, 0x810,0x801010aa, 0x814,0x008514d0, 0x818,0x00000040, 0x81c,0x00000000, 0x820,0x00000004, 0x824,0x00690000, 0x828,0x00000004, 0x82c,0x00e90000, 0x830,0x00000004, 0x834,0x00690000, 0x838,0x00000004, 0x83c,0x00e90000, 0x840,0x00000000, 0x844,0x00000000, 0x848,0x00000000, 0x84c,0x00000000, 0x850,0x00000000, 0x854,0x00000000, 0x858,0x65a965a9, 0x85c,0x65a965a9, 0x860,0x001f0010, 0x864,0x007f0010, 0x868,0x001f0010, 0x86c,0x007f0010, 0x870,0x0f100f70, 0x874,0x0f100f70, 0x878,0x00000000, 0x87c,0x00000000, 0x880,0x6870e36c, 0x884,0xe3573600, 0x888,0x4260c340, 0x88c,0x0000ff00, 0x890,0x00000000, 0x894,0xfffffffe, 0x898,0x4c42382f, 0x89c,0x00656056, 0x8b0,0x00000000, 0x8e0,0x00000000, 0x8e4,0x00000000, 0x900,0x00000000, 0x904,0x00000023, 0x908,0x00000000, 0x90c,0x31121311, 0xa00,0x00d0c7d8, 0xa04,0x811f0008, 0xa08,0x80cd8300, 0xa0c,0x2e62740f, 0xa10,0x95009b78, 0xa14,0x11145008, 0xa18,0x00881117, 0xa1c,0x89140fa0, 0xa20,0x1a1b0000, 0xa24,0x090e1317, 0xa28,0x00000204, 0xa2c,0x00000000, 0xc00,0x00000040, 0xc04,0x00005433, 0xc08,0x000000e4, 0xc0c,0x6c6c6c6c, 0xc10,0x08800000, 0xc14,0x40000100, 0xc18,0x08000000, 0xc1c,0x40000100, 0xc20,0x08000000, 0xc24,0x40000100, 0xc28,0x08000000, 0xc2c,0x40000100, 0xc30,0x6de9ac44, 0xc34,0x465c52cd, 0xc38,0x497f5994, 0xc3c,0x0a969764, 0xc40,0x1f7c403f, 0xc44,0x000100b7, 0xc48,0xec020000, 0xc4c,0x00000300, 0xc50,0x69543420, 0xc54,0x433c0094, 0xc58,0x69543420, 0xc5c,0x433c0094, 0xc60,0x69543420, 0xc64,0x433c0094, 0xc68,0x69543420, 0xc6c,0x433c0094, 0xc70,0x2c7f000d, 0xc74,0x0186175b, 0xc78,0x0000001f, 0xc7c,0x00b91612, 0xc80,0x40000100, 0xc84,0x20000000, 0xc88,0x40000100, 0xc8c,0x20200000, 0xc90,0x40000100, 0xc94,0x00000000, 0xc98,0x40000100, 0xc9c,0x00000000, 0xca0,0x00492492, 0xca4,0x00000000, 0xca8,0x00000000, 0xcac,0x00000000, 0xcb0,0x00000000, 0xcb4,0x00000000, 0xcb8,0x00000000, 0xcbc,0x00492492, 0xcc0,0x00000000, 0xcc4,0x00000000, 0xcc8,0x00000000, 0xccc,0x00000000, 0xcd0,0x00000000, 0xcd4,0x00000000, 
0xcd8,0x64b22427, 0xcdc,0x00766932, 0xce0,0x00222222, 0xd00,0x00000750, 0xd04,0x00000403, 0xd08,0x0000907f, 0xd0c,0x00000001, 0xd10,0xa0633333, 0xd14,0x33333c63, 0xd18,0x6a8f5b6b, 0xd1c,0x00000000, 0xd20,0x00000000, 0xd24,0x00000000, 0xd28,0x00000000, 0xd2c,0xcc979975, 0xd30,0x00000000, 0xd34,0x00000000, 0xd38,0x00000000, 0xd3c,0x00027293, 0xd40,0x00000000, 0xd44,0x00000000, 0xd48,0x00000000, 0xd4c,0x00000000, 0xd50,0x6437140a, 0xd54,0x024dbd02, 0xd58,0x00000000, 0xd5c,0x04032064, 0xe00,0x161a1a1a, 0xe04,0x12121416, 0xe08,0x00001800, 0xe0c,0x00000000, 0xe10,0x161a1a1a, 0xe14,0x12121416, 0xe18,0x161a1a1a, 0xe1c,0x12121416, }; static u32 Rtl8192PciERadioA_Array[RadioA_ArrayLength] = { 0x019,0x00000003, 0x000,0x000000bf, 0x001,0x00000ee0, 0x002,0x0000004c, 0x003,0x000007f1, 0x004,0x00000975, 0x005,0x00000c58, 0x006,0x00000ae6, 0x007,0x000000ca, 0x008,0x00000e1c, 0x009,0x000007f0, 0x00a,0x000009d0, 0x00b,0x000001ba, 0x00c,0x00000240, 0x00e,0x00000020, 0x00f,0x00000990, 0x012,0x00000806, 0x014,0x000005ab, 0x015,0x00000f80, 0x016,0x00000020, 0x017,0x00000597, 0x018,0x0000050a, 0x01a,0x00000f80, 0x01b,0x00000f5e, 0x01c,0x00000008, 0x01d,0x00000607, 0x01e,0x000006cc, 0x01f,0x00000000, 0x020,0x000001a5, 0x01f,0x00000001, 0x020,0x00000165, 0x01f,0x00000002, 0x020,0x000000c6, 0x01f,0x00000003, 0x020,0x00000086, 0x01f,0x00000004, 0x020,0x00000046, 0x01f,0x00000005, 0x020,0x000001e6, 0x01f,0x00000006, 0x020,0x000001a6, 0x01f,0x00000007, 0x020,0x00000166, 0x01f,0x00000008, 0x020,0x000000c7, 0x01f,0x00000009, 0x020,0x00000087, 0x01f,0x0000000a, 0x020,0x000000f7, 0x01f,0x0000000b, 0x020,0x000000d7, 0x01f,0x0000000c, 0x020,0x000000b7, 0x01f,0x0000000d, 0x020,0x00000097, 0x01f,0x0000000e, 0x020,0x00000077, 0x01f,0x0000000f, 0x020,0x00000057, 0x01f,0x00000010, 0x020,0x00000037, 0x01f,0x00000011, 0x020,0x000000fb, 0x01f,0x00000012, 0x020,0x000000db, 0x01f,0x00000013, 0x020,0x000000bb, 0x01f,0x00000014, 0x020,0x000000ff, 0x01f,0x00000015, 0x020,0x000000e3, 0x01f,0x00000016, 
0x020,0x000000c3, 0x01f,0x00000017, 0x020,0x000000a3, 0x01f,0x00000018, 0x020,0x00000083, 0x01f,0x00000019, 0x020,0x00000063, 0x01f,0x0000001a, 0x020,0x00000043, 0x01f,0x0000001b, 0x020,0x00000023, 0x01f,0x0000001c, 0x020,0x00000003, 0x01f,0x0000001d, 0x020,0x000001e3, 0x01f,0x0000001e, 0x020,0x000001c3, 0x01f,0x0000001f, 0x020,0x000001a3, 0x01f,0x00000020, 0x020,0x00000183, 0x01f,0x00000021, 0x020,0x00000163, 0x01f,0x00000022, 0x020,0x00000143, 0x01f,0x00000023, 0x020,0x00000123, 0x01f,0x00000024, 0x020,0x00000103, 0x023,0x00000203, 0x024,0x00000100, 0x00b,0x000001ba, 0x02c,0x000003d7, 0x02d,0x00000ff0, 0x000,0x00000037, 0x004,0x00000160, 0x007,0x00000080, 0x002,0x0000088d, 0x0fe,0x00000000, 0x0fe,0x00000000, 0x016,0x00000200, 0x016,0x00000380, 0x016,0x00000020, 0x016,0x000001a0, 0x000,0x000000bf, 0x00d,0x0000001f, 0x00d,0x00000c9f, 0x002,0x0000004d, 0x000,0x00000cbf, 0x004,0x00000975, 0x007,0x00000700, }; static u32 Rtl8192PciERadioB_Array[RadioB_ArrayLength] = { 0x019,0x00000003, 0x000,0x000000bf, 0x001,0x000006e0, 0x002,0x0000004c, 0x003,0x000007f1, 0x004,0x00000975, 0x005,0x00000c58, 0x006,0x00000ae6, 0x007,0x000000ca, 0x008,0x00000e1c, 0x000,0x000000b7, 0x00a,0x00000850, 0x000,0x000000bf, 0x00b,0x000001ba, 0x00c,0x00000240, 0x00e,0x00000020, 0x015,0x00000f80, 0x016,0x00000020, 0x017,0x00000597, 0x018,0x0000050a, 0x01a,0x00000e00, 0x01b,0x00000f5e, 0x01d,0x00000607, 0x01e,0x000006cc, 0x00b,0x000001ba, 0x023,0x00000203, 0x024,0x00000100, 0x000,0x00000037, 0x004,0x00000160, 0x016,0x00000200, 0x016,0x00000380, 0x016,0x00000020, 0x016,0x000001a0, 0x00d,0x00000ccc, 0x000,0x000000bf, 0x002,0x0000004d, 0x000,0x00000cbf, 0x004,0x00000975, 0x007,0x00000700, }; static u32 Rtl8192PciERadioC_Array[RadioC_ArrayLength] = { 0x0, }; static u32 Rtl8192PciERadioD_Array[RadioD_ArrayLength] = { 0x0, }; #endif /*************************Define local function prototype**********************/ static u32 phy_FwRFSerialRead(struct net_device* dev,RF90_RADIO_PATH_E eRFPath,u32 Offset); 
static void phy_FwRFSerialWrite(struct net_device* dev,RF90_RADIO_PATH_E eRFPath,u32 Offset,u32 Data); /*************************Define local function prototype**********************/ /****************************************************************************** *function: This function read BB parameters from Header file we gen, * and do register read/write * input: u32 dwBitMask //taget bit pos in the addr to be modified * output: none * return: u32 return the shift bit bit position of the mask * ****************************************************************************/ static u32 rtl8192_CalculateBitShift(u32 dwBitMask) { u32 i; for (i=0; i<=31; i++) { if (((dwBitMask>>i)&0x1) == 1) break; } return i; } /****************************************************************************** *function: This function check different RF type to execute legal judgement. If RF Path is illegal, we will return false. * input: none * output: none * return: 0(illegal, false), 1(legal,true) * ***************************************************************************/ u8 rtl8192_phy_CheckIsLegalRFPath(struct net_device* dev, u32 eRFPath) { u8 ret = 1; struct r8192_priv *priv = ieee80211_priv(dev); #ifdef RTL8190P if(priv->rf_type == RF_2T4R) { ret= 1; } else if (priv->rf_type == RF_1T2R) { if(eRFPath == RF90_PATH_A || eRFPath == RF90_PATH_B) ret = 0; else if(eRFPath == RF90_PATH_C || eRFPath == RF90_PATH_D) ret = 1; } #else #ifdef RTL8192E if (priv->rf_type == RF_2T4R) ret = 0; else if (priv->rf_type == RF_1T2R) { if (eRFPath == RF90_PATH_A || eRFPath == RF90_PATH_B) ret = 1; else if (eRFPath == RF90_PATH_C || eRFPath == RF90_PATH_D) ret = 0; } #endif #endif return ret; } /****************************************************************************** *function: This function set specific bits to BB register * input: net_device dev * u32 dwRegAddr //target addr to be modified * u32 dwBitMask //taget bit pos in the addr to be modified * u32 dwData //value to be write * output: 
none * return: none * notice: * ****************************************************************************/ void rtl8192_setBBreg(struct net_device* dev, u32 dwRegAddr, u32 dwBitMask, u32 dwData) { u32 OriginalValue, BitShift, NewValue; if(dwBitMask!= bMaskDWord) {//if not "double word" write OriginalValue = read_nic_dword(dev, dwRegAddr); BitShift = rtl8192_CalculateBitShift(dwBitMask); NewValue = (((OriginalValue) & (~dwBitMask)) | (dwData << BitShift)); write_nic_dword(dev, dwRegAddr, NewValue); }else write_nic_dword(dev, dwRegAddr, dwData); } /****************************************************************************** *function: This function reads specific bits from BB register * input: net_device dev * u32 dwRegAddr //target addr to be readback * u32 dwBitMask //taget bit pos in the addr to be readback * output: none * return: u32 Data //the readback register value * notice: * ****************************************************************************/ u32 rtl8192_QueryBBReg(struct net_device* dev, u32 dwRegAddr, u32 dwBitMask) { u32 OriginalValue, BitShift; OriginalValue = read_nic_dword(dev, dwRegAddr); BitShift = rtl8192_CalculateBitShift(dwBitMask); return (OriginalValue & dwBitMask) >> BitShift; } /****************************************************************************** *function: This function read register from RF chip * input: net_device dev * RF90_RADIO_PATH_E eRFPath //radio path of A/B/C/D * u32 Offset //target address to be read * output: none * return: u32 readback value * notice: There are three types of serial operations:(1) Software serial write.(2)Hardware LSSI-Low Speed Serial Interface.(3)Hardware HSSI-High speed serial write. Driver here need to implement (1) and (2)---need more spec for this information. 
 ******************************************************************************/
static u32 rtl8192_phy_RFSerialRead(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 Offset)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	u32 ret = 0;
	u32 NewOffset = 0;
	BB_REGISTER_DEFINITION_T* pPhyReg = &priv->PHYRegDef[eRFPath];
	//rtl8192_setBBreg(dev, pPhyReg->rfLSSIReadBack, bLSSIReadBackData, 0);
	//make sure RF register offset is correct
	Offset &= 0x3f;

	//switch page for 8256 RF IC
	// The 8256 exposes 3 register pages via bits in RF reg 0 (see the
	// Reg_Mode table above rtl8192_phy_RFSerialWrite); the cached copy in
	// priv->RfReg0Value is updated here and restored after the read.
	if (priv->rf_chip == RF_8256) {
#ifdef RTL8190P
		//analog to digital off, for protection
		rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0);// 0x88c[11:8]
#else
#ifdef RTL8192E
		//analog to digital off, for protection
		rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0);// 0x88c[11:8]
#endif
#endif
		if (Offset >= 31) {
			priv->RfReg0Value[eRFPath] |= 0x140;
			//Switch to Reg_Mode2 for Reg 31-45
			rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath]<<16) );
			//modify offset
			NewOffset = Offset -30;
		} else if (Offset >= 16) {
			priv->RfReg0Value[eRFPath] |= 0x100;
			priv->RfReg0Value[eRFPath] &= (~0x40);
			//Switch to Reg_Mode 1 for Reg16-30
			rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath]<<16) );
			NewOffset = Offset - 15;
		} else
			NewOffset = Offset;
	} else {
		RT_TRACE((COMP_PHY|COMP_ERR), "check RF type here, need to be 8256\n");
		NewOffset = Offset;
	}
	//put desired read addr to LSSI control Register
	rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadAddress, NewOffset);
	//Issue a posedge trigger
	// rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadEdge, 0x0);
	rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, bLSSIReadEdge, 0x1);

	// TODO: we should not delay such a long time. Ask help from SD3
	msleep(1);

	ret = rtl8192_QueryBBReg(dev, pPhyReg->rfLSSIReadBack, bLSSIReadBackData);

	// Switch back to Reg_Mode0;
	if(priv->rf_chip == RF_8256) {
		priv->RfReg0Value[eRFPath] &= 0xebf;
		rtl8192_setBBreg( dev, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath] << 16));
#ifdef RTL8190P
		if(priv->rf_type == RF_2T4R) {
			//analog to digital on
			rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0xf00, 0xf);// 0x88c[11:8]
		} else if(priv->rf_type == RF_1T2R) {
			//analog to digital on
			rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0xc00, 0x3);// 0x88c[11:10]
		}
#else
#ifdef RTL8192E
		//analog to digital on
		rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3);// 0x88c[9:8]
#endif
#endif
	}
	return ret;
}

/******************************************************************************
 *function:  This function write data to RF register
 *   input:  net_device dev
 *           RF90_RADIO_PATH_E eRFPath   //radio path of A/B/C/D
 *           u32 Offset                  //target address to be written
 *           u32 Data                    //The new register data to be written
 *   output: none
 *   return: none
 *   notice: For RF8256 only.
 ===========================================================
 *Reg Mode	RegCTL[1]	RegCTL[0]	Note
 *		(Reg00[12])	(Reg00[10])
 *===========================================================
 *Reg_Mode0	0		x		Reg 0 ~15(0x0 ~ 0xf)
 *------------------------------------------------------------------
 *Reg_Mode1	1		0		Reg 16 ~30(0x1 ~ 0xf)
 *------------------------------------------------------------------
 * Reg_Mode2	1		1		Reg 31 ~ 45(0x1 ~ 0xf)
 *------------------------------------------------------------------
 ******************************************************************************/
static void rtl8192_phy_RFSerialWrite(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 Offset, u32 Data)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	u32 DataAndAddr = 0, NewOffset = 0;
	BB_REGISTER_DEFINITION_T *pPhyReg = &priv->PHYRegDef[eRFPath];

	Offset &= 0x3f;
	// Page switching mirrors rtl8192_phy_RFSerialRead(); the cached reg-0
	// value in priv->RfReg0Value carries the page-select bits.
	if (priv->rf_chip == RF_8256) {
#ifdef RTL8190P
		//analog to digital off, for protection
		rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0);// 0x88c[11:8]
#else
#ifdef RTL8192E
		//analog to digital off, for protection
		rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0xf00, 0x0);// 0x88c[11:8]
#endif
#endif
		if (Offset >= 31) {
			priv->RfReg0Value[eRFPath] |= 0x140;
			rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath] << 16));
			NewOffset = Offset - 30;
		} else if (Offset >= 16) {
			priv->RfReg0Value[eRFPath] |= 0x100;
			priv->RfReg0Value[eRFPath] &= (~0x40);
			rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath]<<16));
			NewOffset = Offset - 15;
		} else
			NewOffset = Offset;
	} else {
		RT_TRACE((COMP_PHY|COMP_ERR), "check RF type here, need to be 8256\n");
		NewOffset = Offset;
	}

	// Put write addr in [5:0] and write data in [31:16]
	DataAndAddr = (Data<<16) | (NewOffset&0x3f);

	// Write Operation
	rtl8192_setBBreg(dev, pPhyReg->rf3wireOffset, bMaskDWord, DataAndAddr);

	// Keep the software shadow of RF reg 0 in sync with the hardware.
	if(Offset==0x0)
		priv->RfReg0Value[eRFPath] = Data;

	// Switch back to Reg_Mode0;
	if(priv->rf_chip == RF_8256) {
		if(Offset != 0) {
			priv->RfReg0Value[eRFPath] &= 0xebf;
			rtl8192_setBBreg( dev, pPhyReg->rf3wireOffset, bMaskDWord, (priv->RfReg0Value[eRFPath] << 16));
		}
#ifdef RTL8190P
		if(priv->rf_type == RF_2T4R) {
			//analog to digital on
			rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0xf00, 0xf);// 0x88c[11:8]
		} else if(priv->rf_type == RF_1T2R) {
			//analog to digital on
			rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0xc00, 0x3);// 0x88c[11:10]
		}
#else
#ifdef RTL8192E
		//analog to digital on
		rtl8192_setBBreg(dev, rFPGA0_AnalogParameter4, 0x300, 0x3);// 0x88c[9:8]
#endif
#endif
	}
}

/******************************************************************************
 *function:  This function set specific bits to RF register
 *   input:  net_device dev
 *           RF90_RADIO_PATH_E eRFPath   //radio path of A/B/C/D
 *           u32 RegAddr                 //target addr to be modified
 *           u32 BitMask                 //taget bit pos in the addr to be modified
 *           u32 Data                    //value to be write
 *   output: none
 *   return: none
 *   notice:
 ******************************************************************************/
void rtl8192_phy_SetRFReg(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 RegAddr, u32 BitMask, u32 Data)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	u32 Original_Value, BitShift, New_Value;
//	u8 time = 0;

	if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
		return;
#ifdef RTL8192E
	// Skip RF access while the radio is off (unless still initializing).
	if(priv->ieee80211->eRFPowerState != eRfOn && !priv->being_init_adapter)
		return;
#endif
	//spin_lock_irqsave(&priv->rf_lock, flags);
	//down(&priv->rf_sem);
	RT_TRACE(COMP_PHY, "FW RF CTRL is not ready now\n");
	if (priv->Rf_Mode == RF_OP_By_FW) {
		if (BitMask != bMask12Bits) // RF data is 12 bits only
		{
			Original_Value = phy_FwRFSerialRead(dev, eRFPath, RegAddr);
			BitShift =  rtl8192_CalculateBitShift(BitMask);
			New_Value = (((Original_Value) & (~BitMask)) | (Data<< BitShift));
			phy_FwRFSerialWrite(dev, eRFPath, RegAddr, New_Value);
		}else
			phy_FwRFSerialWrite(dev, eRFPath, RegAddr, Data);
		udelay(200);
	} else {
		if (BitMask != bMask12Bits) // RF data is 12 bits only
		{
			Original_Value = rtl8192_phy_RFSerialRead(dev, eRFPath, RegAddr);
			BitShift =  rtl8192_CalculateBitShift(BitMask);
			New_Value = (((Original_Value) & (~BitMask)) | (Data<< BitShift));
			rtl8192_phy_RFSerialWrite(dev, eRFPath, RegAddr, New_Value);
		}else
			rtl8192_phy_RFSerialWrite(dev, eRFPath, RegAddr, Data);
	}
	//spin_unlock_irqrestore(&priv->rf_lock, flags);
	//up(&priv->rf_sem);
}

/******************************************************************************
 *function:  This function reads specific bits from RF register
 *   input:  net_device dev
 *           u32 RegAddr   //target addr to be readback
 *           u32 BitMask   //taget bit pos in the addr to be readback
 *   output: none
 *   return: u32 Data   //the readback register value
 *   notice:
 ******************************************************************************/
u32 rtl8192_phy_QueryRFReg(struct net_device* dev, RF90_RADIO_PATH_E eRFPath, u32 RegAddr, u32 BitMask)
{
	u32 Original_Value, Readback_Value, BitShift;
	struct r8192_priv *priv = ieee80211_priv(dev);

	if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath))
		return 0;
#ifdef RTL8192E
	// Radio off: report 0 rather than touching hardware.
	if(priv->ieee80211->eRFPowerState != eRfOn && !priv->being_init_adapter)
		return 0;
#endif
	// rf_sem serializes RF serial accesses; NOTE(review): the write path in
	// rtl8192_phy_SetRFReg() has its locking commented out - confirm intent.
	down(&priv->rf_sem);
	if (priv->Rf_Mode == RF_OP_By_FW) {
		Original_Value = phy_FwRFSerialRead(dev, eRFPath, RegAddr);
		udelay(200);
	} else {
		Original_Value = rtl8192_phy_RFSerialRead(dev, eRFPath, RegAddr);
	}
	BitShift =  rtl8192_CalculateBitShift(BitMask);
	Readback_Value = (Original_Value & BitMask) >> BitShift;
	up(&priv->rf_sem);
//	udelay(200);
	return Readback_Value;
}

/******************************************************************************
 *function:  We support firmware to execute RF-R/W.
 *   input:  dev
 *   output: none
 *   return: none
 *   notice:
 ******************************************************************************/
static u32 phy_FwRFSerialRead(
	struct net_device* dev,
	RF90_RADIO_PATH_E eRFPath,
	u32 Offset)
{
	u32 Data = 0;
	u8 time = 0;
	//DbgPrint("FW RF CTRL\n\r");
	/* 2007/11/02 MH Firmware RF Write control. By Francis' suggestion, we
	   can not execute the scheme in the initial step. Otherwise, RF-R/W
	   will waste much time. This is only for site survey. */
	// 1. Read operation need not insert data. bit 0-11
	//Data &= bMask12Bits;
	// 2. Write RF register address. Bit 12-19
	Data |= ((Offset&0xFF)<<12);
	// 3. Write RF path. bit 20-21
	Data |= ((eRFPath&0x3)<<20);
	// 4. Set RF read indicator. bit 22=0
	//Data |= 0x00000;
	// 5. Trigger Fw to operate the command. bit 31
	Data |= 0x80000000;
	// 6. We can not execute read operation if bit 31 is 1.
	while (read_nic_dword(dev, QPNR)&0x80000000) {
		// If FW can not finish RF-R/W for more than ?? times. We must reset FW.
		if (time++ < 100) {
			//DbgPrint("FW not finish RF-R Time=%d\n\r", time);
			udelay(10);
		} else
			break;
	}
	// 7. Execute read operation.
	write_nic_dword(dev, QPNR, Data);
	// 8. Check if firmawre send back RF content.
	// NOTE(review): 'time' is shared with the wait loop above, so the two
	// waits together are bounded by ~100 polls.
	while (read_nic_dword(dev, QPNR)&0x80000000) {
		// If FW can not finish RF-R/W for more than ?? times. We must reset FW.
		if (time++ < 100) {
			//DbgPrint("FW not finish RF-W Time=%d\n\r", time);
			udelay(10);
		} else
			return 0;
	}
	return read_nic_dword(dev, RF_DATA);
}

/******************************************************************************
 *function:  We support firmware to execute RF-R/W.
 *   input:  dev
 *   output: none
 *   return: none
 *   notice:
 ******************************************************************************/
static void phy_FwRFSerialWrite(
	struct net_device* dev,
	RF90_RADIO_PATH_E eRFPath,
	u32 Offset,
	u32 Data)
{
	u8 time = 0;
	//DbgPrint("N FW RF CTRL RF-%d OF%02x DATA=%03x\n\r", eRFPath, Offset, Data);
	/* 2007/11/02 MH Firmware RF Write control. By Francis' suggestion, we
	   can not execute the scheme in the initial step. Otherwise, RF-R/W
	   will waste much time. This is only for site survey. */
	// 1. Set driver write bit and 12 bit data. bit 0-11
	//Data &= bMask12Bits;	// Done by uper layer.
	// 2. Write RF register address. bit 12-19
	Data |= ((Offset&0xFF)<<12);
	// 3. Write RF path. bit 20-21
	Data |= ((eRFPath&0x3)<<20);
	// 4. Set RF write indicator. bit 22=1
	Data |= 0x400000;
	// 5. Trigger Fw to operate the command. bit 31=1
	Data |= 0x80000000;
	// 6. Write operation. We can not write if bit 31 is 1.
	while (read_nic_dword(dev, QPNR)&0x80000000) {
		// If FW can not finish RF-R/W for more than ?? times. We must reset FW.
		if (time++ < 100) {
			//DbgPrint("FW not finish RF-W Time=%d\n\r", time);
			udelay(10);
		} else
			break;
	}
	// 7. No matter check bit. We always force the write. Because FW will
	//    not accept the command.
	write_nic_dword(dev, QPNR, Data);
	/* 2007/11/02 MH Acoording to test, we must delay 20us to wait firmware
	   to finish RF write operation. */
	/* 2008/01/17 MH We support delay in firmware side now. */
	//delay_us(20);
}

/******************************************************************************
 *function:  This function read BB parameters from Header file we gen,
 *           and do register read/write
 *   input:  dev
 *   output: none
 *   return: none
 *   notice: BB parameters may change all the time, so please make
 *           sure it has been synced with the newest.
 ******************************************************************************/
void rtl8192_phy_configmac(struct net_device* dev)
{
	u32 dwArrayLen = 0, i = 0;
	u32* pdwArray = NULL;
	struct r8192_priv *priv = ieee80211_priv(dev);

	// Select the MAC/PHY init table: the power-gain (PG) variant when Tx
	// power data was read from EEPROM, otherwise the default table.
#ifdef TO_DO_LIST
	if(Adapter->bInHctTest) {
		RT_TRACE(COMP_PHY, "Rtl819XMACPHY_ArrayDTM\n");
		dwArrayLen = MACPHY_ArrayLengthDTM;
		pdwArray = Rtl819XMACPHY_ArrayDTM;
	} else if(priv->bTXPowerDataReadFromEEPORM)
#endif
	if(priv->bTXPowerDataReadFromEEPORM) {
		RT_TRACE(COMP_PHY, "Rtl819XMACPHY_Array_PG\n");
		dwArrayLen = MACPHY_Array_PGLength;
		pdwArray = Rtl819XMACPHY_Array_PG;
	} else {
		RT_TRACE(COMP_PHY,"Read rtl819XMACPHY_Array\n");
		dwArrayLen = MACPHY_ArrayLength;
		pdwArray = Rtl819XMACPHY_Array;
	}
	// Table layout: triples of {register address, bit mask, value}.
	for(i = 0; i<dwArrayLen; i=i+3) {
		RT_TRACE(COMP_DBG, "The Rtl8190MACPHY_Array[0] is %x Rtl8190MACPHY_Array[1] is %x Rtl8190MACPHY_Array[2] is %x\n", pdwArray[i], pdwArray[i+1], pdwArray[i+2]);
		// NOTE(review): this patches the global table in place for register
		// 0x318, so the override persists across later calls - confirm
		// that is intended rather than a local fix-up.
		if(pdwArray[i] == 0x318) {
			pdwArray[i+2] = 0x00000800;
			//DbgPrint("ptrArray[i], ptrArray[i+1], ptrArray[i+2] = %x, %x, %x\n",
			//	ptrArray[i], ptrArray[i+1], ptrArray[i+2]);
		}
		rtl8192_setBBreg(dev, pdwArray[i], pdwArray[i+1], pdwArray[i+2]);
	}
}

/******************************************************************************
 *function:  This function do dirty work
 *   input:  dev
 *   output: none
 *   return: none
 *   notice: BB parameters may change all the time, so please make
 *           sure it has been synced with the newest.
 ******************************************************************************/
void rtl8192_phyConfigBB(struct net_device* dev, u8 ConfigType)
{
	int i;
	//u8 ArrayLength;
	u32* Rtl819XPHY_REGArray_Table = NULL;
	u32* Rtl819XAGCTAB_Array_Table = NULL;
	u16 AGCTAB_ArrayLen, PHY_REGArrayLen = 0;
	struct r8192_priv *priv = ieee80211_priv(dev);

#ifdef TO_DO_LIST
	u32 *rtl8192PhyRegArrayTable = NULL, *rtl8192AgcTabArrayTable = NULL;
	if(Adapter->bInHctTest) {
		AGCTAB_ArrayLen = AGCTAB_ArrayLengthDTM;
		Rtl819XAGCTAB_Array_Table = Rtl819XAGCTAB_ArrayDTM;
		if(priv->RF_Type == RF_2T4R) {
			PHY_REGArrayLen = PHY_REGArrayLengthDTM;
			Rtl819XPHY_REGArray_Table = Rtl819XPHY_REGArrayDTM;
		} else if (priv->RF_Type == RF_1T2R) {
			PHY_REGArrayLen = PHY_REG_1T2RArrayLengthDTM;
			Rtl819XPHY_REGArray_Table = Rtl819XPHY_REG_1T2RArrayDTM;
		}
	} else
#endif
	{
		// Pick the PHY-register table matching the RF configuration.
		AGCTAB_ArrayLen = AGCTAB_ArrayLength;
		Rtl819XAGCTAB_Array_Table = Rtl819XAGCTAB_Array;
		if(priv->rf_type == RF_2T4R) {
			PHY_REGArrayLen = PHY_REGArrayLength;
			Rtl819XPHY_REGArray_Table = Rtl819XPHY_REGArray;
		} else if (priv->rf_type == RF_1T2R) {
			PHY_REGArrayLen = PHY_REG_1T2RArrayLength;
			Rtl819XPHY_REGArray_Table = Rtl819XPHY_REG_1T2RArray;
		}
	}

	// Tables are {address, value} pairs; every write is a full dword.
	if (ConfigType == BaseBand_Config_PHY_REG) {
		for (i=0; i<PHY_REGArrayLen; i+=2) {
			rtl8192_setBBreg(dev, Rtl819XPHY_REGArray_Table[i], bMaskDWord, Rtl819XPHY_REGArray_Table[i+1]);
			RT_TRACE(COMP_DBG, "i: %x, The Rtl819xUsbPHY_REGArray[0] is %x Rtl819xUsbPHY_REGArray[1] is %x \n",i, Rtl819XPHY_REGArray_Table[i], Rtl819XPHY_REGArray_Table[i+1]);
		}
	} else if (ConfigType == BaseBand_Config_AGC_TAB) {
		for (i=0; i<AGCTAB_ArrayLen; i+=2) {
			rtl8192_setBBreg(dev, Rtl819XAGCTAB_Array_Table[i], bMaskDWord, Rtl819XAGCTAB_Array_Table[i+1]);
			RT_TRACE(COMP_DBG, "i:%x, The rtl819XAGCTAB_Array[0] is %x rtl819XAGCTAB_Array[1] is %x \n",i, Rtl819XAGCTAB_Array_Table[i], Rtl819XAGCTAB_Array_Table[i+1]);
		}
	}
}

/******************************************************************************
 *function:  This function initialize Register
 * definition offset for Radio Path A/B/C/D
 *   input:  net_device dev
 *   output: none
 *   return: none
 *   notice: Initialization value here is constant and it should never be changed
 ******************************************************************************/
static void rtl8192_InitBBRFRegDef(struct net_device* dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	// RF Interface Sowrtware Control
	priv->PHYRegDef[RF90_PATH_A].rfintfs = rFPGA0_XAB_RFInterfaceSW; // 16 LSBs if read 32-bit from 0x870
	priv->PHYRegDef[RF90_PATH_B].rfintfs = rFPGA0_XAB_RFInterfaceSW; // 16 MSBs if read 32-bit from 0x870 (16-bit for 0x872)
	priv->PHYRegDef[RF90_PATH_C].rfintfs = rFPGA0_XCD_RFInterfaceSW; // 16 LSBs if read 32-bit from 0x874
	priv->PHYRegDef[RF90_PATH_D].rfintfs = rFPGA0_XCD_RFInterfaceSW; // 16 MSBs if read 32-bit from 0x874 (16-bit for 0x876)
	// RF Interface Readback Value
	priv->PHYRegDef[RF90_PATH_A].rfintfi = rFPGA0_XAB_RFInterfaceRB; // 16 LSBs if read 32-bit from 0x8E0
	priv->PHYRegDef[RF90_PATH_B].rfintfi = rFPGA0_XAB_RFInterfaceRB; // 16 MSBs if read 32-bit from 0x8E0 (16-bit for 0x8E2)
	priv->PHYRegDef[RF90_PATH_C].rfintfi = rFPGA0_XCD_RFInterfaceRB; // 16 LSBs if read 32-bit from 0x8E4
	priv->PHYRegDef[RF90_PATH_D].rfintfi = rFPGA0_XCD_RFInterfaceRB; // 16 MSBs if read 32-bit from 0x8E4 (16-bit for 0x8E6)
	// RF Interface Output (and Enable)
	priv->PHYRegDef[RF90_PATH_A].rfintfo = rFPGA0_XA_RFInterfaceOE; // 16 LSBs if read 32-bit from 0x860
	priv->PHYRegDef[RF90_PATH_B].rfintfo = rFPGA0_XB_RFInterfaceOE; // 16 LSBs if read 32-bit from 0x864
	priv->PHYRegDef[RF90_PATH_C].rfintfo = rFPGA0_XC_RFInterfaceOE; // 16 LSBs if read 32-bit from 0x868
	priv->PHYRegDef[RF90_PATH_D].rfintfo = rFPGA0_XD_RFInterfaceOE; // 16 LSBs if read 32-bit from 0x86C
	// RF Interface (Output and) Enable
	priv->PHYRegDef[RF90_PATH_A].rfintfe = rFPGA0_XA_RFInterfaceOE; // 16 MSBs if read 32-bit from 0x860 (16-bit for 0x862)
	priv->PHYRegDef[RF90_PATH_B].rfintfe = rFPGA0_XB_RFInterfaceOE; // 16 MSBs if read 32-bit from 0x864 (16-bit for 0x866)
	priv->PHYRegDef[RF90_PATH_C].rfintfe = rFPGA0_XC_RFInterfaceOE; // 16 MSBs if read 32-bit from 0x86A (16-bit for 0x86A)
	priv->PHYRegDef[RF90_PATH_D].rfintfe = rFPGA0_XD_RFInterfaceOE; // 16 MSBs if read 32-bit from 0x86C (16-bit for 0x86E)
	//Addr of LSSI. Wirte RF register by driver
	priv->PHYRegDef[RF90_PATH_A].rf3wireOffset = rFPGA0_XA_LSSIParameter; //LSSI Parameter
	priv->PHYRegDef[RF90_PATH_B].rf3wireOffset = rFPGA0_XB_LSSIParameter;
	priv->PHYRegDef[RF90_PATH_C].rf3wireOffset = rFPGA0_XC_LSSIParameter;
	priv->PHYRegDef[RF90_PATH_D].rf3wireOffset = rFPGA0_XD_LSSIParameter;
	// RF parameter
	priv->PHYRegDef[RF90_PATH_A].rfLSSI_Select = rFPGA0_XAB_RFParameter; //BB Band Select
	priv->PHYRegDef[RF90_PATH_B].rfLSSI_Select = rFPGA0_XAB_RFParameter;
	priv->PHYRegDef[RF90_PATH_C].rfLSSI_Select = rFPGA0_XCD_RFParameter;
	priv->PHYRegDef[RF90_PATH_D].rfLSSI_Select = rFPGA0_XCD_RFParameter;
	// Tx AGC Gain Stage (same for all path. Should we remove this?)
	priv->PHYRegDef[RF90_PATH_A].rfTxGainStage = rFPGA0_TxGainStage; //Tx gain stage
	priv->PHYRegDef[RF90_PATH_B].rfTxGainStage = rFPGA0_TxGainStage; //Tx gain stage
	priv->PHYRegDef[RF90_PATH_C].rfTxGainStage = rFPGA0_TxGainStage; //Tx gain stage
	priv->PHYRegDef[RF90_PATH_D].rfTxGainStage = rFPGA0_TxGainStage; //Tx gain stage
	// Tranceiver A~D HSSI Parameter-1
	priv->PHYRegDef[RF90_PATH_A].rfHSSIPara1 = rFPGA0_XA_HSSIParameter1; //wire control parameter1
	priv->PHYRegDef[RF90_PATH_B].rfHSSIPara1 = rFPGA0_XB_HSSIParameter1; //wire control parameter1
	priv->PHYRegDef[RF90_PATH_C].rfHSSIPara1 = rFPGA0_XC_HSSIParameter1; //wire control parameter1
	priv->PHYRegDef[RF90_PATH_D].rfHSSIPara1 = rFPGA0_XD_HSSIParameter1; //wire control parameter1
	// Tranceiver A~D HSSI Parameter-2
	priv->PHYRegDef[RF90_PATH_A].rfHSSIPara2 = rFPGA0_XA_HSSIParameter2; //wire control parameter2
	priv->PHYRegDef[RF90_PATH_B].rfHSSIPara2 = rFPGA0_XB_HSSIParameter2; //wire control parameter2
	priv->PHYRegDef[RF90_PATH_C].rfHSSIPara2 = rFPGA0_XC_HSSIParameter2; //wire control parameter2
	priv->PHYRegDef[RF90_PATH_D].rfHSSIPara2 = rFPGA0_XD_HSSIParameter2; //wire control parameter1
	// RF switch Control
	priv->PHYRegDef[RF90_PATH_A].rfSwitchControl = rFPGA0_XAB_SwitchControl; //TR/Ant switch control
	priv->PHYRegDef[RF90_PATH_B].rfSwitchControl = rFPGA0_XAB_SwitchControl;
	priv->PHYRegDef[RF90_PATH_C].rfSwitchControl = rFPGA0_XCD_SwitchControl;
	priv->PHYRegDef[RF90_PATH_D].rfSwitchControl = rFPGA0_XCD_SwitchControl;
	// AGC control 1
	priv->PHYRegDef[RF90_PATH_A].rfAGCControl1 = rOFDM0_XAAGCCore1;
	priv->PHYRegDef[RF90_PATH_B].rfAGCControl1 = rOFDM0_XBAGCCore1;
	priv->PHYRegDef[RF90_PATH_C].rfAGCControl1 = rOFDM0_XCAGCCore1;
	priv->PHYRegDef[RF90_PATH_D].rfAGCControl1 = rOFDM0_XDAGCCore1;
	// AGC control 2
	priv->PHYRegDef[RF90_PATH_A].rfAGCControl2 = rOFDM0_XAAGCCore2;
	priv->PHYRegDef[RF90_PATH_B].rfAGCControl2 = rOFDM0_XBAGCCore2;
	priv->PHYRegDef[RF90_PATH_C].rfAGCControl2 = rOFDM0_XCAGCCore2;
	priv->PHYRegDef[RF90_PATH_D].rfAGCControl2 = rOFDM0_XDAGCCore2;
	// RX AFE control 1
	priv->PHYRegDef[RF90_PATH_A].rfRxIQImbalance = rOFDM0_XARxIQImbalance;
	priv->PHYRegDef[RF90_PATH_B].rfRxIQImbalance = rOFDM0_XBRxIQImbalance;
	priv->PHYRegDef[RF90_PATH_C].rfRxIQImbalance = rOFDM0_XCRxIQImbalance;
	priv->PHYRegDef[RF90_PATH_D].rfRxIQImbalance = rOFDM0_XDRxIQImbalance;
	// RX AFE control 1
	priv->PHYRegDef[RF90_PATH_A].rfRxAFE = rOFDM0_XARxAFE;
	priv->PHYRegDef[RF90_PATH_B].rfRxAFE = rOFDM0_XBRxAFE;
	priv->PHYRegDef[RF90_PATH_C].rfRxAFE = rOFDM0_XCRxAFE;
	priv->PHYRegDef[RF90_PATH_D].rfRxAFE = rOFDM0_XDRxAFE;
	// Tx AFE control 1
	priv->PHYRegDef[RF90_PATH_A].rfTxIQImbalance = rOFDM0_XATxIQImbalance;
	priv->PHYRegDef[RF90_PATH_B].rfTxIQImbalance = rOFDM0_XBTxIQImbalance;
	priv->PHYRegDef[RF90_PATH_C].rfTxIQImbalance = rOFDM0_XCTxIQImbalance;
	priv->PHYRegDef[RF90_PATH_D].rfTxIQImbalance = rOFDM0_XDTxIQImbalance;
	// Tx AFE control 2
	priv->PHYRegDef[RF90_PATH_A].rfTxAFE = rOFDM0_XATxAFE;
	priv->PHYRegDef[RF90_PATH_B].rfTxAFE = rOFDM0_XBTxAFE;
	priv->PHYRegDef[RF90_PATH_C].rfTxAFE = rOFDM0_XCTxAFE;
	priv->PHYRegDef[RF90_PATH_D].rfTxAFE = rOFDM0_XDTxAFE;
	// Tranceiver LSSI Readback
	priv->PHYRegDef[RF90_PATH_A].rfLSSIReadBack = rFPGA0_XA_LSSIReadBack;
	priv->PHYRegDef[RF90_PATH_B].rfLSSIReadBack = rFPGA0_XB_LSSIReadBack;
	priv->PHYRegDef[RF90_PATH_C].rfLSSIReadBack = rFPGA0_XC_LSSIReadBack;
	priv->PHYRegDef[RF90_PATH_D].rfLSSIReadBack = rFPGA0_XD_LSSIReadBack;
}

/******************************************************************************
 *function: This function is to write register and then readback to make sure
 *          whether BB and RF is OK
 *   input: net_device dev
 *          HW90_BLOCK_E CheckBlock
 *          RF90_RADIO_PATH_E eRFPath  //only used when checkblock is HW90_BLOCK_RF
 *  output: none
 *  return: return whether BB and RF is ok(0:OK; 1:Fail)
 *  notice: This function may be removed in the ASIC
 ******************************************************************************/
RT_STATUS rtl8192_phy_checkBBAndRF(struct net_device* dev, HW90_BLOCK_E CheckBlock, RF90_RADIO_PATH_E eRFPath)
{
	//struct r8192_priv *priv = ieee80211_priv(dev);
//	BB_REGISTER_DEFINITION_T *pPhyReg = &priv->PHYRegDef[eRFPath];
	RT_STATUS ret = RT_STATUS_SUCCESS;
	u32 i, CheckTimes = 4, dwRegRead = 0;
	u32 WriteAddr[4];
	u32 WriteData[] = {0xfffff027, 0xaa55a02f, 0x00000027, 0x55aa502f};

	// Initialize register address offset to be checked
	WriteAddr[HW90_BLOCK_MAC] = 0x100;
	WriteAddr[HW90_BLOCK_PHY0] = 0x900;
	WriteAddr[HW90_BLOCK_PHY1] = 0x800;
	WriteAddr[HW90_BLOCK_RF] = 0x3;
	RT_TRACE(COMP_PHY, "=======>%s(), CheckBlock:%d\n", __FUNCTION__, CheckBlock);
	for(i=0 ; i < CheckTimes ; i++) {
		//
		// Write Data to register and readback
		//
		switch(CheckBlock) {
		case HW90_BLOCK_MAC:
			RT_TRACE(COMP_ERR, "PHY_CheckBBRFOK(): Never Write 0x100 here!");
			break;
		case HW90_BLOCK_PHY0:
		case HW90_BLOCK_PHY1:
			write_nic_dword(dev, WriteAddr[CheckBlock], WriteData[i]);
			dwRegRead = read_nic_dword(dev, WriteAddr[CheckBlock]);
			break;
		case HW90_BLOCK_RF:
			// RF registers hold 12 bits only, so truncate the pattern
			// (the truncated value is also what gets compared below).
			WriteData[i] &= 0xfff;
			rtl8192_phy_SetRFReg(dev, eRFPath, WriteAddr[HW90_BLOCK_RF], bMask12Bits, WriteData[i]);
			// TODO: we should not delay for such a long time. Ask SD3
			mdelay(10);
			dwRegRead = rtl8192_phy_QueryRFReg(dev, eRFPath, WriteAddr[HW90_BLOCK_RF], bMaskDWord);
			mdelay(10);
			break;
		default:
			ret = RT_STATUS_FAILURE;
			break;
		}
		//
		// Check whether readback data is correct
		//
		if(dwRegRead != WriteData[i]) {
			RT_TRACE(COMP_ERR, "====>error=====dwRegRead: %x, WriteData: %x \n", dwRegRead, WriteData[i]);
			ret = RT_STATUS_FAILURE;
			break;
		}
	}
	return ret;
}

/******************************************************************************
 *function:  This function initialize BB&RF
 *   input:  net_device dev
 *   output: none
 *   return: none
 *   notice: Initialization value may change all the time, so please make
 *           sure it has been synced with the newest.
 ******************************************************************************/
static RT_STATUS rtl8192_BB_Config_ParaFile(struct net_device* dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	RT_STATUS rtStatus = RT_STATUS_SUCCESS;
	u8 bRegValue = 0, eCheckItem = 0;
	u32 dwRegValue = 0;

	/**************************************
	//<1>Initialize BaseBand
	**************************************/
	/*--set BB Global Reset--*/
	bRegValue = read_nic_byte(dev, BB_GLOBAL_RESET);
	write_nic_byte(dev, BB_GLOBAL_RESET,(bRegValue|BB_GLOBAL_RESET_BIT));
	/*---set BB reset Active---*/
	dwRegValue = read_nic_dword(dev, CPU_GEN);
	write_nic_dword(dev, CPU_GEN, (dwRegValue&(~CPU_GEN_BB_RST)));
	/*----Ckeck FPGAPHY0 and PHY1 board is OK----*/
	// TODO: this function should be removed on ASIC , Emily 2007.2.2
	for(eCheckItem=(HW90_BLOCK_E)HW90_BLOCK_PHY0; eCheckItem<=HW90_BLOCK_PHY1; eCheckItem++) {
		rtStatus = rtl8192_phy_checkBBAndRF(dev, (HW90_BLOCK_E)eCheckItem, (RF90_RADIO_PATH_E)0); //don't care RF path
		if(rtStatus != RT_STATUS_SUCCESS) {
			RT_TRACE((COMP_ERR | COMP_PHY), "PHY_RF8256_Config():Check PHY%d Fail!!\n", eCheckItem-1);
			return rtStatus;
		}
	}
	/*---- Set CCK and OFDM Block "OFF"----*/
	rtl8192_setBBreg(dev, rFPGA0_RFMOD, bCCKEn|bOFDMEn, 0x0);
	/*----BB Register Initilazation----*/
	//==m==>Set PHY REG From Header<==m==
	rtl8192_phyConfigBB(dev, BaseBand_Config_PHY_REG);
	/*----Set BB reset de-Active----*/
	dwRegValue = read_nic_dword(dev, CPU_GEN);
	write_nic_dword(dev, CPU_GEN, (dwRegValue|CPU_GEN_BB_RST));
	/*----BB AGC table Initialization----*/
	//==m==>Set PHY REG From Header<==m==
	rtl8192_phyConfigBB(dev, BaseBand_Config_AGC_TAB);
	if (priv->card_8192_version > VERSION_8190_BD) {
		if(priv->rf_type == RF_2T4R) {
			// Antenna gain offset from B/C/D to A
			dwRegValue = ( priv->AntennaTxPwDiff[2]<<8 | priv->AntennaTxPwDiff[1]<<4 | priv->AntennaTxPwDiff[0]);
		} else
			dwRegValue = 0x0;	//Antenna gain offset doesn't make sense in RF 1T2R.
		rtl8192_setBBreg(dev, rFPGA0_TxGainStage, (bXBTxAGC|bXCTxAGC|bXDTxAGC), dwRegValue);
		//XSTALLCap
#ifdef RTL8190P
		dwRegValue = priv->CrystalCap & 0x3;	// bit0~1 of crystal cap
		rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, bXtalCap01, dwRegValue);
		dwRegValue = ((priv->CrystalCap & 0xc)>>2);	// bit2~3 of crystal cap
		rtl8192_setBBreg(dev, rFPGA0_AnalogParameter2, bXtalCap23, dwRegValue);
#else
#ifdef RTL8192E
		dwRegValue = priv->CrystalCap;
		rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, bXtalCap92x, dwRegValue);
#endif
#endif
	}
	// Check if the CCK HighPower is turned ON.
	// This is used to calculate PWDB.
	//
	priv->bCckHighPower = (u8)(rtl8192_QueryBBReg(dev, rFPGA0_XA_HSSIParameter2, 0x200));
	return rtStatus;
}

/******************************************************************************
 *function:  This function initialize BB&RF
 *   input:  net_device dev
 *   output: none
 *   return: none
 *   notice: Initialization value may change all the time, so please make
 *           sure it has been synced with the newest.
 ******************************************************************************/
RT_STATUS rtl8192_BBConfig(struct net_device* dev)
{
	rtl8192_InitBBRFRegDef(dev);
	//config BB&RF.
	// As hardCode based initialization has not been well
	// implemented, so use file first. FIXME: should implement it for hardcode?
	return rtl8192_BB_Config_ParaFile(dev);
}

/******************************************************************************
 *function:  This function obtains the initialization value of Tx power Level offset
 *   input:  net_device dev
 *   output: none
 *   return: none
 ******************************************************************************/
void rtl8192_phy_getTxPower(struct net_device* dev)
{
	struct r8192_priv *priv = ieee80211_priv(dev);

	// Snapshot the power-on Tx AGC register contents so later power
	// adjustments can be computed relative to the hardware defaults.
#ifdef RTL8190P
	priv->MCSTxPowerLevelOriginalOffset[0] = read_nic_dword(dev, MCS_TXAGC);
	priv->MCSTxPowerLevelOriginalOffset[1] = read_nic_dword(dev, (MCS_TXAGC+4));
	priv->CCKTxPowerLevelOriginalOffset = read_nic_dword(dev, CCK_TXAGC);
#else
#ifdef RTL8192E
	priv->MCSTxPowerLevelOriginalOffset[0] = read_nic_dword(dev, rTxAGC_Rate18_06);
	priv->MCSTxPowerLevelOriginalOffset[1] = read_nic_dword(dev, rTxAGC_Rate54_24);
	priv->MCSTxPowerLevelOriginalOffset[2] = read_nic_dword(dev, rTxAGC_Mcs03_Mcs00);
	priv->MCSTxPowerLevelOriginalOffset[3] = read_nic_dword(dev, rTxAGC_Mcs07_Mcs04);
	priv->MCSTxPowerLevelOriginalOffset[4] = read_nic_dword(dev, rTxAGC_Mcs11_Mcs08);
	priv->MCSTxPowerLevelOriginalOffset[5] = read_nic_dword(dev, rTxAGC_Mcs15_Mcs12);
#endif
#endif
	// read rx initial gain
	priv->DefaultInitialGain[0] = read_nic_byte(dev, rOFDM0_XAAGCCore1);
	priv->DefaultInitialGain[1] = read_nic_byte(dev, rOFDM0_XBAGCCore1);
	priv->DefaultInitialGain[2] = read_nic_byte(dev, rOFDM0_XCAGCCore1);
	priv->DefaultInitialGain[3] = read_nic_byte(dev, rOFDM0_XDAGCCore1);
	RT_TRACE(COMP_INIT, "Default initial gain (c50=0x%x, c58=0x%x, c60=0x%x, c68=0x%x) \n",
		priv->DefaultInitialGain[0], priv->DefaultInitialGain[1],
		priv->DefaultInitialGain[2], priv->DefaultInitialGain[3]);
	// read framesync
	priv->framesync = read_nic_byte(dev, rOFDM0_RxDetector3);
	priv->framesyncC34 = read_nic_dword(dev, rOFDM0_RxDetector2);
	RT_TRACE(COMP_INIT, "Default framesync (0x%x) = 0x%x \n", rOFDM0_RxDetector3, priv->framesync);
	// read SIFS (save the value read fome MACPHY_REG.txt)
	priv->SifsTime = read_nic_word(dev, SIFS);
}

/******************************************************************************
 *function:  This function obtains the initialization value of Tx power Level offset
 *   input:  net_device dev
 *   output: none
 *   return: none
 ******************************************************************************/
void rtl8192_phy_setTxPower(struct net_device* dev, u8 channel)
{
	struct r8192_priv *priv = ieee80211_priv(dev);
	u8 powerlevel = 0,powerlevelOFDM24G = 0;
	// NOTE(review): ant_pwr_diff is 'char' whose signedness is
	// implementation-defined; it is masked to 4 bits below, so only the low
	// nibble matters - confirm the subtraction can never need sign handling.
	char ant_pwr_diff;
	u32 u4RegValue;

	// Per-channel power levels come from EEPROM; layout differs by part.
	if(priv->epromtype == EPROM_93c46) {
		powerlevel = priv->TxPowerLevelCCK[channel-1];
		powerlevelOFDM24G = priv->TxPowerLevelOFDM24G[channel-1];
	} else if(priv->epromtype == EPROM_93c56) {
		if(priv->rf_type == RF_1T2R) {
			powerlevel = priv->TxPowerLevelCCK_C[channel-1];
			powerlevelOFDM24G = priv->TxPowerLevelOFDM24G_C[channel-1];
		} else if(priv->rf_type == RF_2T4R) {
			// Mainly we use RF-A Tx Power to write the Tx Power registers, but the RF-C Tx
			// Power must be calculated by the antenna diff.
			// So we have to rewrite Antenna gain offset register here.
			powerlevel = priv->TxPowerLevelCCK_A[channel-1];
			powerlevelOFDM24G = priv->TxPowerLevelOFDM24G_A[channel-1];
			ant_pwr_diff = priv->TxPowerLevelOFDM24G_C[channel-1] - priv->TxPowerLevelOFDM24G_A[channel-1];
			ant_pwr_diff &= 0xf;
			//DbgPrint(" ant_pwr_diff = 0x%x", (u8)(ant_pwr_diff));
			priv->RF_C_TxPwDiff = ant_pwr_diff;
			priv->AntennaTxPwDiff[2] = 0;// RF-D, don't care
			priv->AntennaTxPwDiff[1] = (u8)(ant_pwr_diff);// RF-C
			priv->AntennaTxPwDiff[0] = 0;// RF-B, don't care
			// Antenna gain offset from B/C/D to A
			u4RegValue = ( priv->AntennaTxPwDiff[2]<<8 | priv->AntennaTxPwDiff[1]<<4 | priv->AntennaTxPwDiff[0]);
			rtl8192_setBBreg(dev, rFPGA0_TxGainStage, (bXBTxAGC|bXCTxAGC|bXDTxAGC), u4RegValue);
		}
	}
#ifdef TODO
	//
	// CCX 2 S31, AP control of client transmit power:
	// 1.
We shall not exceed Cell Power Limit as possible as we can. // 2. Tolerance is +/- 5dB. // 3. 802.11h Power Contraint takes higher precedence over CCX Cell Power Limit. // // TODO: // 1. 802.11h power contraint // // 071011, by rcnjko. // if( pMgntInfo->OpMode == RT_OP_MODE_INFRASTRUCTURE && pMgntInfo->bWithCcxCellPwr && channel == pMgntInfo->dot11CurrentChannelNumber) { u8 CckCellPwrIdx = DbmToTxPwrIdx(Adapter, WIRELESS_MODE_B, pMgntInfo->CcxCellPwr); u8 LegacyOfdmCellPwrIdx = DbmToTxPwrIdx(Adapter, WIRELESS_MODE_G, pMgntInfo->CcxCellPwr); u8 OfdmCellPwrIdx = DbmToTxPwrIdx(Adapter, WIRELESS_MODE_N_24G, pMgntInfo->CcxCellPwr); RT_TRACE(COMP_TXAGC, DBG_LOUD, ("CCX Cell Limit: %d dbm => CCK Tx power index : %d, Legacy OFDM Tx power index : %d, OFDM Tx power index: %d\n", pMgntInfo->CcxCellPwr, CckCellPwrIdx, LegacyOfdmCellPwrIdx, OfdmCellPwrIdx)); RT_TRACE(COMP_TXAGC, DBG_LOUD, ("EEPROM channel(%d) => CCK Tx power index: %d, Legacy OFDM Tx power index : %d, OFDM Tx power index: %d\n", channel, powerlevel, powerlevelOFDM24G + pHalData->LegacyHTTxPowerDiff, powerlevelOFDM24G)); // CCK if(powerlevel > CckCellPwrIdx) powerlevel = CckCellPwrIdx; // Legacy OFDM, HT OFDM if(powerlevelOFDM24G + pHalData->LegacyHTTxPowerDiff > OfdmCellPwrIdx) { if((OfdmCellPwrIdx - pHalData->LegacyHTTxPowerDiff) > 0) { powerlevelOFDM24G = OfdmCellPwrIdx - pHalData->LegacyHTTxPowerDiff; } else { LegacyOfdmCellPwrIdx = 0; } } RT_TRACE(COMP_TXAGC, DBG_LOUD, ("Altered CCK Tx power index : %d, Legacy OFDM Tx power index: %d, OFDM Tx power index: %d\n", powerlevel, powerlevelOFDM24G + pHalData->LegacyHTTxPowerDiff, powerlevelOFDM24G)); } pHalData->CurrentCckTxPwrIdx = powerlevel; pHalData->CurrentOfdm24GTxPwrIdx = powerlevelOFDM24G; #endif switch(priv->rf_chip) { case RF_8225: // PHY_SetRF8225CckTxPower(Adapter, powerlevel); // PHY_SetRF8225OfdmTxPower(Adapter, powerlevelOFDM24G); break; case RF_8256: PHY_SetRF8256CCKTxPower(dev, powerlevel); //need further implement PHY_SetRF8256OFDMTxPower(dev, 
powerlevelOFDM24G); break; case RF_8258: break; default: RT_TRACE(COMP_ERR, "unknown rf chip in funtion %s()\n", __FUNCTION__); break; } } /****************************************************************************** *function: This function check Rf chip to do RF config * input: net_device dev * output: none * return: only 8256 is supported * ***************************************************************************/ RT_STATUS rtl8192_phy_RFConfig(struct net_device* dev) { struct r8192_priv *priv = ieee80211_priv(dev); RT_STATUS rtStatus = RT_STATUS_SUCCESS; switch(priv->rf_chip) { case RF_8225: // rtStatus = PHY_RF8225_Config(Adapter); break; case RF_8256: rtStatus = PHY_RF8256_Config(dev); break; case RF_8258: break; case RF_PSEUDO_11N: //rtStatus = PHY_RF8225_Config(Adapter); break; default: RT_TRACE(COMP_ERR, "error chip id\n"); break; } return rtStatus; } /****************************************************************************** *function: This function update Initial gain * input: net_device dev * output: none * return: As Windows has not implemented this, wait for complement * ***************************************************************************/ void rtl8192_phy_updateInitGain(struct net_device* dev) { } /****************************************************************************** *function: This function read RF parameters from general head file, and do RF 3-wire * input: net_device dev * output: none * return: return code show if RF configuration is successful(0:pass, 1:fail) * Note: Delay may be required for RF configuration * ***************************************************************************/ u8 rtl8192_phy_ConfigRFWithHeaderFile(struct net_device* dev, RF90_RADIO_PATH_E eRFPath) { int i; //u32* pRFArray; u8 ret = 0; switch(eRFPath){ case RF90_PATH_A: for(i = 0;i<RadioA_ArrayLength; i=i+2){ if(Rtl819XRadioA_Array[i] == 0xfe){ msleep(100); continue; } rtl8192_phy_SetRFReg(dev, eRFPath, Rtl819XRadioA_Array[i], bMask12Bits, 
Rtl819XRadioA_Array[i+1]); //msleep(1); } break; case RF90_PATH_B: for(i = 0;i<RadioB_ArrayLength; i=i+2){ if(Rtl819XRadioB_Array[i] == 0xfe){ msleep(100); continue; } rtl8192_phy_SetRFReg(dev, eRFPath, Rtl819XRadioB_Array[i], bMask12Bits, Rtl819XRadioB_Array[i+1]); //msleep(1); } break; case RF90_PATH_C: for(i = 0;i<RadioC_ArrayLength; i=i+2){ if(Rtl819XRadioC_Array[i] == 0xfe){ msleep(100); continue; } rtl8192_phy_SetRFReg(dev, eRFPath, Rtl819XRadioC_Array[i], bMask12Bits, Rtl819XRadioC_Array[i+1]); //msleep(1); } break; case RF90_PATH_D: for(i = 0;i<RadioD_ArrayLength; i=i+2){ if(Rtl819XRadioD_Array[i] == 0xfe){ msleep(100); continue; } rtl8192_phy_SetRFReg(dev, eRFPath, Rtl819XRadioD_Array[i], bMask12Bits, Rtl819XRadioD_Array[i+1]); //msleep(1); } break; default: break; } return ret; } /****************************************************************************** *function: This function set Tx Power of the channel * input: struct net_device *dev * u8 channel * output: none * return: none * Note: * ***************************************************************************/ static void rtl8192_SetTxPowerLevel(struct net_device *dev, u8 channel) { struct r8192_priv *priv = ieee80211_priv(dev); u8 powerlevel = priv->TxPowerLevelCCK[channel-1]; u8 powerlevelOFDM24G = priv->TxPowerLevelOFDM24G[channel-1]; switch(priv->rf_chip) { case RF_8225: #ifdef TO_DO_LIST PHY_SetRF8225CckTxPower(Adapter, powerlevel); PHY_SetRF8225OfdmTxPower(Adapter, powerlevelOFDM24G); #endif break; case RF_8256: PHY_SetRF8256CCKTxPower(dev, powerlevel); PHY_SetRF8256OFDMTxPower(dev, powerlevelOFDM24G); break; case RF_8258: break; default: RT_TRACE(COMP_ERR, "unknown rf chip ID in rtl8192_SetTxPowerLevel()\n"); break; } } /**************************************************************************************** *function: This function set command table variable(struct SwChnlCmd). * input: SwChnlCmd* CmdTable //table to be set. 
* u32 CmdTableIdx //variable index in table to be set * u32 CmdTableSz //table size. * SwChnlCmdID CmdID //command ID to set. * u32 Para1 * u32 Para2 * u32 msDelay * output: * return: true if finished, false otherwise * Note: * ************************************************************************************/ static u8 rtl8192_phy_SetSwChnlCmdArray( SwChnlCmd* CmdTable, u32 CmdTableIdx, u32 CmdTableSz, SwChnlCmdID CmdID, u32 Para1, u32 Para2, u32 msDelay ) { SwChnlCmd* pCmd; if(CmdTable == NULL) { RT_TRACE(COMP_ERR, "phy_SetSwChnlCmdArray(): CmdTable cannot be NULL.\n"); return false; } if(CmdTableIdx >= CmdTableSz) { RT_TRACE(COMP_ERR, "phy_SetSwChnlCmdArray(): Access invalid index, please check size of the table, CmdTableIdx:%d, CmdTableSz:%d\n", CmdTableIdx, CmdTableSz); return false; } pCmd = CmdTable + CmdTableIdx; pCmd->CmdID = CmdID; pCmd->Para1 = Para1; pCmd->Para2 = Para2; pCmd->msDelay = msDelay; return true; } /****************************************************************************** *function: This function set channel step by step * input: struct net_device *dev * u8 channel * u8* stage //3 stages * u8* step // * u32* delay //whether need to delay * output: store new stage, step and delay for next step(combine with function above) * return: true if finished, false otherwise * Note: Wait for simpler function to replace it //wb * ***************************************************************************/ static u8 rtl8192_phy_SwChnlStepByStep(struct net_device *dev, u8 channel, u8* stage, u8* step, u32* delay) { struct r8192_priv *priv = ieee80211_priv(dev); // PCHANNEL_ACCESS_SETTING pChnlAccessSetting; SwChnlCmd PreCommonCmd[MAX_PRECMD_CNT]; u32 PreCommonCmdCnt; SwChnlCmd PostCommonCmd[MAX_POSTCMD_CNT]; u32 PostCommonCmdCnt; SwChnlCmd RfDependCmd[MAX_RFDEPENDCMD_CNT]; u32 RfDependCmdCnt; SwChnlCmd *CurrentCmd = NULL; //RF90_RADIO_PATH_E eRFPath; u8 eRFPath; // u32 RfRetVal; // u8 RetryCnt; RT_TRACE(COMP_TRACE, "====>%s()====stage:%d, step:%d, 
channel:%d\n", __FUNCTION__, *stage, *step, channel); // RT_ASSERT(IsLegalChannel(Adapter, channel), ("illegal channel: %d\n", channel)); #ifdef ENABLE_DOT11D if (!IsLegalChannel(priv->ieee80211, channel)) { RT_TRACE(COMP_ERR, "=============>set to illegal channel:%d\n", channel); return true; //return true to tell upper caller function this channel setting is finished! Or it will in while loop. } #endif //for(eRFPath = RF90_PATH_A; eRFPath <pHalData->NumTotalRFPath; eRFPath++) //for(eRFPath = 0; eRFPath <RF90_PATH_MAX; eRFPath++) { //if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath)) // return false; // <1> Fill up pre common command. PreCommonCmdCnt = 0; rtl8192_phy_SetSwChnlCmdArray(PreCommonCmd, PreCommonCmdCnt++, MAX_PRECMD_CNT, CmdID_SetTxPowerLevel, 0, 0, 0); rtl8192_phy_SetSwChnlCmdArray(PreCommonCmd, PreCommonCmdCnt++, MAX_PRECMD_CNT, CmdID_End, 0, 0, 0); // <2> Fill up post common command. PostCommonCmdCnt = 0; rtl8192_phy_SetSwChnlCmdArray(PostCommonCmd, PostCommonCmdCnt++, MAX_POSTCMD_CNT, CmdID_End, 0, 0, 0); // <3> Fill up RF dependent command. RfDependCmdCnt = 0; switch( priv->rf_chip ) { case RF_8225: if (!(channel >= 1 && channel <= 14)) { RT_TRACE(COMP_ERR, "illegal channel for Zebra 8225: %d\n", channel); return false; } rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT, CmdID_RF_WriteReg, rZebra1_Channel, RF_CHANNEL_TABLE_ZEBRA[channel], 10); rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT, CmdID_End, 0, 0, 0); break; case RF_8256: // TEST!! This is not the table for 8256!! 
if (!(channel >= 1 && channel <= 14)) { RT_TRACE(COMP_ERR, "illegal channel for Zebra 8256: %d\n", channel); return false; } rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT, CmdID_RF_WriteReg, rZebra1_Channel, channel, 10); rtl8192_phy_SetSwChnlCmdArray(RfDependCmd, RfDependCmdCnt++, MAX_RFDEPENDCMD_CNT, CmdID_End, 0, 0, 0); break; case RF_8258: break; default: RT_TRACE(COMP_ERR, "Unknown RFChipID: %d\n", priv->rf_chip); return false; break; } do{ switch(*stage) { case 0: CurrentCmd=&PreCommonCmd[*step]; break; case 1: CurrentCmd=&RfDependCmd[*step]; break; case 2: CurrentCmd=&PostCommonCmd[*step]; break; } if(CurrentCmd->CmdID==CmdID_End) { if((*stage)==2) { return true; } else { (*stage)++; (*step)=0; continue; } } switch(CurrentCmd->CmdID) { case CmdID_SetTxPowerLevel: if(priv->card_8192_version > (u8)VERSION_8190_BD) //xiong: consider it later! rtl8192_SetTxPowerLevel(dev,channel); break; case CmdID_WritePortUlong: write_nic_dword(dev, CurrentCmd->Para1, CurrentCmd->Para2); break; case CmdID_WritePortUshort: write_nic_word(dev, CurrentCmd->Para1, (u16)CurrentCmd->Para2); break; case CmdID_WritePortUchar: write_nic_byte(dev, CurrentCmd->Para1, (u8)CurrentCmd->Para2); break; case CmdID_RF_WriteReg: for(eRFPath = 0; eRFPath <priv->NumTotalRFPath; eRFPath++) rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, CurrentCmd->Para1, bMask12Bits, CurrentCmd->Para2<<7); break; default: break; } break; }while(true); }/*for(Number of RF paths)*/ (*delay)=CurrentCmd->msDelay; (*step)++; return false; } /****************************************************************************** *function: This function does acturally set channel work * input: struct net_device *dev * u8 channel * output: none * return: noin * Note: We should not call this function directly * ***************************************************************************/ static void rtl8192_phy_FinishSwChnlNow(struct net_device *dev, u8 channel) { struct r8192_priv *priv = 
ieee80211_priv(dev); u32 delay = 0; while(!rtl8192_phy_SwChnlStepByStep(dev,channel,&priv->SwChnlStage,&priv->SwChnlStep,&delay)) { if(delay>0) msleep(delay);//or mdelay? need further consideration if(!priv->up) break; } } /****************************************************************************** *function: Callback routine of the work item for switch channel. * input: * * output: none * return: noin * ***************************************************************************/ void rtl8192_SwChnl_WorkItem(struct net_device *dev) { struct r8192_priv *priv = ieee80211_priv(dev); RT_TRACE(COMP_TRACE, "==> SwChnlCallback819xUsbWorkItem()\n"); RT_TRACE(COMP_TRACE, "=====>--%s(), set chan:%d, priv:%p\n", __FUNCTION__, priv->chan, priv); rtl8192_phy_FinishSwChnlNow(dev , priv->chan); RT_TRACE(COMP_TRACE, "<== SwChnlCallback819xUsbWorkItem()\n"); } /****************************************************************************** *function: This function scheduled actural workitem to set channel * input: net_device dev * u8 channel //channel to set * output: none * return: return code show if workitem is scheduled(1:pass, 0:fail) * Note: Delay may be required for RF configuration * ***************************************************************************/ u8 rtl8192_phy_SwChnl(struct net_device* dev, u8 channel) { struct r8192_priv *priv = ieee80211_priv(dev); RT_TRACE(COMP_PHY, "=====>%s()\n", __FUNCTION__); if(!priv->up) return false; if(priv->SwChnlInProgress) return false; // if(pHalData->SetBWModeInProgress) // return; //-------------------------------------------- switch(priv->ieee80211->mode) { case WIRELESS_MODE_A: case WIRELESS_MODE_N_5G: if (channel<=14){ RT_TRACE(COMP_ERR, "WIRELESS_MODE_A but channel<=14"); return false; } break; case WIRELESS_MODE_B: if (channel>14){ RT_TRACE(COMP_ERR, "WIRELESS_MODE_B but channel>14"); return false; } break; case WIRELESS_MODE_G: case WIRELESS_MODE_N_24G: if (channel>14){ RT_TRACE(COMP_ERR, "WIRELESS_MODE_G but 
channel>14"); return false; } break; } //-------------------------------------------- priv->SwChnlInProgress = true; if(channel == 0) channel = 1; priv->chan=channel; priv->SwChnlStage=0; priv->SwChnlStep=0; // schedule_work(&(priv->SwChnlWorkItem)); // rtl8192_SwChnl_WorkItem(dev); if(priv->up) { // queue_work(priv->priv_wq,&(priv->SwChnlWorkItem)); rtl8192_SwChnl_WorkItem(dev); } priv->SwChnlInProgress = false; return true; } static void CCK_Tx_Power_Track_BW_Switch_TSSI(struct net_device *dev ) { struct r8192_priv *priv = ieee80211_priv(dev); switch(priv->CurrentChannelBW) { /* 20 MHz channel*/ case HT_CHANNEL_WIDTH_20: //added by vivi, cck,tx power track, 20080703 priv->CCKPresentAttentuation = priv->CCKPresentAttentuation_20Mdefault + priv->CCKPresentAttentuation_difference; if(priv->CCKPresentAttentuation > (CCKTxBBGainTableLength-1)) priv->CCKPresentAttentuation = CCKTxBBGainTableLength-1; if(priv->CCKPresentAttentuation < 0) priv->CCKPresentAttentuation = 0; RT_TRACE(COMP_POWER_TRACKING, "20M, priv->CCKPresentAttentuation = %d\n", priv->CCKPresentAttentuation); if(priv->ieee80211->current_network.channel== 14 && !priv->bcck_in_ch14) { priv->bcck_in_ch14 = TRUE; dm_cck_txpower_adjust(dev,priv->bcck_in_ch14); } else if(priv->ieee80211->current_network.channel != 14 && priv->bcck_in_ch14) { priv->bcck_in_ch14 = FALSE; dm_cck_txpower_adjust(dev,priv->bcck_in_ch14); } else dm_cck_txpower_adjust(dev,priv->bcck_in_ch14); break; /* 40 MHz channel*/ case HT_CHANNEL_WIDTH_20_40: //added by vivi, cck,tx power track, 20080703 priv->CCKPresentAttentuation = priv->CCKPresentAttentuation_40Mdefault + priv->CCKPresentAttentuation_difference; RT_TRACE(COMP_POWER_TRACKING, "40M, priv->CCKPresentAttentuation = %d\n", priv->CCKPresentAttentuation); if(priv->CCKPresentAttentuation > (CCKTxBBGainTableLength-1)) priv->CCKPresentAttentuation = CCKTxBBGainTableLength-1; if(priv->CCKPresentAttentuation < 0) priv->CCKPresentAttentuation = 0; 
if(priv->ieee80211->current_network.channel == 14 && !priv->bcck_in_ch14) { priv->bcck_in_ch14 = TRUE; dm_cck_txpower_adjust(dev,priv->bcck_in_ch14); } else if(priv->ieee80211->current_network.channel != 14 && priv->bcck_in_ch14) { priv->bcck_in_ch14 = FALSE; dm_cck_txpower_adjust(dev,priv->bcck_in_ch14); } else dm_cck_txpower_adjust(dev,priv->bcck_in_ch14); break; } } #ifndef RTL8190P static void CCK_Tx_Power_Track_BW_Switch_ThermalMeter(struct net_device *dev) { struct r8192_priv *priv = ieee80211_priv(dev); if(priv->ieee80211->current_network.channel == 14 && !priv->bcck_in_ch14) priv->bcck_in_ch14 = TRUE; else if(priv->ieee80211->current_network.channel != 14 && priv->bcck_in_ch14) priv->bcck_in_ch14 = FALSE; //write to default index and tx power track will be done in dm. switch(priv->CurrentChannelBW) { /* 20 MHz channel*/ case HT_CHANNEL_WIDTH_20: if(priv->Record_CCK_20Mindex == 0) priv->Record_CCK_20Mindex = 6; //set default value. priv->CCK_index = priv->Record_CCK_20Mindex;//6; RT_TRACE(COMP_POWER_TRACKING, "20MHz, CCK_Tx_Power_Track_BW_Switch_ThermalMeter(),CCK_index = %d\n", priv->CCK_index); break; /* 40 MHz channel*/ case HT_CHANNEL_WIDTH_20_40: priv->CCK_index = priv->Record_CCK_40Mindex;//0; RT_TRACE(COMP_POWER_TRACKING, "40MHz, CCK_Tx_Power_Track_BW_Switch_ThermalMeter(), CCK_index = %d\n", priv->CCK_index); break; } dm_cck_txpower_adjust(dev, priv->bcck_in_ch14); } #endif static void CCK_Tx_Power_Track_BW_Switch(struct net_device *dev) { #ifdef RTL8192E struct r8192_priv *priv = ieee80211_priv(dev); #endif #ifdef RTL8190P CCK_Tx_Power_Track_BW_Switch_TSSI(dev); #else //if(pHalData->bDcut == TRUE) if(priv->IC_Cut >= IC_VersionCut_D) CCK_Tx_Power_Track_BW_Switch_TSSI(dev); else CCK_Tx_Power_Track_BW_Switch_ThermalMeter(dev); #endif } // /****************************************************************************** *function: Callback routine of the work item for set bandwidth mode. 
* input: struct net_device *dev * HT_CHANNEL_WIDTH Bandwidth //20M or 40M * HT_EXTCHNL_OFFSET Offset //Upper, Lower, or Don't care * output: none * return: none * Note: I doubt whether SetBWModeInProgress flag is necessary as we can * test whether current work in the queue or not.//do I? * ***************************************************************************/ void rtl8192_SetBWModeWorkItem(struct net_device *dev) { struct r8192_priv *priv = ieee80211_priv(dev); u8 regBwOpMode; RT_TRACE(COMP_SWBW, "==>rtl8192_SetBWModeWorkItem() Switch to %s bandwidth\n", priv->CurrentChannelBW == HT_CHANNEL_WIDTH_20?"20MHz":"40MHz") if(priv->rf_chip== RF_PSEUDO_11N) { priv->SetBWModeInProgress= false; return; } if(!priv->up) { priv->SetBWModeInProgress= false; return; } //<1>Set MAC register regBwOpMode = read_nic_byte(dev, BW_OPMODE); switch(priv->CurrentChannelBW) { case HT_CHANNEL_WIDTH_20: regBwOpMode |= BW_OPMODE_20MHZ; // 2007/02/07 Mark by Emily becasue we have not verify whether this register works write_nic_byte(dev, BW_OPMODE, regBwOpMode); break; case HT_CHANNEL_WIDTH_20_40: regBwOpMode &= ~BW_OPMODE_20MHZ; // 2007/02/07 Mark by Emily becasue we have not verify whether this register works write_nic_byte(dev, BW_OPMODE, regBwOpMode); break; default: RT_TRACE(COMP_ERR, "SetChannelBandwidth819xUsb(): unknown Bandwidth: %#X\n",priv->CurrentChannelBW); break; } //<2>Set PHY related register switch(priv->CurrentChannelBW) { case HT_CHANNEL_WIDTH_20: // Add by Vivi 20071119 rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x0); rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x0); // rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 1); // Correct the tx power for CCK rate in 20M. 
Suggest by YN, 20071207 // write_nic_dword(dev, rCCK0_TxFilter1, 0x1a1b0000); // write_nic_dword(dev, rCCK0_TxFilter2, 0x090e1317); // write_nic_dword(dev, rCCK0_DebugPort, 0x00000204); if(!priv->btxpower_tracking) { write_nic_dword(dev, rCCK0_TxFilter1, 0x1a1b0000); write_nic_dword(dev, rCCK0_TxFilter2, 0x090e1317); write_nic_dword(dev, rCCK0_DebugPort, 0x00000204); } else CCK_Tx_Power_Track_BW_Switch(dev); #ifdef RTL8190P rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, bADClkPhase, 1); rtl8192_setBBreg(dev, rOFDM0_RxDetector1, bMaskByte0, 0x44); // 0xc30 is for 8190 only, Emily #else #ifdef RTL8192E rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 1); #endif #endif break; case HT_CHANNEL_WIDTH_20_40: // Add by Vivi 20071119 rtl8192_setBBreg(dev, rFPGA0_RFMOD, bRFMOD, 0x1); rtl8192_setBBreg(dev, rFPGA1_RFMOD, bRFMOD, 0x1); //rtl8192_setBBreg(dev, rCCK0_System, bCCKSideBand, (priv->nCur40MhzPrimeSC>>1)); //rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 0); //rtl8192_setBBreg(dev, rOFDM1_LSTF, 0xC00, priv->nCur40MhzPrimeSC); // Correct the tx power for CCK rate in 40M. Suggest by YN, 20071207 //write_nic_dword(dev, rCCK0_TxFilter1, 0x35360000); //write_nic_dword(dev, rCCK0_TxFilter2, 0x121c252e); //write_nic_dword(dev, rCCK0_DebugPort, 0x00000409); if(!priv->btxpower_tracking) { write_nic_dword(dev, rCCK0_TxFilter1, 0x35360000); write_nic_dword(dev, rCCK0_TxFilter2, 0x121c252e); write_nic_dword(dev, rCCK0_DebugPort, 0x00000409); } else CCK_Tx_Power_Track_BW_Switch(dev); // Set Control channel to upper or lower. These settings are required only for 40MHz rtl8192_setBBreg(dev, rCCK0_System, bCCKSideBand, (priv->nCur40MhzPrimeSC>>1)); rtl8192_setBBreg(dev, rOFDM1_LSTF, 0xC00, priv->nCur40MhzPrimeSC); #ifdef RTL8190P rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, bADClkPhase, 0); rtl8192_setBBreg(dev, rOFDM0_RxDetector1, bMaskByte0, 0x42); // 0xc30 is for 8190 only, Emily // Set whether CCK should be sent in upper or lower channel. Suggest by YN. 
20071207 // It is set in Tx descriptor for 8192x series if(priv->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_UPPER) { rtl8192_setBBreg(dev, rFPGA0_RFMOD, (BIT6|BIT5), 0x01); }else if(priv->nCur40MhzPrimeSC == HAL_PRIME_CHNL_OFFSET_LOWER) { rtl8192_setBBreg(dev, rFPGA0_RFMOD, (BIT6|BIT5), 0x02); } #else #ifdef RTL8192E rtl8192_setBBreg(dev, rFPGA0_AnalogParameter1, 0x00100000, 0); #endif #endif break; default: RT_TRACE(COMP_ERR, "SetChannelBandwidth819xUsb(): unknown Bandwidth: %#X\n" ,priv->CurrentChannelBW); break; } //Skip over setting of J-mode in BB register here. Default value is "None J mode". Emily 20070315 #if 1 //<3>Set RF related register switch( priv->rf_chip ) { case RF_8225: #ifdef TO_DO_LIST PHY_SetRF8225Bandwidth(Adapter, pHalData->CurrentChannelBW); #endif break; case RF_8256: PHY_SetRF8256Bandwidth(dev, priv->CurrentChannelBW); break; case RF_8258: // PHY_SetRF8258Bandwidth(); break; case RF_PSEUDO_11N: // Do Nothing break; default: RT_TRACE(COMP_ERR, "Unknown RFChipID: %d\n", priv->rf_chip); break; } #endif atomic_dec(&(priv->ieee80211->atm_swbw)); priv->SetBWModeInProgress= false; RT_TRACE(COMP_SWBW, "<==SetBWMode819xUsb()"); } /****************************************************************************** *function: This function schedules bandwith switch work. * input: struct net_device *dev * HT_CHANNEL_WIDTH Bandwidth //20M or 40M * HT_EXTCHNL_OFFSET Offset //Upper, Lower, or Don't care * output: none * return: none * Note: I doubt whether SetBWModeInProgress flag is necessary as we can * test whether current work in the queue or not.//do I? 
* ***************************************************************************/ void rtl8192_SetBWMode(struct net_device *dev, HT_CHANNEL_WIDTH Bandwidth, HT_EXTCHNL_OFFSET Offset) { struct r8192_priv *priv = ieee80211_priv(dev); if(priv->SetBWModeInProgress) return; atomic_inc(&(priv->ieee80211->atm_swbw)); priv->SetBWModeInProgress= true; priv->CurrentChannelBW = Bandwidth; if(Offset==HT_EXTCHNL_OFFSET_LOWER) priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_UPPER; else if(Offset==HT_EXTCHNL_OFFSET_UPPER) priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_LOWER; else priv->nCur40MhzPrimeSC = HAL_PRIME_CHNL_OFFSET_DONT_CARE; //queue_work(priv->priv_wq, &(priv->SetBWModeWorkItem)); // schedule_work(&(priv->SetBWModeWorkItem)); rtl8192_SetBWModeWorkItem(dev); } void InitialGain819xPci(struct net_device *dev, u8 Operation) { #define SCAN_RX_INITIAL_GAIN 0x17 #define POWER_DETECTION_TH 0x08 struct r8192_priv *priv = ieee80211_priv(dev); u32 BitMask; u8 initial_gain; if(priv->up) { switch(Operation) { case IG_Backup: RT_TRACE(COMP_SCAN, "IG_Backup, backup the initial gain.\n"); initial_gain = SCAN_RX_INITIAL_GAIN;//pHalData->DefaultInitialGain[0];// BitMask = bMaskByte0; if(dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM) rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); // FW DIG OFF priv->initgain_backup.xaagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XAAGCCore1, BitMask); priv->initgain_backup.xbagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XBAGCCore1, BitMask); priv->initgain_backup.xcagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XCAGCCore1, BitMask); priv->initgain_backup.xdagccore1 = (u8)rtl8192_QueryBBReg(dev, rOFDM0_XDAGCCore1, BitMask); BitMask = bMaskByte2; priv->initgain_backup.cca = (u8)rtl8192_QueryBBReg(dev, rCCK0_CCA, BitMask); RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc50 is %x\n",priv->initgain_backup.xaagccore1); RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc58 is %x\n",priv->initgain_backup.xbagccore1); RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc60 
is %x\n",priv->initgain_backup.xcagccore1); RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xc68 is %x\n",priv->initgain_backup.xdagccore1); RT_TRACE(COMP_SCAN, "Scan InitialGainBackup 0xa0a is %x\n",priv->initgain_backup.cca); RT_TRACE(COMP_SCAN, "Write scan initial gain = 0x%x \n", initial_gain); write_nic_byte(dev, rOFDM0_XAAGCCore1, initial_gain); write_nic_byte(dev, rOFDM0_XBAGCCore1, initial_gain); write_nic_byte(dev, rOFDM0_XCAGCCore1, initial_gain); write_nic_byte(dev, rOFDM0_XDAGCCore1, initial_gain); RT_TRACE(COMP_SCAN, "Write scan 0xa0a = 0x%x \n", POWER_DETECTION_TH); write_nic_byte(dev, 0xa0a, POWER_DETECTION_TH); break; case IG_Restore: RT_TRACE(COMP_SCAN, "IG_Restore, restore the initial gain.\n"); BitMask = 0x7f; //Bit0~ Bit6 if(dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM) rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x8); // FW DIG OFF rtl8192_setBBreg(dev, rOFDM0_XAAGCCore1, BitMask, (u32)priv->initgain_backup.xaagccore1); rtl8192_setBBreg(dev, rOFDM0_XBAGCCore1, BitMask, (u32)priv->initgain_backup.xbagccore1); rtl8192_setBBreg(dev, rOFDM0_XCAGCCore1, BitMask, (u32)priv->initgain_backup.xcagccore1); rtl8192_setBBreg(dev, rOFDM0_XDAGCCore1, BitMask, (u32)priv->initgain_backup.xdagccore1); BitMask = bMaskByte2; rtl8192_setBBreg(dev, rCCK0_CCA, BitMask, (u32)priv->initgain_backup.cca); RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc50 is %x\n",priv->initgain_backup.xaagccore1); RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc58 is %x\n",priv->initgain_backup.xbagccore1); RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc60 is %x\n",priv->initgain_backup.xcagccore1); RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xc68 is %x\n",priv->initgain_backup.xdagccore1); RT_TRACE(COMP_SCAN, "Scan BBInitialGainRestore 0xa0a is %x\n",priv->initgain_backup.cca); rtl8192_phy_setTxPower(dev,priv->ieee80211->current_network.channel); if(dm_digtable.dig_algorithm == DIG_ALGO_BY_FALSE_ALARM) rtl8192_setBBreg(dev, UFWP, bMaskByte1, 0x1); // FW DIG ON break; 
default: RT_TRACE(COMP_SCAN, "Unknown IG Operation. \n"); break; } } }
gpl-2.0
y503869692/git
rerere.c
109
17925
#include "cache.h" #include "lockfile.h" #include "string-list.h" #include "rerere.h" #include "xdiff-interface.h" #include "dir.h" #include "resolve-undo.h" #include "ll-merge.h" #include "attr.h" #include "pathspec.h" #define RESOLVED 0 #define PUNTED 1 #define THREE_STAGED 2 void *RERERE_RESOLVED = &RERERE_RESOLVED; /* if rerere_enabled == -1, fall back to detection of .git/rr-cache */ static int rerere_enabled = -1; /* automatically update cleanly resolved paths to the index */ static int rerere_autoupdate; static char *merge_rr_path; const char *rerere_path(const char *hex, const char *file) { return git_path("rr-cache/%s/%s", hex, file); } static int has_rerere_resolution(const char *hex) { struct stat st; return !stat(rerere_path(hex, "postimage"), &st); } static void read_rr(struct string_list *rr) { unsigned char sha1[20]; char buf[PATH_MAX]; FILE *in = fopen(merge_rr_path, "r"); if (!in) return; while (fread(buf, 40, 1, in) == 1) { int i; char *name; if (get_sha1_hex(buf, sha1)) die("corrupt MERGE_RR"); buf[40] = '\0'; name = xstrdup(buf); if (fgetc(in) != '\t') die("corrupt MERGE_RR"); for (i = 0; i < sizeof(buf); i++) { int c = fgetc(in); if (c < 0) die("corrupt MERGE_RR"); buf[i] = c; if (c == 0) break; } if (i == sizeof(buf)) die("filename too long"); string_list_insert(rr, buf)->util = name; } fclose(in); } static struct lock_file write_lock; static int write_rr(struct string_list *rr, int out_fd) { int i; for (i = 0; i < rr->nr; i++) { const char *path; int length; if (!rr->items[i].util) continue; path = rr->items[i].string; length = strlen(path) + 1; if (write_in_full(out_fd, rr->items[i].util, 40) != 40 || write_str_in_full(out_fd, "\t") != 1 || write_in_full(out_fd, path, length) != length) die("unable to write rerere record"); } if (commit_lock_file(&write_lock) != 0) die("unable to write rerere record"); return 0; } static void ferr_write(const void *p, size_t count, FILE *fp, int *err) { if (!count || *err) return; if (fwrite(p, count, 1, fp) 
!= 1) *err = errno; } static inline void ferr_puts(const char *s, FILE *fp, int *err) { ferr_write(s, strlen(s), fp, err); } struct rerere_io { int (*getline)(struct strbuf *, struct rerere_io *); FILE *output; int wrerror; /* some more stuff */ }; static void rerere_io_putstr(const char *str, struct rerere_io *io) { if (io->output) ferr_puts(str, io->output, &io->wrerror); } static void rerere_io_putconflict(int ch, int size, struct rerere_io *io) { char buf[64]; while (size) { if (size < sizeof(buf) - 2) { memset(buf, ch, size); buf[size] = '\n'; buf[size + 1] = '\0'; size = 0; } else { int sz = sizeof(buf) - 1; if (size <= sz) sz -= (sz - size) + 1; memset(buf, ch, sz); buf[sz] = '\0'; size -= sz; } rerere_io_putstr(buf, io); } } static void rerere_io_putmem(const char *mem, size_t sz, struct rerere_io *io) { if (io->output) ferr_write(mem, sz, io->output, &io->wrerror); } struct rerere_io_file { struct rerere_io io; FILE *input; }; static int rerere_file_getline(struct strbuf *sb, struct rerere_io *io_) { struct rerere_io_file *io = (struct rerere_io_file *)io_; return strbuf_getwholeline(sb, io->input, '\n'); } static int is_cmarker(char *buf, int marker_char, int marker_size, int want_sp) { while (marker_size--) if (*buf++ != marker_char) return 0; if (want_sp && *buf != ' ') return 0; return isspace(*buf); } static int handle_path(unsigned char *sha1, struct rerere_io *io, int marker_size) { git_SHA_CTX ctx; int hunk_no = 0; enum { RR_CONTEXT = 0, RR_SIDE_1, RR_SIDE_2, RR_ORIGINAL } hunk = RR_CONTEXT; struct strbuf one = STRBUF_INIT, two = STRBUF_INIT; struct strbuf buf = STRBUF_INIT; if (sha1) git_SHA1_Init(&ctx); while (!io->getline(&buf, io)) { if (is_cmarker(buf.buf, '<', marker_size, 1)) { if (hunk != RR_CONTEXT) goto bad; hunk = RR_SIDE_1; } else if (is_cmarker(buf.buf, '|', marker_size, 0)) { if (hunk != RR_SIDE_1) goto bad; hunk = RR_ORIGINAL; } else if (is_cmarker(buf.buf, '=', marker_size, 0)) { if (hunk != RR_SIDE_1 && hunk != RR_ORIGINAL) goto 
bad; hunk = RR_SIDE_2; } else if (is_cmarker(buf.buf, '>', marker_size, 1)) { if (hunk != RR_SIDE_2) goto bad; if (strbuf_cmp(&one, &two) > 0) strbuf_swap(&one, &two); hunk_no++; hunk = RR_CONTEXT; rerere_io_putconflict('<', marker_size, io); rerere_io_putmem(one.buf, one.len, io); rerere_io_putconflict('=', marker_size, io); rerere_io_putmem(two.buf, two.len, io); rerere_io_putconflict('>', marker_size, io); if (sha1) { git_SHA1_Update(&ctx, one.buf ? one.buf : "", one.len + 1); git_SHA1_Update(&ctx, two.buf ? two.buf : "", two.len + 1); } strbuf_reset(&one); strbuf_reset(&two); } else if (hunk == RR_SIDE_1) strbuf_addbuf(&one, &buf); else if (hunk == RR_ORIGINAL) ; /* discard */ else if (hunk == RR_SIDE_2) strbuf_addbuf(&two, &buf); else rerere_io_putstr(buf.buf, io); continue; bad: hunk = 99; /* force error exit */ break; } strbuf_release(&one); strbuf_release(&two); strbuf_release(&buf); if (sha1) git_SHA1_Final(sha1, &ctx); if (hunk != RR_CONTEXT) return -1; return hunk_no; } static int handle_file(const char *path, unsigned char *sha1, const char *output) { int hunk_no = 0; struct rerere_io_file io; int marker_size = ll_merge_marker_size(path); memset(&io, 0, sizeof(io)); io.io.getline = rerere_file_getline; io.input = fopen(path, "r"); io.io.wrerror = 0; if (!io.input) return error("Could not open %s", path); if (output) { io.io.output = fopen(output, "w"); if (!io.io.output) { fclose(io.input); return error("Could not write %s", output); } } hunk_no = handle_path(sha1, (struct rerere_io *)&io, marker_size); fclose(io.input); if (io.io.wrerror) error("There were errors while writing %s (%s)", path, strerror(io.io.wrerror)); if (io.io.output && fclose(io.io.output)) io.io.wrerror = error("Failed to flush %s: %s", path, strerror(errno)); if (hunk_no < 0) { if (output) unlink_or_warn(output); return error("Could not parse conflict hunks in %s", path); } if (io.io.wrerror) return -1; return hunk_no; } struct rerere_io_mem { struct rerere_io io; struct strbuf 
input; }; static int rerere_mem_getline(struct strbuf *sb, struct rerere_io *io_) { struct rerere_io_mem *io = (struct rerere_io_mem *)io_; char *ep; size_t len; strbuf_release(sb); if (!io->input.len) return -1; ep = memchr(io->input.buf, '\n', io->input.len); if (!ep) ep = io->input.buf + io->input.len; else if (*ep == '\n') ep++; len = ep - io->input.buf; strbuf_add(sb, io->input.buf, len); strbuf_remove(&io->input, 0, len); return 0; } static int handle_cache(const char *path, unsigned char *sha1, const char *output) { mmfile_t mmfile[3] = {{NULL}}; mmbuffer_t result = {NULL, 0}; const struct cache_entry *ce; int pos, len, i, hunk_no; struct rerere_io_mem io; int marker_size = ll_merge_marker_size(path); /* * Reproduce the conflicted merge in-core */ len = strlen(path); pos = cache_name_pos(path, len); if (0 <= pos) return -1; pos = -pos - 1; for (i = 0; i < 3; i++) { enum object_type type; unsigned long size; int j; if (active_nr <= pos) break; ce = active_cache[pos++]; if (ce_namelen(ce) != len || memcmp(ce->name, path, len)) continue; j = ce_stage(ce) - 1; mmfile[j].ptr = read_sha1_file(ce->sha1, &type, &size); mmfile[j].size = size; } for (i = 0; i < 3; i++) { if (!mmfile[i].ptr && !mmfile[i].size) mmfile[i].ptr = xstrdup(""); } /* * NEEDSWORK: handle conflicts from merges with * merge.renormalize set, too */ ll_merge(&result, path, &mmfile[0], NULL, &mmfile[1], "ours", &mmfile[2], "theirs", NULL); for (i = 0; i < 3; i++) free(mmfile[i].ptr); memset(&io, 0, sizeof(io)); io.io.getline = rerere_mem_getline; if (output) io.io.output = fopen(output, "w"); else io.io.output = NULL; strbuf_init(&io.input, 0); strbuf_attach(&io.input, result.ptr, result.size, result.size); hunk_no = handle_path(sha1, (struct rerere_io *)&io, marker_size); strbuf_release(&io.input); if (io.io.output) fclose(io.io.output); return hunk_no; } static int check_one_conflict(int i, int *type) { const struct cache_entry *e = active_cache[i]; if (!ce_stage(e)) { *type = RESOLVED; return i 
+ 1; } *type = PUNTED; if (ce_stage(e) == 1) { if (active_nr <= ++i) return i + 1; } /* Only handle regular files with both stages #2 and #3 */ if (i + 1 < active_nr) { const struct cache_entry *e2 = active_cache[i]; const struct cache_entry *e3 = active_cache[i + 1]; if (ce_stage(e2) == 2 && ce_stage(e3) == 3 && ce_same_name(e, e3) && S_ISREG(e2->ce_mode) && S_ISREG(e3->ce_mode)) *type = THREE_STAGED; } /* Skip the entries with the same name */ while (i < active_nr && ce_same_name(e, active_cache[i])) i++; return i; } static int find_conflict(struct string_list *conflict) { int i; if (read_cache() < 0) return error("Could not read index"); for (i = 0; i < active_nr;) { int conflict_type; const struct cache_entry *e = active_cache[i]; i = check_one_conflict(i, &conflict_type); if (conflict_type == THREE_STAGED) string_list_insert(conflict, (const char *)e->name); } return 0; } int rerere_remaining(struct string_list *merge_rr) { int i; if (read_cache() < 0) return error("Could not read index"); for (i = 0; i < active_nr;) { int conflict_type; const struct cache_entry *e = active_cache[i]; i = check_one_conflict(i, &conflict_type); if (conflict_type == PUNTED) string_list_insert(merge_rr, (const char *)e->name); else if (conflict_type == RESOLVED) { struct string_list_item *it; it = string_list_lookup(merge_rr, (const char *)e->name); if (it != NULL) { free(it->util); it->util = RERERE_RESOLVED; } } } return 0; } static int merge(const char *name, const char *path) { int ret; mmfile_t cur = {NULL, 0}, base = {NULL, 0}, other = {NULL, 0}; mmbuffer_t result = {NULL, 0}; if (handle_file(path, NULL, rerere_path(name, "thisimage")) < 0) return 1; if (read_mmfile(&cur, rerere_path(name, "thisimage")) || read_mmfile(&base, rerere_path(name, "preimage")) || read_mmfile(&other, rerere_path(name, "postimage"))) { ret = 1; goto out; } ret = ll_merge(&result, path, &base, NULL, &cur, "", &other, "", NULL); if (!ret) { FILE *f; if (utime(rerere_path(name, "postimage"), NULL) < 
0) warning("failed utime() on %s: %s", rerere_path(name, "postimage"), strerror(errno)); f = fopen(path, "w"); if (!f) return error("Could not open %s: %s", path, strerror(errno)); if (fwrite(result.ptr, result.size, 1, f) != 1) error("Could not write %s: %s", path, strerror(errno)); if (fclose(f)) return error("Writing %s failed: %s", path, strerror(errno)); } out: free(cur.ptr); free(base.ptr); free(other.ptr); free(result.ptr); return ret; } static struct lock_file index_lock; static void update_paths(struct string_list *update) { int i; hold_locked_index(&index_lock, 1); for (i = 0; i < update->nr; i++) { struct string_list_item *item = &update->items[i]; if (add_file_to_cache(item->string, 0)) exit(128); } if (active_cache_changed) { if (write_locked_index(&the_index, &index_lock, COMMIT_LOCK)) die("Unable to write new index file"); } else rollback_lock_file(&index_lock); } static int do_plain_rerere(struct string_list *rr, int fd) { struct string_list conflict = STRING_LIST_INIT_DUP; struct string_list update = STRING_LIST_INIT_DUP; int i; find_conflict(&conflict); /* * MERGE_RR records paths with conflicts immediately after merge * failed. Some of the conflicted paths might have been hand resolved * in the working tree since then, but the initial run would catch all * and register their preimages. */ for (i = 0; i < conflict.nr; i++) { const char *path = conflict.items[i].string; if (!string_list_has_string(rr, path)) { unsigned char sha1[20]; char *hex; int ret; ret = handle_file(path, sha1, NULL); if (ret < 1) continue; hex = xstrdup(sha1_to_hex(sha1)); string_list_insert(rr, path)->util = hex; if (mkdir_in_gitdir(git_path("rr-cache/%s", hex))) continue; handle_file(path, NULL, rerere_path(hex, "preimage")); fprintf(stderr, "Recorded preimage for '%s'\n", path); } } /* * Now some of the paths that had conflicts earlier might have been * hand resolved. Others may be similar to a conflict already that * was resolved before. 
*/ for (i = 0; i < rr->nr; i++) { int ret; const char *path = rr->items[i].string; const char *name = (const char *)rr->items[i].util; if (has_rerere_resolution(name)) { if (!merge(name, path)) { const char *msg; if (rerere_autoupdate) { string_list_insert(&update, path); msg = "Staged '%s' using previous resolution.\n"; } else msg = "Resolved '%s' using previous resolution.\n"; fprintf(stderr, msg, path); goto mark_resolved; } } /* Let's see if we have resolved it. */ ret = handle_file(path, NULL, NULL); if (ret) continue; fprintf(stderr, "Recorded resolution for '%s'.\n", path); copy_file(rerere_path(name, "postimage"), path, 0666); mark_resolved: rr->items[i].util = NULL; } if (update.nr) update_paths(&update); return write_rr(rr, fd); } static void git_rerere_config(void) { git_config_get_bool("rerere.enabled", &rerere_enabled); git_config_get_bool("rerere.autoupdate", &rerere_autoupdate); git_config(git_default_config, NULL); } static int is_rerere_enabled(void) { const char *rr_cache; int rr_cache_exists; if (!rerere_enabled) return 0; rr_cache = git_path("rr-cache"); rr_cache_exists = is_directory(rr_cache); if (rerere_enabled < 0) return rr_cache_exists; if (!rr_cache_exists && mkdir_in_gitdir(rr_cache)) die("Could not create directory %s", rr_cache); return 1; } int setup_rerere(struct string_list *merge_rr, int flags) { int fd; git_rerere_config(); if (!is_rerere_enabled()) return -1; if (flags & (RERERE_AUTOUPDATE|RERERE_NOAUTOUPDATE)) rerere_autoupdate = !!(flags & RERERE_AUTOUPDATE); merge_rr_path = git_pathdup("MERGE_RR"); fd = hold_lock_file_for_update(&write_lock, merge_rr_path, LOCK_DIE_ON_ERROR); read_rr(merge_rr); return fd; } int rerere(int flags) { struct string_list merge_rr = STRING_LIST_INIT_DUP; int fd; fd = setup_rerere(&merge_rr, flags); if (fd < 0) return 0; return do_plain_rerere(&merge_rr, fd); } static int rerere_forget_one_path(const char *path, struct string_list *rr) { const char *filename; char *hex; unsigned char sha1[20]; int 
ret; ret = handle_cache(path, sha1, NULL); if (ret < 1) return error("Could not parse conflict hunks in '%s'", path); hex = xstrdup(sha1_to_hex(sha1)); filename = rerere_path(hex, "postimage"); if (unlink(filename)) return (errno == ENOENT ? error("no remembered resolution for %s", path) : error("cannot unlink %s: %s", filename, strerror(errno))); handle_cache(path, sha1, rerere_path(hex, "preimage")); fprintf(stderr, "Updated preimage for '%s'\n", path); string_list_insert(rr, path)->util = hex; fprintf(stderr, "Forgot resolution for %s\n", path); return 0; } int rerere_forget(struct pathspec *pathspec) { int i, fd; struct string_list conflict = STRING_LIST_INIT_DUP; struct string_list merge_rr = STRING_LIST_INIT_DUP; if (read_cache() < 0) return error("Could not read index"); fd = setup_rerere(&merge_rr, RERERE_NOAUTOUPDATE); if (fd < 0) return 0; unmerge_cache(pathspec); find_conflict(&conflict); for (i = 0; i < conflict.nr; i++) { struct string_list_item *it = &conflict.items[i]; if (!match_pathspec(pathspec, it->string, strlen(it->string), 0, NULL, 0)) continue; rerere_forget_one_path(it->string, &merge_rr); } return write_rr(&merge_rr, fd); } static time_t rerere_created_at(const char *name) { struct stat st; return stat(rerere_path(name, "preimage"), &st) ? (time_t) 0 : st.st_mtime; } static time_t rerere_last_used_at(const char *name) { struct stat st; return stat(rerere_path(name, "postimage"), &st) ? 
(time_t) 0 : st.st_mtime; } static void unlink_rr_item(const char *name) { unlink(rerere_path(name, "thisimage")); unlink(rerere_path(name, "preimage")); unlink(rerere_path(name, "postimage")); rmdir(git_path("rr-cache/%s", name)); } void rerere_gc(struct string_list *rr) { struct string_list to_remove = STRING_LIST_INIT_DUP; DIR *dir; struct dirent *e; int i, cutoff; time_t now = time(NULL), then; int cutoff_noresolve = 15; int cutoff_resolve = 60; git_config_get_int("gc.rerereresolved", &cutoff_resolve); git_config_get_int("gc.rerereunresolved", &cutoff_noresolve); git_config(git_default_config, NULL); dir = opendir(git_path("rr-cache")); if (!dir) die_errno("unable to open rr-cache directory"); while ((e = readdir(dir))) { if (is_dot_or_dotdot(e->d_name)) continue; then = rerere_last_used_at(e->d_name); if (then) { cutoff = cutoff_resolve; } else { then = rerere_created_at(e->d_name); if (!then) continue; cutoff = cutoff_noresolve; } if (then < now - cutoff * 86400) string_list_append(&to_remove, e->d_name); } closedir(dir); for (i = 0; i < to_remove.nr; i++) unlink_rr_item(to_remove.items[i].string); string_list_clear(&to_remove, 0); } void rerere_clear(struct string_list *merge_rr) { int i; for (i = 0; i < merge_rr->nr; i++) { const char *name = (const char *)merge_rr->items[i].util; if (!has_rerere_resolution(name)) unlink_rr_item(name); } unlink_or_warn(git_path("MERGE_RR")); }
gpl-2.0
ch33kybutt/D3v1l-kernel
fs/logfs/dir.c
109
21625
/* * fs/logfs/dir.c - directory-related code * * As should be obvious for Linux kernel code, license is GPLv2 * * Copyright (c) 2005-2008 Joern Engel <joern@logfs.org> */ #include "logfs.h" #include <linux/slab.h> /* * Atomic dir operations * * Directory operations are by default not atomic. Dentries and Inodes are * created/removed/altered in separate operations. Therefore we need to do * a small amount of journaling. * * Create, link, mkdir, mknod and symlink all share the same function to do * the work: __logfs_create. This function works in two atomic steps: * 1. allocate inode (remember in journal) * 2. allocate dentry (clear journal) * * As we can only get interrupted between the two, when the inode we just * created is simply stored in the anchor. On next mount, if we were * interrupted, we delete the inode. From a users point of view the * operation never happened. * * Unlink and rmdir also share the same function: unlink. Again, this * function works in two atomic steps * 1. remove dentry (remember inode in journal) * 2. unlink inode (clear journal) * * And again, on the next mount, if we were interrupted, we delete the inode. * From a users point of view the operation succeeded. * * Rename is the real pain to deal with, harder than all the other methods * combined. Depending on the circumstances we can run into three cases. * A "target rename" where the target dentry already existed, a "local * rename" where both parent directories are identical or a "cross-directory * rename" in the remaining case. * * Local rename is atomic, as the old dentry is simply rewritten with a new * name. * * Cross-directory rename works in two steps, similar to __logfs_create and * logfs_unlink: * 1. Write new dentry (remember old dentry in journal) * 2. Remove old dentry (clear journal) * * Here we remember a dentry instead of an inode. On next mount, if we were * interrupted, we delete the dentry. From a users point of view, the * operation succeeded. 
* * Target rename works in three atomic steps: * 1. Attach old inode to new dentry (remember old dentry and new inode) * 2. Remove old dentry (still remember the new inode) * 3. Remove victim inode * * Here we remember both an inode an a dentry. If we get interrupted * between steps 1 and 2, we delete both the dentry and the inode. If * we get interrupted between steps 2 and 3, we delete just the inode. * In either case, the remaining objects are deleted on next mount. From * a users point of view, the operation succeeded. */ static int write_dir(struct inode *dir, struct logfs_disk_dentry *dd, loff_t pos) { return logfs_inode_write(dir, dd, sizeof(*dd), pos, WF_LOCK, NULL); } static int write_inode(struct inode *inode) { return __logfs_write_inode(inode, WF_LOCK); } static s64 dir_seek_data(struct inode *inode, s64 pos) { s64 new_pos = logfs_seek_data(inode, pos); return max(pos, new_pos - 1); } static int beyond_eof(struct inode *inode, loff_t bix) { loff_t pos = bix << inode->i_sb->s_blocksize_bits; return pos >= i_size_read(inode); } /* * Prime value was chosen to be roughly 256 + 26. r5 hash uses 11, * so short names (len <= 9) don't even occupy the complete 32bit name * space. A prime >256 ensures short names quickly spread the 32bit * name space. Add about 26 for the estimated amount of information * of each character and pick a prime nearby, preferrably a bit-sparse * one. */ static u32 hash_32(const char *s, int len, u32 seed) { u32 hash = seed; int i; for (i = 0; i < len; i++) hash = hash * 293 + s[i]; return hash; } /* * We have to satisfy several conflicting requirements here. Small * directories should stay fairly compact and not require too many * indirect blocks. The number of possible locations for a given hash * should be small to make lookup() fast. And we should try hard not * to overflow the 32bit name space or nfs and 32bit host systems will * be unhappy. * * So we use the following scheme. 
First we reduce the hash to 0..15 * and try a direct block. If that is occupied we reduce the hash to * 16..255 and try an indirect block. Same for 2x and 3x indirect * blocks. Lastly we reduce the hash to 0x800_0000 .. 0xffff_ffff, * but use buckets containing eight entries instead of a single one. * * Using 16 entries should allow for a reasonable amount of hash * collisions, so the 32bit name space can be packed fairly tight * before overflowing. Oh and currently we don't overflow but return * and error. * * How likely are collisions? Doing the appropriate math is beyond me * and the Bronstein textbook. But running a test program to brute * force collisions for a couple of days showed that on average the * first collision occurs after 598M entries, with 290M being the * smallest result. Obviously 21 entries could already cause a * collision if all entries are carefully chosen. */ static pgoff_t hash_index(u32 hash, int round) { u32 i0_blocks = I0_BLOCKS; u32 i1_blocks = I1_BLOCKS; u32 i2_blocks = I2_BLOCKS; u32 i3_blocks = I3_BLOCKS; switch (round) { case 0: return hash % i0_blocks; case 1: return i0_blocks + hash % (i1_blocks - i0_blocks); case 2: return i1_blocks + hash % (i2_blocks - i1_blocks); case 3: return i2_blocks + hash % (i3_blocks - i2_blocks); case 4 ... 
19: return i3_blocks + 16 * (hash % (((1<<31) - i3_blocks) / 16)) + round - 4; } BUG(); } static struct page *logfs_get_dd_page(struct inode *dir, struct dentry *dentry) { struct qstr *name = &dentry->d_name; struct page *page; struct logfs_disk_dentry *dd; u32 hash = hash_32(name->name, name->len, 0); pgoff_t index; int round; if (name->len > LOGFS_MAX_NAMELEN) return ERR_PTR(-ENAMETOOLONG); for (round = 0; round < 20; round++) { index = hash_index(hash, round); if (beyond_eof(dir, index)) return NULL; if (!logfs_exist_block(dir, index)) continue; page = read_cache_page(dir->i_mapping, index, (filler_t *)logfs_readpage, NULL); if (IS_ERR(page)) return page; dd = kmap_atomic(page, KM_USER0); BUG_ON(dd->namelen == 0); if (name->len != be16_to_cpu(dd->namelen) || memcmp(name->name, dd->name, name->len)) { kunmap_atomic(dd, KM_USER0); page_cache_release(page); continue; } kunmap_atomic(dd, KM_USER0); return page; } return NULL; } static int logfs_remove_inode(struct inode *inode) { int ret; inode->i_nlink--; ret = write_inode(inode); LOGFS_BUG_ON(ret, inode->i_sb); return ret; } static void abort_transaction(struct inode *inode, struct logfs_transaction *ta) { if (logfs_inode(inode)->li_block) logfs_inode(inode)->li_block->ta = NULL; kfree(ta); } static int logfs_unlink(struct inode *dir, struct dentry *dentry) { struct logfs_super *super = logfs_super(dir->i_sb); struct inode *inode = dentry->d_inode; struct logfs_transaction *ta; struct page *page; pgoff_t index; int ret; ta = kzalloc(sizeof(*ta), GFP_KERNEL); if (!ta) return -ENOMEM; ta->state = UNLINK_1; ta->ino = inode->i_ino; inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; page = logfs_get_dd_page(dir, dentry); if (!page) { kfree(ta); return -ENOENT; } if (IS_ERR(page)) { kfree(ta); return PTR_ERR(page); } index = page->index; page_cache_release(page); mutex_lock(&super->s_dirop_mutex); logfs_add_transaction(dir, ta); ret = logfs_delete(dir, index, NULL); if (!ret) ret = write_inode(dir); if (ret) { 
abort_transaction(dir, ta); printk(KERN_ERR"LOGFS: unable to delete inode\n"); goto out; } ta->state = UNLINK_2; logfs_add_transaction(inode, ta); ret = logfs_remove_inode(inode); out: mutex_unlock(&super->s_dirop_mutex); return ret; } static inline int logfs_empty_dir(struct inode *dir) { u64 data; data = logfs_seek_data(dir, 0) << dir->i_sb->s_blocksize_bits; return data >= i_size_read(dir); } static int logfs_rmdir(struct inode *dir, struct dentry *dentry) { struct inode *inode = dentry->d_inode; if (!logfs_empty_dir(inode)) return -ENOTEMPTY; return logfs_unlink(dir, dentry); } /* FIXME: readdir currently has it's own dir_walk code. I don't see a good * way to combine the two copies */ #define IMPLICIT_NODES 2 static int __logfs_readdir(struct file *file, void *buf, filldir_t filldir) { struct inode *dir = file->f_dentry->d_inode; loff_t pos = file->f_pos - IMPLICIT_NODES; struct page *page; struct logfs_disk_dentry *dd; int full; BUG_ON(pos < 0); for (;; pos++) { if (beyond_eof(dir, pos)) break; if (!logfs_exist_block(dir, pos)) { /* deleted dentry */ pos = dir_seek_data(dir, pos); continue; } page = read_cache_page(dir->i_mapping, pos, (filler_t *)logfs_readpage, NULL); if (IS_ERR(page)) return PTR_ERR(page); dd = kmap(page); BUG_ON(dd->namelen == 0); full = filldir(buf, (char *)dd->name, be16_to_cpu(dd->namelen), pos, be64_to_cpu(dd->ino), dd->type); kunmap(page); page_cache_release(page); if (full) break; } file->f_pos = pos + IMPLICIT_NODES; return 0; } static int logfs_readdir(struct file *file, void *buf, filldir_t filldir) { struct inode *inode = file->f_dentry->d_inode; ino_t pino = parent_ino(file->f_dentry); int err; if (file->f_pos < 0) return -EINVAL; if (file->f_pos == 0) { if (filldir(buf, ".", 1, 1, inode->i_ino, DT_DIR) < 0) return 0; file->f_pos++; } if (file->f_pos == 1) { if (filldir(buf, "..", 2, 2, pino, DT_DIR) < 0) return 0; file->f_pos++; } err = __logfs_readdir(file, buf, filldir); return err; } static void logfs_set_name(struct 
logfs_disk_dentry *dd, struct qstr *name) { dd->namelen = cpu_to_be16(name->len); memcpy(dd->name, name->name, name->len); } static struct dentry *logfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct page *page; struct logfs_disk_dentry *dd; pgoff_t index; u64 ino = 0; struct inode *inode; page = logfs_get_dd_page(dir, dentry); if (IS_ERR(page)) return ERR_CAST(page); if (!page) { d_add(dentry, NULL); return NULL; } index = page->index; dd = kmap_atomic(page, KM_USER0); ino = be64_to_cpu(dd->ino); kunmap_atomic(dd, KM_USER0); page_cache_release(page); inode = logfs_iget(dir->i_sb, ino); if (IS_ERR(inode)) { printk(KERN_ERR"LogFS: Cannot read inode #%llx for dentry (%lx, %lx)n", ino, dir->i_ino, index); return ERR_CAST(inode); } return d_splice_alias(inode, dentry); } static void grow_dir(struct inode *dir, loff_t index) { index = (index + 1) << dir->i_sb->s_blocksize_bits; if (i_size_read(dir) < index) i_size_write(dir, index); } static int logfs_write_dir(struct inode *dir, struct dentry *dentry, struct inode *inode) { struct page *page; struct logfs_disk_dentry *dd; u32 hash = hash_32(dentry->d_name.name, dentry->d_name.len, 0); pgoff_t index; int round, err; for (round = 0; round < 20; round++) { index = hash_index(hash, round); if (logfs_exist_block(dir, index)) continue; page = find_or_create_page(dir->i_mapping, index, GFP_KERNEL); if (!page) return -ENOMEM; dd = kmap_atomic(page, KM_USER0); memset(dd, 0, sizeof(*dd)); dd->ino = cpu_to_be64(inode->i_ino); dd->type = logfs_type(inode); logfs_set_name(dd, &dentry->d_name); kunmap_atomic(dd, KM_USER0); err = logfs_write_buf(dir, page, WF_LOCK); unlock_page(page); page_cache_release(page); if (!err) grow_dir(dir, index); return err; } /* FIXME: Is there a better return value? In most cases neither * the filesystem nor the directory are full. But we have had * too many collisions for this particular hash and no fallback. 
*/ return -ENOSPC; } static int __logfs_create(struct inode *dir, struct dentry *dentry, struct inode *inode, const char *dest, long destlen) { struct logfs_super *super = logfs_super(dir->i_sb); struct logfs_inode *li = logfs_inode(inode); struct logfs_transaction *ta; int ret; ta = kzalloc(sizeof(*ta), GFP_KERNEL); if (!ta) { inode->i_nlink--; iput(inode); return -ENOMEM; } ta->state = CREATE_1; ta->ino = inode->i_ino; mutex_lock(&super->s_dirop_mutex); logfs_add_transaction(inode, ta); if (dest) { /* symlink */ ret = logfs_inode_write(inode, dest, destlen, 0, WF_LOCK, NULL); if (!ret) ret = write_inode(inode); } else { /* creat/mkdir/mknod */ ret = write_inode(inode); } if (ret) { abort_transaction(inode, ta); li->li_flags |= LOGFS_IF_STILLBORN; /* FIXME: truncate symlink */ inode->i_nlink--; iput(inode); goto out; } ta->state = CREATE_2; logfs_add_transaction(dir, ta); ret = logfs_write_dir(dir, dentry, inode); /* sync directory */ if (!ret) ret = write_inode(dir); if (ret) { logfs_del_transaction(dir, ta); ta->state = CREATE_2; logfs_add_transaction(inode, ta); logfs_remove_inode(inode); iput(inode); goto out; } d_instantiate(dentry, inode); out: mutex_unlock(&super->s_dirop_mutex); return ret; } static int logfs_mkdir(struct inode *dir, struct dentry *dentry, int mode) { struct inode *inode; /* * FIXME: why do we have to fill in S_IFDIR, while the mode is * correct for mknod, creat, etc.? Smells like the vfs *should* * do it for us but for some reason fails to do so. 
*/ inode = logfs_new_inode(dir, S_IFDIR | mode); if (IS_ERR(inode)) return PTR_ERR(inode); inode->i_op = &logfs_dir_iops; inode->i_fop = &logfs_dir_fops; return __logfs_create(dir, dentry, inode, NULL, 0); } static int logfs_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd) { struct inode *inode; inode = logfs_new_inode(dir, mode); if (IS_ERR(inode)) return PTR_ERR(inode); inode->i_op = &logfs_reg_iops; inode->i_fop = &logfs_reg_fops; inode->i_mapping->a_ops = &logfs_reg_aops; return __logfs_create(dir, dentry, inode, NULL, 0); } static int logfs_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) { struct inode *inode; if (dentry->d_name.len > LOGFS_MAX_NAMELEN) return -ENAMETOOLONG; inode = logfs_new_inode(dir, mode); if (IS_ERR(inode)) return PTR_ERR(inode); init_special_inode(inode, mode, rdev); return __logfs_create(dir, dentry, inode, NULL, 0); } static int logfs_symlink(struct inode *dir, struct dentry *dentry, const char *target) { struct inode *inode; size_t destlen = strlen(target) + 1; if (destlen > dir->i_sb->s_blocksize) return -ENAMETOOLONG; inode = logfs_new_inode(dir, S_IFLNK | 0777); if (IS_ERR(inode)) return PTR_ERR(inode); inode->i_op = &logfs_symlink_iops; inode->i_mapping->a_ops = &logfs_reg_aops; return __logfs_create(dir, dentry, inode, target, destlen); } static int logfs_permission(struct inode *inode, int mask, unsigned int flags) { if (flags & IPERM_FLAG_RCU) return -ECHILD; return generic_permission(inode, mask, flags, NULL); } static int logfs_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = old_dentry->d_inode; if (inode->i_nlink >= LOGFS_LINK_MAX) return -EMLINK; inode->i_ctime = dir->i_ctime = dir->i_mtime = CURRENT_TIME; ihold(inode); inode->i_nlink++; mark_inode_dirty_sync(inode); return __logfs_create(dir, dentry, inode, NULL, 0); } static int logfs_get_dd(struct inode *dir, struct dentry *dentry, struct logfs_disk_dentry *dd, loff_t 
*pos) { struct page *page; void *map; page = logfs_get_dd_page(dir, dentry); if (IS_ERR(page)) return PTR_ERR(page); *pos = page->index; map = kmap_atomic(page, KM_USER0); memcpy(dd, map, sizeof(*dd)); kunmap_atomic(map, KM_USER0); page_cache_release(page); return 0; } static int logfs_delete_dd(struct inode *dir, loff_t pos) { /* * Getting called with pos somewhere beyond eof is either a goofup * within this file or means someone maliciously edited the * (crc-protected) journal. */ BUG_ON(beyond_eof(dir, pos)); dir->i_ctime = dir->i_mtime = CURRENT_TIME; log_dir(" Delete dentry (%lx, %llx)\n", dir->i_ino, pos); return logfs_delete(dir, pos, NULL); } /* * Cross-directory rename, target does not exist. Just a little nasty. * Create a new dentry in the target dir, then remove the old dentry, * all the while taking care to remember our operation in the journal. */ static int logfs_rename_cross(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct logfs_super *super = logfs_super(old_dir->i_sb); struct logfs_disk_dentry dd; struct logfs_transaction *ta; loff_t pos; int err; /* 1. locate source dd */ err = logfs_get_dd(old_dir, old_dentry, &dd, &pos); if (err) return err; ta = kzalloc(sizeof(*ta), GFP_KERNEL); if (!ta) return -ENOMEM; ta->state = CROSS_RENAME_1; ta->dir = old_dir->i_ino; ta->pos = pos; /* 2. write target dd */ mutex_lock(&super->s_dirop_mutex); logfs_add_transaction(new_dir, ta); err = logfs_write_dir(new_dir, new_dentry, old_dentry->d_inode); if (!err) err = write_inode(new_dir); if (err) { super->s_rename_dir = 0; super->s_rename_pos = 0; abort_transaction(new_dir, ta); goto out; } /* 3. 
remove source dd */ ta->state = CROSS_RENAME_2; logfs_add_transaction(old_dir, ta); err = logfs_delete_dd(old_dir, pos); if (!err) err = write_inode(old_dir); LOGFS_BUG_ON(err, old_dir->i_sb); out: mutex_unlock(&super->s_dirop_mutex); return err; } static int logfs_replace_inode(struct inode *dir, struct dentry *dentry, struct logfs_disk_dentry *dd, struct inode *inode) { loff_t pos; int err; err = logfs_get_dd(dir, dentry, dd, &pos); if (err) return err; dd->ino = cpu_to_be64(inode->i_ino); dd->type = logfs_type(inode); err = write_dir(dir, dd, pos); if (err) return err; log_dir("Replace dentry (%lx, %llx) %s -> %llx\n", dir->i_ino, pos, dd->name, be64_to_cpu(dd->ino)); return write_inode(dir); } /* Target dentry exists - the worst case. We need to attach the source * inode to the target dentry, then remove the orphaned target inode and * source dentry. */ static int logfs_rename_target(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct logfs_super *super = logfs_super(old_dir->i_sb); struct inode *old_inode = old_dentry->d_inode; struct inode *new_inode = new_dentry->d_inode; int isdir = S_ISDIR(old_inode->i_mode); struct logfs_disk_dentry dd; struct logfs_transaction *ta; loff_t pos; int err; BUG_ON(isdir != S_ISDIR(new_inode->i_mode)); if (isdir) { if (!logfs_empty_dir(new_inode)) return -ENOTEMPTY; } /* 1. locate source dd */ err = logfs_get_dd(old_dir, old_dentry, &dd, &pos); if (err) return err; ta = kzalloc(sizeof(*ta), GFP_KERNEL); if (!ta) return -ENOMEM; ta->state = TARGET_RENAME_1; ta->dir = old_dir->i_ino; ta->pos = pos; ta->ino = new_inode->i_ino; /* 2. attach source inode to target dd */ mutex_lock(&super->s_dirop_mutex); logfs_add_transaction(new_dir, ta); err = logfs_replace_inode(new_dir, new_dentry, &dd, old_inode); if (err) { super->s_rename_dir = 0; super->s_rename_pos = 0; super->s_victim_ino = 0; abort_transaction(new_dir, ta); goto out; } /* 3. 
remove source dd */ ta->state = TARGET_RENAME_2; logfs_add_transaction(old_dir, ta); err = logfs_delete_dd(old_dir, pos); if (!err) err = write_inode(old_dir); LOGFS_BUG_ON(err, old_dir->i_sb); /* 4. remove target inode */ ta->state = TARGET_RENAME_3; logfs_add_transaction(new_inode, ta); err = logfs_remove_inode(new_inode); out: mutex_unlock(&super->s_dirop_mutex); return err; } static int logfs_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { if (new_dentry->d_inode) return logfs_rename_target(old_dir, old_dentry, new_dir, new_dentry); return logfs_rename_cross(old_dir, old_dentry, new_dir, new_dentry); } /* No locking done here, as this is called before .get_sb() returns. */ int logfs_replay_journal(struct super_block *sb) { struct logfs_super *super = logfs_super(sb); struct inode *inode; u64 ino, pos; int err; if (super->s_victim_ino) { /* delete victim inode */ ino = super->s_victim_ino; printk(KERN_INFO"LogFS: delete unmapped inode #%llx\n", ino); inode = logfs_iget(sb, ino); if (IS_ERR(inode)) goto fail; LOGFS_BUG_ON(i_size_read(inode) > 0, sb); super->s_victim_ino = 0; err = logfs_remove_inode(inode); iput(inode); if (err) { super->s_victim_ino = ino; goto fail; } } if (super->s_rename_dir) { /* delete old dd from rename */ ino = super->s_rename_dir; pos = super->s_rename_pos; printk(KERN_INFO"LogFS: delete unbacked dentry (%llx, %llx)\n", ino, pos); inode = logfs_iget(sb, ino); if (IS_ERR(inode)) goto fail; super->s_rename_dir = 0; super->s_rename_pos = 0; err = logfs_delete_dd(inode, pos); iput(inode); if (err) { super->s_rename_dir = ino; super->s_rename_pos = pos; goto fail; } } return 0; fail: LOGFS_BUG(sb); return -EIO; } const struct inode_operations logfs_symlink_iops = { .readlink = generic_readlink, .follow_link = page_follow_link_light, }; const struct inode_operations logfs_dir_iops = { .create = logfs_create, .link = logfs_link, .lookup = logfs_lookup, .mkdir = logfs_mkdir, .mknod = 
logfs_mknod, .rename = logfs_rename, .rmdir = logfs_rmdir, .permission = logfs_permission, .symlink = logfs_symlink, .unlink = logfs_unlink, }; const struct file_operations logfs_dir_fops = { .fsync = logfs_fsync, .unlocked_ioctl = logfs_ioctl, .readdir = logfs_readdir, .read = generic_read_dir, .llseek = default_llseek, };
gpl-2.0
chijure/android_kernel_huawei_y210
drivers/staging/tm6000/tm6000-video.c
109
37926
/* * tm6000-video.c - driver for TM5600/TM6000/TM6010 USB video capture devices * * Copyright (C) 2006-2007 Mauro Carvalho Chehab <mchehab@infradead.org> * * Copyright (C) 2007 Michel Ludwig <michel.ludwig@gmail.com> * - Fixed module load/unload * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation version 2 * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/random.h> #include <linux/version.h> #include <linux/usb.h> #include <linux/videodev2.h> #include <media/v4l2-ioctl.h> #include <linux/interrupt.h> #include <linux/kthread.h> #include <linux/highmem.h> #include <linux/freezer.h> #include "tm6000-regs.h" #include "tm6000.h" #define BUFFER_TIMEOUT msecs_to_jiffies(2000) /* 2 seconds */ /* Limits minimum and default number of buffers */ #define TM6000_MIN_BUF 4 #define TM6000_DEF_BUF 8 #define TM6000_MAX_ISO_PACKETS 46 /* Max number of ISO packets */ /* Declare static vars that will be used as parameters */ static unsigned int vid_limit = 16; /* Video memory limit, in Mb */ static int video_nr = -1; /* /dev/videoN, -1 for autodetect */ /* Debug level */ int tm6000_debug; EXPORT_SYMBOL_GPL(tm6000_debug); /* supported controls */ static struct v4l2_queryctrl tm6000_qctrl[] = { { .id = V4L2_CID_BRIGHTNESS, .type 
= V4L2_CTRL_TYPE_INTEGER, .name = "Brightness", .minimum = 0, .maximum = 255, .step = 1, .default_value = 54, .flags = 0, }, { .id = V4L2_CID_CONTRAST, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Contrast", .minimum = 0, .maximum = 255, .step = 0x1, .default_value = 119, .flags = 0, }, { .id = V4L2_CID_SATURATION, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Saturation", .minimum = 0, .maximum = 255, .step = 0x1, .default_value = 112, .flags = 0, }, { .id = V4L2_CID_HUE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Hue", .minimum = -128, .maximum = 127, .step = 0x1, .default_value = 0, .flags = 0, } }; static int qctl_regs[ARRAY_SIZE(tm6000_qctrl)]; static struct tm6000_fmt format[] = { { .name = "4:2:2, packed, YVY2", .fourcc = V4L2_PIX_FMT_YUYV, .depth = 16, }, { .name = "4:2:2, packed, UYVY", .fourcc = V4L2_PIX_FMT_UYVY, .depth = 16, }, { .name = "A/V + VBI mux packet", .fourcc = V4L2_PIX_FMT_TM6000, .depth = 16, } }; /* ------------------------------------------------------------------ * DMA and thread functions * ------------------------------------------------------------------ */ #define norm_maxw(a) 720 #define norm_maxh(a) 576 #define norm_minw(a) norm_maxw(a) #define norm_minh(a) norm_maxh(a) /* * video-buf generic routine to get the next available buffer */ static inline void get_next_buf(struct tm6000_dmaqueue *dma_q, struct tm6000_buffer **buf) { struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq); char *outp; if (list_empty(&dma_q->active)) { dprintk(dev, V4L2_DEBUG_QUEUE, "No active queue to serve\n"); *buf = NULL; return; } *buf = list_entry(dma_q->active.next, struct tm6000_buffer, vb.queue); if (!buf) return; /* Cleans up buffer - Usefull for testing for frame/URB loss */ outp = videobuf_to_vmalloc(&(*buf)->vb); return; } /* * Announces that a buffer were filled and request the next */ static inline void buffer_filled(struct tm6000_core *dev, struct tm6000_dmaqueue *dma_q, struct tm6000_buffer *buf) { /* Advice that buffer was filled */ 
dprintk(dev, V4L2_DEBUG_ISOC, "[%p/%d] wakeup\n", buf, buf->vb.i); buf->vb.state = VIDEOBUF_DONE; buf->vb.field_count++; do_gettimeofday(&buf->vb.ts); list_del(&buf->vb.queue); wake_up(&buf->vb.done); } const char *tm6000_msg_type[] = { "unknown(0)", /* 0 */ "video", /* 1 */ "audio", /* 2 */ "vbi", /* 3 */ "pts", /* 4 */ "err", /* 5 */ "unknown(6)", /* 6 */ "unknown(7)", /* 7 */ }; /* * Identify the tm5600/6000 buffer header type and properly handles */ static int copy_streams(u8 *data, unsigned long len, struct urb *urb) { struct tm6000_dmaqueue *dma_q = urb->context; struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq); u8 *ptr = data, *endp = data+len, c; unsigned long header = 0; int rc = 0; unsigned int cmd, cpysize, pktsize, size, field, block, line, pos = 0; struct tm6000_buffer *vbuf; char *voutp = NULL; unsigned int linewidth; /* get video buffer */ get_next_buf(dma_q, &vbuf); if (!vbuf) return rc; voutp = videobuf_to_vmalloc(&vbuf->vb); if (!voutp) return 0; for (ptr = data; ptr < endp;) { if (!dev->isoc_ctl.cmd) { /* Header */ if (dev->isoc_ctl.tmp_buf_len > 0) { /* from last urb or packet */ header = dev->isoc_ctl.tmp_buf; if (4 - dev->isoc_ctl.tmp_buf_len > 0) { memcpy((u8 *)&header + dev->isoc_ctl.tmp_buf_len, ptr, 4 - dev->isoc_ctl.tmp_buf_len); ptr += 4 - dev->isoc_ctl.tmp_buf_len; } dev->isoc_ctl.tmp_buf_len = 0; } else { if (ptr + 3 >= endp) { /* have incomplete header */ dev->isoc_ctl.tmp_buf_len = endp - ptr; memcpy(&dev->isoc_ctl.tmp_buf, ptr, dev->isoc_ctl.tmp_buf_len); return rc; } /* Seek for sync */ for (; ptr < endp - 3; ptr++) { if (*(ptr + 3) == 0x47) break; } /* Get message header */ header = *(unsigned long *)ptr; ptr += 4; } /* split the header fields */ c = (header >> 24) & 0xff; size = ((header & 0x7e) << 1); if (size > 0) size -= 4; block = (header >> 7) & 0xf; field = (header >> 11) & 0x1; line = (header >> 12) & 0x1ff; cmd = (header >> 21) & 0x7; /* Validates haeder fields */ if (size > TM6000_URB_MSG_LEN) size 
= TM6000_URB_MSG_LEN; pktsize = TM6000_URB_MSG_LEN; /* calculate position in buffer * and change the buffer */ switch (cmd) { case TM6000_URB_MSG_VIDEO: if ((dev->isoc_ctl.vfield != field) && (field == 1)) { /* Announces that a new buffer * were filled */ buffer_filled(dev, dma_q, vbuf); dprintk(dev, V4L2_DEBUG_ISOC, "new buffer filled\n"); get_next_buf(dma_q, &vbuf); if (!vbuf) return rc; voutp = videobuf_to_vmalloc(&vbuf->vb); if (!voutp) return rc; memset(voutp, 0, vbuf->vb.size); } linewidth = vbuf->vb.width << 1; pos = ((line << 1) - field - 1) * linewidth + block * TM6000_URB_MSG_LEN; /* Don't allow to write out of the buffer */ if (pos + size > vbuf->vb.size) cmd = TM6000_URB_MSG_ERR; dev->isoc_ctl.vfield = field; break; case TM6000_URB_MSG_VBI: break; case TM6000_URB_MSG_AUDIO: case TM6000_URB_MSG_PTS: size = pktsize; /* Size is always 180 bytes */ break; } } else { /* Continue the last copy */ cmd = dev->isoc_ctl.cmd; size = dev->isoc_ctl.size; pos = dev->isoc_ctl.pos; pktsize = dev->isoc_ctl.pktsize; } cpysize = (endp - ptr > size) ? size : endp - ptr; if (cpysize) { /* copy data in different buffers */ switch (cmd) { case TM6000_URB_MSG_VIDEO: /* Fills video buffer */ if (vbuf) memcpy(&voutp[pos], ptr, cpysize); break; case TM6000_URB_MSG_AUDIO: /* Need some code to copy audio buffer */ if (dev->fourcc == V4L2_PIX_FMT_YUYV) { /* Swap word bytes */ int i; for (i = 0; i < cpysize; i += 2) swab16s((u16 *)(ptr + i)); } tm6000_call_fillbuf(dev, TM6000_AUDIO, ptr, cpysize); break; case TM6000_URB_MSG_VBI: /* Need some code to copy vbi buffer */ break; case TM6000_URB_MSG_PTS: /* Need some code to copy pts */ break; } } if (ptr + pktsize > endp) { /* End of URB packet, but cmd processing is not * complete. 
Preserve the state for a next packet */ dev->isoc_ctl.pos = pos + cpysize; dev->isoc_ctl.size = size - cpysize; dev->isoc_ctl.cmd = cmd; dev->isoc_ctl.pktsize = pktsize - (endp - ptr); ptr += endp - ptr; } else { dev->isoc_ctl.cmd = 0; ptr += pktsize; } } return 0; } /* * Identify the tm5600/6000 buffer header type and properly handles */ static int copy_multiplexed(u8 *ptr, unsigned long len, struct urb *urb) { struct tm6000_dmaqueue *dma_q = urb->context; struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq); unsigned int pos = dev->isoc_ctl.pos, cpysize; int rc = 1; struct tm6000_buffer *buf; char *outp = NULL; get_next_buf(dma_q, &buf); if (buf) outp = videobuf_to_vmalloc(&buf->vb); if (!outp) return 0; while (len > 0) { cpysize = min(len, buf->vb.size-pos); memcpy(&outp[pos], ptr, cpysize); pos += cpysize; ptr += cpysize; len -= cpysize; if (pos >= buf->vb.size) { pos = 0; /* Announces that a new buffer were filled */ buffer_filled(dev, dma_q, buf); dprintk(dev, V4L2_DEBUG_ISOC, "new buffer filled\n"); get_next_buf(dma_q, &buf); if (!buf) break; outp = videobuf_to_vmalloc(&(buf->vb)); if (!outp) return rc; pos = 0; } } dev->isoc_ctl.pos = pos; return rc; } static inline void print_err_status(struct tm6000_core *dev, int packet, int status) { char *errmsg = "Unknown"; switch (status) { case -ENOENT: errmsg = "unlinked synchronuously"; break; case -ECONNRESET: errmsg = "unlinked asynchronuously"; break; case -ENOSR: errmsg = "Buffer error (overrun)"; break; case -EPIPE: errmsg = "Stalled (device not responding)"; break; case -EOVERFLOW: errmsg = "Babble (bad cable?)"; break; case -EPROTO: errmsg = "Bit-stuff error (bad cable?)"; break; case -EILSEQ: errmsg = "CRC/Timeout (could be anything)"; break; case -ETIME: errmsg = "Device does not respond"; break; } if (packet < 0) { dprintk(dev, V4L2_DEBUG_QUEUE, "URB status %d [%s].\n", status, errmsg); } else { dprintk(dev, V4L2_DEBUG_QUEUE, "URB packet %d, status %d [%s].\n", packet, status, errmsg); 
} } /* * Controls the isoc copy of each urb packet */ static inline int tm6000_isoc_copy(struct urb *urb) { struct tm6000_dmaqueue *dma_q = urb->context; struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq); int i, len = 0, rc = 1, status; char *p; if (urb->status < 0) { print_err_status(dev, -1, urb->status); return 0; } for (i = 0; i < urb->number_of_packets; i++) { status = urb->iso_frame_desc[i].status; if (status < 0) { print_err_status(dev, i, status); continue; } len = urb->iso_frame_desc[i].actual_length; if (len > 0) { p = urb->transfer_buffer + urb->iso_frame_desc[i].offset; if (!urb->iso_frame_desc[i].status) { if ((dev->fourcc) == V4L2_PIX_FMT_TM6000) { rc = copy_multiplexed(p, len, urb); if (rc <= 0) return rc; } else { copy_streams(p, len, urb); } } } } return rc; } /* ------------------------------------------------------------------ * URB control * ------------------------------------------------------------------ */ /* * IRQ callback, called by URB callback */ static void tm6000_irq_callback(struct urb *urb) { struct tm6000_dmaqueue *dma_q = urb->context; struct tm6000_core *dev = container_of(dma_q, struct tm6000_core, vidq); int i; if (!dev) return; spin_lock(&dev->slock); tm6000_isoc_copy(urb); spin_unlock(&dev->slock); /* Reset urb buffers */ for (i = 0; i < urb->number_of_packets; i++) { urb->iso_frame_desc[i].status = 0; urb->iso_frame_desc[i].actual_length = 0; } urb->status = usb_submit_urb(urb, GFP_ATOMIC); if (urb->status) tm6000_err("urb resubmit failed (error=%i)\n", urb->status); } /* * Stop and Deallocate URBs */ static void tm6000_uninit_isoc(struct tm6000_core *dev) { struct urb *urb; int i; dev->isoc_ctl.buf = NULL; for (i = 0; i < dev->isoc_ctl.num_bufs; i++) { urb = dev->isoc_ctl.urb[i]; if (urb) { usb_kill_urb(urb); usb_unlink_urb(urb); if (dev->isoc_ctl.transfer_buffer[i]) { usb_free_coherent(dev->udev, urb->transfer_buffer_length, dev->isoc_ctl.transfer_buffer[i], urb->transfer_dma); } usb_free_urb(urb); 
dev->isoc_ctl.urb[i] = NULL; } dev->isoc_ctl.transfer_buffer[i] = NULL; } kfree(dev->isoc_ctl.urb); kfree(dev->isoc_ctl.transfer_buffer); dev->isoc_ctl.urb = NULL; dev->isoc_ctl.transfer_buffer = NULL; dev->isoc_ctl.num_bufs = 0; } /* * Allocate URBs and start IRQ */ static int tm6000_prepare_isoc(struct tm6000_core *dev, unsigned int framesize) { struct tm6000_dmaqueue *dma_q = &dev->vidq; int i, j, sb_size, pipe, size, max_packets, num_bufs = 8; struct urb *urb; /* De-allocates all pending stuff */ tm6000_uninit_isoc(dev); /* Stop interrupt USB pipe */ tm6000_ir_int_stop(dev); usb_set_interface(dev->udev, dev->isoc_in.bInterfaceNumber, dev->isoc_in.bAlternateSetting); /* Start interrupt USB pipe */ tm6000_ir_int_start(dev); pipe = usb_rcvisocpipe(dev->udev, dev->isoc_in.endp->desc.bEndpointAddress & USB_ENDPOINT_NUMBER_MASK); size = usb_maxpacket(dev->udev, pipe, usb_pipeout(pipe)); if (size > dev->isoc_in.maxsize) size = dev->isoc_in.maxsize; dev->isoc_ctl.max_pkt_size = size; max_packets = (framesize + size - 1) / size; if (max_packets > TM6000_MAX_ISO_PACKETS) max_packets = TM6000_MAX_ISO_PACKETS; sb_size = max_packets * size; dev->isoc_ctl.num_bufs = num_bufs; dev->isoc_ctl.urb = kmalloc(sizeof(void *)*num_bufs, GFP_KERNEL); if (!dev->isoc_ctl.urb) { tm6000_err("cannot alloc memory for usb buffers\n"); return -ENOMEM; } dev->isoc_ctl.transfer_buffer = kmalloc(sizeof(void *)*num_bufs, GFP_KERNEL); if (!dev->isoc_ctl.transfer_buffer) { tm6000_err("cannot allocate memory for usbtransfer\n"); kfree(dev->isoc_ctl.urb); return -ENOMEM; } dprintk(dev, V4L2_DEBUG_QUEUE, "Allocating %d x %d packets" " (%d bytes) of %d bytes each to handle %u size\n", max_packets, num_bufs, sb_size, dev->isoc_in.maxsize, size); /* allocate urbs and transfer buffers */ for (i = 0; i < dev->isoc_ctl.num_bufs; i++) { urb = usb_alloc_urb(max_packets, GFP_KERNEL); if (!urb) { tm6000_err("cannot alloc isoc_ctl.urb %i\n", i); tm6000_uninit_isoc(dev); usb_free_urb(urb); return -ENOMEM; } 
dev->isoc_ctl.urb[i] = urb; dev->isoc_ctl.transfer_buffer[i] = usb_alloc_coherent(dev->udev, sb_size, GFP_KERNEL, &urb->transfer_dma); if (!dev->isoc_ctl.transfer_buffer[i]) { tm6000_err("unable to allocate %i bytes for transfer" " buffer %i%s\n", sb_size, i, in_interrupt() ? " while in int" : ""); tm6000_uninit_isoc(dev); return -ENOMEM; } memset(dev->isoc_ctl.transfer_buffer[i], 0, sb_size); usb_fill_bulk_urb(urb, dev->udev, pipe, dev->isoc_ctl.transfer_buffer[i], sb_size, tm6000_irq_callback, dma_q); urb->interval = dev->isoc_in.endp->desc.bInterval; urb->number_of_packets = max_packets; urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; for (j = 0; j < max_packets; j++) { urb->iso_frame_desc[j].offset = size * j; urb->iso_frame_desc[j].length = size; } } return 0; } static int tm6000_start_thread(struct tm6000_core *dev) { struct tm6000_dmaqueue *dma_q = &dev->vidq; int i; dma_q->frame = 0; dma_q->ini_jiffies = jiffies; init_waitqueue_head(&dma_q->wq); /* submit urbs and enables IRQ */ for (i = 0; i < dev->isoc_ctl.num_bufs; i++) { int rc = usb_submit_urb(dev->isoc_ctl.urb[i], GFP_ATOMIC); if (rc) { tm6000_err("submit of urb %i failed (error=%i)\n", i, rc); tm6000_uninit_isoc(dev); return rc; } } return 0; } /* ------------------------------------------------------------------ * Videobuf operations * ------------------------------------------------------------------ */ static int buffer_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size) { struct tm6000_fh *fh = vq->priv_data; *size = fh->fmt->depth * fh->width * fh->height >> 3; if (0 == *count) *count = TM6000_DEF_BUF; if (*count < TM6000_MIN_BUF) *count = TM6000_MIN_BUF; while (*size * *count > vid_limit * 1024 * 1024) (*count)--; return 0; } static void free_buffer(struct videobuf_queue *vq, struct tm6000_buffer *buf) { struct tm6000_fh *fh = vq->priv_data; struct tm6000_core *dev = fh->dev; unsigned long flags; if (in_interrupt()) BUG(); /* We used to wait for the buffer to 
finish here, but this didn't work because, as we were keeping the state as VIDEOBUF_QUEUED, videobuf_queue_cancel marked it as finished for us. (Also, it could wedge forever if the hardware was misconfigured.) This should be safe; by the time we get here, the buffer isn't queued anymore. If we ever start marking the buffers as VIDEOBUF_ACTIVE, it won't be, though. */ spin_lock_irqsave(&dev->slock, flags); if (dev->isoc_ctl.buf == buf) dev->isoc_ctl.buf = NULL; spin_unlock_irqrestore(&dev->slock, flags); videobuf_vmalloc_free(&buf->vb); buf->vb.state = VIDEOBUF_NEEDS_INIT; } static int buffer_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb, enum v4l2_field field) { struct tm6000_fh *fh = vq->priv_data; struct tm6000_buffer *buf = container_of(vb, struct tm6000_buffer, vb); struct tm6000_core *dev = fh->dev; int rc = 0, urb_init = 0; BUG_ON(NULL == fh->fmt); /* FIXME: It assumes depth=2 */ /* The only currently supported format is 16 bits/pixel */ buf->vb.size = fh->fmt->depth*fh->width*fh->height >> 3; if (0 != buf->vb.baddr && buf->vb.bsize < buf->vb.size) return -EINVAL; if (buf->fmt != fh->fmt || buf->vb.width != fh->width || buf->vb.height != fh->height || buf->vb.field != field) { buf->fmt = fh->fmt; buf->vb.width = fh->width; buf->vb.height = fh->height; buf->vb.field = field; buf->vb.state = VIDEOBUF_NEEDS_INIT; } if (VIDEOBUF_NEEDS_INIT == buf->vb.state) { if (0 != (rc = videobuf_iolock(vq, &buf->vb, NULL))) goto fail; urb_init = 1; } if (!dev->isoc_ctl.num_bufs) urb_init = 1; if (urb_init) { rc = tm6000_prepare_isoc(dev, buf->vb.size); if (rc < 0) goto fail; rc = tm6000_start_thread(dev); if (rc < 0) goto fail; } buf->vb.state = VIDEOBUF_PREPARED; return 0; fail: free_buffer(vq, buf); return rc; } static void buffer_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct tm6000_buffer *buf = container_of(vb, struct tm6000_buffer, vb); struct tm6000_fh *fh = vq->priv_data; struct tm6000_core *dev = fh->dev; struct tm6000_dmaqueue 
*vidq = &dev->vidq; buf->vb.state = VIDEOBUF_QUEUED; list_add_tail(&buf->vb.queue, &vidq->active); } static void buffer_release(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct tm6000_buffer *buf = container_of(vb, struct tm6000_buffer, vb); free_buffer(vq, buf); } static struct videobuf_queue_ops tm6000_video_qops = { .buf_setup = buffer_setup, .buf_prepare = buffer_prepare, .buf_queue = buffer_queue, .buf_release = buffer_release, }; /* ------------------------------------------------------------------ * IOCTL handling * ------------------------------------------------------------------ */ static bool is_res_read(struct tm6000_core *dev, struct tm6000_fh *fh) { /* Is the current fh handling it? if so, that's OK */ if (dev->resources == fh && dev->is_res_read) return true; return false; } static bool is_res_streaming(struct tm6000_core *dev, struct tm6000_fh *fh) { /* Is the current fh handling it? if so, that's OK */ if (dev->resources == fh) return true; return false; } static bool res_get(struct tm6000_core *dev, struct tm6000_fh *fh, bool is_res_read) { /* Is the current fh handling it? if so, that's OK */ if (dev->resources == fh && dev->is_res_read == is_res_read) return true; /* is it free? */ if (dev->resources) return false; /* grab it */ dev->resources = fh; dev->is_res_read = is_res_read; dprintk(dev, V4L2_DEBUG_RES_LOCK, "res: get\n"); return true; } static void res_free(struct tm6000_core *dev, struct tm6000_fh *fh) { /* Is the current fh handling it? 
if so, that's OK */ if (dev->resources != fh) return; dev->resources = NULL; dprintk(dev, V4L2_DEBUG_RES_LOCK, "res: put\n"); } /* ------------------------------------------------------------------ * IOCTL vidioc handling * ------------------------------------------------------------------ */ static int vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *cap) { strlcpy(cap->driver, "tm6000", sizeof(cap->driver)); strlcpy(cap->card, "Trident TVMaster TM5600/6000/6010", sizeof(cap->card)); cap->version = TM6000_VERSION; cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING | V4L2_CAP_TUNER | V4L2_CAP_READWRITE; return 0; } static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *f) { if (unlikely(f->index >= ARRAY_SIZE(format))) return -EINVAL; strlcpy(f->description, format[f->index].name, sizeof(f->description)); f->pixelformat = format[f->index].fourcc; return 0; } static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct tm6000_fh *fh = priv; f->fmt.pix.width = fh->width; f->fmt.pix.height = fh->height; f->fmt.pix.field = fh->vb_vidq.field; f->fmt.pix.pixelformat = fh->fmt->fourcc; f->fmt.pix.bytesperline = (f->fmt.pix.width * fh->fmt->depth) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; return 0; } static struct tm6000_fmt *format_by_fourcc(unsigned int fourcc) { unsigned int i; for (i = 0; i < ARRAY_SIZE(format); i++) if (format[i].fourcc == fourcc) return format+i; return NULL; } static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct tm6000_core *dev = ((struct tm6000_fh *)priv)->dev; struct tm6000_fmt *fmt; enum v4l2_field field; fmt = format_by_fourcc(f->fmt.pix.pixelformat); if (NULL == fmt) { dprintk(dev, V4L2_DEBUG_IOCTL_ARG, "Fourcc format (0x%08x)" " invalid.\n", f->fmt.pix.pixelformat); return -EINVAL; } field = f->fmt.pix.field; if (field == V4L2_FIELD_ANY) field = V4L2_FIELD_SEQ_TB; else 
if (V4L2_FIELD_INTERLACED != field) { dprintk(dev, V4L2_DEBUG_IOCTL_ARG, "Field type invalid.\n"); return -EINVAL; } tm6000_get_std_res(dev); f->fmt.pix.width = dev->width; f->fmt.pix.height = dev->height; f->fmt.pix.width &= ~0x01; f->fmt.pix.field = field; f->fmt.pix.bytesperline = (f->fmt.pix.width * fmt->depth) >> 3; f->fmt.pix.sizeimage = f->fmt.pix.height * f->fmt.pix.bytesperline; return 0; } /*FIXME: This seems to be generic enough to be at videodev2 */ static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *f) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; int ret = vidioc_try_fmt_vid_cap(file, fh, f); if (ret < 0) return ret; fh->fmt = format_by_fourcc(f->fmt.pix.pixelformat); fh->width = f->fmt.pix.width; fh->height = f->fmt.pix.height; fh->vb_vidq.field = f->fmt.pix.field; fh->type = f->type; dev->fourcc = f->fmt.pix.pixelformat; tm6000_set_fourcc_format(dev); return 0; } static int vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *p) { struct tm6000_fh *fh = priv; return videobuf_reqbufs(&fh->vb_vidq, p); } static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *p) { struct tm6000_fh *fh = priv; return videobuf_querybuf(&fh->vb_vidq, p); } static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *p) { struct tm6000_fh *fh = priv; return videobuf_qbuf(&fh->vb_vidq, p); } static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *p) { struct tm6000_fh *fh = priv; return videobuf_dqbuf(&fh->vb_vidq, p, file->f_flags & O_NONBLOCK); } static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; if (i != fh->type) return -EINVAL; if (!res_get(dev, fh, false)) return -EBUSY; return (videobuf_streamon(&fh->vb_vidq)); } static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type 
i) { struct tm6000_fh *fh=priv; struct tm6000_core *dev = fh->dev; if (fh->type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; if (i != fh->type) return -EINVAL; videobuf_streamoff(&fh->vb_vidq); res_free(dev,fh); return (0); } static int vidioc_s_std (struct file *file, void *priv, v4l2_std_id *norm) { int rc=0; struct tm6000_fh *fh=priv; struct tm6000_core *dev = fh->dev; dev->norm = *norm; rc = tm6000_init_analog_mode(dev); fh->width = dev->width; fh->height = dev->height; if (rc<0) return rc; v4l2_device_call_all(&dev->v4l2_dev, 0, core, s_std, dev->norm); return 0; } static int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *inp) { switch (inp->index) { case TM6000_INPUT_TV: inp->type = V4L2_INPUT_TYPE_TUNER; strcpy(inp->name, "Television"); break; case TM6000_INPUT_COMPOSITE: inp->type = V4L2_INPUT_TYPE_CAMERA; strcpy(inp->name, "Composite"); break; case TM6000_INPUT_SVIDEO: inp->type = V4L2_INPUT_TYPE_CAMERA; strcpy(inp->name, "S-Video"); break; default: return -EINVAL; } inp->std = TM6000_STD; return 0; } static int vidioc_g_input(struct file *file, void *priv, unsigned int *i) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; *i = dev->input; return 0; } static int vidioc_s_input(struct file *file, void *priv, unsigned int i) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; int rc = 0; char buf[1]; switch (i) { case TM6000_INPUT_TV: dev->input = i; *buf = 0; break; case TM6000_INPUT_COMPOSITE: case TM6000_INPUT_SVIDEO: dev->input = i; *buf = 1; break; default: return -EINVAL; } rc = tm6000_read_write_usb(dev, USB_DIR_OUT | USB_TYPE_VENDOR, REQ_03_SET_GET_MCU_PIN, 0x03, 1, buf, 1); if (!rc) { dev->input = i; rc = vidioc_s_std(file, priv, &dev->vfd->current_norm); } return rc; } /* --- controls ---------------------------------------------- */ static int vidioc_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *qc) { int i; for (i = 0; i < ARRAY_SIZE(tm6000_qctrl); i++) if (qc->id && qc->id 
== tm6000_qctrl[i].id) { memcpy(qc, &(tm6000_qctrl[i]), sizeof(*qc)); return 0; } return -EINVAL; } static int vidioc_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; int val; /* FIXME: Probably, those won't work! Maybe we need shadow regs */ switch (ctrl->id) { case V4L2_CID_CONTRAST: val = tm6000_get_reg(dev, TM6010_REQ07_R08_LUMA_CONTRAST_ADJ, 0); break; case V4L2_CID_BRIGHTNESS: val = tm6000_get_reg(dev, TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ, 0); return 0; case V4L2_CID_SATURATION: val = tm6000_get_reg(dev, TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ, 0); return 0; case V4L2_CID_HUE: val = tm6000_get_reg(dev, TM6010_REQ07_R0B_CHROMA_HUE_PHASE_ADJ, 0); return 0; default: return -EINVAL; } if (val < 0) return val; ctrl->value = val; return 0; } static int vidioc_s_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; u8 val = ctrl->value; switch (ctrl->id) { case V4L2_CID_CONTRAST: tm6000_set_reg(dev, TM6010_REQ07_R08_LUMA_CONTRAST_ADJ, val); return 0; case V4L2_CID_BRIGHTNESS: tm6000_set_reg(dev, TM6010_REQ07_R09_LUMA_BRIGHTNESS_ADJ, val); return 0; case V4L2_CID_SATURATION: tm6000_set_reg(dev, TM6010_REQ07_R0A_CHROMA_SATURATION_ADJ, val); return 0; case V4L2_CID_HUE: tm6000_set_reg(dev, TM6010_REQ07_R0B_CHROMA_HUE_PHASE_ADJ, val); return 0; } return -EINVAL; } static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; if (unlikely(UNSET == dev->tuner_type)) return -EINVAL; if (0 != t->index) return -EINVAL; strcpy(t->name, "Television"); t->type = V4L2_TUNER_ANALOG_TV; t->capability = V4L2_TUNER_CAP_NORM; t->rangehigh = 0xffffffffUL; t->rxsubchans = V4L2_TUNER_SUB_MONO; return 0; } static int vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *t) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; if 
(UNSET == dev->tuner_type) return -EINVAL; if (0 != t->index) return -EINVAL; return 0; } static int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; if (unlikely(UNSET == dev->tuner_type)) return -EINVAL; f->type = V4L2_TUNER_ANALOG_TV; f->frequency = dev->freq; v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, g_frequency, f); return 0; } static int vidioc_s_frequency(struct file *file, void *priv, struct v4l2_frequency *f) { struct tm6000_fh *fh = priv; struct tm6000_core *dev = fh->dev; if (unlikely(f->type != V4L2_TUNER_ANALOG_TV)) return -EINVAL; if (unlikely(UNSET == dev->tuner_type)) return -EINVAL; if (unlikely(f->tuner != 0)) return -EINVAL; dev->freq = f->frequency; v4l2_device_call_all(&dev->v4l2_dev, 0, tuner, s_frequency, f); return 0; } /* ------------------------------------------------------------------ File operations for the device ------------------------------------------------------------------*/ static int tm6000_open(struct file *file) { struct video_device *vdev = video_devdata(file); struct tm6000_core *dev = video_drvdata(file); struct tm6000_fh *fh; enum v4l2_buf_type type = V4L2_BUF_TYPE_VIDEO_CAPTURE; int i, rc; printk(KERN_INFO "tm6000: open called (dev=%s)\n", video_device_node_name(vdev)); dprintk(dev, V4L2_DEBUG_OPEN, "tm6000: open called (dev=%s)\n", video_device_node_name(vdev)); /* If more than one user, mutex should be added */ dev->users++; dprintk(dev, V4L2_DEBUG_OPEN, "open dev=%s type=%s users=%d\n", video_device_node_name(vdev), v4l2_type_names[type], dev->users); /* allocate + initialize per filehandle data */ fh = kzalloc(sizeof(*fh), GFP_KERNEL); if (NULL == fh) { dev->users--; return -ENOMEM; } file->private_data = fh; fh->dev = dev; fh->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; dev->fourcc = format[0].fourcc; fh->fmt = format_by_fourcc(dev->fourcc); tm6000_get_std_res (dev); fh->width = dev->width; fh->height = dev->height; 
dprintk(dev, V4L2_DEBUG_OPEN, "Open: fh=0x%08lx, dev=0x%08lx, " "dev->vidq=0x%08lx\n", (unsigned long)fh,(unsigned long)dev,(unsigned long)&dev->vidq); dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty " "queued=%d\n",list_empty(&dev->vidq.queued)); dprintk(dev, V4L2_DEBUG_OPEN, "Open: list_empty " "active=%d\n",list_empty(&dev->vidq.active)); /* initialize hardware on analog mode */ rc = tm6000_init_analog_mode(dev); if (rc < 0) return rc; if (dev->mode != TM6000_MODE_ANALOG) { /* Put all controls at a sane state */ for (i = 0; i < ARRAY_SIZE(tm6000_qctrl); i++) qctl_regs[i] = tm6000_qctrl[i].default_value; dev->mode = TM6000_MODE_ANALOG; } videobuf_queue_vmalloc_init(&fh->vb_vidq, &tm6000_video_qops, NULL, &dev->slock, fh->type, V4L2_FIELD_INTERLACED, sizeof(struct tm6000_buffer), fh, &dev->lock); return 0; } static ssize_t tm6000_read(struct file *file, char __user *data, size_t count, loff_t *pos) { struct tm6000_fh *fh = file->private_data; if (fh->type==V4L2_BUF_TYPE_VIDEO_CAPTURE) { if (!res_get(fh->dev, fh, true)) return -EBUSY; return videobuf_read_stream(&fh->vb_vidq, data, count, pos, 0, file->f_flags & O_NONBLOCK); } return 0; } static unsigned int tm6000_poll(struct file *file, struct poll_table_struct *wait) { struct tm6000_fh *fh = file->private_data; struct tm6000_buffer *buf; if (V4L2_BUF_TYPE_VIDEO_CAPTURE != fh->type) return POLLERR; if (!!is_res_streaming(fh->dev, fh)) return POLLERR; if (!is_res_read(fh->dev, fh)) { /* streaming capture */ if (list_empty(&fh->vb_vidq.stream)) return POLLERR; buf = list_entry(fh->vb_vidq.stream.next,struct tm6000_buffer,vb.stream); } else { /* read() capture */ return videobuf_poll_stream(file, &fh->vb_vidq, wait); } poll_wait(file, &buf->vb.done, wait); if (buf->vb.state == VIDEOBUF_DONE || buf->vb.state == VIDEOBUF_ERROR) return POLLIN | POLLRDNORM; return 0; } static int tm6000_release(struct file *file) { struct tm6000_fh *fh = file->private_data; struct tm6000_core *dev = fh->dev; struct video_device *vdev = 
video_devdata(file); dprintk(dev, V4L2_DEBUG_OPEN, "tm6000: close called (dev=%s, users=%d)\n", video_device_node_name(vdev), dev->users); dev->users--; res_free(dev, fh); if (!dev->users) { tm6000_uninit_isoc(dev); videobuf_mmap_free(&fh->vb_vidq); } kfree(fh); return 0; } static int tm6000_mmap(struct file *file, struct vm_area_struct * vma) { struct tm6000_fh *fh = file->private_data; int ret; ret = videobuf_mmap_mapper(&fh->vb_vidq, vma); return ret; } static struct v4l2_file_operations tm6000_fops = { .owner = THIS_MODULE, .open = tm6000_open, .release = tm6000_release, .unlocked_ioctl = video_ioctl2, /* V4L2 ioctl handler */ .read = tm6000_read, .poll = tm6000_poll, .mmap = tm6000_mmap, }; static const struct v4l2_ioctl_ops video_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, .vidioc_s_std = vidioc_s_std, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_queryctrl = vidioc_queryctrl, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, .vidioc_reqbufs = vidioc_reqbufs, .vidioc_querybuf = vidioc_querybuf, .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, }; static struct video_device tm6000_template = { .name = "tm6000", .fops = &tm6000_fops, .ioctl_ops = &video_ioctl_ops, .release = video_device_release, .tvnorms = TM6000_STD, .current_norm = V4L2_STD_NTSC_M, }; /* ----------------------------------------------------------------- * Initialization and module stuff * ------------------------------------------------------------------ */ static struct video_device 
*vdev_init(struct tm6000_core *dev, const struct video_device *template, const char *type_name) { struct video_device *vfd; vfd = video_device_alloc(); if (NULL == vfd) return NULL; *vfd = *template; vfd->v4l2_dev = &dev->v4l2_dev; vfd->release = video_device_release; vfd->debug = tm6000_debug; vfd->lock = &dev->lock; snprintf(vfd->name, sizeof(vfd->name), "%s %s", dev->name, type_name); video_set_drvdata(vfd, dev); return vfd; } int tm6000_v4l2_register(struct tm6000_core *dev) { int ret = -1; dev->vfd = vdev_init(dev, &tm6000_template, "video"); if (!dev->vfd) { printk(KERN_INFO "%s: can't register video device\n", dev->name); return -ENOMEM; } /* init video dma queues */ INIT_LIST_HEAD(&dev->vidq.active); INIT_LIST_HEAD(&dev->vidq.queued); ret = video_register_device(dev->vfd, VFL_TYPE_GRABBER, video_nr); if (ret < 0) { printk(KERN_INFO "%s: can't register video device\n", dev->name); return ret; } printk(KERN_INFO "%s: registered device %s\n", dev->name, video_device_node_name(dev->vfd)); printk(KERN_INFO "Trident TVMaster TM5600/TM6000/TM6010 USB2 board (Load status: %d)\n", ret); return ret; } int tm6000_v4l2_unregister(struct tm6000_core *dev) { video_unregister_device(dev->vfd); return 0; } int tm6000_v4l2_exit(void) { return 0; } module_param(video_nr, int, 0); MODULE_PARM_DESC(video_nr, "Allow changing video device number"); module_param_named(debug, tm6000_debug, int, 0444); MODULE_PARM_DESC(debug, "activates debug info"); module_param(vid_limit, int, 0644); MODULE_PARM_DESC(vid_limit, "capture memory limit in megabytes");
gpl-2.0
cretin45/htc-kernel-pyramid
drivers/video/msm_8x60/mdp4_overlay_dsi_video.c
109
13191
/* Copyright (c) 2010, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/hrtimer.h> #include <linux/delay.h> #include <linux/io.h> #include <linux/semaphore.h> #include <linux/spinlock.h> #include <linux/fb.h> #include <linux/msm_mdp.h> #include <asm/system.h> #include <asm/mach-types.h> #include <mach/hardware.h> #include "mdp.h" #include "msm_fb.h" #include "mdp4.h" #include "mipi_dsi.h" #include <mach/debug_display.h> #define DSI_VIDEO_BASE 0xE0000 static int first_pixel_start_x; static int first_pixel_start_y; static int writeback_offset; static int writeback_size; static char *writeback_addr; static struct mdp4_overlay_pipe *dsi_pipe; static bool map_wb_address; unsigned char *blank_wb_buf; int mdp4_dsi_video_on(struct platform_device *pdev) { int dsi_width; int dsi_height; int dsi_bpp; int dsi_border_clr; int dsi_underflow_clr; int dsi_hsync_skew; int hsync_period; int hsync_ctrl; int vsync_period; int display_hctl; int display_v_start; int display_v_end; int active_hctl; int active_h_start; int active_h_end; int active_v_start; int active_v_end; int ctrl_polarity; int h_back_porch; int h_front_porch; int v_back_porch; int v_front_porch; int 
hsync_pulse_width; int vsync_pulse_width; int hsync_polarity; int vsync_polarity; int data_en_polarity; int hsync_start_x; int hsync_end_x; uint8 *buf; int bpp, ptype; struct fb_info *fbi; struct fb_var_screeninfo *var; struct msm_fb_data_type *mfd; struct mdp4_overlay_pipe *pipe; int ret; mfd = (struct msm_fb_data_type *)platform_get_drvdata(pdev); if (!mfd) return -ENODEV; if (mfd->key != MFD_KEY) return -EINVAL; fbi = mfd->fbi; var = &fbi->var; bpp = fbi->var.bits_per_pixel / 8; buf = (uint8 *) fbi->fix.smem_start; buf += fbi->var.xoffset * bpp + fbi->var.yoffset * fbi->fix.line_length; if (dsi_pipe == NULL) { ptype = mdp4_overlay_format2type(mfd->fb_imgType); if (ptype < 0) printk(KERN_INFO "%s: format2type failed\n", __func__); pipe = mdp4_overlay_pipe_alloc(ptype, MDP4_MIXER0, 0); if (pipe == NULL) { printk(KERN_INFO "%s: pipe_alloc failed\n", __func__); return -EBUSY; } pipe->pipe_used++; pipe->mixer_stage = MDP4_MIXER_STAGE_BASE; pipe->mixer_num = MDP4_MIXER0; pipe->src_format = mfd->fb_imgType; mdp4_overlay_panel_mode(pipe->mixer_num, MDP4_PANEL_DSI_VIDEO); ret = mdp4_overlay_format2pipe(pipe); if (ret < 0) printk(KERN_INFO "%s: format2type failed\n", __func__); dsi_pipe = pipe; /* keep it */ writeback_offset = mdp4_overlay_writeback_setup( fbi, pipe, buf, bpp); writeback_size = ALIGN(fbi->var.xres, 32) * fbi->var.yres * 3 * 2; writeback_addr = (char *)ioremap(dsi_pipe->blt_base, writeback_size); if (!map_wb_address) { blank_wb_buf = ioremap((unsigned long)writeback_offset, ALIGN(fbi->var.xres, 32) * fbi->var.yres * bpp); map_wb_address = 1; if (!blank_wb_buf) { PR_DISP_ERR("%s: blank_wb_buf ioremap failed!\n", __func__); map_wb_address = 0; } } } else { pipe = dsi_pipe; } /* MDP cmd block enable */ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE); if (is_mdp4_hw_reset()) { mdp4_hw_init(); outpdw(MDP_BASE + 0x0038, mdp4_display_intf); } pipe->src_height = fbi->var.yres; pipe->src_width = fbi->var.xres; pipe->src_h = fbi->var.yres; pipe->src_w = 
fbi->var.xres; pipe->src_y = 0; pipe->src_x = 0; pipe->srcp0_addr = (uint32) buf; pipe->srcp0_ystride = fbi->fix.line_length; pipe->bpp = bpp; if (pipe->src_format != mfd->fb_imgType) { pipe->src_format = mfd->fb_imgType; ret = mdp4_overlay_format2pipe(pipe); if (ret < 0) printk(KERN_INFO "%s: format2type failed\n", __func__); } mdp4_overlay_dmap_xy(pipe); /* dma_p */ mdp4_overlay_dmap_cfg(mfd, 1); mdp4_overlay_rgb_setup(pipe); mdp4_mixer_stage_up(pipe); mdp4_overlayproc_cfg(pipe); /* * DSI timing setting */ h_back_porch = var->left_margin; h_front_porch = var->right_margin; v_back_porch = var->upper_margin; v_front_porch = var->lower_margin; hsync_pulse_width = var->hsync_len; vsync_pulse_width = var->vsync_len; dsi_border_clr = mfd->panel_info.lcdc.border_clr; dsi_underflow_clr = mfd->panel_info.lcdc.underflow_clr; dsi_hsync_skew = mfd->panel_info.lcdc.hsync_skew; dsi_width = mfd->panel_info.xres; dsi_height = mfd->panel_info.yres; dsi_bpp = mfd->panel_info.bpp; hsync_period = hsync_pulse_width + h_back_porch + dsi_width + h_front_porch; hsync_ctrl = (hsync_period << 16) | hsync_pulse_width; hsync_start_x = h_back_porch + hsync_pulse_width; hsync_end_x = hsync_period - h_front_porch - 1; display_hctl = (hsync_end_x << 16) | hsync_start_x; vsync_period = (vsync_pulse_width + v_back_porch + dsi_height + v_front_porch); display_v_start = ((vsync_pulse_width + v_back_porch) * hsync_period) + dsi_hsync_skew; display_v_end = (vsync_period - v_front_porch) * hsync_period + dsi_hsync_skew - 1; if (dsi_width != var->xres) { active_h_start = hsync_start_x + first_pixel_start_x; active_h_end = active_h_start + var->xres - 1; active_hctl = ACTIVE_START_X_EN | (active_h_end << 16) | active_h_start; } else { active_hctl = 0; } if (dsi_height != var->yres) { active_v_start = display_v_start + first_pixel_start_y * hsync_period; active_v_end = active_v_start + (var->yres) * hsync_period - 1; active_v_start |= ACTIVE_START_Y_EN; } else { active_v_start = 0; active_v_end = 0; } 
dsi_underflow_clr |= 0x80000000; /* enable recovery */ hsync_polarity = 0; vsync_polarity = 0; data_en_polarity = 0; ctrl_polarity = (data_en_polarity << 2) | (vsync_polarity << 1) | (hsync_polarity); MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x4, hsync_ctrl); MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x8, vsync_period * hsync_period); MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0xc, vsync_pulse_width * hsync_period); MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x10, display_hctl); MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x14, display_v_start); MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x18, display_v_end); MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x1c, active_hctl); MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x20, active_v_start); MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x24, active_v_end); MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x28, dsi_border_clr); MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x2c, dsi_underflow_clr); MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x30, dsi_hsync_skew); MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE + 0x38, ctrl_polarity); mdp4_overlay_reg_flush(pipe, 1); ret = panel_next_on(pdev); if (ret == 0) { /* enable DSI block */ MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 1); mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_ON, FALSE); } /* MDP cmd block disable */ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE); return ret; } int mdp4_dsi_video_off(struct platform_device *pdev) { int ret = 0; /* MDP cmd block enable */ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_ON, FALSE); MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0); /* MDP cmd block disable */ mdp_pipe_ctrl(MDP_CMD_BLOCK, MDP_BLOCK_POWER_OFF, FALSE); mdp_pipe_ctrl(MDP_OVERLAY0_BLOCK, MDP_BLOCK_POWER_OFF, FALSE); ret = panel_next_off(pdev); #ifdef MIPI_DSI_RGB_UNSTAGE /* delay to make sure the last frame finishes */ msleep(100); /* dis-engage rgb0 from mixer0 */ if (dsi_pipe) mdp4_mixer_stage_down(dsi_pipe); #endif return ret; } #ifdef CONFIG_FB_MSM_OVERLAY_WRITEBACK void mdp4_dsi_video_blank(void) { if (writeback_addr) { printk(KERN_DEBUG "%s: addr=%x 
size=%d\n", __func__, (int)writeback_addr, writeback_size); memset(writeback_addr, 0xffffffff, writeback_size); } } static int mdp4_dsi_video_do_blt(struct msm_fb_data_type *mfd, int enable) { unsigned long flag; unsigned int data; int change = 0; void mdp4_overlay_dsi_video_wait4dmap(struct msm_fb_data_type *mfd); PR_DISP_INFO("%s: enable=%d addr=%x base=%x\n", __func__, enable, (int)dsi_pipe->blt_addr, (int)dsi_pipe->blt_base); spin_lock_irqsave(&mdp_spin_lock, flag); if (enable && dsi_pipe->blt_addr == 0) { dsi_pipe->blt_addr = dsi_pipe->blt_base; change++; } else if (enable == 0 && dsi_pipe->blt_addr) { dsi_pipe->blt_addr = 0; change++; } dsi_pipe->blt_cnt = 0; spin_unlock_irqrestore(&mdp_spin_lock, flag); if (!change) return 0; mutex_lock(&cmdlock); PR_DISP_INFO("%s: start\n", __func__); data = inpdw(MDP_BASE + DSI_VIDEO_BASE); data &= 0x01; if (data) { /* timing generator enabled */ mdp4_overlay_dsi_video_wait4dmap(mfd); MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0); mdelay(17); /* make sure last frame is finished */ mipi_dsi_controller_cfg(0, 0, 0); } mdp4_overlayproc_cfg(dsi_pipe); mdp4_overlay_dmap_xy(dsi_pipe); if (data) { /* timing generator enabled */ MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 1); mdp4_overlay_dsi_video_wait4dmap(mfd); mdp4_overlay_dsi_video_wait4dmap(mfd); MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 0); mipi_dsi_sw_reset(); mipi_dsi_controller_cfg(1, 0, 0); MDP_OUTP(MDP_BASE + DSI_VIDEO_BASE, 1); } PR_DISP_INFO("%s: done\n", __func__); mutex_unlock(&cmdlock); return 0; } int mdp4_dsi_overlay_video_blt_start(struct msm_fb_data_type *mfd) /* for runtime*/ { return mdp4_dsi_video_do_blt(mfd, 1); } int mdp4_dsi_overlay_video_blt_stop(struct msm_fb_data_type *mfd) { return mdp4_dsi_video_do_blt(mfd, 0); } int mdp4_dsi_video_overlay_blt_offset(struct msm_fb_data_type *mfd, struct msmfb_overlay_blt *req) { req->offset = writeback_offset; req->width = dsi_pipe->src_width; req->height = dsi_pipe->src_height; req->bpp = dsi_pipe->bpp; return sizeof(*req); } void 
mdp4_dsi_video_overlay_blt(struct msm_fb_data_type *mfd, struct msmfb_overlay_blt *req) { mdp4_dsi_video_do_blt(mfd, req->enable); } #else int mdp4_dsi_video_overlay_blt_offset(struct msm_fb_data_type *mfd, struct msmfb_overlay_blt *req) { return 0; } void mdp4_dsi_video_overlay_blt(struct msm_fb_data_type *mfd, struct msmfb_overlay_blt *req) { return; } #endif /* CONFIG_FB_MSM_OVERLAY_WRITEBACK */ void mdp4_overlay_dsi_video_wait4dmap(struct msm_fb_data_type *mfd) { unsigned long flag; /* enable irq */ spin_lock_irqsave(&mdp_spin_lock, flag); mdp_enable_irq(MDP_DMA2_TERM); /* enable intr */ INIT_COMPLETION(dsi_pipe->comp); if (mfd) mfd->dma->waiting = TRUE; outp32(MDP_INTR_CLEAR, INTR_DMA_P_DONE | INTR_PRIMARY_VSYNC); mdp_intr_mask |= INTR_DMA_P_DONE; outp32(MDP_INTR_ENABLE, mdp_intr_mask); spin_unlock_irqrestore(&mdp_spin_lock, flag); wait_for_completion_killable(&dsi_pipe->comp); mdp_disable_irq(MDP_DMA2_TERM); } void mdp4_overlay_dsi_video_wait4vsync(struct msm_fb_data_type *mfd) { unsigned long flag; /* enable irq */ spin_lock_irqsave(&mdp_spin_lock, flag); mdp_enable_irq(MDP_DMA2_TERM); /* enable intr */ INIT_COMPLETION(dsi_pipe->comp); if (mfd) mfd->dma->waiting = TRUE; outp32(MDP_INTR_CLEAR, INTR_DMA_P_DONE | INTR_PRIMARY_VSYNC); mdp_intr_mask |= INTR_PRIMARY_VSYNC; outp32(MDP_INTR_ENABLE, mdp_intr_mask); spin_unlock_irqrestore(&mdp_spin_lock, flag); wait_for_completion_killable(&dsi_pipe->comp); mdp_disable_irq(MDP_DMA2_TERM); } void mdp4_overlay_dsi_video_vsync_push(struct msm_fb_data_type *mfd, struct mdp4_overlay_pipe *pipe) { mdp4_overlay_reg_flush(pipe, 1); if (pipe->flags & MDP_OV_PLAY_NOWAIT) return; if (inpdw(MDP_BASE + DSI_VIDEO_BASE)) mdp4_overlay_dsi_video_wait4vsync(mfd); } /* * mdp4_primary_vsync_dsi_video: called from isr */ void mdp4_primary_vsync_dsi_video(void) { complete_all(&dsi_pipe->comp); } void mdp4_dmap_done_dsi_video(void) { complete_all(&dsi_pipe->comp); } /* * mdp4_overlay1_done_dsi: called from isr */ void 
mdp4_overlay0_done_dsi_video() { complete(&dsi_pipe->comp); } void mdp4_dsi_video_overlay(struct msm_fb_data_type *mfd) { struct fb_info *fbi = mfd->fbi; uint8 *buf; int bpp; struct mdp4_overlay_pipe *pipe; if (!mfd->panel_power_on) return; /* no need to power on cmd block since it's dsi mode */ bpp = fbi->var.bits_per_pixel / 8; buf = (uint8 *) fbi->fix.smem_start; buf += fbi->var.xoffset * bpp + fbi->var.yoffset * fbi->fix.line_length; mutex_lock(&mfd->dma->ov_mutex); pipe = dsi_pipe; pipe->srcp0_addr = (uint32) buf; mdp4_overlay_rgb_setup(pipe); mdp4_overlay_dsi_video_vsync_push(mfd, pipe); mdp4_stat.kickoff_dsi++; mdp4_overlay_resource_release(); mutex_unlock(&mfd->dma->ov_mutex); }
gpl-2.0
larks/linux-rcu
drivers/media/dvb/dvb-usb/usb-urb.c
109
7062
/* usb-urb.c is part of the DVB USB library. * * Copyright (C) 2004-6 Patrick Boettcher (patrick.boettcher@desy.de) * see dvb-usb-init.c for copyright information. * * This file keeps functions for initializing and handling the * BULK and ISOC USB data transfers in a generic way. * Can be used for DVB-only and also, that's the plan, for * Hybrid USB devices (analog and DVB). */ #include "dvb-usb-common.h" /* URB stuff for streaming */ static void usb_urb_complete(struct urb *urb) { struct usb_data_stream *stream = urb->context; int ptype = usb_pipetype(urb->pipe); int i; u8 *b; deb_uxfer("'%s' urb completed. status: %d, length: %d/%d, pack_num: %d, errors: %d\n", ptype == PIPE_ISOCHRONOUS ? "isoc" : "bulk", urb->status,urb->actual_length,urb->transfer_buffer_length, urb->number_of_packets,urb->error_count); switch (urb->status) { case 0: /* success */ case -ETIMEDOUT: /* NAK */ break; case -ECONNRESET: /* kill */ case -ENOENT: case -ESHUTDOWN: return; default: /* error */ deb_ts("urb completition error %d.\n", urb->status); break; } b = (u8 *) urb->transfer_buffer; switch (ptype) { case PIPE_ISOCHRONOUS: for (i = 0; i < urb->number_of_packets; i++) { if (urb->iso_frame_desc[i].status != 0) deb_ts("iso frame descriptor has an error: %d\n",urb->iso_frame_desc[i].status); else if (urb->iso_frame_desc[i].actual_length > 0) stream->complete(stream, b + urb->iso_frame_desc[i].offset, urb->iso_frame_desc[i].actual_length); urb->iso_frame_desc[i].status = 0; urb->iso_frame_desc[i].actual_length = 0; } debug_dump(b,20,deb_uxfer); break; case PIPE_BULK: if (urb->actual_length > 0) stream->complete(stream, b, urb->actual_length); break; default: err("unknown endpoint type in completition handler."); return; } usb_submit_urb(urb,GFP_ATOMIC); } int usb_urb_kill(struct usb_data_stream *stream) { int i; for (i = 0; i < stream->urbs_submitted; i++) { deb_ts("killing URB no. 
%d.\n",i); /* stop the URB */ usb_kill_urb(stream->urb_list[i]); } stream->urbs_submitted = 0; return 0; } int usb_urb_submit(struct usb_data_stream *stream) { int i,ret; for (i = 0; i < stream->urbs_initialized; i++) { deb_ts("submitting URB no. %d\n",i); if ((ret = usb_submit_urb(stream->urb_list[i],GFP_ATOMIC))) { err("could not submit URB no. %d - get them all back",i); usb_urb_kill(stream); return ret; } stream->urbs_submitted++; } return 0; } static int usb_free_stream_buffers(struct usb_data_stream *stream) { if (stream->state & USB_STATE_URB_BUF) { while (stream->buf_num) { stream->buf_num--; deb_mem("freeing buffer %d\n",stream->buf_num); usb_buffer_free(stream->udev, stream->buf_size, stream->buf_list[stream->buf_num], stream->dma_addr[stream->buf_num]); } } stream->state &= ~USB_STATE_URB_BUF; return 0; } static int usb_allocate_stream_buffers(struct usb_data_stream *stream, int num, unsigned long size) { stream->buf_num = 0; stream->buf_size = size; deb_mem("all in all I will use %lu bytes for streaming\n",num*size); for (stream->buf_num = 0; stream->buf_num < num; stream->buf_num++) { deb_mem("allocating buffer %d\n",stream->buf_num); if (( stream->buf_list[stream->buf_num] = usb_buffer_alloc(stream->udev, size, GFP_ATOMIC, &stream->dma_addr[stream->buf_num]) ) == NULL) { deb_mem("not enough memory for urb-buffer allocation.\n"); usb_free_stream_buffers(stream); return -ENOMEM; } deb_mem("buffer %d: %p (dma: %Lu)\n", stream->buf_num, stream->buf_list[stream->buf_num], (long long)stream->dma_addr[stream->buf_num]); memset(stream->buf_list[stream->buf_num],0,size); stream->state |= USB_STATE_URB_BUF; } deb_mem("allocation successful\n"); return 0; } static int usb_bulk_urb_init(struct usb_data_stream *stream) { int i, j; if ((i = usb_allocate_stream_buffers(stream,stream->props.count, stream->props.u.bulk.buffersize)) < 0) return i; /* allocate the URBs */ for (i = 0; i < stream->props.count; i++) { stream->urb_list[i] = usb_alloc_urb(0, GFP_ATOMIC); if 
(!stream->urb_list[i]) { deb_mem("not enough memory for urb_alloc_urb!.\n"); for (j = 0; j < i; j++) usb_free_urb(stream->urb_list[i]); return -ENOMEM; } usb_fill_bulk_urb( stream->urb_list[i], stream->udev, usb_rcvbulkpipe(stream->udev,stream->props.endpoint), stream->buf_list[i], stream->props.u.bulk.buffersize, usb_urb_complete, stream); stream->urb_list[i]->transfer_flags = URB_NO_TRANSFER_DMA_MAP; stream->urb_list[i]->transfer_dma = stream->dma_addr[i]; stream->urbs_initialized++; } return 0; } static int usb_isoc_urb_init(struct usb_data_stream *stream) { int i,j; if ((i = usb_allocate_stream_buffers(stream,stream->props.count, stream->props.u.isoc.framesize*stream->props.u.isoc.framesperurb)) < 0) return i; /* allocate the URBs */ for (i = 0; i < stream->props.count; i++) { struct urb *urb; int frame_offset = 0; stream->urb_list[i] = usb_alloc_urb(stream->props.u.isoc.framesperurb, GFP_ATOMIC); if (!stream->urb_list[i]) { deb_mem("not enough memory for urb_alloc_urb!\n"); for (j = 0; j < i; j++) usb_free_urb(stream->urb_list[i]); return -ENOMEM; } urb = stream->urb_list[i]; urb->dev = stream->udev; urb->context = stream; urb->complete = usb_urb_complete; urb->pipe = usb_rcvisocpipe(stream->udev,stream->props.endpoint); urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; urb->interval = stream->props.u.isoc.interval; urb->number_of_packets = stream->props.u.isoc.framesperurb; urb->transfer_buffer_length = stream->buf_size; urb->transfer_buffer = stream->buf_list[i]; urb->transfer_dma = stream->dma_addr[i]; for (j = 0; j < stream->props.u.isoc.framesperurb; j++) { urb->iso_frame_desc[j].offset = frame_offset; urb->iso_frame_desc[j].length = stream->props.u.isoc.framesize; frame_offset += stream->props.u.isoc.framesize; } stream->urbs_initialized++; } return 0; } int usb_urb_init(struct usb_data_stream *stream, struct usb_data_stream_properties *props) { if (stream == NULL || props == NULL) return -EINVAL; memcpy(&stream->props, props, 
sizeof(*props)); usb_clear_halt(stream->udev,usb_rcvbulkpipe(stream->udev,stream->props.endpoint)); if (stream->complete == NULL) { err("there is no data callback - this doesn't make sense."); return -EINVAL; } switch (stream->props.type) { case USB_BULK: return usb_bulk_urb_init(stream); case USB_ISOC: return usb_isoc_urb_init(stream); default: err("unknown URB-type for data transfer."); return -EINVAL; } } int usb_urb_exit(struct usb_data_stream *stream) { int i; usb_urb_kill(stream); for (i = 0; i < stream->urbs_initialized; i++) { if (stream->urb_list[i] != NULL) { deb_mem("freeing URB no. %d.\n",i); /* free the URBs */ usb_free_urb(stream->urb_list[i]); } } stream->urbs_initialized = 0; usb_free_stream_buffers(stream); return 0; }
gpl-2.0
taozhijiang/linux
sound/soc/au1x/psc-i2s.c
621
11115
/* * Au12x0/Au1550 PSC ALSA ASoC audio support. * * (c) 2007-2008 MSC Vertriebsges.m.b.H., * Manuel Lauss <manuel.lauss@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Au1xxx-PSC I2S glue. * * NOTE: so far only PSC slave mode (bit- and frameclock) is supported. */ #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/suspend.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/initval.h> #include <sound/soc.h> #include <asm/mach-au1x00/au1000.h> #include <asm/mach-au1x00/au1xxx_psc.h> #include "psc.h" /* supported I2S DAI hardware formats */ #define AU1XPSC_I2S_DAIFMT \ (SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_LEFT_J | \ SND_SOC_DAIFMT_NB_NF) /* supported I2S direction */ #define AU1XPSC_I2S_DIR \ (SND_SOC_DAIDIR_PLAYBACK | SND_SOC_DAIDIR_CAPTURE) #define AU1XPSC_I2S_RATES \ SNDRV_PCM_RATE_8000_192000 #define AU1XPSC_I2S_FMTS \ (SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S24_LE) #define I2SSTAT_BUSY(stype) \ ((stype) == SNDRV_PCM_STREAM_PLAYBACK ? PSC_I2SSTAT_TB : PSC_I2SSTAT_RB) #define I2SPCR_START(stype) \ ((stype) == SNDRV_PCM_STREAM_PLAYBACK ? PSC_I2SPCR_TS : PSC_I2SPCR_RS) #define I2SPCR_STOP(stype) \ ((stype) == SNDRV_PCM_STREAM_PLAYBACK ? PSC_I2SPCR_TP : PSC_I2SPCR_RP) #define I2SPCR_CLRFIFO(stype) \ ((stype) == SNDRV_PCM_STREAM_PLAYBACK ? 
PSC_I2SPCR_TC : PSC_I2SPCR_RC) static int au1xpsc_i2s_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int fmt) { struct au1xpsc_audio_data *pscdata = snd_soc_dai_get_drvdata(cpu_dai); unsigned long ct; int ret; ret = -EINVAL; ct = pscdata->cfg; ct &= ~(PSC_I2SCFG_XM | PSC_I2SCFG_MLJ); /* left-justified */ switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { case SND_SOC_DAIFMT_I2S: ct |= PSC_I2SCFG_XM; /* enable I2S mode */ break; case SND_SOC_DAIFMT_MSB: break; case SND_SOC_DAIFMT_LSB: ct |= PSC_I2SCFG_MLJ; /* LSB (right-) justified */ break; default: goto out; } ct &= ~(PSC_I2SCFG_BI | PSC_I2SCFG_WI); /* IB-IF */ switch (fmt & SND_SOC_DAIFMT_INV_MASK) { case SND_SOC_DAIFMT_NB_NF: ct |= PSC_I2SCFG_BI | PSC_I2SCFG_WI; break; case SND_SOC_DAIFMT_NB_IF: ct |= PSC_I2SCFG_BI; break; case SND_SOC_DAIFMT_IB_NF: ct |= PSC_I2SCFG_WI; break; case SND_SOC_DAIFMT_IB_IF: break; default: goto out; } switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) { case SND_SOC_DAIFMT_CBM_CFM: /* CODEC master */ ct |= PSC_I2SCFG_MS; /* PSC I2S slave mode */ break; case SND_SOC_DAIFMT_CBS_CFS: /* CODEC slave */ ct &= ~PSC_I2SCFG_MS; /* PSC I2S Master mode */ break; default: goto out; } pscdata->cfg = ct; ret = 0; out: return ret; } static int au1xpsc_i2s_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params, struct snd_soc_dai *dai) { struct au1xpsc_audio_data *pscdata = snd_soc_dai_get_drvdata(dai); int cfgbits; unsigned long stat; /* check if the PSC is already streaming data */ stat = __raw_readl(I2S_STAT(pscdata)); if (stat & (PSC_I2SSTAT_TB | PSC_I2SSTAT_RB)) { /* reject parameters not currently set up in hardware */ cfgbits = __raw_readl(I2S_CFG(pscdata)); if ((PSC_I2SCFG_GET_LEN(cfgbits) != params->msbits) || (params_rate(params) != pscdata->rate)) return -EINVAL; } else { /* set sample bitdepth */ pscdata->cfg &= ~(0x1f << 4); pscdata->cfg |= PSC_I2SCFG_SET_LEN(params->msbits); /* remember current rate for other stream */ pscdata->rate = params_rate(params); } return 0; } /* 
Configure PSC late: on my devel systems the codec is I2S master and * supplies the i2sbitclock __AND__ i2sMclk (!) to the PSC unit. ASoC * uses aggressive PM and switches the codec off when it is not in use * which also means the PSC unit doesn't get any clocks and is therefore * dead. That's why this chunk here gets called from the trigger callback * because I can be reasonably certain the codec is driving the clocks. */ static int au1xpsc_i2s_configure(struct au1xpsc_audio_data *pscdata) { unsigned long tmo; /* bring PSC out of sleep, and configure I2S unit */ __raw_writel(PSC_CTRL_ENABLE, PSC_CTRL(pscdata)); wmb(); /* drain writebuffer */ tmo = 1000000; while (!(__raw_readl(I2S_STAT(pscdata)) & PSC_I2SSTAT_SR) && tmo) tmo--; if (!tmo) goto psc_err; __raw_writel(0, I2S_CFG(pscdata)); wmb(); /* drain writebuffer */ __raw_writel(pscdata->cfg | PSC_I2SCFG_DE_ENABLE, I2S_CFG(pscdata)); wmb(); /* drain writebuffer */ /* wait for I2S controller to become ready */ tmo = 1000000; while (!(__raw_readl(I2S_STAT(pscdata)) & PSC_I2SSTAT_DR) && tmo) tmo--; if (tmo) return 0; psc_err: __raw_writel(0, I2S_CFG(pscdata)); __raw_writel(PSC_CTRL_SUSPEND, PSC_CTRL(pscdata)); wmb(); /* drain writebuffer */ return -ETIMEDOUT; } static int au1xpsc_i2s_start(struct au1xpsc_audio_data *pscdata, int stype) { unsigned long tmo, stat; int ret; ret = 0; /* if both TX and RX are idle, configure the PSC */ stat = __raw_readl(I2S_STAT(pscdata)); if (!(stat & (PSC_I2SSTAT_TB | PSC_I2SSTAT_RB))) { ret = au1xpsc_i2s_configure(pscdata); if (ret) goto out; } __raw_writel(I2SPCR_CLRFIFO(stype), I2S_PCR(pscdata)); wmb(); /* drain writebuffer */ __raw_writel(I2SPCR_START(stype), I2S_PCR(pscdata)); wmb(); /* drain writebuffer */ /* wait for start confirmation */ tmo = 1000000; while (!(__raw_readl(I2S_STAT(pscdata)) & I2SSTAT_BUSY(stype)) && tmo) tmo--; if (!tmo) { __raw_writel(I2SPCR_STOP(stype), I2S_PCR(pscdata)); wmb(); /* drain writebuffer */ ret = -ETIMEDOUT; } out: return ret; } static int 
/* NOTE(review): the storage class / return type of au1xpsc_i2s_stop lies
 * before this chunk and is not visible here (presumably "static int" —
 * confirm against the full file).
 *
 * Stop TX or RX on the PSC I2S unit and, when both directions are idle,
 * put the whole PSC into suspend.  @stype is the substream direction
 * (SNDRV_PCM_STREAM_PLAYBACK/CAPTURE).  Always returns 0.
 */
au1xpsc_i2s_stop(struct au1xpsc_audio_data *pscdata, int stype)
{
	unsigned long tmo, stat;

	__raw_writel(I2SPCR_STOP(stype), I2S_PCR(pscdata));
	wmb(); /* drain writebuffer */

	/* wait for stop confirmation; bounded busy-wait so a wedged
	 * controller cannot hang the caller forever */
	tmo = 1000000;
	while ((__raw_readl(I2S_STAT(pscdata)) & I2SSTAT_BUSY(stype)) && tmo)
		tmo--;

	/* if both TX and RX are idle, disable PSC */
	stat = __raw_readl(I2S_STAT(pscdata));
	if (!(stat & (PSC_I2SSTAT_TB | PSC_I2SSTAT_RB))) {
		__raw_writel(0, I2S_CFG(pscdata));
		wmb(); /* drain writebuffer */
		__raw_writel(PSC_CTRL_SUSPEND, PSC_CTRL(pscdata));
		wmb(); /* drain writebuffer */
	}
	return 0;
}

/* ALSA trigger callback: dispatch start/stop requests for the given
 * substream direction to the PSC start/stop helpers.  Returns 0 on
 * success, -EINVAL for unhandled trigger commands.
 */
static int au1xpsc_i2s_trigger(struct snd_pcm_substream *substream, int cmd,
			       struct snd_soc_dai *dai)
{
	struct au1xpsc_audio_data *pscdata = snd_soc_dai_get_drvdata(dai);
	int ret, stype = substream->stream;

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
		ret = au1xpsc_i2s_start(pscdata, stype);
		break;
	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
		ret = au1xpsc_i2s_stop(pscdata, stype);
		break;
	default:
		ret = -EINVAL;
	}
	return ret;
}

/* Stream open callback: hand the platform DMA ids to the PCM layer.
 * NOTE(review): &pscdata->dmaids[0] passes the whole 2-entry id array;
 * the PCM side presumably indexes it by stream direction — confirm
 * against the au1xpsc PCM driver.
 */
static int au1xpsc_i2s_startup(struct snd_pcm_substream *substream,
			       struct snd_soc_dai *dai)
{
	struct au1xpsc_audio_data *pscdata = snd_soc_dai_get_drvdata(dai);

	snd_soc_dai_set_dma_data(dai, substream, &pscdata->dmaids[0]);
	return 0;
}

/* DAI operations table; hw_params/set_fmt are defined earlier in the file */
static const struct snd_soc_dai_ops au1xpsc_i2s_dai_ops = {
	.startup	= au1xpsc_i2s_startup,
	.trigger	= au1xpsc_i2s_trigger,
	.hw_params	= au1xpsc_i2s_hw_params,
	.set_fmt	= au1xpsc_i2s_set_fmt,
};

/* Per-device DAI template; copied into each device instance at probe so
 * the .name can be set per instance */
static const struct snd_soc_dai_driver au1xpsc_i2s_dai_template = {
	.playback = {
		.rates		= AU1XPSC_I2S_RATES,
		.formats	= AU1XPSC_I2S_FMTS,
		.channels_min	= 2,
		.channels_max	= 8,	/* 2 without external help */
	},
	.capture = {
		.rates		= AU1XPSC_I2S_RATES,
		.formats	= AU1XPSC_I2S_FMTS,
		.channels_min	= 2,
		.channels_max	= 8,	/* 2 without external help */
	},
	.ops = &au1xpsc_i2s_dai_ops,
};

static const struct snd_soc_component_driver au1xpsc_i2s_component = {
	.name		= "au1xpsc-i2s",
};

/* Platform probe: map the PSC registers, fetch both DMA ids, switch the
 * PSC into I2S mode (preserving the platform's clock selection) and
 * register the DAI with the ASoC core.
 */
static int au1xpsc_i2s_drvprobe(struct platform_device *pdev)
{
	struct resource *iores, *dmares;
	unsigned long sel;
	struct au1xpsc_audio_data *wd;

	wd = devm_kzalloc(&pdev->dev, sizeof(struct au1xpsc_audio_data),
			  GFP_KERNEL);
	if (!wd)
		return -ENOMEM;

	iores = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	wd->mmio = devm_ioremap_resource(&pdev->dev, iores);
	if (IS_ERR(wd->mmio))
		return PTR_ERR(wd->mmio);

	dmares = platform_get_resource(pdev, IORESOURCE_DMA, 0);
	if (!dmares)
		return -EBUSY;
	wd->dmaids[SNDRV_PCM_STREAM_PLAYBACK] = dmares->start;

	dmares = platform_get_resource(pdev, IORESOURCE_DMA, 1);
	if (!dmares)
		return -EBUSY;
	wd->dmaids[SNDRV_PCM_STREAM_CAPTURE] = dmares->start;

	/* preserve PSC clock source set up by platform (dev.platform_data
	 * is already occupied by soc layer)
	 */
	sel = __raw_readl(PSC_SEL(wd)) & PSC_SEL_CLK_MASK;
	__raw_writel(PSC_CTRL_DISABLE, PSC_CTRL(wd));
	wmb(); /* drain writebuffer */
	__raw_writel(PSC_SEL_PS_I2SMODE | sel, PSC_SEL(wd));
	__raw_writel(0, I2S_CFG(wd));
	wmb(); /* drain writebuffer */

	/* preconfigure: set max rx/tx fifo depths */
	wd->cfg |= PSC_I2SCFG_RT_FIFO8 | PSC_I2SCFG_TT_FIFO8;

	/* don't wait for I2S core to become ready now; clocks may not
	 * be running yet; depending on clock input for PSC a wait might
	 * time out.
	 */

	/* name the DAI like this device instance ("au1xpsc-i2s.PSCINDEX") */
	memcpy(&wd->dai_drv, &au1xpsc_i2s_dai_template,
	       sizeof(struct snd_soc_dai_driver));
	wd->dai_drv.name = dev_name(&pdev->dev);

	platform_set_drvdata(pdev, wd);

	return snd_soc_register_component(&pdev->dev, &au1xpsc_i2s_component,
					  &wd->dai_drv, 1);
}

/* Platform remove: unregister the component and shut the PSC down */
static int au1xpsc_i2s_drvremove(struct platform_device *pdev)
{
	struct au1xpsc_audio_data *wd = platform_get_drvdata(pdev);

	snd_soc_unregister_component(&pdev->dev);

	__raw_writel(0, I2S_CFG(wd));
	wmb(); /* drain writebuffer */
	__raw_writel(PSC_CTRL_DISABLE, PSC_CTRL(wd));
	wmb(); /* drain writebuffer */

	return 0;
}

#ifdef CONFIG_PM
/* Save the clock-select register (the only state kept across suspend)
 * and disable the PSC */
static int au1xpsc_i2s_drvsuspend(struct device *dev)
{
	struct au1xpsc_audio_data *wd = dev_get_drvdata(dev);

	/* save interesting register and disable PSC */
	wd->pm[0] = __raw_readl(PSC_SEL(wd));

	__raw_writel(0, I2S_CFG(wd));
	wmb(); /* drain writebuffer */
	__raw_writel(PSC_CTRL_DISABLE, PSC_CTRL(wd));
	wmb(); /* drain writebuffer */

	return 0;
}

/* Restore the saved clock selection; the PSC is left disabled — stream
 * restart reprograms the rest of the configuration */
static int au1xpsc_i2s_drvresume(struct device *dev)
{
	struct au1xpsc_audio_data *wd = dev_get_drvdata(dev);

	/* select I2S mode and PSC clock */
	__raw_writel(PSC_CTRL_DISABLE, PSC_CTRL(wd));
	wmb(); /* drain writebuffer */
	__raw_writel(0, PSC_SEL(wd));
	wmb(); /* drain writebuffer */
	__raw_writel(wd->pm[0], PSC_SEL(wd));
	wmb(); /* drain writebuffer */

	return 0;
}

static struct dev_pm_ops au1xpsci2s_pmops = {
	.suspend	= au1xpsc_i2s_drvsuspend,
	.resume		= au1xpsc_i2s_drvresume,
};

#define AU1XPSCI2S_PMOPS &au1xpsci2s_pmops

#else

#define AU1XPSCI2S_PMOPS NULL

#endif

static struct platform_driver au1xpsc_i2s_driver = {
	.driver	= {
		.name	= "au1xpsc_i2s",
		.pm	= AU1XPSCI2S_PMOPS,
	},
	.probe		= au1xpsc_i2s_drvprobe,
	.remove		= au1xpsc_i2s_drvremove,
};

module_platform_driver(au1xpsc_i2s_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Au12x0/Au1550 PSC I2S ALSA ASoC audio driver");
MODULE_AUTHOR("Manuel Lauss");
gpl-2.0
cile381/s7_flat_kernel
arch/arm/mach-keystone/platsmp.c
877
1477
/* * Keystone SOC SMP platform code * * Copyright 2013 Texas Instruments, Inc. * Cyril Chemparathy <cyril@ti.com> * Santosh Shilimkar <santosh.shillimkar@ti.com> * * Based on platsmp.c, Copyright (C) 2002 ARM Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/smp.h> #include <linux/io.h> #include <asm/smp_plat.h> #include <asm/prom.h> #include <asm/tlbflush.h> #include <asm/pgtable.h> #include "keystone.h" static int keystone_smp_boot_secondary(unsigned int cpu, struct task_struct *idle) { unsigned long start = virt_to_idmap(&secondary_startup); int error; pr_debug("keystone-smp: booting cpu %d, vector %08lx\n", cpu, start); error = keystone_cpu_smc(KEYSTONE_MON_CPU_UP_IDX, cpu, start); if (error) pr_err("CPU %d bringup failed with %d\n", cpu, error); return error; } #ifdef CONFIG_ARM_LPAE static void __cpuinit keystone_smp_secondary_initmem(unsigned int cpu) { pgd_t *pgd0 = pgd_offset_k(0); cpu_set_ttbr(1, __pa(pgd0) + TTBR1_OFFSET); local_flush_tlb_all(); } #else static inline void __cpuinit keystone_smp_secondary_initmem(unsigned int cpu) {} #endif struct smp_operations keystone_smp_ops __initdata = { .smp_boot_secondary = keystone_smp_boot_secondary, .smp_secondary_init = keystone_smp_secondary_initmem, };
gpl-2.0
psyke83/codeaurora-kernel_samsung_europa
drivers/gpu/drm/drm_sman.c
1645
8782
/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck., ND., USA.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 *
 **************************************************************************/
/*
 * Simple memory manager interface that keeps track on allocate regions on a
 * per "owner" basis. All regions associated with an "owner" can be released
 * with a simple call. Typically if the "owner" exists. The owner is any
 * "unsigned long" identifier. Can typically be a pointer to a file private
 * struct or a context identifier.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include "drm_sman.h"

/* Per-owner bookkeeping: hashed by owner id, linked into the sman-wide
 * owner list, and heading the list of memory blocks the owner holds. */
struct drm_owner_item {
	struct drm_hash_item owner_hash;
	struct list_head sman_list;
	struct list_head mem_blocks;
};

/* Tear down both hash tables and release the manager array.
 * Does NOT free outstanding blocks; callers use drm_sman_cleanup() first. */
void drm_sman_takedown(struct drm_sman * sman)
{
	drm_ht_remove(&sman->user_hash_tab);
	drm_ht_remove(&sman->owner_hash_tab);
	kfree(sman->mm);
}

EXPORT_SYMBOL(drm_sman_takedown);

/* Initialize an sman instance with room for @num_managers memory managers
 * and hash tables of the given orders.  Returns 0 or a negative errno;
 * on failure all partially-acquired resources are released. */
int
drm_sman_init(struct drm_sman * sman, unsigned int num_managers,
	      unsigned int user_order, unsigned int owner_order)
{
	int ret = 0;

	/* no cast on kcalloc in C; kcalloc also zero-fills and
	 * overflow-checks num_managers * sizeof(*sman->mm) */
	sman->mm = kcalloc(num_managers, sizeof(*sman->mm), GFP_KERNEL);
	if (!sman->mm) {
		ret = -ENOMEM;
		goto out;
	}
	sman->num_managers = num_managers;
	INIT_LIST_HEAD(&sman->owner_items);
	ret = drm_ht_create(&sman->owner_hash_tab, owner_order);
	if (ret)
		goto out1;
	ret = drm_ht_create(&sman->user_hash_tab, user_order);
	if (!ret)
		goto out;

	/* user hash failed: unwind owner hash, then the manager array */
	drm_ht_remove(&sman->owner_hash_tab);
out1:
	kfree(sman->mm);
out:
	return ret;
}

EXPORT_SYMBOL(drm_sman_init);

/* Default allocator backend: carve a block out of a drm_mm range manager.
 * Returns an opaque drm_mm_node pointer, or NULL when no hole fits. */
static void *drm_sman_mm_allocate(void *private, unsigned long size,
				  unsigned alignment)
{
	struct drm_mm *mm = (struct drm_mm *) private;
	struct drm_mm_node *tmp;

	tmp = drm_mm_search_free(mm, size, alignment, 1);
	if (!tmp) {
		return NULL;
	}
	tmp = drm_mm_get_block(tmp, size, alignment);
	return tmp;
}

/* Default free backend: return a drm_mm block to its manager */
static void drm_sman_mm_free(void *private, void *ref)
{
	struct drm_mm_node *node = (struct drm_mm_node *) ref;

	drm_mm_put_block(node);
}

/* Default destroy backend: tear down the drm_mm and free it */
static void drm_sman_mm_destroy(void *private)
{
	struct drm_mm *mm = (struct drm_mm *) private;
	drm_mm_takedown(mm);
	kfree(mm);
}

/* Default offset backend: a block's offset is its drm_mm start */
static unsigned long drm_sman_mm_offset(void *private, void *ref)
{
	struct drm_mm_node *node = (struct drm_mm_node *) ref;
	return node->start;
}

/* Install the built-in drm_mm backend for manager slot @manager, covering
 * [start, start + size).  Returns 0 or a negative errno. */
int
drm_sman_set_range(struct drm_sman * sman, unsigned int manager,
		   unsigned long start, unsigned long size)
{
	struct drm_sman_mm *sman_mm;
	struct drm_mm *mm;
	int ret;

	BUG_ON(manager >= sman->num_managers);

	sman_mm = &sman->mm[manager];
	mm = kzalloc(sizeof(*mm), GFP_KERNEL);
	if (!mm) {
		return -ENOMEM;
	}
	sman_mm->private = mm;
	ret = drm_mm_init(mm, start, size);

	if (ret) {
		kfree(mm);
		return ret;
	}

	sman_mm->allocate = drm_sman_mm_allocate;
	sman_mm->free = drm_sman_mm_free;
	sman_mm->destroy = drm_sman_mm_destroy;
	sman_mm->offset = drm_sman_mm_offset;

	return 0;
}

EXPORT_SYMBOL(drm_sman_set_range);

/* Install a caller-supplied backend for manager slot @manager.
 * The allocator struct is copied; the caller keeps ownership of its copy. */
int
drm_sman_set_manager(struct drm_sman * sman, unsigned int manager,
		     struct drm_sman_mm * allocator)
{
	BUG_ON(manager >= sman->num_managers);
	sman->mm[manager] = *allocator;

	return 0;
}
EXPORT_SYMBOL(drm_sman_set_manager);

/* Look up the owner item for @owner, creating it on first use.
 * Returns NULL on allocation or hash-insert failure. */
static struct drm_owner_item *drm_sman_get_owner_item(struct drm_sman * sman,
						      unsigned long owner)
{
	int ret;
	struct drm_hash_item *owner_hash_item;
	struct drm_owner_item *owner_item;

	ret = drm_ht_find_item(&sman->owner_hash_tab, owner, &owner_hash_item);
	if (!ret) {
		return drm_hash_entry(owner_hash_item, struct drm_owner_item,
				      owner_hash);
	}

	owner_item = kzalloc(sizeof(*owner_item), GFP_KERNEL);
	if (!owner_item)
		goto out;

	INIT_LIST_HEAD(&owner_item->mem_blocks);
	owner_item->owner_hash.key = owner;
	if (drm_ht_insert_item(&sman->owner_hash_tab, &owner_item->owner_hash))
		goto out1;

	list_add_tail(&owner_item->sman_list, &sman->owner_items);
	return owner_item;

out1:
	kfree(owner_item);
out:
	return NULL;
}

/* Allocate @size bytes from manager @manager on behalf of @owner.
 * The block gets a user-visible key via the user hash table.  Returns the
 * block, or NULL on failure (the backend allocation is released on every
 * error path — goto-unwind below). */
struct drm_memblock_item *drm_sman_alloc(struct drm_sman *sman, unsigned int manager,
					 unsigned long size, unsigned alignment,
					 unsigned long owner)
{
	void *tmp;
	struct drm_sman_mm *sman_mm;
	struct drm_owner_item *owner_item;
	struct drm_memblock_item *memblock;

	BUG_ON(manager >= sman->num_managers);

	sman_mm = &sman->mm[manager];
	tmp = sman_mm->allocate(sman_mm->private, size, alignment);

	if (!tmp) {
		return NULL;
	}

	memblock = kzalloc(sizeof(*memblock), GFP_KERNEL);

	if (!memblock)
		goto out;

	memblock->mm_info = tmp;
	memblock->mm = sman_mm;
	memblock->sman = sman;

	if (drm_ht_just_insert_please
	    (&sman->user_hash_tab, &memblock->user_hash,
	     (unsigned long)memblock, 32, 0, 0))
		goto out1;

	owner_item = drm_sman_get_owner_item(sman, owner);
	if (!owner_item)
		goto out2;

	list_add_tail(&memblock->owner_list, &owner_item->mem_blocks);

	return memblock;

out2:
	drm_ht_remove_item(&sman->user_hash_tab, &memblock->user_hash);
out1:
	kfree(memblock);
out:
	sman_mm->free(sman_mm->private, tmp);

	return NULL;
}

EXPORT_SYMBOL(drm_sman_alloc);

/* Release one block: unlink from its owner, drop its user key, hand the
 * backing memory back to the manager backend and free the wrapper. */
static void drm_sman_free(struct drm_memblock_item *item)
{
	struct drm_sman *sman = item->sman;

	list_del(&item->owner_list);
	drm_ht_remove_item(&sman->user_hash_tab, &item->user_hash);
	item->mm->free(item->mm->private, item->mm_info);
	kfree(item);
}

/* Free the block identified by user key @key.  Returns 0 or -EINVAL if
 * the key is unknown. */
int drm_sman_free_key(struct drm_sman *sman, unsigned int key)
{
	struct drm_hash_item *hash_item;
	struct drm_memblock_item *memblock_item;

	if (drm_ht_find_item(&sman->user_hash_tab, key, &hash_item))
		return -EINVAL;

	memblock_item = drm_hash_entry(hash_item, struct drm_memblock_item,
				       user_hash);
	drm_sman_free(memblock_item);
	return 0;
}

EXPORT_SYMBOL(drm_sman_free_key);

/* Unhash and free an owner item; its mem_blocks list must already be empty
 * or be freed by the caller (see drm_sman_do_owner_cleanup). */
static void drm_sman_remove_owner(struct drm_sman *sman,
				  struct drm_owner_item *owner_item)
{
	list_del(&owner_item->sman_list);
	drm_ht_remove_item(&sman->owner_hash_tab, &owner_item->owner_hash);
	kfree(owner_item);
}

/* Check whether @owner holds any blocks.  Returns 0 if blocks remain,
 * -1 if the owner is unknown or held nothing (in which case its record
 * is removed as a side effect) — this inverted-looking contract is what
 * existing callers rely on. */
int drm_sman_owner_clean(struct drm_sman *sman, unsigned long owner)
{
	struct drm_hash_item *hash_item;
	struct drm_owner_item *owner_item;

	if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
		return -1;
	}

	owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
	if (list_empty(&owner_item->mem_blocks)) {
		drm_sman_remove_owner(sman, owner_item);
		return -1;
	}

	return 0;
}

EXPORT_SYMBOL(drm_sman_owner_clean);

/* Free every block an owner holds, then the owner record itself */
static void drm_sman_do_owner_cleanup(struct drm_sman *sman,
				      struct drm_owner_item *owner_item)
{
	struct drm_memblock_item *entry, *next;

	list_for_each_entry_safe(entry, next, &owner_item->mem_blocks,
				 owner_list) {
		drm_sman_free(entry);
	}
	drm_sman_remove_owner(sman, owner_item);
}

/* Public wrapper: release everything associated with @owner.
 * Unknown owners are silently ignored. */
void drm_sman_owner_cleanup(struct drm_sman *sman, unsigned long owner)
{

	struct drm_hash_item *hash_item;
	struct drm_owner_item *owner_item;

	if (drm_ht_find_item(&sman->owner_hash_tab, owner, &hash_item)) {
		return;
	}

	owner_item = drm_hash_entry(hash_item, struct drm_owner_item, owner_hash);
	drm_sman_do_owner_cleanup(sman, owner_item);
}

EXPORT_SYMBOL(drm_sman_owner_cleanup);

/* Global cleanup: free all owners' blocks, then destroy every manager
 * backend that installed a destroy hook (private cleared to guard
 * against double-destroy). */
void drm_sman_cleanup(struct drm_sman *sman)
{
	struct drm_owner_item *entry, *next;
	unsigned int i;
	struct drm_sman_mm *sman_mm;

	list_for_each_entry_safe(entry, next, &sman->owner_items, sman_list) {
		drm_sman_do_owner_cleanup(sman, entry);
	}
	if (sman->mm) {
		for (i = 0; i < sman->num_managers; ++i) {
			sman_mm = &sman->mm[i];
			if (sman_mm->private) {
				sman_mm->destroy(sman_mm->private);
				sman_mm->private = NULL;
			}
		}
	}
}

EXPORT_SYMBOL(drm_sman_cleanup);
gpl-2.0
cuzz1369/android_kernel_lge_g3
arch/arm/mach-msm/msm_bus/msm_bus_id.c
1901
2559
/* Copyright (c) 2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/device.h> #include <linux/module.h> #include <mach/msm_bus.h> #include <mach/msm_bus_board.h> #include <mach/board.h> #include <mach/rpm.h> #include <mach/socinfo.h> #include "msm_bus_core.h" #include "msm_bus_noc.h" #include "msm_bus_bimc.h" static uint32_t master_iids[MSM_BUS_MASTER_LAST]; static uint32_t slave_iids[MSM_BUS_SLAVE_LAST - SLAVE_ID_KEY]; static void msm_bus_assign_iids(struct msm_bus_fabric_registration *fabreg, int fabid) { int i; for (i = 0; i < fabreg->len; i++) { if (!fabreg->info[i].gateway) { fabreg->info[i].priv_id = fabid + fabreg->info[i].id; if (fabreg->info[i].id < SLAVE_ID_KEY) { if (fabreg->info[i].id >= MSM_BUS_MASTER_LAST) { WARN(1, "id %d exceeds array size!\n", fabreg->info[i].id); continue; } master_iids[fabreg->info[i].id] = fabreg->info[i].priv_id; } else { if ((fabreg->info[i].id - SLAVE_ID_KEY) >= (MSM_BUS_SLAVE_LAST - SLAVE_ID_KEY)) { WARN(1, "id %d exceeds array size!\n", fabreg->info[i].id); continue; } slave_iids[fabreg->info[i].id - (SLAVE_ID_KEY)] = fabreg->info[i].priv_id; } } else { fabreg->info[i].priv_id = fabreg->info[i].id; } } } static int msm_bus_get_iid(int id) { if ((id < SLAVE_ID_KEY && id >= MSM_BUS_MASTER_LAST) || id >= MSM_BUS_SLAVE_LAST) { MSM_BUS_ERR("Cannot get iid. Invalid id %d passed\n", id); return -EINVAL; } return CHECK_ID(((id < SLAVE_ID_KEY) ? 
master_iids[id] : slave_iids[id - SLAVE_ID_KEY]), id); } static struct msm_bus_board_algorithm msm_bus_id_algo = { .get_iid = msm_bus_get_iid, .assign_iids = msm_bus_assign_iids, }; int msm_bus_board_rpm_get_il_ids(uint16_t *id) { return -ENXIO; } void msm_bus_board_init(struct msm_bus_fabric_registration *pdata) { if (machine_is_msm8226()) msm_bus_id_algo.board_nfab = NFAB_MSM8226; else if (machine_is_msm8610()) msm_bus_id_algo.board_nfab = NFAB_MSM8610; pdata->board_algo = &msm_bus_id_algo; }
gpl-2.0
Split-Screen/android_kernel_lge_g3
arch/arm/mach-msm/msm_bus/msm_bus_id.c
1901
2559
/* Copyright (c) 2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/device.h> #include <linux/module.h> #include <mach/msm_bus.h> #include <mach/msm_bus_board.h> #include <mach/board.h> #include <mach/rpm.h> #include <mach/socinfo.h> #include "msm_bus_core.h" #include "msm_bus_noc.h" #include "msm_bus_bimc.h" static uint32_t master_iids[MSM_BUS_MASTER_LAST]; static uint32_t slave_iids[MSM_BUS_SLAVE_LAST - SLAVE_ID_KEY]; static void msm_bus_assign_iids(struct msm_bus_fabric_registration *fabreg, int fabid) { int i; for (i = 0; i < fabreg->len; i++) { if (!fabreg->info[i].gateway) { fabreg->info[i].priv_id = fabid + fabreg->info[i].id; if (fabreg->info[i].id < SLAVE_ID_KEY) { if (fabreg->info[i].id >= MSM_BUS_MASTER_LAST) { WARN(1, "id %d exceeds array size!\n", fabreg->info[i].id); continue; } master_iids[fabreg->info[i].id] = fabreg->info[i].priv_id; } else { if ((fabreg->info[i].id - SLAVE_ID_KEY) >= (MSM_BUS_SLAVE_LAST - SLAVE_ID_KEY)) { WARN(1, "id %d exceeds array size!\n", fabreg->info[i].id); continue; } slave_iids[fabreg->info[i].id - (SLAVE_ID_KEY)] = fabreg->info[i].priv_id; } } else { fabreg->info[i].priv_id = fabreg->info[i].id; } } } static int msm_bus_get_iid(int id) { if ((id < SLAVE_ID_KEY && id >= MSM_BUS_MASTER_LAST) || id >= MSM_BUS_SLAVE_LAST) { MSM_BUS_ERR("Cannot get iid. Invalid id %d passed\n", id); return -EINVAL; } return CHECK_ID(((id < SLAVE_ID_KEY) ? 
master_iids[id] : slave_iids[id - SLAVE_ID_KEY]), id); } static struct msm_bus_board_algorithm msm_bus_id_algo = { .get_iid = msm_bus_get_iid, .assign_iids = msm_bus_assign_iids, }; int msm_bus_board_rpm_get_il_ids(uint16_t *id) { return -ENXIO; } void msm_bus_board_init(struct msm_bus_fabric_registration *pdata) { if (machine_is_msm8226()) msm_bus_id_algo.board_nfab = NFAB_MSM8226; else if (machine_is_msm8610()) msm_bus_id_algo.board_nfab = NFAB_MSM8610; pdata->board_algo = &msm_bus_id_algo; }
gpl-2.0
Canonical-kernel/Ubuntu-kernel
drivers/gpu/drm/radeon/cik_blit_shaders.c
2925
5796
/*
 * Copyright 2012 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *     Alex Deucher <alexander.deucher@amd.com>
 */

#include <linux/types.h>
#include <linux/bug.h>
#include <linux/kernel.h>

/* Default GPU render-state command stream for CIK-family parts.
 * Each group is a SET_CONTEXT_REG-style packet: a 0xc0NN6900 header
 * (NN = payload dword count) followed by a register offset and NN
 * register values.  The per-value comments name the register written.
 * Do not edit values without the CIK register reference.
 */
const u32 cik_default_state[] =
{
	0xc0066900,
	0x00000000,
	0x00000060, /* DB_RENDER_CONTROL */
	0x00000000, /* DB_COUNT_CONTROL */
	0x00000000, /* DB_DEPTH_VIEW */
	0x0000002a, /* DB_RENDER_OVERRIDE */
	0x00000000, /* DB_RENDER_OVERRIDE2 */
	0x00000000, /* DB_HTILE_DATA_BASE */

	0xc0046900,
	0x00000008,
	0x00000000, /* DB_DEPTH_BOUNDS_MIN */
	0x00000000, /* DB_DEPTH_BOUNDS_MAX */
	0x00000000, /* DB_STENCIL_CLEAR */
	0x00000000, /* DB_DEPTH_CLEAR */

	0xc0036900,
	0x0000000f,
	0x00000000, /* DB_DEPTH_INFO */
	0x00000000, /* DB_Z_INFO */
	0x00000000, /* DB_STENCIL_INFO */

	0xc0016900,
	0x00000080,
	0x00000000, /* PA_SC_WINDOW_OFFSET */

	0xc00d6900,
	0x00000083,
	0x0000ffff, /* PA_SC_CLIPRECT_RULE */
	0x00000000, /* PA_SC_CLIPRECT_0_TL */
	0x20002000, /* PA_SC_CLIPRECT_0_BR */
	0x00000000,
	0x20002000,
	0x00000000,
	0x20002000,
	0x00000000,
	0x20002000,
	0xaaaaaaaa, /* PA_SC_EDGERULE */
	0x00000000, /* PA_SU_HARDWARE_SCREEN_OFFSET */
	0x0000000f, /* CB_TARGET_MASK */
	0x0000000f, /* CB_SHADER_MASK */

	0xc0226900,
	0x00000094,
	0x80000000, /* PA_SC_VPORT_SCISSOR_0_TL */
	0x20002000, /* PA_SC_VPORT_SCISSOR_0_BR */
	0x80000000, /* 15 further TL/BR scissor pairs follow */
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x80000000,
	0x20002000,
	0x00000000, /* PA_SC_VPORT_ZMIN_0 */
	0x3f800000, /* PA_SC_VPORT_ZMAX_0 */

	0xc0046900,
	0x00000100,
	0xffffffff, /* VGT_MAX_VTX_INDX */
	0x00000000, /* VGT_MIN_VTX_INDX */
	0x00000000, /* VGT_INDX_OFFSET */
	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_INDX */

	0xc0046900,
	0x00000105,
	0x00000000, /* CB_BLEND_RED */
	0x00000000, /* CB_BLEND_GREEN */
	0x00000000, /* CB_BLEND_BLUE */
	0x00000000, /* CB_BLEND_ALPHA */

	0xc0016900,
	0x000001e0,
	0x00000000, /* CB_BLEND0_CONTROL */

	0xc00c6900,
	0x00000200,
	0x00000000, /* DB_DEPTH_CONTROL */
	0x00000000, /* DB_EQAA */
	0x00cc0010, /* CB_COLOR_CONTROL */
	0x00000210, /* DB_SHADER_CONTROL */
	0x00010000, /* PA_CL_CLIP_CNTL */
	0x00000004, /* PA_SU_SC_MODE_CNTL */
	0x00000100, /* PA_CL_VTE_CNTL */
	0x00000000, /* PA_CL_VS_OUT_CNTL */
	0x00000000, /* PA_CL_NANINF_CNTL */
	0x00000000, /* PA_SU_LINE_STIPPLE_CNTL */
	0x00000000, /* PA_SU_LINE_STIPPLE_SCALE */
	0x00000000, /* PA_SU_PRIM_FILTER_CNTL */

	0xc0116900,
	0x00000280,
	0x00000000, /* PA_SU_POINT_SIZE */
	0x00000000, /* PA_SU_POINT_MINMAX */
	0x00000008, /* PA_SU_LINE_CNTL */
	0x00000000, /* PA_SC_LINE_STIPPLE */
	0x00000000, /* VGT_OUTPUT_PATH_CNTL */
	0x00000000, /* VGT_HOS_CNTL */
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000, /* VGT_GS_MODE */

	0xc0026900,
	0x00000292,
	0x00000000, /* PA_SC_MODE_CNTL_0 */
	0x00000000, /* PA_SC_MODE_CNTL_1 */

	0xc0016900,
	0x000002a1,
	0x00000000, /* VGT_PRIMITIVEID_EN */

	0xc0016900,
	0x000002a5,
	0x00000000, /* VGT_MULTI_PRIM_IB_RESET_EN */

	0xc0026900,
	0x000002a8,
	0x00000000, /* VGT_INSTANCE_STEP_RATE_0 */
	0x00000000,

	0xc0026900,
	0x000002ad,
	0x00000000, /* VGT_REUSE_OFF */
	0x00000000,

	0xc0016900,
	0x000002d5,
	0x00000000, /* VGT_SHADER_STAGES_EN */

	0xc0016900,
	0x000002dc,
	0x0000aa00, /* DB_ALPHA_TO_MASK */

	0xc0066900,
	0x000002de,
	0x00000000, /* PA_SU_POLY_OFFSET_DB_FMT_CNTL */
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,

	0xc0026900,
	0x000002e5,
	0x00000000, /* VGT_STRMOUT_CONFIG */
	0x00000000,

	0xc01b6900,
	0x000002f5,
	0x76543210, /* PA_SC_CENTROID_PRIORITY_0 */
	0xfedcba98, /* PA_SC_CENTROID_PRIORITY_1 */
	0x00000000, /* PA_SC_LINE_CNTL */
	0x00000000, /* PA_SC_AA_CONFIG */
	0x00000005, /* PA_SU_VTX_CNTL */
	0x3f800000, /* PA_CL_GB_VERT_CLIP_ADJ */
	0x3f800000, /* PA_CL_GB_VERT_DISC_ADJ */
	0x3f800000, /* PA_CL_GB_HORZ_CLIP_ADJ */
	0x3f800000, /* PA_CL_GB_HORZ_DISC_ADJ */
	0x00000000, /* PA_SC_AA_SAMPLE_LOCS_PIXEL_X0Y0_0 */
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0x00000000,
	0xffffffff, /* PA_SC_AA_MASK_X0Y0_X1Y0 */
	0xffffffff,

	0xc0026900,
	0x00000316,
	0x0000000e, /* VGT_VERTEX_REUSE_BLOCK_CNTL */
	0x00000010, /*  */
};

/* Element count of the state table, consumed by the blit setup code */
const u32 cik_default_size = ARRAY_SIZE(cik_default_state);
gpl-2.0
carvsdriver/android_kernel_samsung_n5110-common
drivers/staging/rtl8192e/ieee80211/ieee80211_softmac_wx.c
2925
13458
/* IEEE 802.11 SoftMAC layer * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it> * * Mostly extracted from the rtl8180-sa2400 driver for the * in-kernel generic ieee802.11 stack. * * Some pieces of code might be stolen from ipw2100 driver * copyright of who own it's copyright ;-) * * PS wx handler mostly stolen from hostap, copyright who * own it's copyright ;-) * * released under the GPL */ #include "ieee80211.h" #ifdef ENABLE_DOT11D #include "dot11d.h" #endif /* FIXME: add A freqs */ const long ieee80211_wlan_frequencies[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484 }; int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { int ret; struct iw_freq *fwrq = & wrqu->freq; down(&ieee->wx_sem); if(ieee->iw_mode == IW_MODE_INFRA){ ret = -EOPNOTSUPP; goto out; } /* if setting by freq convert to channel */ if (fwrq->e == 1) { if ((fwrq->m >= (int) 2.412e8 && fwrq->m <= (int) 2.487e8)) { int f = fwrq->m / 100000; int c = 0; while ((c < 14) && (f != ieee80211_wlan_frequencies[c])) c++; /* hack to fall through */ fwrq->e = 0; fwrq->m = c + 1; } } if (fwrq->e > 0 || fwrq->m > 14 || fwrq->m < 1 ){ ret = -EOPNOTSUPP; goto out; }else { /* Set the channel */ #ifdef ENABLE_DOT11D if (!(GET_DOT11D_INFO(ieee)->channel_map)[fwrq->m]) { ret = -EINVAL; goto out; } #endif ieee->current_network.channel = fwrq->m; ieee->set_chan(ieee, ieee->current_network.channel); if(ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER) if(ieee->state == IEEE80211_LINKED){ ieee80211_stop_send_beacons(ieee); ieee80211_start_send_beacons(ieee); } } ret = 0; out: up(&ieee->wx_sem); return ret; } int ieee80211_wx_get_freq(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { struct iw_freq *fwrq = & wrqu->freq; if (ieee->current_network.channel == 0) return -1; //NM 0.7.0 will not accept channel any more. 
fwrq->m = ieee80211_wlan_frequencies[ieee->current_network.channel-1] * 100000; fwrq->e = 1; return 0; } int ieee80211_wx_get_wap(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { unsigned long flags; wrqu->ap_addr.sa_family = ARPHRD_ETHER; if (ieee->iw_mode == IW_MODE_MONITOR) return -1; /* We want avoid to give to the user inconsistent infos*/ spin_lock_irqsave(&ieee->lock, flags); if (ieee->state != IEEE80211_LINKED && ieee->state != IEEE80211_LINKED_SCANNING && ieee->wap_set == 0) memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); else memcpy(wrqu->ap_addr.sa_data, ieee->current_network.bssid, ETH_ALEN); spin_unlock_irqrestore(&ieee->lock, flags); return 0; } int ieee80211_wx_set_wap(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *awrq, char *extra) { int ret = 0; u8 zero[] = {0,0,0,0,0,0}; unsigned long flags; short ifup = ieee->proto_started;//dev->flags & IFF_UP; struct sockaddr *temp = (struct sockaddr *)awrq; ieee->sync_scan_hurryup = 1; down(&ieee->wx_sem); /* use ifconfig hw ether */ if (ieee->iw_mode == IW_MODE_MASTER){ ret = -1; goto out; } if (temp->sa_family != ARPHRD_ETHER){ ret = -EINVAL; goto out; } if (ifup) ieee80211_stop_protocol(ieee,true); /* just to avoid to give inconsistent infos in the * get wx method. 
not really needed otherwise */ spin_lock_irqsave(&ieee->lock, flags); memcpy(ieee->current_network.bssid, temp->sa_data, ETH_ALEN); ieee->wap_set = memcmp(temp->sa_data, zero,ETH_ALEN)!=0; spin_unlock_irqrestore(&ieee->lock, flags); if (ifup) ieee80211_start_protocol(ieee); out: up(&ieee->wx_sem); return ret; } int ieee80211_wx_get_essid(struct ieee80211_device *ieee, struct iw_request_info *a,union iwreq_data *wrqu,char *b) { int len,ret = 0; unsigned long flags; if (ieee->iw_mode == IW_MODE_MONITOR) return -1; /* We want avoid to give to the user inconsistent infos*/ spin_lock_irqsave(&ieee->lock, flags); if (ieee->current_network.ssid[0] == '\0' || ieee->current_network.ssid_len == 0){ ret = -1; goto out; } if (ieee->state != IEEE80211_LINKED && ieee->state != IEEE80211_LINKED_SCANNING && ieee->ssid_set == 0){ ret = -1; goto out; } len = ieee->current_network.ssid_len; wrqu->essid.length = len; strncpy(b,ieee->current_network.ssid,len); wrqu->essid.flags = 1; out: spin_unlock_irqrestore(&ieee->lock, flags); return ret; } int ieee80211_wx_set_rate(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { u32 target_rate = wrqu->bitrate.value; ieee->rate = target_rate/100000; //FIXME: we might want to limit rate also in management protocols. 
return 0; } int ieee80211_wx_get_rate(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { u32 tmp_rate; tmp_rate = TxCountToDataRate(ieee, ieee->softmac_stats.CurrentShowTxate); wrqu->bitrate.value = tmp_rate * 500000; return 0; } int ieee80211_wx_set_rts(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { if (wrqu->rts.disabled || !wrqu->rts.fixed) ieee->rts = DEFAULT_RTS_THRESHOLD; else { if (wrqu->rts.value < MIN_RTS_THRESHOLD || wrqu->rts.value > MAX_RTS_THRESHOLD) return -EINVAL; ieee->rts = wrqu->rts.value; } return 0; } int ieee80211_wx_get_rts(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { wrqu->rts.value = ieee->rts; wrqu->rts.fixed = 0; /* no auto select */ wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); return 0; } int ieee80211_wx_set_mode(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { ieee->sync_scan_hurryup = 1; down(&ieee->wx_sem); if (wrqu->mode == ieee->iw_mode) goto out; if (wrqu->mode == IW_MODE_MONITOR){ ieee->dev->type = ARPHRD_IEEE80211; }else{ ieee->dev->type = ARPHRD_ETHER; } if (!ieee->proto_started){ ieee->iw_mode = wrqu->mode; }else{ ieee80211_stop_protocol(ieee,true); ieee->iw_mode = wrqu->mode; ieee80211_start_protocol(ieee); } out: up(&ieee->wx_sem); return 0; } void ieee80211_wx_sync_scan_wq(struct work_struct *work) { struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, wx_sync_scan_wq); short chan; HT_EXTCHNL_OFFSET chan_offset=0; HT_CHANNEL_WIDTH bandwidth=0; int b40M = 0; static int count = 0; chan = ieee->current_network.channel; #ifdef ENABLE_LPS if (ieee->LeisurePSLeave) { ieee->LeisurePSLeave(ieee); } /* notify AP to be in PS mode */ ieee80211_sta_ps_send_null_frame(ieee, 1); ieee80211_sta_ps_send_null_frame(ieee, 1); #endif if (ieee->data_hard_stop) ieee->data_hard_stop(ieee); 
ieee80211_stop_send_beacons(ieee); ieee->state = IEEE80211_LINKED_SCANNING; ieee->link_change(ieee); ieee->InitialGainHandler(ieee, IG_Backup); if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT && ieee->pHTInfo->bCurBW40MHz) { b40M = 1; chan_offset = ieee->pHTInfo->CurSTAExtChnlOffset; bandwidth = (HT_CHANNEL_WIDTH)ieee->pHTInfo->bCurBW40MHz; printk("Scan in 40M, force to 20M first:%d, %d\n", chan_offset, bandwidth); ieee->SetBWModeHandler(ieee, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT); } ieee80211_start_scan_syncro(ieee); if (b40M) { printk("Scan in 20M, back to 40M\n"); if (chan_offset == HT_EXTCHNL_OFFSET_UPPER) ieee->set_chan(ieee, chan + 2); else if (chan_offset == HT_EXTCHNL_OFFSET_LOWER) ieee->set_chan(ieee, chan - 2); else ieee->set_chan(ieee, chan); ieee->SetBWModeHandler(ieee, bandwidth, chan_offset); } else { ieee->set_chan(ieee, chan); } ieee->InitialGainHandler(ieee, IG_Restore); ieee->state = IEEE80211_LINKED; ieee->link_change(ieee); #ifdef ENABLE_LPS /* Notify AP that I wake up again */ ieee80211_sta_ps_send_null_frame(ieee, 0); #endif // To prevent the immediately calling watch_dog after scan. 
if(ieee->LinkDetectInfo.NumRecvBcnInPeriod==0||ieee->LinkDetectInfo.NumRecvDataInPeriod==0 ) { ieee->LinkDetectInfo.NumRecvBcnInPeriod = 1; ieee->LinkDetectInfo.NumRecvDataInPeriod= 1; } if (ieee->data_hard_resume) ieee->data_hard_resume(ieee); if(ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER) ieee80211_start_send_beacons(ieee); count = 0; up(&ieee->wx_sem); } int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { int ret = 0; down(&ieee->wx_sem); if (ieee->iw_mode == IW_MODE_MONITOR || !(ieee->proto_started)){ ret = -1; goto out; } if ( ieee->state == IEEE80211_LINKED){ queue_work(ieee->wq, &ieee->wx_sync_scan_wq); /* intentionally forget to up sem */ return 0; } out: up(&ieee->wx_sem); return ret; } int ieee80211_wx_set_essid(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *extra) { int ret=0,len; short proto_started; unsigned long flags; ieee->sync_scan_hurryup = 1; down(&ieee->wx_sem); proto_started = ieee->proto_started; if (wrqu->essid.length > IW_ESSID_MAX_SIZE){ ret= -E2BIG; goto out; } if (ieee->iw_mode == IW_MODE_MONITOR){ ret= -1; goto out; } if(proto_started){ ieee80211_stop_protocol(ieee,true); } /* this is just to be sure that the GET wx callback * has consisten infos. not needed otherwise */ spin_lock_irqsave(&ieee->lock, flags); if (wrqu->essid.flags && wrqu->essid.length) { //first flush current network.ssid len = ((wrqu->essid.length-1) < IW_ESSID_MAX_SIZE) ? 
(wrqu->essid.length-1) : IW_ESSID_MAX_SIZE; strncpy(ieee->current_network.ssid, extra, len+1); ieee->current_network.ssid_len = len+1; ieee->ssid_set = 1; } else{ ieee->ssid_set = 0; ieee->current_network.ssid[0] = '\0'; ieee->current_network.ssid_len = 0; } spin_unlock_irqrestore(&ieee->lock, flags); if (proto_started) ieee80211_start_protocol(ieee); out: up(&ieee->wx_sem); return ret; } int ieee80211_wx_get_mode(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { wrqu->mode = ieee->iw_mode; return 0; } int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int *parms = (int *)extra; int enable = (parms[0] > 0); short prev = ieee->raw_tx; down(&ieee->wx_sem); if(enable) ieee->raw_tx = 1; else ieee->raw_tx = 0; printk(KERN_INFO"raw TX is %s\n", ieee->raw_tx ? "enabled" : "disabled"); if(ieee->iw_mode == IW_MODE_MONITOR) { if(prev == 0 && ieee->raw_tx){ if (ieee->data_hard_resume) ieee->data_hard_resume(ieee); netif_carrier_on(ieee->dev); } if(prev && ieee->raw_tx == 1) netif_carrier_off(ieee->dev); } up(&ieee->wx_sem); return 0; } int ieee80211_wx_get_name(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { strcpy(wrqu->name, "802.11"); if(ieee->modulation & IEEE80211_CCK_MODULATION) strcat(wrqu->name, "b"); if(ieee->modulation & IEEE80211_OFDM_MODULATION) strcat(wrqu->name, "g"); if (ieee->mode & (IEEE_N_24G | IEEE_N_5G)) strcat(wrqu->name, "n"); return 0; } /* this is mostly stolen from hostap */ int ieee80211_wx_set_power(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret = 0; if( (!ieee->sta_wake_up) || (!ieee->enter_sleep_state) || (!ieee->ps_is_queue_empty)){ return -1; } down(&ieee->wx_sem); if (wrqu->power.disabled){ ieee->ps = IEEE80211_PS_DISABLED; goto exit; } if (wrqu->power.flags & IW_POWER_TIMEOUT) { ieee->ps_timeout = wrqu->power.value 
/ 1000; } if (wrqu->power.flags & IW_POWER_PERIOD) { ieee->ps_period = wrqu->power.value / 1000; } switch (wrqu->power.flags & IW_POWER_MODE) { case IW_POWER_UNICAST_R: ieee->ps = IEEE80211_PS_UNICAST; break; case IW_POWER_MULTICAST_R: ieee->ps = IEEE80211_PS_MBCAST; break; case IW_POWER_ALL_R: ieee->ps = IEEE80211_PS_UNICAST | IEEE80211_PS_MBCAST; break; case IW_POWER_ON: break; default: ret = -EINVAL; goto exit; } exit: up(&ieee->wx_sem); return ret; } /* this is stolen from hostap */ int ieee80211_wx_get_power(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret =0; down(&ieee->wx_sem); if(ieee->ps == IEEE80211_PS_DISABLED){ wrqu->power.disabled = 1; goto exit; } wrqu->power.disabled = 0; if ((wrqu->power.flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) { wrqu->power.flags = IW_POWER_TIMEOUT; wrqu->power.value = ieee->ps_timeout * 1000; } else { wrqu->power.flags = IW_POWER_PERIOD; wrqu->power.value = ieee->ps_period * 1000; } if ((ieee->ps & (IEEE80211_PS_MBCAST | IEEE80211_PS_UNICAST)) == (IEEE80211_PS_MBCAST | IEEE80211_PS_UNICAST)) wrqu->power.flags |= IW_POWER_ALL_R; else if (ieee->ps & IEEE80211_PS_MBCAST) wrqu->power.flags |= IW_POWER_MULTICAST_R; else wrqu->power.flags |= IW_POWER_UNICAST_R; exit: up(&ieee->wx_sem); return ret; }
gpl-2.0
C3C0/zte_skate_35
arch/s390/kvm/diag.c
3437
1743
/* * diag.c - handling diagnose instructions * * Copyright IBM Corp. 2008 * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License (version 2 only) * as published by the Free Software Foundation. * * Author(s): Carsten Otte <cotte@de.ibm.com> * Christian Borntraeger <borntraeger@de.ibm.com> */ #include <linux/kvm.h> #include <linux/kvm_host.h> #include "kvm-s390.h" static int __diag_time_slice_end(struct kvm_vcpu *vcpu) { VCPU_EVENT(vcpu, 5, "%s", "diag time slice end"); vcpu->stat.diagnose_44++; vcpu_put(vcpu); yield(); vcpu_load(vcpu); return 0; } static int __diag_ipl_functions(struct kvm_vcpu *vcpu) { unsigned int reg = vcpu->arch.sie_block->ipa & 0xf; unsigned long subcode = vcpu->arch.guest_gprs[reg] & 0xffff; VCPU_EVENT(vcpu, 5, "diag ipl functions, subcode %lx", subcode); switch (subcode) { case 3: vcpu->run->s390_reset_flags = KVM_S390_RESET_CLEAR; break; case 4: vcpu->run->s390_reset_flags = 0; break; default: return -EOPNOTSUPP; } atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags); vcpu->run->s390_reset_flags |= KVM_S390_RESET_SUBSYSTEM; vcpu->run->s390_reset_flags |= KVM_S390_RESET_IPL; vcpu->run->s390_reset_flags |= KVM_S390_RESET_CPU_INIT; vcpu->run->exit_reason = KVM_EXIT_S390_RESET; VCPU_EVENT(vcpu, 3, "requesting userspace resets %llx", vcpu->run->s390_reset_flags); return -EREMOTE; } int kvm_s390_handle_diag(struct kvm_vcpu *vcpu) { int code = (vcpu->arch.sie_block->ipb & 0xfff0000) >> 16; switch (code) { case 0x44: return __diag_time_slice_end(vcpu); case 0x308: return __diag_ipl_functions(vcpu); default: return -EOPNOTSUPP; } }
gpl-2.0
ashishtanwer/mptcp_android-4.2.2_Nexus4
arch/arm/mach-mmp/ttc_dkb.c
4717
3824
/*
 * linux/arch/arm/mach-mmp/ttc_dkb.c
 *
 * Support for the Marvell PXA910-based TTC_DKB Development Platform.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>
#include <linux/mtd/onenand.h>
#include <linux/interrupt.h>
#include <linux/i2c/pca953x.h>
#include <linux/gpio.h>
#include <asm/mach-types.h>
#include <asm/mach/arch.h>
#include <asm/mach/flash.h>
#include <mach/addr-map.h>
#include <mach/mfp-pxa910.h>
#include <mach/pxa910.h>
#include <mach/irqs.h>

#include "common.h"

/*
 * Map a GPIO-expander pin index onto the global GPIO number space, past
 * the SoC's built-in GPIOs.  Out-of-range indices are clamped to [0, 15].
 */
#define TTCDKB_GPIO_EXT0(x)	(MMP_NR_BUILTIN_GPIO + ((x < 0) ? 0 : \
				((x < 16) ? x : 15)))
#define TTCDKB_GPIO_EXT1(x)	(MMP_NR_BUILTIN_GPIO + 16 + ((x < 0) ? 0 : \
				((x < 16) ? x : 15)))

/*
 * 16 board interrupts -- MAX7312 GPIO expander
 * 16 board interrupts -- PCA9575 GPIO expander
 * 24 board interrupts -- 88PM860x PMIC
 */
#define TTCDKB_NR_IRQS		(MMP_NR_IRQS + 16 + 16 + 24)

/* Multi-function pin setup applied once at init (see ttc_dkb_init) */
static unsigned long ttc_dkb_pin_config[] __initdata = {
	/* UART2 */
	GPIO47_UART2_RXD,
	GPIO48_UART2_TXD,

	/* DFI */
	DF_IO0_ND_IO0, DF_IO1_ND_IO1, DF_IO2_ND_IO2, DF_IO3_ND_IO3,
	DF_IO4_ND_IO4, DF_IO5_ND_IO5, DF_IO6_ND_IO6, DF_IO7_ND_IO7,
	DF_IO8_ND_IO8, DF_IO9_ND_IO9, DF_IO10_ND_IO10, DF_IO11_ND_IO11,
	DF_IO12_ND_IO12, DF_IO13_ND_IO13, DF_IO14_ND_IO14, DF_IO15_ND_IO15,
	DF_nCS0_SM_nCS2_nCS0, DF_ALE_SM_WEn_ND_ALE,
	DF_CLE_SM_OEn_ND_CLE, DF_WEn_DF_WEn, DF_REn_DF_REn,
	DF_RDY0_DF_RDY0,
};

/*
 * OneNAND partition layout.  The first three partitions (bootloader and
 * two reserved areas) are marked read-only via MTD_WRITEABLE in
 * mask_flags; kernel and filesystem remain writable.
 */
static struct mtd_partition ttc_dkb_onenand_partitions[] = {
	{
		.name		= "bootloader",
		.offset		= 0,
		.size		= SZ_1M,
		.mask_flags	= MTD_WRITEABLE,
	}, {
		.name		= "reserved",
		.offset		= MTDPART_OFS_APPEND,
		.size		= SZ_128K,
		.mask_flags	= MTD_WRITEABLE,
	}, {
		.name		= "reserved",
		.offset		= MTDPART_OFS_APPEND,
		.size		= SZ_8M,
		.mask_flags	= MTD_WRITEABLE,
	}, {
		.name		= "kernel",
		.offset		= MTDPART_OFS_APPEND,
		.size		= (SZ_2M + SZ_1M),
		.mask_flags	= 0,
	}, {
		.name		= "filesystem",
		.offset		= MTDPART_OFS_APPEND,
		.size		= SZ_32M + SZ_16M,
		.mask_flags	= 0,
	}
};

static struct onenand_platform_data ttc_dkb_onenand_info = {
	.parts		= ttc_dkb_onenand_partitions,
	.nr_parts	= ARRAY_SIZE(ttc_dkb_onenand_partitions),
};

/* OneNAND is mapped at static memory chip-select 0 */
static struct resource ttc_dkb_resource_onenand[] = {
	[0] = {
		.start	= SMC_CS0_PHYS_BASE,
		.end	= SMC_CS0_PHYS_BASE + SZ_1M,
		.flags	= IORESOURCE_MEM,
	},
};

static struct platform_device ttc_dkb_device_onenand = {
	.name		= "onenand-flash",
	.id		= -1,
	.resource	= ttc_dkb_resource_onenand,
	.num_resources	= ARRAY_SIZE(ttc_dkb_resource_onenand),
	.dev		= {
		.platform_data	= &ttc_dkb_onenand_info,
	},
};

/* Devices registered in one shot by platform_add_devices() below */
static struct platform_device *ttc_dkb_devices[] = {
	&pxa910_device_gpio,
	&pxa910_device_rtc,
	&ttc_dkb_device_onenand,
};

/* MAX7312 GPIO expander: pins start right after the built-in GPIOs */
static struct pca953x_platform_data max7312_data[] = {
	{
		.gpio_base	= TTCDKB_GPIO_EXT0(0),
		.irq_base	= MMP_NR_IRQS,
	},
};

static struct i2c_board_info ttc_dkb_i2c_info[] = {
	{
		.type		= "max7312",
		.addr		= 0x23,
		.irq		= MMP_GPIO_TO_IRQ(80),
		.platform_data	= &max7312_data,
	},
};

/*
 * Board init: configure the pin mux, then register on-chip (UART) and
 * off-chip (I2C expander, OneNAND, GPIO, RTC) devices.
 */
static void __init ttc_dkb_init(void)
{
	mfp_config(ARRAY_AND_SIZE(ttc_dkb_pin_config));

	/* on-chip devices */
	pxa910_add_uart(1);

	/* off-chip devices */
	pxa910_add_twsi(0, NULL, ARRAY_AND_SIZE(ttc_dkb_i2c_info));
	platform_add_devices(ARRAY_AND_SIZE(ttc_dkb_devices));
}

MACHINE_START(TTC_DKB, "PXA910-based TTC_DKB Development Platform")
	.map_io		= mmp_map_io,
	.nr_irqs	= TTCDKB_NR_IRQS,
	.init_irq	= pxa910_init_irq,
	.timer		= &pxa910_timer,
	.init_machine	= ttc_dkb_init,
	.restart	= mmp_restart,
MACHINE_END
gpl-2.0
Stuxnet-Kernel/kernel_mako
drivers/net/wireless/ath/ath9k/eeprom_4k.c
4973
33600
/* * Copyright (c) 2008-2011 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <asm/unaligned.h> #include "hw.h" #include "ar9002_phy.h" static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah) { return ((ah->eeprom.map4k.baseEepHeader.version >> 12) & 0xF); } static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah) { return ((ah->eeprom.map4k.baseEepHeader.version) & 0xFFF); } #define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) static bool __ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); u16 *eep_data = (u16 *)&ah->eeprom.map4k; int addr, eep_start_loc = 64; for (addr = 0; addr < SIZE_EEPROM_4K; addr++) { if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) { ath_dbg(common, EEPROM, "Unable to read eeprom region\n"); return false; } eep_data++; } return true; } static bool __ath9k_hw_usb_4k_fill_eeprom(struct ath_hw *ah) { u16 *eep_data = (u16 *)&ah->eeprom.map4k; ath9k_hw_usb_gen_fill_eeprom(ah, eep_data, 64, SIZE_EEPROM_4K); return true; } static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) { struct ath_common *common = ath9k_hw_common(ah); if (!ath9k_hw_use_flash(ah)) { ath_dbg(common, EEPROM, "Reading from EEPROM, not flash\n"); } if (common->bus_ops->ath_bus_type == ATH_USB) return 
__ath9k_hw_usb_4k_fill_eeprom(ah); else return __ath9k_hw_4k_fill_eeprom(ah); } #if defined(CONFIG_ATH9K_DEBUGFS) || defined(CONFIG_ATH9K_HTC_DEBUGFS) static u32 ath9k_dump_4k_modal_eeprom(char *buf, u32 len, u32 size, struct modal_eep_4k_header *modal_hdr) { PR_EEP("Chain0 Ant. Control", modal_hdr->antCtrlChain[0]); PR_EEP("Ant. Common Control", modal_hdr->antCtrlCommon); PR_EEP("Chain0 Ant. Gain", modal_hdr->antennaGainCh[0]); PR_EEP("Switch Settle", modal_hdr->switchSettling); PR_EEP("Chain0 TxRxAtten", modal_hdr->txRxAttenCh[0]); PR_EEP("Chain0 RxTxMargin", modal_hdr->rxTxMarginCh[0]); PR_EEP("ADC Desired size", modal_hdr->adcDesiredSize); PR_EEP("PGA Desired size", modal_hdr->pgaDesiredSize); PR_EEP("Chain0 xlna Gain", modal_hdr->xlnaGainCh[0]); PR_EEP("txEndToXpaOff", modal_hdr->txEndToXpaOff); PR_EEP("txEndToRxOn", modal_hdr->txEndToRxOn); PR_EEP("txFrameToXpaOn", modal_hdr->txFrameToXpaOn); PR_EEP("CCA Threshold)", modal_hdr->thresh62); PR_EEP("Chain0 NF Threshold", modal_hdr->noiseFloorThreshCh[0]); PR_EEP("xpdGain", modal_hdr->xpdGain); PR_EEP("External PD", modal_hdr->xpd); PR_EEP("Chain0 I Coefficient", modal_hdr->iqCalICh[0]); PR_EEP("Chain0 Q Coefficient", modal_hdr->iqCalQCh[0]); PR_EEP("pdGainOverlap", modal_hdr->pdGainOverlap); PR_EEP("O/D Bias Version", modal_hdr->version); PR_EEP("CCK OutputBias", modal_hdr->ob_0); PR_EEP("BPSK OutputBias", modal_hdr->ob_1); PR_EEP("QPSK OutputBias", modal_hdr->ob_2); PR_EEP("16QAM OutputBias", modal_hdr->ob_3); PR_EEP("64QAM OutputBias", modal_hdr->ob_4); PR_EEP("CCK Driver1_Bias", modal_hdr->db1_0); PR_EEP("BPSK Driver1_Bias", modal_hdr->db1_1); PR_EEP("QPSK Driver1_Bias", modal_hdr->db1_2); PR_EEP("16QAM Driver1_Bias", modal_hdr->db1_3); PR_EEP("64QAM Driver1_Bias", modal_hdr->db1_4); PR_EEP("CCK Driver2_Bias", modal_hdr->db2_0); PR_EEP("BPSK Driver2_Bias", modal_hdr->db2_1); PR_EEP("QPSK Driver2_Bias", modal_hdr->db2_2); PR_EEP("16QAM Driver2_Bias", modal_hdr->db2_3); PR_EEP("64QAM Driver2_Bias", 
modal_hdr->db2_4); PR_EEP("xPA Bias Level", modal_hdr->xpaBiasLvl); PR_EEP("txFrameToDataStart", modal_hdr->txFrameToDataStart); PR_EEP("txFrameToPaOn", modal_hdr->txFrameToPaOn); PR_EEP("HT40 Power Inc.", modal_hdr->ht40PowerIncForPdadc); PR_EEP("Chain0 bswAtten", modal_hdr->bswAtten[0]); PR_EEP("Chain0 bswMargin", modal_hdr->bswMargin[0]); PR_EEP("HT40 Switch Settle", modal_hdr->swSettleHt40); PR_EEP("Chain0 xatten2Db", modal_hdr->xatten2Db[0]); PR_EEP("Chain0 xatten2Margin", modal_hdr->xatten2Margin[0]); PR_EEP("Ant. Diversity ctl1", modal_hdr->antdiv_ctl1); PR_EEP("Ant. Diversity ctl2", modal_hdr->antdiv_ctl2); PR_EEP("TX Diversity", modal_hdr->tx_diversity); return len; } static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, u8 *buf, u32 len, u32 size) { struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; struct base_eep_header_4k *pBase = &eep->baseEepHeader; if (!dump_base_hdr) { len += snprintf(buf + len, size - len, "%20s :\n", "2GHz modal Header"); len += ath9k_dump_4k_modal_eeprom(buf, len, size, &eep->modalHeader); goto out; } PR_EEP("Major Version", pBase->version >> 12); PR_EEP("Minor Version", pBase->version & 0xFFF); PR_EEP("Checksum", pBase->checksum); PR_EEP("Length", pBase->length); PR_EEP("RegDomain1", pBase->regDmn[0]); PR_EEP("RegDomain2", pBase->regDmn[1]); PR_EEP("TX Mask", pBase->txMask); PR_EEP("RX Mask", pBase->rxMask); PR_EEP("Allow 5GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11A)); PR_EEP("Allow 2GHz", !!(pBase->opCapFlags & AR5416_OPFLAGS_11G)); PR_EEP("Disable 2GHz HT20", !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT20)); PR_EEP("Disable 2GHz HT40", !!(pBase->opCapFlags & AR5416_OPFLAGS_N_2G_HT40)); PR_EEP("Disable 5Ghz HT20", !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT20)); PR_EEP("Disable 5Ghz HT40", !!(pBase->opCapFlags & AR5416_OPFLAGS_N_5G_HT40)); PR_EEP("Big Endian", !!(pBase->eepMisc & 0x01)); PR_EEP("Cal Bin Major Ver", (pBase->binBuildNumber >> 24) & 0xFF); PR_EEP("Cal Bin Minor Ver", 
(pBase->binBuildNumber >> 16) & 0xFF); PR_EEP("Cal Bin Build", (pBase->binBuildNumber >> 8) & 0xFF); PR_EEP("TX Gain type", pBase->txGainType); len += snprintf(buf + len, size - len, "%20s : %pM\n", "MacAddress", pBase->macAddr); out: if (len > size) len = size; return len; } #else static u32 ath9k_hw_4k_dump_eeprom(struct ath_hw *ah, bool dump_base_hdr, u8 *buf, u32 len, u32 size) { return 0; } #endif #undef SIZE_EEPROM_4K static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) { #define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) struct ath_common *common = ath9k_hw_common(ah); struct ar5416_eeprom_4k *eep = (struct ar5416_eeprom_4k *) &ah->eeprom.map4k; u16 *eepdata, temp, magic, magic2; u32 sum = 0, el; bool need_swap = false; int i, addr; if (!ath9k_hw_use_flash(ah)) { if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET, &magic)) { ath_err(common, "Reading Magic # failed\n"); return false; } ath_dbg(common, EEPROM, "Read Magic = 0x%04X\n", magic); if (magic != AR5416_EEPROM_MAGIC) { magic2 = swab16(magic); if (magic2 == AR5416_EEPROM_MAGIC) { need_swap = true; eepdata = (u16 *) (&ah->eeprom); for (addr = 0; addr < EEPROM_4K_SIZE; addr++) { temp = swab16(*eepdata); *eepdata = temp; eepdata++; } } else { ath_err(common, "Invalid EEPROM Magic. Endianness mismatch.\n"); return -EINVAL; } } } ath_dbg(common, EEPROM, "need_swap = %s\n", need_swap ? "True" : "False"); if (need_swap) el = swab16(ah->eeprom.map4k.baseEepHeader.length); else el = ah->eeprom.map4k.baseEepHeader.length; if (el > sizeof(struct ar5416_eeprom_4k)) el = sizeof(struct ar5416_eeprom_4k) / sizeof(u16); else el = el / sizeof(u16); eepdata = (u16 *)(&ah->eeprom); for (i = 0; i < el; i++) sum ^= *eepdata++; if (need_swap) { u32 integer; u16 word; ath_dbg(common, EEPROM, "EEPROM Endianness is not native.. 
Changing\n"); word = swab16(eep->baseEepHeader.length); eep->baseEepHeader.length = word; word = swab16(eep->baseEepHeader.checksum); eep->baseEepHeader.checksum = word; word = swab16(eep->baseEepHeader.version); eep->baseEepHeader.version = word; word = swab16(eep->baseEepHeader.regDmn[0]); eep->baseEepHeader.regDmn[0] = word; word = swab16(eep->baseEepHeader.regDmn[1]); eep->baseEepHeader.regDmn[1] = word; word = swab16(eep->baseEepHeader.rfSilent); eep->baseEepHeader.rfSilent = word; word = swab16(eep->baseEepHeader.blueToothOptions); eep->baseEepHeader.blueToothOptions = word; word = swab16(eep->baseEepHeader.deviceCap); eep->baseEepHeader.deviceCap = word; integer = swab32(eep->modalHeader.antCtrlCommon); eep->modalHeader.antCtrlCommon = integer; for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) { integer = swab32(eep->modalHeader.antCtrlChain[i]); eep->modalHeader.antCtrlChain[i] = integer; } for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) { word = swab16(eep->modalHeader.spurChans[i].spurChan); eep->modalHeader.spurChans[i].spurChan = word; } } if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER || ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { ath_err(common, "Bad EEPROM checksum 0x%x or revision 0x%04x\n", sum, ah->eep_ops->get_eeprom_ver(ah)); return -EINVAL; } return 0; #undef EEPROM_4K_SIZE } static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah, enum eeprom_param param) { struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k; struct modal_eep_4k_header *pModal = &eep->modalHeader; struct base_eep_header_4k *pBase = &eep->baseEepHeader; u16 ver_minor; ver_minor = pBase->version & AR5416_EEP_VER_MINOR_MASK; switch (param) { case EEP_NFTHRESH_2: return pModal->noiseFloorThreshCh[0]; case EEP_MAC_LSW: return get_unaligned_be16(pBase->macAddr); case EEP_MAC_MID: return get_unaligned_be16(pBase->macAddr + 2); case EEP_MAC_MSW: return get_unaligned_be16(pBase->macAddr + 4); case EEP_REG_0: return pBase->regDmn[0]; case EEP_OP_CAP: return 
pBase->deviceCap; case EEP_OP_MODE: return pBase->opCapFlags; case EEP_RF_SILENT: return pBase->rfSilent; case EEP_OB_2: return pModal->ob_0; case EEP_DB_2: return pModal->db1_1; case EEP_MINOR_REV: return ver_minor; case EEP_TX_MASK: return pBase->txMask; case EEP_RX_MASK: return pBase->rxMask; case EEP_FRAC_N_5G: return 0; case EEP_PWR_TABLE_OFFSET: return AR5416_PWR_TABLE_OFFSET_DB; case EEP_MODAL_VER: return pModal->version; case EEP_ANT_DIV_CTL1: return pModal->antdiv_ctl1; case EEP_TXGAIN_TYPE: return pBase->txGainType; case EEP_ANTENNA_GAIN_2G: return pModal->antennaGainCh[0]; default: return 0; } } static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah, struct ath9k_channel *chan) { struct ath_common *common = ath9k_hw_common(ah); struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k; struct cal_data_per_freq_4k *pRawDataset; u8 *pCalBChans = NULL; u16 pdGainOverlap_t2; static u8 pdadcValues[AR5416_NUM_PDADC_VALUES]; u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK]; u16 numPiers, i, j; u16 numXpdGain, xpdMask; u16 xpdGainValues[AR5416_EEP4K_NUM_PD_GAINS] = { 0, 0 }; u32 reg32, regOffset, regChainOffset; xpdMask = pEepData->modalHeader.xpdGain; if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= AR5416_EEP_MINOR_VER_2) { pdGainOverlap_t2 = pEepData->modalHeader.pdGainOverlap; } else { pdGainOverlap_t2 = (u16)(MS(REG_READ(ah, AR_PHY_TPCRG5), AR_PHY_TPCRG5_PD_GAIN_OVERLAP)); } pCalBChans = pEepData->calFreqPier2G; numPiers = AR5416_EEP4K_NUM_2G_CAL_PIERS; numXpdGain = 0; for (i = 1; i <= AR5416_PD_GAINS_IN_MASK; i++) { if ((xpdMask >> (AR5416_PD_GAINS_IN_MASK - i)) & 1) { if (numXpdGain >= AR5416_EEP4K_NUM_PD_GAINS) break; xpdGainValues[numXpdGain] = (u16)(AR5416_PD_GAINS_IN_MASK - i); numXpdGain++; } } REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_NUM_PD_GAIN, (numXpdGain - 1) & 0x3); REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_1, xpdGainValues[0]); REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_2, xpdGainValues[1]); 
REG_RMW_FIELD(ah, AR_PHY_TPCRG1, AR_PHY_TPCRG1_PD_GAIN_3, 0); for (i = 0; i < AR5416_EEP4K_MAX_CHAINS; i++) { regChainOffset = i * 0x1000; if (pEepData->baseEepHeader.txMask & (1 << i)) { pRawDataset = pEepData->calPierData2G[i]; ath9k_hw_get_gain_boundaries_pdadcs(ah, chan, pRawDataset, pCalBChans, numPiers, pdGainOverlap_t2, gainBoundaries, pdadcValues, numXpdGain); ENABLE_REGWRITE_BUFFER(ah); REG_WRITE(ah, AR_PHY_TPCRG5 + regChainOffset, SM(pdGainOverlap_t2, AR_PHY_TPCRG5_PD_GAIN_OVERLAP) | SM(gainBoundaries[0], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_1) | SM(gainBoundaries[1], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_2) | SM(gainBoundaries[2], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_3) | SM(gainBoundaries[3], AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_4)); regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset; for (j = 0; j < 32; j++) { reg32 = get_unaligned_le32(&pdadcValues[4 * j]); REG_WRITE(ah, regOffset, reg32); ath_dbg(common, EEPROM, "PDADC (%d,%4x): %4.4x %8.8x\n", i, regChainOffset, regOffset, reg32); ath_dbg(common, EEPROM, "PDADC: Chain %d | " "PDADC %3d Value %3d | " "PDADC %3d Value %3d | " "PDADC %3d Value %3d | " "PDADC %3d Value %3d |\n", i, 4 * j, pdadcValues[4 * j], 4 * j + 1, pdadcValues[4 * j + 1], 4 * j + 2, pdadcValues[4 * j + 2], 4 * j + 3, pdadcValues[4 * j + 3]); regOffset += 4; } REGWRITE_BUFFER_FLUSH(ah); } } } static void ath9k_hw_set_4k_power_per_rate_table(struct ath_hw *ah, struct ath9k_channel *chan, int16_t *ratesArray, u16 cfgCtl, u16 antenna_reduction, u16 powerLimit) { #define CMP_TEST_GRP \ (((cfgCtl & ~CTL_MODE_M)| (pCtlMode[ctlMode] & CTL_MODE_M)) == \ pEepData->ctlIndex[i]) \ || (((cfgCtl & ~CTL_MODE_M) | (pCtlMode[ctlMode] & CTL_MODE_M)) == \ ((pEepData->ctlIndex[i] & CTL_MODE_M) | SD_NO_CTL)) int i; u16 twiceMinEdgePower; u16 twiceMaxEdgePower; u16 scaledPower = 0, minCtlPower; u16 numCtlModes; const u16 *pCtlMode; u16 ctlMode, freq; struct chan_centers centers; struct cal_ctl_data_4k *rep; struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k; struct 
cal_target_power_leg targetPowerOfdm, targetPowerCck = { 0, { 0, 0, 0, 0} }; struct cal_target_power_leg targetPowerOfdmExt = { 0, { 0, 0, 0, 0} }, targetPowerCckExt = { 0, { 0, 0, 0, 0 } }; struct cal_target_power_ht targetPowerHt20, targetPowerHt40 = { 0, {0, 0, 0, 0} }; static const u16 ctlModesFor11g[] = { CTL_11B, CTL_11G, CTL_2GHT20, CTL_11B_EXT, CTL_11G_EXT, CTL_2GHT40 }; ath9k_hw_get_channel_centers(ah, chan, &centers); scaledPower = powerLimit - antenna_reduction; numCtlModes = ARRAY_SIZE(ctlModesFor11g) - SUB_NUM_CTL_MODES_AT_2G_40; pCtlMode = ctlModesFor11g; ath9k_hw_get_legacy_target_powers(ah, chan, pEepData->calTargetPowerCck, AR5416_NUM_2G_CCK_TARGET_POWERS, &targetPowerCck, 4, false); ath9k_hw_get_legacy_target_powers(ah, chan, pEepData->calTargetPower2G, AR5416_NUM_2G_20_TARGET_POWERS, &targetPowerOfdm, 4, false); ath9k_hw_get_target_powers(ah, chan, pEepData->calTargetPower2GHT20, AR5416_NUM_2G_20_TARGET_POWERS, &targetPowerHt20, 8, false); if (IS_CHAN_HT40(chan)) { numCtlModes = ARRAY_SIZE(ctlModesFor11g); ath9k_hw_get_target_powers(ah, chan, pEepData->calTargetPower2GHT40, AR5416_NUM_2G_40_TARGET_POWERS, &targetPowerHt40, 8, true); ath9k_hw_get_legacy_target_powers(ah, chan, pEepData->calTargetPowerCck, AR5416_NUM_2G_CCK_TARGET_POWERS, &targetPowerCckExt, 4, true); ath9k_hw_get_legacy_target_powers(ah, chan, pEepData->calTargetPower2G, AR5416_NUM_2G_20_TARGET_POWERS, &targetPowerOfdmExt, 4, true); } for (ctlMode = 0; ctlMode < numCtlModes; ctlMode++) { bool isHt40CtlMode = (pCtlMode[ctlMode] == CTL_5GHT40) || (pCtlMode[ctlMode] == CTL_2GHT40); if (isHt40CtlMode) freq = centers.synth_center; else if (pCtlMode[ctlMode] & EXT_ADDITIVE) freq = centers.ext_center; else freq = centers.ctl_center; twiceMaxEdgePower = MAX_RATE_POWER; for (i = 0; (i < AR5416_EEP4K_NUM_CTLS) && pEepData->ctlIndex[i]; i++) { if (CMP_TEST_GRP) { rep = &(pEepData->ctlData[i]); twiceMinEdgePower = ath9k_hw_get_max_edge_power( freq, rep->ctlEdges[ 
ar5416_get_ntxchains(ah->txchainmask) - 1], IS_CHAN_2GHZ(chan), AR5416_EEP4K_NUM_BAND_EDGES); if ((cfgCtl & ~CTL_MODE_M) == SD_NO_CTL) { twiceMaxEdgePower = min(twiceMaxEdgePower, twiceMinEdgePower); } else { twiceMaxEdgePower = twiceMinEdgePower; break; } } } minCtlPower = (u8)min(twiceMaxEdgePower, scaledPower); switch (pCtlMode[ctlMode]) { case CTL_11B: for (i = 0; i < ARRAY_SIZE(targetPowerCck.tPow2x); i++) { targetPowerCck.tPow2x[i] = min((u16)targetPowerCck.tPow2x[i], minCtlPower); } break; case CTL_11G: for (i = 0; i < ARRAY_SIZE(targetPowerOfdm.tPow2x); i++) { targetPowerOfdm.tPow2x[i] = min((u16)targetPowerOfdm.tPow2x[i], minCtlPower); } break; case CTL_2GHT20: for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) { targetPowerHt20.tPow2x[i] = min((u16)targetPowerHt20.tPow2x[i], minCtlPower); } break; case CTL_11B_EXT: targetPowerCckExt.tPow2x[0] = min((u16)targetPowerCckExt.tPow2x[0], minCtlPower); break; case CTL_11G_EXT: targetPowerOfdmExt.tPow2x[0] = min((u16)targetPowerOfdmExt.tPow2x[0], minCtlPower); break; case CTL_2GHT40: for (i = 0; i < ARRAY_SIZE(targetPowerHt40.tPow2x); i++) { targetPowerHt40.tPow2x[i] = min((u16)targetPowerHt40.tPow2x[i], minCtlPower); } break; default: break; } } ratesArray[rate6mb] = ratesArray[rate9mb] = ratesArray[rate12mb] = ratesArray[rate18mb] = ratesArray[rate24mb] = targetPowerOfdm.tPow2x[0]; ratesArray[rate36mb] = targetPowerOfdm.tPow2x[1]; ratesArray[rate48mb] = targetPowerOfdm.tPow2x[2]; ratesArray[rate54mb] = targetPowerOfdm.tPow2x[3]; ratesArray[rateXr] = targetPowerOfdm.tPow2x[0]; for (i = 0; i < ARRAY_SIZE(targetPowerHt20.tPow2x); i++) ratesArray[rateHt20_0 + i] = targetPowerHt20.tPow2x[i]; ratesArray[rate1l] = targetPowerCck.tPow2x[0]; ratesArray[rate2s] = ratesArray[rate2l] = targetPowerCck.tPow2x[1]; ratesArray[rate5_5s] = ratesArray[rate5_5l] = targetPowerCck.tPow2x[2]; ratesArray[rate11s] = ratesArray[rate11l] = targetPowerCck.tPow2x[3]; if (IS_CHAN_HT40(chan)) { for (i = 0; i < 
ARRAY_SIZE(targetPowerHt40.tPow2x); i++) { ratesArray[rateHt40_0 + i] = targetPowerHt40.tPow2x[i]; } ratesArray[rateDupOfdm] = targetPowerHt40.tPow2x[0]; ratesArray[rateDupCck] = targetPowerHt40.tPow2x[0]; ratesArray[rateExtOfdm] = targetPowerOfdmExt.tPow2x[0]; ratesArray[rateExtCck] = targetPowerCckExt.tPow2x[0]; } #undef CMP_TEST_GRP } static void ath9k_hw_4k_set_txpower(struct ath_hw *ah, struct ath9k_channel *chan, u16 cfgCtl, u8 twiceAntennaReduction, u8 powerLimit, bool test) { struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k; struct modal_eep_4k_header *pModal = &pEepData->modalHeader; int16_t ratesArray[Ar5416RateSize]; u8 ht40PowerIncForPdadc = 2; int i; memset(ratesArray, 0, sizeof(ratesArray)); if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= AR5416_EEP_MINOR_VER_2) { ht40PowerIncForPdadc = pModal->ht40PowerIncForPdadc; } ath9k_hw_set_4k_power_per_rate_table(ah, chan, &ratesArray[0], cfgCtl, twiceAntennaReduction, powerLimit); ath9k_hw_set_4k_power_cal_table(ah, chan); regulatory->max_power_level = 0; for (i = 0; i < ARRAY_SIZE(ratesArray); i++) { if (ratesArray[i] > MAX_RATE_POWER) ratesArray[i] = MAX_RATE_POWER; if (ratesArray[i] > regulatory->max_power_level) regulatory->max_power_level = ratesArray[i]; } if (test) return; for (i = 0; i < Ar5416RateSize; i++) ratesArray[i] -= AR5416_PWR_TABLE_OFFSET_DB * 2; ENABLE_REGWRITE_BUFFER(ah); /* OFDM power per rate */ REG_WRITE(ah, AR_PHY_POWER_TX_RATE1, ATH9K_POW_SM(ratesArray[rate18mb], 24) | ATH9K_POW_SM(ratesArray[rate12mb], 16) | ATH9K_POW_SM(ratesArray[rate9mb], 8) | ATH9K_POW_SM(ratesArray[rate6mb], 0)); REG_WRITE(ah, AR_PHY_POWER_TX_RATE2, ATH9K_POW_SM(ratesArray[rate54mb], 24) | ATH9K_POW_SM(ratesArray[rate48mb], 16) | ATH9K_POW_SM(ratesArray[rate36mb], 8) | ATH9K_POW_SM(ratesArray[rate24mb], 0)); /* CCK power per rate */ REG_WRITE(ah, AR_PHY_POWER_TX_RATE3, ATH9K_POW_SM(ratesArray[rate2s], 24) | 
ATH9K_POW_SM(ratesArray[rate2l], 16) | ATH9K_POW_SM(ratesArray[rateXr], 8) | ATH9K_POW_SM(ratesArray[rate1l], 0)); REG_WRITE(ah, AR_PHY_POWER_TX_RATE4, ATH9K_POW_SM(ratesArray[rate11s], 24) | ATH9K_POW_SM(ratesArray[rate11l], 16) | ATH9K_POW_SM(ratesArray[rate5_5s], 8) | ATH9K_POW_SM(ratesArray[rate5_5l], 0)); /* HT20 power per rate */ REG_WRITE(ah, AR_PHY_POWER_TX_RATE5, ATH9K_POW_SM(ratesArray[rateHt20_3], 24) | ATH9K_POW_SM(ratesArray[rateHt20_2], 16) | ATH9K_POW_SM(ratesArray[rateHt20_1], 8) | ATH9K_POW_SM(ratesArray[rateHt20_0], 0)); REG_WRITE(ah, AR_PHY_POWER_TX_RATE6, ATH9K_POW_SM(ratesArray[rateHt20_7], 24) | ATH9K_POW_SM(ratesArray[rateHt20_6], 16) | ATH9K_POW_SM(ratesArray[rateHt20_5], 8) | ATH9K_POW_SM(ratesArray[rateHt20_4], 0)); /* HT40 power per rate */ if (IS_CHAN_HT40(chan)) { REG_WRITE(ah, AR_PHY_POWER_TX_RATE7, ATH9K_POW_SM(ratesArray[rateHt40_3] + ht40PowerIncForPdadc, 24) | ATH9K_POW_SM(ratesArray[rateHt40_2] + ht40PowerIncForPdadc, 16) | ATH9K_POW_SM(ratesArray[rateHt40_1] + ht40PowerIncForPdadc, 8) | ATH9K_POW_SM(ratesArray[rateHt40_0] + ht40PowerIncForPdadc, 0)); REG_WRITE(ah, AR_PHY_POWER_TX_RATE8, ATH9K_POW_SM(ratesArray[rateHt40_7] + ht40PowerIncForPdadc, 24) | ATH9K_POW_SM(ratesArray[rateHt40_6] + ht40PowerIncForPdadc, 16) | ATH9K_POW_SM(ratesArray[rateHt40_5] + ht40PowerIncForPdadc, 8) | ATH9K_POW_SM(ratesArray[rateHt40_4] + ht40PowerIncForPdadc, 0)); REG_WRITE(ah, AR_PHY_POWER_TX_RATE9, ATH9K_POW_SM(ratesArray[rateExtOfdm], 24) | ATH9K_POW_SM(ratesArray[rateExtCck], 16) | ATH9K_POW_SM(ratesArray[rateDupOfdm], 8) | ATH9K_POW_SM(ratesArray[rateDupCck], 0)); } REGWRITE_BUFFER_FLUSH(ah); } static void ath9k_hw_4k_set_gain(struct ath_hw *ah, struct modal_eep_4k_header *pModal, struct ar5416_eeprom_4k *eep, u8 txRxAttenLocal) { REG_WRITE(ah, AR_PHY_SWITCH_CHAIN_0, pModal->antCtrlChain[0]); REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), (REG_READ(ah, AR_PHY_TIMING_CTRL4(0)) & ~(AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF | AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF)) 
| SM(pModal->iqCalICh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF) | SM(pModal->iqCalQCh[0], AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF)); if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= AR5416_EEP_MINOR_VER_3) { txRxAttenLocal = pModal->txRxAttenCh[0]; REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ, AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, pModal->bswMargin[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ, AR_PHY_GAIN_2GHZ_XATTEN1_DB, pModal->bswAtten[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ, AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN, pModal->xatten2Margin[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ, AR_PHY_GAIN_2GHZ_XATTEN2_DB, pModal->xatten2Db[0]); /* Set the block 1 value to block 0 value */ REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000, AR_PHY_GAIN_2GHZ_XATTEN1_MARGIN, pModal->bswMargin[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000, AR_PHY_GAIN_2GHZ_XATTEN1_DB, pModal->bswAtten[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000, AR_PHY_GAIN_2GHZ_XATTEN2_MARGIN, pModal->xatten2Margin[0]); REG_RMW_FIELD(ah, AR_PHY_GAIN_2GHZ + 0x1000, AR_PHY_GAIN_2GHZ_XATTEN2_DB, pModal->xatten2Db[0]); } REG_RMW_FIELD(ah, AR_PHY_RXGAIN, AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal); REG_RMW_FIELD(ah, AR_PHY_RXGAIN, AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]); REG_RMW_FIELD(ah, AR_PHY_RXGAIN + 0x1000, AR9280_PHY_RXGAIN_TXRX_ATTEN, txRxAttenLocal); REG_RMW_FIELD(ah, AR_PHY_RXGAIN + 0x1000, AR9280_PHY_RXGAIN_TXRX_MARGIN, pModal->rxTxMarginCh[0]); } /* * Read EEPROM header info and program the device for correct operation * given the channel value. 
*/
/*
 * Program the baseband/RF registers from the 4K-EEPROM modal header for
 * the given channel: antenna switch/diversity, output-bias (ob) and
 * driver-bias (db1/db2) analog fields, Tx/Rx timing, CCA thresholds and
 * the optional baseband desired-scale attenuation.
 */
static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
					 struct ath9k_channel *chan)
{
	struct modal_eep_4k_header *pModal;
	struct ar5416_eeprom_4k *eep = &ah->eeprom.map4k;
	struct base_eep_header_4k *pBase = &eep->baseEepHeader;
	u8 txRxAttenLocal;
	u8 ob[5], db1[5], db2[5];
	u8 ant_div_control1, ant_div_control2;
	u8 bb_desired_scale;
	u32 regVal;

	pModal = &eep->modalHeader;
	/* Default Tx/Rx attenuation; overridden from EEPROM in set_gain(). */
	txRxAttenLocal = 23;

	REG_WRITE(ah, AR_PHY_SWITCH_COM, pModal->antCtrlCommon);

	/* Single chain for 4K EEPROM*/
	ath9k_hw_4k_set_gain(ah, pModal, eep, txRxAttenLocal);

	/* Initialize Ant Diversity settings from EEPROM */
	if (pModal->version >= 3) {
		ant_div_control1 = pModal->antdiv_ctl1;
		ant_div_control2 = pModal->antdiv_ctl2;

		regVal = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
		regVal &= (~(AR_PHY_9285_ANT_DIV_CTL_ALL));

		/* Scatter the packed EEPROM control bits into the register. */
		regVal |= SM(ant_div_control1,
			     AR_PHY_9285_ANT_DIV_CTL);
		regVal |= SM(ant_div_control2,
			     AR_PHY_9285_ANT_DIV_ALT_LNACONF);
		regVal |= SM((ant_div_control2 >> 2),
			     AR_PHY_9285_ANT_DIV_MAIN_LNACONF);
		regVal |= SM((ant_div_control1 >> 1),
			     AR_PHY_9285_ANT_DIV_ALT_GAINTB);
		regVal |= SM((ant_div_control1 >> 2),
			     AR_PHY_9285_ANT_DIV_MAIN_GAINTB);
		REG_WRITE(ah, AR_PHY_MULTICHAIN_GAIN_CTL, regVal);
		/* Read-backs after write: presumably to flush/settle the
		 * register write — TODO confirm against HW requirements. */
		regVal = REG_READ(ah, AR_PHY_MULTICHAIN_GAIN_CTL);
		regVal = REG_READ(ah, AR_PHY_CCK_DETECT);
		regVal &= (~AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);
		regVal |= SM((ant_div_control1 >> 3),
			     AR_PHY_CCK_DETECT_BB_ENABLE_ANT_FAST_DIV);

		REG_WRITE(ah, AR_PHY_CCK_DETECT, regVal);
		regVal = REG_READ(ah, AR_PHY_CCK_DETECT);
	}

	/* Collect per-rate-group bias values; layout depends on the
	 * modal header version. */
	if (pModal->version >= 2) {
		ob[0] = pModal->ob_0;
		ob[1] = pModal->ob_1;
		ob[2] = pModal->ob_2;
		ob[3] = pModal->ob_3;
		ob[4] = pModal->ob_4;
		db1[0] = pModal->db1_0;
		db1[1] = pModal->db1_1;
		db1[2] = pModal->db1_2;
		db1[3] = pModal->db1_3;
		db1[4] = pModal->db1_4;
		db2[0] = pModal->db2_0;
		db2[1] = pModal->db2_1;
		db2[2] = pModal->db2_2;
		db2[3] = pModal->db2_3;
		db2[4] = pModal->db2_4;
	} else if (pModal->version == 1) {
		/* v1 carries only two values per group; replicate the
		 * second one across the remaining slots. */
		ob[0] = pModal->ob_0;
		ob[1] = ob[2] = ob[3] = ob[4] = pModal->ob_1;
		db1[0] = pModal->db1_0;
		db1[1] = db1[2] = db1[3] = db1[4] = pModal->db1_1;
		db2[0] = pModal->db2_0;
		db2[1] = db2[2] = db2[3] = db2[4] = pModal->db2_1;
	} else {
		int i;
		for (i = 0; i < 5; i++) {
			ob[i] = pModal->ob_0;
			db1[i] = pModal->db1_0;
			/* NOTE(review): db2 seeded from db1_0, not db2_0 —
			 * matches the original; verify intent upstream. */
			db2[i] = pModal->db1_0;
		}
	}

	/* AR9271 and AR9285 expose the bias fields in different analog
	 * shift registers. */
	if (AR_SREV_9271(ah)) {
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9271_AN_RF2G3_OB_cck,
					  AR9271_AN_RF2G3_OB_cck_S,
					  ob[0]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9271_AN_RF2G3_OB_psk,
					  AR9271_AN_RF2G3_OB_psk_S,
					  ob[1]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9271_AN_RF2G3_OB_qam,
					  AR9271_AN_RF2G3_OB_qam_S,
					  ob[2]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9271_AN_RF2G3_DB_1,
					  AR9271_AN_RF2G3_DB_1_S,
					  db1[0]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G4,
					  AR9271_AN_RF2G4_DB_2,
					  AR9271_AN_RF2G4_DB_2_S,
					  db2[0]);
	} else {
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9285_AN_RF2G3_OB_0,
					  AR9285_AN_RF2G3_OB_0_S,
					  ob[0]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9285_AN_RF2G3_OB_1,
					  AR9285_AN_RF2G3_OB_1_S,
					  ob[1]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9285_AN_RF2G3_OB_2,
					  AR9285_AN_RF2G3_OB_2_S,
					  ob[2]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9285_AN_RF2G3_OB_3,
					  AR9285_AN_RF2G3_OB_3_S,
					  ob[3]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9285_AN_RF2G3_OB_4,
					  AR9285_AN_RF2G3_OB_4_S,
					  ob[4]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9285_AN_RF2G3_DB1_0,
					  AR9285_AN_RF2G3_DB1_0_S,
					  db1[0]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9285_AN_RF2G3_DB1_1,
					  AR9285_AN_RF2G3_DB1_1_S,
					  db1[1]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G3,
					  AR9285_AN_RF2G3_DB1_2,
					  AR9285_AN_RF2G3_DB1_2_S,
					  db1[2]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G4,
					  AR9285_AN_RF2G4_DB1_3,
					  AR9285_AN_RF2G4_DB1_3_S,
					  db1[3]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G4,
					  AR9285_AN_RF2G4_DB1_4,
					  AR9285_AN_RF2G4_DB1_4_S,
					  db1[4]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G4,
					  AR9285_AN_RF2G4_DB2_0,
					  AR9285_AN_RF2G4_DB2_0_S,
					  db2[0]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G4,
					  AR9285_AN_RF2G4_DB2_1,
					  AR9285_AN_RF2G4_DB2_1_S,
					  db2[1]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G4,
					  AR9285_AN_RF2G4_DB2_2,
					  AR9285_AN_RF2G4_DB2_2_S,
					  db2[2]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G4,
					  AR9285_AN_RF2G4_DB2_3,
					  AR9285_AN_RF2G4_DB2_3_S,
					  db2[3]);
		ath9k_hw_analog_shift_rmw(ah,
					  AR9285_AN_RF2G4,
					  AR9285_AN_RF2G4_DB2_4,
					  AR9285_AN_RF2G4_DB2_4_S,
					  db2[4]);
	}

	/* Rx settling / ADC desired size from the modal header. */
	REG_RMW_FIELD(ah, AR_PHY_SETTLING,
		      AR_PHY_SETTLING_SWITCH, pModal->switchSettling);
	REG_RMW_FIELD(ah, AR_PHY_DESIRED_SZ,
		      AR_PHY_DESIRED_SZ_ADC, pModal->adcDesiredSize);

	/* XPA on/off timing relative to Tx frame boundaries. */
	REG_WRITE(ah, AR_PHY_RF_CTL4,
		  SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAA_OFF) |
		  SM(pModal->txEndToXpaOff, AR_PHY_RF_CTL4_TX_END_XPAB_OFF) |
		  SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAA_ON) |
		  SM(pModal->txFrameToXpaOn, AR_PHY_RF_CTL4_FRAME_XPAB_ON));

	REG_RMW_FIELD(ah, AR_PHY_RF_CTL3,
		      AR_PHY_TX_END_TO_A2_RX_ON, pModal->txEndToRxOn);

	/* NOTE(review): duplicates the write above for AR9271 v1.0 —
	 * matches the original source; verify whether intentional. */
	if (AR_SREV_9271_10(ah))
		REG_RMW_FIELD(ah, AR_PHY_RF_CTL3,
			      AR_PHY_TX_END_TO_A2_RX_ON, pModal->txEndToRxOn);

	/* CCA (clear channel assessment) thresholds. */
	REG_RMW_FIELD(ah, AR_PHY_CCA,
		      AR9280_PHY_CCA_THRESH62, pModal->thresh62);
	REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0,
		      AR_PHY_EXT_CCA0_THRESH62, pModal->thresh62);

	if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
	    AR5416_EEP_MINOR_VER_2) {
		REG_RMW_FIELD(ah, AR_PHY_RF_CTL2,
			      AR_PHY_TX_END_DATA_START,
			      pModal->txFrameToDataStart);
		REG_RMW_FIELD(ah, AR_PHY_RF_CTL2,
			      AR_PHY_TX_END_PA_ON, pModal->txFrameToPaOn);
	}

	if ((eep->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
	    AR5416_EEP_MINOR_VER_3) {
		if (IS_CHAN_HT40(chan))
			REG_RMW_FIELD(ah, AR_PHY_SETTLING,
				      AR_PHY_SETTLING_SWITCH,
				      pModal->swSettleHt40);
	}

	/* Optional baseband desired-scale attenuation: replicate the
	 * 5-bit scale value into each 5-bit field selected by mask. */
	bb_desired_scale = (pModal->bb_scale_smrt_antenna &
			    EEP_4K_BB_DESIRED_SCALE_MASK);
	if ((pBase->txGainType == 0) && (bb_desired_scale != 0)) {
		u32 pwrctrl, mask, clr;

		mask = BIT(0)|BIT(5)|BIT(10)|BIT(15)|BIT(20)|BIT(25);
		pwrctrl = mask * bb_desired_scale;
		clr = mask * 0x1f;
		REG_RMW(ah, AR_PHY_TX_PWRCTRL8, pwrctrl, clr);
		REG_RMW(ah, AR_PHY_TX_PWRCTRL10, pwrctrl, clr);
		REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL12, pwrctrl, clr);

		mask = BIT(0)|BIT(5)|BIT(15);
		pwrctrl = mask * bb_desired_scale;
		clr = mask * 0x1f;
		REG_RMW(ah, AR_PHY_TX_PWRCTRL9, pwrctrl, clr);

		mask = BIT(0)|BIT(5);
		pwrctrl = mask * bb_desired_scale;
		clr = mask * 0x1f;
		REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL11, pwrctrl, clr);
		REG_RMW(ah, AR_PHY_CH0_TX_PWRCTRL13, pwrctrl, clr);
	}
}

/*
 * Return the spur channel for mitigation index @i, honouring the
 * configured spur mode (disabled / ioctl-supplied / EEPROM-supplied).
 */
static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
{
#define EEP_MAP4K_SPURCHAN \
	(ah->eeprom.map4k.modalHeader.spurChans[i].spurChan)
	struct ath_common *common = ath9k_hw_common(ah);
	u16 spur_val = AR_NO_SPUR;

	ath_dbg(common, ANI, "Getting spur idx:%d is2Ghz:%d val:%x\n",
		i, is2GHz, ah->config.spurchans[i][is2GHz]);

	switch (ah->config.spurmode) {
	case SPUR_DISABLE:
		break;
	case SPUR_ENABLE_IOCTL:
		spur_val = ah->config.spurchans[i][is2GHz];
		ath_dbg(common, ANI,
			"Getting spur val from new loc. %d\n", spur_val);
		break;
	case SPUR_ENABLE_EEPROM:
		spur_val = EEP_MAP4K_SPURCHAN;
		break;
	}

	return spur_val;
#undef EEP_MAP4K_SPURCHAN
}

/* eeprom_ops vtable for the 4K EEPROM map. */
const struct eeprom_ops eep_4k_ops = {
	.check_eeprom		= ath9k_hw_4k_check_eeprom,
	.get_eeprom		= ath9k_hw_4k_get_eeprom,
	.fill_eeprom		= ath9k_hw_4k_fill_eeprom,
	.dump_eeprom		= ath9k_hw_4k_dump_eeprom,
	.get_eeprom_ver		= ath9k_hw_4k_get_eeprom_ver,
	.get_eeprom_rev		= ath9k_hw_4k_get_eeprom_rev,
	.set_board_values	= ath9k_hw_4k_set_board_values,
	.set_txpower		= ath9k_hw_4k_set_txpower,
	.get_spur_channel	= ath9k_hw_4k_get_spur_channel
};
gpl-2.0
CyanogenMod/android_kernel_huawei_msm8928
lib/mpi/mpi-cmp.c
4973
1647
/* mpi-cmp.c - MPI functions
 * Copyright (C) 1998, 1999 Free Software Foundation, Inc.
 *
 * This file is part of GnuPG.
 *
 * GnuPG is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * GnuPG is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA
 */

#include "mpi-internal.h"

/*
 * mpi_cmp_ui - compare the multi-precision integer @u against the
 * unsigned long value @v.
 *
 * Returns <0 if u < v, 0 if u == v, >0 if u > v.
 */
int mpi_cmp_ui(MPI u, unsigned long v)
{
	mpi_limb_t limb = v;

	mpi_normalize(u);

	/*
	 * Fix: handle a normalized zero (nlimbs == 0) explicitly.  The
	 * original code only special-cased "both are zero" and otherwise
	 * fell through to read u->d[0], which is not a valid limb when
	 * nlimbs == 0.
	 */
	if (u->nlimbs == 0) {
		if (v == 0)
			return 0;
		else
			return -1;
	}

	/* A negative u is smaller than any unsigned value. */
	if (u->sign)
		return -1;

	/* More than one limb: |u| necessarily exceeds a single limb. */
	if (u->nlimbs > 1)
		return 1;

	if (u->d[0] == limb)
		return 0;
	else if (u->d[0] > limb)
		return 1;
	else
		return -1;
}

/*
 * mpi_cmp - three-way compare of the signed multi-precision
 * integers @u and @v.
 *
 * Returns <0 if u < v, 0 if u == v, >0 if u > v.
 */
int mpi_cmp(MPI u, MPI v)
{
	mpi_size_t usize, vsize;
	int cmp;

	mpi_normalize(u);
	mpi_normalize(v);

	usize = u->nlimbs;
	vsize = v->nlimbs;

	/* Different signs decide immediately (zero carries sign == 0). */
	if (!u->sign && v->sign)
		return 1;
	if (u->sign && !v->sign)
		return -1;

	/* Both non-negative: more limbs means the larger value. */
	if (usize != vsize && !u->sign && !v->sign)
		return usize - vsize;

	/*
	 * Both negative: the operand with MORE limbs has the larger
	 * magnitude and is therefore the SMALLER value.  Fix: the
	 * original returned "vsize + usize", which is always positive
	 * and thus wrongly claimed u > v whenever u had more limbs
	 * than v; the correct result is vsize - usize.
	 */
	if (usize != vsize && u->sign && v->sign)
		return vsize - usize;

	/* Same limb count; both zero? */
	if (!usize)
		return 0;

	/* Compare magnitudes; invert the verdict for negative values. */
	cmp = mpihelp_cmp(u->d, v->d, usize);
	if (!cmp)
		return 0;
	if ((cmp < 0 ? 1 : 0) == (u->sign ? 1 : 0))
		return 1;
	return -1;
}
gpl-2.0
DmitryADP/diff_qc750
kernel/arch/mips/dec/wbflush.c
4973
2109
/*
 * Setup the right wbflush routine for the different DECstations.
 *
 * Created with information from:
 *      DECstation 3100 Desktop Workstation Functional Specification
 *      DECstation 5000/200 KN02 System Module Functional Specification
 *      mipsel-linux-objdump --disassemble vmunix | grep "wbflush" :-)
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1998 Harald Koerfgen
 * Copyright (C) 2002 Maciej W. Rozycki
 */
#include <linux/init.h>

#include <asm/bootinfo.h>
#include <asm/system.h>
#include <asm/wbflush.h>

static void wbflush_kn01(void);
static void wbflush_kn210(void);
static void wbflush_mips(void);

/* Function pointer dispatched at boot to the machine-specific flush. */
void (*__wbflush) (void);

/*
 * Select the write-back-buffer flush routine that matches the DECstation
 * model reported in mips_machtype.  Unknown models fall back to the
 * generic uncached-read flush (wbflush_mips).
 */
void __init wbflush_setup(void)
{
	switch (mips_machtype) {
	case MACH_DS23100:
	case MACH_DS5000_200:	/* DS5000 3max */
		__wbflush = wbflush_kn01;
		break;
	case MACH_DS5100:	/* DS5100 MIPSMATE */
		__wbflush = wbflush_kn210;
		break;
	case MACH_DS5000_1XX:	/* DS5000/100 3min */
	case MACH_DS5000_XX:	/* Personal DS5000/2x */
	case MACH_DS5000_2X0:	/* DS5000/240 3max+ */
	case MACH_DS5900:	/* DS5900 bigmax */
	default:
		__wbflush = wbflush_mips;
		break;
	}
}

/*
 * For the DS3100 and DS5000/200 the R2020/R3220 writeback buffer functions
 * as part of Coprocessor 0.
 *
 * Spin on the CP0 condition flag (bc0f branches to itself) until the
 * write buffer reports empty.
 */
static void wbflush_kn01(void)
{
	asm(".set\tpush\n\t"
	    ".set\tnoreorder\n\t"
	    "1:\tbc0f\t1b\n\t"
	    "nop\n\t"
	    ".set\tpop");
}

/*
 * For the DS5100 the writeback buffer seems to be a part of Coprocessor 3.
 * But CP3 has to enabled first.
 *
 * Temporarily sets the CP3-usable bit in the status register ($12),
 * spins on the CP3 condition flag, then restores the original status.
 * Clobbers $2/$3, declared in the asm clobber list.
 */
static void wbflush_kn210(void)
{
	asm(".set\tpush\n\t"
	    ".set\tnoreorder\n\t"
	    "mfc0\t$2,$12\n\t"
	    "lui\t$3,0x8000\n\t"
	    "or\t$3,$2,$3\n\t"
	    "mtc0\t$3,$12\n\t"
	    "nop\n"
	    "1:\tbc3f\t1b\n\t"
	    "nop\n\t"
	    "mtc0\t$2,$12\n\t"
	    "nop\n\t"
	    ".set\tpop"
	    : : : "$2", "$3");
}

/*
 * I/O ASIC systems use a standard writeback buffer that gets flushed
 * upon an uncached read.
 */
static void wbflush_mips(void)
{
	__fast_iob();
}

#include <linux/module.h>

EXPORT_SYMBOL(__wbflush);
gpl-2.0
netarchy/nexus-s
drivers/staging/vt6656/usbpipe.c
5997
20616
/*
 * Copyright (c) 1996, 2003 VIA Networking Technologies, Inc.
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 *
 * File: usbpipe.c
 *
 * Purpose: Handle USB control endpoint
 *
 * Author: Warren Hsu
 *
 * Date: Mar. 29, 2005
 *
 * Functions:
 *      CONTROLnsRequestOut - Write variable length bytes to MEM/BB/MAC/EEPROM
 *      CONTROLnsRequestIn - Read variable length bytes from MEM/BB/MAC/EEPROM
 *      ControlvWriteByte - Write one byte to MEM/BB/MAC/EEPROM
 *      ControlvReadByte - Read one byte from MEM/BB/MAC/EEPROM
 *      ControlvMaskByte - Read one byte from MEM/BB/MAC/EEPROM and clear/set some bits in the same address
 *
 * Revision History:
 *      04-05-2004 Jerry Chen:  Initial release
 *      11-24-2004 Warren Hsu: Add ControlvWriteByte,ControlvReadByte,ControlvMaskByte
 *
 */

#include "int.h"
#include "rxtx.h"
#include "dpc.h"
#include "control.h"
#include "desc.h"
#include "device.h"

/*--------------------- Static Definitions -------------------------*/
//endpoint def
//endpoint 0: control
//endpoint 1: interrupt
//endpoint 2: read bulk
//endpoint 3: write bulk

//RequestType:
//#define REQUEST_OUT (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE) // 0x40
//#define REQUEST_IN (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE ) //0xc0
//static int msglevel =MSG_LEVEL_DEBUG;
static int msglevel =MSG_LEVEL_INFO;

/* Upper bound (in ms) for the synchronous control-transfer busy-wait. */
#define USB_CTL_WAIT 500 //ms

#ifndef URB_ASYNC_UNLINK
#define URB_ASYNC_UNLINK 0
#endif

/*--------------------- Static Classes ----------------------------*/

/*--------------------- Static Variables --------------------------*/

/*--------------------- Static Functions --------------------------*/
static void s_nsInterruptUsbIoCompleteRead(struct urb *urb);
static void s_nsBulkInUsbIoCompleteRead(struct urb *urb);
static void s_nsBulkOutIoCompleteWrite(struct urb *urb);
static void s_nsControlInUsbIoCompleteRead(struct urb *urb);
static void s_nsControlInUsbIoCompleteWrite(struct urb *urb);

/*--------------------- Export Variables --------------------------*/

/*--------------------- Export Functions --------------------------*/

/*
 * Synchronous vendor control-OUT transfer (bRequestType 0x40) using
 * usb_control_msg.  Fails fast when the device is disconnected, when an
 * async control write is already pending, or from interrupt context
 * (usb_control_msg sleeps).  Returns 0 on success, STATUS_FAILURE or a
 * negative usb_control_msg error otherwise.
 */
int PIPEnsControlOutAsyn(
     PSDevice     pDevice,
     BYTE         byRequest,
     WORD         wValue,
     WORD         wIndex,
     WORD         wLength,
     PBYTE        pbyBuffer
    )
{
    int ntStatus;

    if (pDevice->Flags & fMP_DISCONNECTED)
        return STATUS_FAILURE;

    if (pDevice->Flags & fMP_CONTROL_WRITES)
        return STATUS_FAILURE;

    if (in_interrupt()) {
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"in_interrupt return ..byRequest %x\n", byRequest);
        return STATUS_FAILURE;
    }

    ntStatus = usb_control_msg(
            pDevice->usb,
            usb_sndctrlpipe(pDevice->usb , 0),
            byRequest,
            0x40, // RequestType
            wValue,
            wIndex,
            (void *) pbyBuffer,
            wLength,
            HZ
          );
    if (ntStatus >= 0) {
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"usb_sndctrlpipe ntStatus= %d\n", ntStatus);
        ntStatus = 0;
    } else {
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"usb_sndctrlpipe fail, ntStatus= %d\n", ntStatus);
    }

    return ntStatus;
}

/*
 * Asynchronous vendor control-OUT transfer, then busy-waits (up to
 * USB_CTL_WAIT ms) for the completion handler to clear
 * fMP_CONTROL_WRITES.  Caller must hold pDevice->lock (it is dropped
 * with spin_unlock_irq around the wait and re-taken before return).
 */
int PIPEnsControlOut(
     PSDevice     pDevice,
     BYTE         byRequest,
     WORD         wValue,
     WORD         wIndex,
     WORD         wLength,
     PBYTE        pbyBuffer
    )
{
    int ntStatus = 0;
    int ii;

    if (pDevice->Flags & fMP_DISCONNECTED)
        return STATUS_FAILURE;

    if (pDevice->Flags & fMP_CONTROL_WRITES)
        return STATUS_FAILURE;

    pDevice->sUsbCtlRequest.bRequestType = 0x40;
    pDevice->sUsbCtlRequest.bRequest = byRequest;
    pDevice->sUsbCtlRequest.wValue = cpu_to_le16p(&wValue);
    pDevice->sUsbCtlRequest.wIndex = cpu_to_le16p(&wIndex);
    pDevice->sUsbCtlRequest.wLength = cpu_to_le16p(&wLength);
    pDevice->pControlURB->transfer_flags |= URB_ASYNC_UNLINK;
    pDevice->pControlURB->actual_length = 0;
    // Notice, pbyBuffer limited point to variable buffer, can't be constant.
    usb_fill_control_urb(pDevice->pControlURB, pDevice->usb,
             usb_sndctrlpipe(pDevice->usb , 0), (char *) &pDevice->sUsbCtlRequest,
             pbyBuffer, wLength, s_nsControlInUsbIoCompleteWrite, pDevice);

    ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC);
    if (ntStatus != 0) {
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"control send request submission failed: %d\n", ntStatus);
        return STATUS_FAILURE;
    }
    else {
        MP_SET_FLAG(pDevice, fMP_CONTROL_WRITES);
    }
    spin_unlock_irq(&pDevice->lock);
    for (ii = 0; ii <= USB_CTL_WAIT; ii ++) {

        if (pDevice->Flags & fMP_CONTROL_WRITES)
            mdelay(1);
        else
            break;

        if (ii >= USB_CTL_WAIT) {
            DBG_PRT(MSG_LEVEL_DEBUG,
                KERN_INFO "control send request submission timeout\n");
            spin_lock_irq(&pDevice->lock);
            MP_CLEAR_FLAG(pDevice, fMP_CONTROL_WRITES);
            return STATUS_FAILURE;
        }
    }
    spin_lock_irq(&pDevice->lock);

    return STATUS_SUCCESS;
}

/*
 * Asynchronous vendor control-IN transfer (bRequestType 0xC0) into
 * pbyBuffer, then busy-waits for fMP_CONTROL_READS to clear, mirroring
 * PIPEnsControlOut.  Caller must hold pDevice->lock.
 */
int PIPEnsControlIn(
     PSDevice     pDevice,
     BYTE         byRequest,
     WORD         wValue,
     WORD         wIndex,
     WORD         wLength,
       PBYTE   pbyBuffer
    )
{
    int ntStatus = 0;
    int ii;

    if (pDevice->Flags & fMP_DISCONNECTED)
        return STATUS_FAILURE;

    if (pDevice->Flags & fMP_CONTROL_READS)
        return STATUS_FAILURE;

    pDevice->sUsbCtlRequest.bRequestType = 0xC0;
    pDevice->sUsbCtlRequest.bRequest = byRequest;
    pDevice->sUsbCtlRequest.wValue = cpu_to_le16p(&wValue);
    pDevice->sUsbCtlRequest.wIndex = cpu_to_le16p(&wIndex);
    pDevice->sUsbCtlRequest.wLength = cpu_to_le16p(&wLength);
    pDevice->pControlURB->transfer_flags |= URB_ASYNC_UNLINK;
    pDevice->pControlURB->actual_length = 0;

    usb_fill_control_urb(pDevice->pControlURB, pDevice->usb,
             usb_rcvctrlpipe(pDevice->usb , 0), (char *) &pDevice->sUsbCtlRequest,
             pbyBuffer, wLength, s_nsControlInUsbIoCompleteRead, pDevice);

    ntStatus = usb_submit_urb(pDevice->pControlURB, GFP_ATOMIC);
    if (ntStatus != 0) {
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"control request submission failed: %d\n", ntStatus);
    }else {
        MP_SET_FLAG(pDevice, fMP_CONTROL_READS);
    }

    spin_unlock_irq(&pDevice->lock);
    for (ii = 0; ii <= USB_CTL_WAIT; ii ++) {

        if (pDevice->Flags & fMP_CONTROL_READS)
            mdelay(1);
        else
            break;

        if (ii >= USB_CTL_WAIT) {
            DBG_PRT(MSG_LEVEL_DEBUG,
                KERN_INFO "control rcv request submission timeout\n");
            spin_lock_irq(&pDevice->lock);
            MP_CLEAR_FLAG(pDevice, fMP_CONTROL_READS);
            return STATUS_FAILURE;
        }
    }
    spin_lock_irq(&pDevice->lock);

    return ntStatus;
}

/* Completion handler for the async control-OUT urb: logs the status and
 * clears fMP_CONTROL_WRITES so the submitting waiter can proceed. */
static void s_nsControlInUsbIoCompleteWrite(struct urb *urb)
{
    PSDevice pDevice;

    pDevice = urb->context;
    switch (urb->status) {
    case 0:
        break;
    case -EINPROGRESS:
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ctrl write urb status EINPROGRESS%d\n", urb->status);
        break;
    case -ENOENT:
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ctrl write urb status ENOENT %d\n", urb->status);
        break;
    default:
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ctrl write urb status %d\n", urb->status);
    }

    MP_CLEAR_FLAG(pDevice, fMP_CONTROL_WRITES);
}

/*
 * Description:
 *      Complete function of usb Control callback
 *
 * Parameters:
 *  In:
 *      pDevice     - Pointer to the adapter
 *
 *  Out:
 *      none
 *
 * Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
 *
 */
/* Completion handler for the async control-IN urb: logs the status and
 * clears fMP_CONTROL_READS. */
static void s_nsControlInUsbIoCompleteRead(struct urb *urb)
{
    PSDevice pDevice;

    pDevice = urb->context;
    switch (urb->status) {
    case 0:
        break;
    case -EINPROGRESS:
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ctrl read urb status EINPROGRESS%d\n", urb->status);
        break;
    case -ENOENT:
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ctrl read urb status = ENOENT %d\n", urb->status);
        break;
    default:
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"ctrl read urb status %d\n", urb->status);
    }

    MP_CLEAR_FLAG(pDevice, fMP_CONTROL_READS);
}

/*
 * Description:
 *      Allocates an usb interrupt in irp and calls USBD.
 *
 * Parameters:
 *  In:
 *      pDevice     - Pointer to the adapter
 *  Out:
 *      none
 *
 * Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
 *
 */
int PIPEnsInterruptRead(PSDevice pDevice)
{
    int ntStatus = STATUS_FAILURE;

    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsStartInterruptUsbRead()\n");

    if(pDevice->intBuf.bInUse == TRUE){
        return (STATUS_FAILURE);
    }
    pDevice->intBuf.bInUse = TRUE;
//    pDevice->bEventAvailable = FALSE;
    pDevice->ulIntInPosted++;

    //
    // Now that we have created the urb, we will send a
    // request to the USB device object.
    //
    pDevice->pInterruptURB->interval = pDevice->int_interval;

/* NOTE(review): the "interrupt" endpoint is read with a bulk urb on
 * endpoint 1 — matches the original driver; presumably deliberate. */
usb_fill_bulk_urb(pDevice->pInterruptURB,
		pDevice->usb,
		usb_rcvbulkpipe(pDevice->usb, 1),
		(void *) pDevice->intBuf.pDataBuf,
		MAX_INTERRUPT_SIZE,
		s_nsInterruptUsbIoCompleteRead,
		pDevice);

	ntStatus = usb_submit_urb(pDevice->pInterruptURB, GFP_ATOMIC);
    if (ntStatus != 0) {
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit int URB failed %d\n", ntStatus);
    }

    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"<----s_nsStartInterruptUsbRead Return(%x)\n",ntStatus);
    return ntStatus;
}

/*
 * Description:
 *      Complete function of usb interrupt in irp.
 *
 * Parameters:
 *  In:
 *      pDevice     - Pointer to the adapter
 *
 *  Out:
 *      none
 *
 * Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
 *
 */
static void s_nsInterruptUsbIoCompleteRead(struct urb *urb)
{
    PSDevice pDevice;
    int ntStatus;

    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsInterruptUsbIoCompleteRead\n");
    //
    // The context given to IoSetCompletionRoutine is the receive buffer object
    //
    pDevice = (PSDevice)urb->context;

    //
    // We have a number of cases:
    //      1) The USB read timed out and we received no data.
    //      2) The USB read timed out and we received some data.
    //      3) The USB read was successful and fully filled our irp buffer.
    //      4) The irp was cancelled.
    //      5) Some other failure from the USB device object.
    //
    ntStatus = urb->status;

    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_nsInterruptUsbIoCompleteRead Status %d\n", ntStatus);

    // if we were not successful, we need to free the int buffer for future use right here
    // otherwise interrupt data handler will free int buffer after it handle it.
    if (( ntStatus != STATUS_SUCCESS )) {
        pDevice->ulBulkInError++;
        pDevice->intBuf.bInUse = FALSE;

//        if (ntStatus == USBD_STATUS_CRC) {
//            pDevice->ulIntInContCRCError++;
//        }

//        if (ntStatus == STATUS_NOT_CONNECTED )
//        {
            /* Any failure stops the polling loop below. */
            pDevice->fKillEventPollingThread = TRUE;
//        }
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"IntUSBIoCompleteControl STATUS = %d\n", ntStatus );
    } else {
	    pDevice->ulIntInBytesRead += (unsigned long) urb->actual_length;
	    pDevice->ulIntInContCRCError = 0;
	    pDevice->bEventAvailable = TRUE;
	    INTnsProcessData(pDevice);
    }

    STAvUpdateUSBCounter(&pDevice->scStatistic.USB_InterruptStat, ntStatus);

    /* Re-arm the interrupt-in urb unless polling has been stopped. */
    if (pDevice->fKillEventPollingThread != TRUE) {
       usb_fill_bulk_urb(pDevice->pInterruptURB,
		      pDevice->usb,
		      usb_rcvbulkpipe(pDevice->usb, 1),
		     (void *) pDevice->intBuf.pDataBuf,
		     MAX_INTERRUPT_SIZE,
		     s_nsInterruptUsbIoCompleteRead,
		     pDevice);

	ntStatus = usb_submit_urb(pDevice->pInterruptURB, GFP_ATOMIC);
	if (ntStatus != 0) {
	    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit int URB failed %d\n", ntStatus);
           }
    }
    //
    // We return STATUS_MORE_PROCESSING_REQUIRED so that the completion
    // routine (IofCompleteRequest) will stop working on the irp.
    //
    return ;
}

/*
 * Description:
 *      Allocates an usb BulkIn irp and calls USBD.
 *
 * Parameters:
 *  In:
 *      pDevice     - Pointer to the adapter
 *  Out:
 *      none
 *
 * Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
 *
 */
int PIPEnsBulkInUsbRead(PSDevice pDevice, PRCB pRCB)
{
	int ntStatus = 0;
	struct urb *pUrb;

    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsStartBulkInUsbRead\n");

    if (pDevice->Flags & fMP_DISCONNECTED)
        return STATUS_FAILURE;

    pDevice->ulBulkInPosted++;

	pUrb = pRCB->pUrb;
    //
    // Now that we have created the urb, we will send a
    // request to the USB device object.
    //
    if (pRCB->skb == NULL) {
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"pRCB->skb is null \n");
        return ntStatus;
    }

	usb_fill_bulk_urb(pUrb,
		pDevice->usb,
		usb_rcvbulkpipe(pDevice->usb, 2),
		(void *) (pRCB->skb->data),
		MAX_TOTAL_SIZE_WITH_ALL_HEADERS,
		s_nsBulkInUsbIoCompleteRead,
		pRCB);

	ntStatus = usb_submit_urb(pUrb, GFP_ATOMIC);
	if (ntStatus != 0) {
		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit Rx URB failed %d\n", ntStatus);
		return STATUS_FAILURE ;
	}
    /* Receive-control-block now owned by the in-flight urb. */
    pRCB->Ref = 1;
    pRCB->bBoolInUse= TRUE;

    return ntStatus;
}

/*
 * Description:
 *      Complete function of usb BulkIn irp.
 *
 * Parameters:
 *  In:
 *      pDevice     - Pointer to the adapter
 *
 *  Out:
 *      none
 *
 * Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
 *
 */
static void s_nsBulkInUsbIoCompleteRead(struct urb *urb)
{
    PRCB    pRCB = (PRCB)urb->context;
    PSDevice pDevice = (PSDevice)pRCB->pDevice;
    unsigned long   bytesRead;
    BOOL    bIndicateReceive = FALSE;
    BOOL    bReAllocSkb = FALSE;
    int status;

    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsBulkInUsbIoCompleteRead\n");
    status = urb->status;
    bytesRead = urb->actual_length;

    if (status) {
        pDevice->ulBulkInError++;
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BULK In failed %d\n", status);

           pDevice->scStatistic.RxFcsErrCnt ++;
//todo...xxxxxx
//        if (status == USBD_STATUS_CRC) {
//            pDevice->ulBulkInContCRCError++;
//        }
//        if (status == STATUS_DEVICE_NOT_CONNECTED )
//        {
//            MP_SET_FLAG(pDevice, fMP_DISCONNECTED);
//        }
    } else {
        bIndicateReceive = TRUE;
        pDevice->ulBulkInContCRCError = 0;
        pDevice->ulBulkInBytesRead += bytesRead;

           pDevice->scStatistic.RxOkCnt ++;
    }

    STAvUpdateUSBCounter(&pDevice->scStatistic.USB_BulkInStat, status);

    if (bIndicateReceive) {
        spin_lock(&pDevice->lock);
        if (RXbBulkInProcessData(pDevice, pRCB, bytesRead) == TRUE)
            bReAllocSkb = TRUE;
        spin_unlock(&pDevice->lock);
    }
    /* Drop the urb's reference; free the RCB when nobody else holds it. */
    pRCB->Ref--;
    if (pRCB->Ref == 0)
    {
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"RxvFreeNormal %d \n",pDevice->NumRecvFreeList);
        spin_lock(&pDevice->lock);
        RXvFreeRCB(pRCB, bReAllocSkb);
        spin_unlock(&pDevice->lock);
    }

    return;
}

/*
 * Description:
 *      Allocates an usb BulkOut irp and calls USBD.
 *
 * Parameters:
 *  In:
 *      pDevice     - Pointer to the adapter
 *  Out:
 *      none
 *
 * Return Value: STATUS_INSUFFICIENT_RESOURCES or result of IoCallDriver
 *
 */
int PIPEnsSendBulkOut(PSDevice pDevice, PUSB_SEND_CONTEXT pContext)
{
    int status;
    struct urb          *pUrb;

    pDevice->bPWBitOn = FALSE;

/*
    if (pDevice->pPendingBulkOutContext != NULL) {
        pDevice->NumContextsQueued++;
        EnqueueContext(pDevice->FirstTxContextQueue, pDevice->LastTxContextQueue, pContext);
        status = STATUS_PENDING;
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send pending!\n");
        return status;
    }
*/

    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"s_nsSendBulkOut\n");

    if (MP_IS_READY(pDevice) && (pDevice->Flags & fMP_POST_WRITES)) {

        pUrb = pContext->pUrb;
        pDevice->ulBulkOutPosted++;
//        pDevice->pPendingBulkOutContext = pContext;
        usb_fill_bulk_urb(
        	    pUrb,
        		pDevice->usb,
		    usb_sndbulkpipe(pDevice->usb, 3),
		    (void *) &(pContext->Data[0]),
        		pContext->uBufLen,
        		s_nsBulkOutIoCompleteWrite,
        		pContext);

    	status = usb_submit_urb(pUrb, GFP_ATOMIC);
    	if (status != 0)
    	{
    		DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Submit Tx URB failed %d\n", status);
    		return STATUS_FAILURE;
    	}
        return STATUS_PENDING;
    }
    else {
        /* Device not ready: release the context back to the pool. */
        pContext->bBoolInUse = FALSE;
        return STATUS_RESOURCES;
    }
}

/*
 * Description: s_nsBulkOutIoCompleteWrite
 *     1a) Indicate to the protocol the status of the write.
 *     1b) Return ownership of the packet to the protocol.
 *
 *     2)  If any more packets are queue for sending, send another packet
 *         to USBD.
 *         If the attempt to send the packet to the driver fails,
 *         return ownership of the packet to the protocol and
 *         try another packet (until one succeeds).
 *
 * Parameters:
 *  In:
 *      pdoUsbDevObj  - pointer to the USB device object which
 *                      completed the irp
 *      pIrp          - the irp which was completed by the
 *                      device object
 *      pContext      - the context given to IoSetCompletionRoutine
 *                      before calling IoCallDriver on the irp
 *                      The pContext is a pointer to the USB device object.
 *  Out:
 *      none
 *
 * Return Value: STATUS_MORE_PROCESSING_REQUIRED - allows the completion routine
 *               (IofCompleteRequest) to stop working on the irp.
 *
 */
static void s_nsBulkOutIoCompleteWrite(struct urb *urb)
{
    PSDevice            pDevice;
    int status;
    CONTEXT_TYPE        ContextType;
    unsigned long       ulBufLen;
    PUSB_SEND_CONTEXT   pContext;

    DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"---->s_nsBulkOutIoCompleteWrite\n");
    //
    // The context given to IoSetCompletionRoutine is an USB_CONTEXT struct
    //
    pContext = (PUSB_SEND_CONTEXT) urb->context;
    ASSERT( NULL != pContext );

    pDevice = pContext->pDevice;
    ContextType = pContext->Type;
    ulBufLen = pContext->uBufLen;

    if (!netif_device_present(pDevice->dev))
	    return;

   //
    // Perform various IRP, URB, and buffer 'sanity checks'
    //

    status = urb->status;
    //we should have failed, succeeded, or cancelled, but NOT be pending
    STAvUpdateUSBCounter(&pDevice->scStatistic.USB_BulkOutStat, status);

    if(status == STATUS_SUCCESS) {
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Write %d bytes\n",(int)ulBufLen);
        pDevice->ulBulkOutBytesWrite += ulBufLen;
        pDevice->ulBulkOutContCRCError = 0;
	pDevice->nTxDataTimeCout = 0;

    } else {
        DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"BULK Out failed %d\n", status);
        pDevice->ulBulkOutError++;
    }

//    pDevice->ulCheckForHangCount = 0;
//    pDevice->pPendingBulkOutContext = NULL;

    if ( CONTEXT_DATA_PACKET == ContextType ) {
        // Indicate to the protocol the status of the sent packet and return
        // ownership of the packet.
        if (pContext->pPacket != NULL) {
	        dev_kfree_skb_irq(pContext->pPacket);
            pContext->pPacket = NULL;
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"tx  %d bytes\n",(int)ulBufLen);
        }

        pDevice->dev->trans_start = jiffies;

        if (status == STATUS_SUCCESS) {
            pDevice->packetsSent++;
        }
        else {
            DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO"Send USB error! [%08xh]\n", status);
            pDevice->packetsSentDropped++;
        }

    }
    if (pDevice->bLinkPass == TRUE) {
        if (netif_queue_stopped(pDevice->dev))
            netif_wake_queue(pDevice->dev);
    }
    /* Hand the send context back to the free pool. */
    pContext->bBoolInUse = FALSE;
    return;
}
gpl-2.0
AnguisCaptor/PwnKernel_Hammerhead
arch/sh/lib64/dbg.c
9069
7231
/*--------------------------------------------------------------------------
--
-- Identity : Linux50 Debug Funcions
--
-- File : arch/sh/lib64/dbg.c
--
-- Copyright 2000, 2001 STMicroelectronics Limited.
-- Copyright 2004 Richard Curnow (evt_debug etc)
--
--------------------------------------------------------------------------*/
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <asm/mmu_context.h>

typedef u64 regType_t;

/* Read a configuration register via the SH-5 getcfg instruction.
 * The result must land in r2 (hard register constraint). */
static regType_t getConfigReg(u64 id)
{
	register u64 reg __asm__("r2");
	asm volatile ("getcfg %1, 0, %0":"=r" (reg):"r"(id));
	return (reg);
}

/* ======================================================================= */

/* Page-size mnemonics indexed by the 2-bit size field of a PTE. */
static char *szTab[] = { "4k", "64k", "1M", "512M" };
/* Protection mnemonics indexed by the 4-bit protection field:
 * U = user-accessible, W = writable, X = executable, R = readable. */
static char *protTab[] = { "----",
	"---R",
	"--X-",
	"--XR",
	"-W--",
	"-W-R",
	"-WX-",
	"-WXR",
	"U---",
	"U--R",
	"U-X-",
	"U-XR",
	"UW--",
	"UW-R",
	"UWX-",
	"UWXR"
};

#define ITLB_BASE	0x00000000
#define DTLB_BASE	0x00800000
#define MAX_TLBs		64
/* PTE High */
#define GET_VALID(pte)		((pte) & 0x1)
#define GET_SHARED(pte)		((pte) & 0x2)
#define GET_ASID(pte)		((pte >> 2) & 0x0ff)
#define GET_EPN(pte)		((pte) & 0xfffff000)

/* PTE Low */
#define GET_CBEHAVIOR(pte)	((pte) & 0x3)
#define GET_PAGE_SIZE(pte)	szTab[((pte >> 3) & 0x3)]
#define GET_PROTECTION(pte)	protTab[((pte >> 6) & 0xf)]
#define GET_PPN(pte)		((pte) & 0xfffff000)

#define PAGE_1K_MASK		0x00000000
#define PAGE_4K_MASK		0x00000010
#define PAGE_64K_MASK		0x00000080
#define MMU_PAGESIZE_MASK	(PAGE_64K_MASK | PAGE_4K_MASK)
#define PAGE_1MB_MASK		MMU_PAGESIZE_MASK

#define PAGE_1K		(1024)
#define PAGE_4K		(1024 * 4)
#define PAGE_64K	(1024 * 64)
#define PAGE_1MB	(1024 * 1024)

#define HOW_TO_READ_TLB_CONTENT \
	"[ ID] PPN EPN ASID Share CB P.Size PROT.\n"

/* Decode and print one TLB entry at config-space address @tlb.
 * With @single_print set, the entry is printed even when invalid and a
 * column header is emitted first; otherwise invalid entries are skipped. */
void print_single_tlb(unsigned long tlb, int single_print)
{
	regType_t pteH;
	regType_t pteL;
	unsigned int valid, shared, asid, epn, cb, ppn;
	char *pSize;
	char *pProt;

	/*
	** in case of single print <single_print> is true, this implies:
	** 1) print the TLB in any case also if NOT VALID
	** 2) print out the header
	*/

	pteH = getConfigReg(tlb);
	valid = GET_VALID(pteH);
	if (single_print)
		printk(HOW_TO_READ_TLB_CONTENT);
	else if (!valid)
		return;

	/* PTE-low lives at the next config-space slot. */
	pteL = getConfigReg(tlb + 1);

	shared = GET_SHARED(pteH);
	asid = GET_ASID(pteH);
	epn = GET_EPN(pteH);
	cb = GET_CBEHAVIOR(pteL);
	pSize = GET_PAGE_SIZE(pteL);
	pProt = GET_PROTECTION(pteL);
	ppn = GET_PPN(pteL);
	/* 'u' prefix marks an unused (invalid) entry. */
	printk("[%c%2ld] 0x%08x 0x%08x %03d %02x %02x %4s %s\n",
	       ((valid) ? ' ' : 'u'), ((tlb & 0x0ffff) / TLB_STEP),
	       ppn, epn, asid, shared, cb, pSize, pProt);
}

/* Dump all valid data-TLB entries. */
void print_dtlb(void)
{
	int count;
	unsigned long tlb;

	printk(" ================= SH-5 D-TLBs Status ===================\n");
	printk(HOW_TO_READ_TLB_CONTENT);
	tlb = DTLB_BASE;
	for (count = 0; count < MAX_TLBs; count++, tlb += TLB_STEP)
		print_single_tlb(tlb, 0);
	printk
	    (" =============================================================\n");
}

/* Dump all valid instruction-TLB entries. */
void print_itlb(void)
{
	int count;
	unsigned long tlb;

	printk(" ================= SH-5 I-TLBs Status ===================\n");
	printk(HOW_TO_READ_TLB_CONTENT);
	tlb = ITLB_BASE;
	for (count = 0; count < MAX_TLBs; count++, tlb += TLB_STEP)
		print_single_tlb(tlb, 0);
	printk
	    (" =============================================================\n");
}

/* Dump the full exception context: event/control registers (read twice
 * into ah/al etc., then split into high and low 32-bit halves for
 * printing), the saved pt_regs, and finally both TLBs. */
void show_excp_regs(char *from, int trapnr, int signr, struct pt_regs *regs)
{

	unsigned long long ah, al, bh, bl, ch, cl;

	printk("\n");
	printk("EXCEPTION - %s: task %d; Linux trap # %d; signal = %d\n",
	       ((from) ? from : "???"), current->pid, trapnr, signr);

	asm volatile ("getcon " __EXPEVT ", %0":"=r"(ah));
	asm volatile ("getcon " __EXPEVT ", %0":"=r"(al));
	ah = (ah) >> 32;
	al = (al) & 0xffffffff;
	asm volatile ("getcon " __KCR1 ", %0":"=r"(bh));
	asm volatile ("getcon " __KCR1 ", %0":"=r"(bl));
	bh = (bh) >> 32;
	bl = (bl) & 0xffffffff;
	asm volatile ("getcon " __INTEVT ", %0":"=r"(ch));
	asm volatile ("getcon " __INTEVT ", %0":"=r"(cl));
	ch = (ch) >> 32;
	cl = (cl) & 0xffffffff;
	printk("EXPE: %08Lx%08Lx KCR1: %08Lx%08Lx INTE: %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	asm volatile ("getcon " __PEXPEVT ", %0":"=r"(ah));
	asm volatile ("getcon " __PEXPEVT ", %0":"=r"(al));
	ah = (ah) >> 32;
	al = (al) & 0xffffffff;
	asm volatile ("getcon " __PSPC ", %0":"=r"(bh));
	asm volatile ("getcon " __PSPC ", %0":"=r"(bl));
	bh = (bh) >> 32;
	bl = (bl) & 0xffffffff;
	asm volatile ("getcon " __PSSR ", %0":"=r"(ch));
	asm volatile ("getcon " __PSSR ", %0":"=r"(cl));
	ch = (ch) >> 32;
	cl = (cl) & 0xffffffff;
	printk("PEXP: %08Lx%08Lx PSPC: %08Lx%08Lx PSSR: %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->pc) >> 32;
	al = (regs->pc) & 0xffffffff;
	bh = (regs->regs[18]) >> 32;
	bl = (regs->regs[18]) & 0xffffffff;
	ch = (regs->regs[15]) >> 32;
	cl = (regs->regs[15]) & 0xffffffff;
	printk("PC : %08Lx%08Lx LINK: %08Lx%08Lx SP : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->sr) >> 32;
	al = (regs->sr) & 0xffffffff;
	asm volatile ("getcon " __TEA ", %0":"=r"(bh));
	asm volatile ("getcon " __TEA ", %0":"=r"(bl));
	bh = (bh) >> 32;
	bl = (bl) & 0xffffffff;
	asm volatile ("getcon " __KCR0 ", %0":"=r"(ch));
	asm volatile ("getcon " __KCR0 ", %0":"=r"(cl));
	ch = (ch) >> 32;
	cl = (cl) & 0xffffffff;
	printk("SR : %08Lx%08Lx TEA : %08Lx%08Lx KCR0: %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[0]) >> 32;
	al = (regs->regs[0]) & 0xffffffff;
	bh = (regs->regs[1]) >> 32;
	bl = (regs->regs[1]) & 0xffffffff;
	ch = (regs->regs[2]) >> 32;
	cl = (regs->regs[2]) & 0xffffffff;
	printk("R0 : %08Lx%08Lx R1 : %08Lx%08Lx R2 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[3]) >> 32;
	al = (regs->regs[3]) & 0xffffffff;
	bh = (regs->regs[4]) >> 32;
	bl = (regs->regs[4]) & 0xffffffff;
	ch = (regs->regs[5]) >> 32;
	cl = (regs->regs[5]) & 0xffffffff;
	printk("R3 : %08Lx%08Lx R4 : %08Lx%08Lx R5 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[6]) >> 32;
	al = (regs->regs[6]) & 0xffffffff;
	bh = (regs->regs[7]) >> 32;
	bl = (regs->regs[7]) & 0xffffffff;
	ch = (regs->regs[8]) >> 32;
	cl = (regs->regs[8]) & 0xffffffff;
	printk("R6 : %08Lx%08Lx R7 : %08Lx%08Lx R8 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);

	ah = (regs->regs[9]) >> 32;
	al = (regs->regs[9]) & 0xffffffff;
	bh = (regs->regs[10]) >> 32;
	bl = (regs->regs[10]) & 0xffffffff;
	ch = (regs->regs[11]) >> 32;
	cl = (regs->regs[11]) & 0xffffffff;
	printk("R9 : %08Lx%08Lx R10 : %08Lx%08Lx R11 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);
	printk("....\n");

	ah = (regs->tregs[0]) >> 32;
	al = (regs->tregs[0]) & 0xffffffff;
	bh = (regs->tregs[1]) >> 32;
	bl = (regs->tregs[1]) & 0xffffffff;
	ch = (regs->tregs[2]) >> 32;
	cl = (regs->tregs[2]) & 0xffffffff;
	printk("T0 : %08Lx%08Lx T1 : %08Lx%08Lx T2 : %08Lx%08Lx\n",
	       ah, al, bh, bl, ch, cl);
	printk("....\n");

	print_dtlb();
	print_itlb();
}
gpl-2.0
CheckYourScreen/Arsenic.Kernel_onyx-oos
drivers/video/console/fbcon_ud.c
9837
11960
/* * linux/drivers/video/console/fbcon_ud.c -- Software Rotation - 180 degrees * * Copyright (C) 2005 Antonino Daplas <adaplas @pol.net> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ #include <linux/module.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/fb.h> #include <linux/vt_kern.h> #include <linux/console.h> #include <asm/types.h> #include "fbcon.h" #include "fbcon_rotate.h" /* * Rotation 180 degrees */ static void ud_update_attr(u8 *dst, u8 *src, int attribute, struct vc_data *vc) { int i, offset = (vc->vc_font.height < 10) ? 1 : 2; int width = (vc->vc_font.width + 7) >> 3; unsigned int cellsize = vc->vc_font.height * width; u8 c; offset = offset * width; for (i = 0; i < cellsize; i++) { c = src[i]; if (attribute & FBCON_ATTRIBUTE_UNDERLINE && i < offset) c = 0xff; if (attribute & FBCON_ATTRIBUTE_BOLD) c |= c << 1; if (attribute & FBCON_ATTRIBUTE_REVERSE) c = ~c; dst[i] = c; } } static void ud_bmove(struct vc_data *vc, struct fb_info *info, int sy, int sx, int dy, int dx, int height, int width) { struct fbcon_ops *ops = info->fbcon_par; struct fb_copyarea area; u32 vyres = GETVYRES(ops->p->scrollmode, info); u32 vxres = GETVXRES(ops->p->scrollmode, info); area.sy = vyres - ((sy + height) * vc->vc_font.height); area.sx = vxres - ((sx + width) * vc->vc_font.width); area.dy = vyres - ((dy + height) * vc->vc_font.height); area.dx = vxres - ((dx + width) * vc->vc_font.width); area.height = height * vc->vc_font.height; area.width = width * vc->vc_font.width; info->fbops->fb_copyarea(info, &area); } static void ud_clear(struct vc_data *vc, struct fb_info *info, int sy, int sx, int height, int width) { struct fbcon_ops *ops = info->fbcon_par; struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 
13 : 12; u32 vyres = GETVYRES(ops->p->scrollmode, info); u32 vxres = GETVXRES(ops->p->scrollmode, info); region.color = attr_bgcol_ec(bgshift,vc,info); region.dy = vyres - ((sy + height) * vc->vc_font.height); region.dx = vxres - ((sx + width) * vc->vc_font.width); region.width = width * vc->vc_font.width; region.height = height * vc->vc_font.height; region.rop = ROP_COPY; info->fbops->fb_fillrect(info, &region); } static inline void ud_putcs_aligned(struct vc_data *vc, struct fb_info *info, const u16 *s, u32 attr, u32 cnt, u32 d_pitch, u32 s_pitch, u32 cellsize, struct fb_image *image, u8 *buf, u8 *dst) { struct fbcon_ops *ops = info->fbcon_par; u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; u32 idx = vc->vc_font.width >> 3; u8 *src; while (cnt--) { src = ops->fontbuffer + (scr_readw(s--) & charmask)*cellsize; if (attr) { ud_update_attr(buf, src, attr, vc); src = buf; } if (likely(idx == 1)) __fb_pad_aligned_buffer(dst, d_pitch, src, idx, image->height); else fb_pad_aligned_buffer(dst, d_pitch, src, idx, image->height); dst += s_pitch; } info->fbops->fb_imageblit(info, image); } static inline void ud_putcs_unaligned(struct vc_data *vc, struct fb_info *info, const u16 *s, u32 attr, u32 cnt, u32 d_pitch, u32 s_pitch, u32 cellsize, struct fb_image *image, u8 *buf, u8 *dst) { struct fbcon_ops *ops = info->fbcon_par; u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; u32 shift_low = 0, mod = vc->vc_font.width % 8; u32 shift_high = 8; u32 idx = vc->vc_font.width >> 3; u8 *src; while (cnt--) { src = ops->fontbuffer + (scr_readw(s--) & charmask)*cellsize; if (attr) { ud_update_attr(buf, src, attr, vc); src = buf; } fb_pad_unaligned_buffer(dst, d_pitch, src, idx, image->height, shift_high, shift_low, mod); shift_low += mod; dst += (shift_low >= 8) ? 
s_pitch : s_pitch - 1; shift_low &= 7; shift_high = 8 - shift_low; } info->fbops->fb_imageblit(info, image); } static void ud_putcs(struct vc_data *vc, struct fb_info *info, const unsigned short *s, int count, int yy, int xx, int fg, int bg) { struct fb_image image; struct fbcon_ops *ops = info->fbcon_par; u32 width = (vc->vc_font.width + 7)/8; u32 cellsize = width * vc->vc_font.height; u32 maxcnt = info->pixmap.size/cellsize; u32 scan_align = info->pixmap.scan_align - 1; u32 buf_align = info->pixmap.buf_align - 1; u32 mod = vc->vc_font.width % 8, cnt, pitch, size; u32 attribute = get_attribute(info, scr_readw(s)); u8 *dst, *buf = NULL; u32 vyres = GETVYRES(ops->p->scrollmode, info); u32 vxres = GETVXRES(ops->p->scrollmode, info); if (!ops->fontbuffer) return; image.fg_color = fg; image.bg_color = bg; image.dy = vyres - ((yy * vc->vc_font.height) + vc->vc_font.height); image.dx = vxres - ((xx + count) * vc->vc_font.width); image.height = vc->vc_font.height; image.depth = 1; if (attribute) { buf = kmalloc(cellsize, GFP_KERNEL); if (!buf) return; } s += count - 1; while (count) { if (count > maxcnt) cnt = maxcnt; else cnt = count; image.width = vc->vc_font.width * cnt; pitch = ((image.width + 7) >> 3) + scan_align; pitch &= ~scan_align; size = pitch * image.height + buf_align; size &= ~buf_align; dst = fb_get_buffer_offset(info, &info->pixmap, size); image.data = dst; if (!mod) ud_putcs_aligned(vc, info, s, attribute, cnt, pitch, width, cellsize, &image, buf, dst); else ud_putcs_unaligned(vc, info, s, attribute, cnt, pitch, width, cellsize, &image, buf, dst); image.dx += image.width; count -= cnt; s -= cnt; xx += cnt; } /* buf is always NULL except when in monochrome mode, so in this case it's a gain to check buf against NULL even though kfree() handles NULL pointers just fine */ if (unlikely(buf)) kfree(buf); } static void ud_clear_margins(struct vc_data *vc, struct fb_info *info, int bottom_only) { unsigned int cw = vc->vc_font.width; unsigned int ch = 
vc->vc_font.height; unsigned int rw = info->var.xres - (vc->vc_cols*cw); unsigned int bh = info->var.yres - (vc->vc_rows*ch); struct fb_fillrect region; int bgshift = (vc->vc_hi_font_mask) ? 13 : 12; region.color = attr_bgcol_ec(bgshift,vc,info); region.rop = ROP_COPY; if (rw && !bottom_only) { region.dy = 0; region.dx = info->var.xoffset; region.width = rw; region.height = info->var.yres_virtual; info->fbops->fb_fillrect(info, &region); } if (bh) { region.dy = info->var.yoffset; region.dx = info->var.xoffset; region.height = bh; region.width = info->var.xres; info->fbops->fb_fillrect(info, &region); } } static void ud_cursor(struct vc_data *vc, struct fb_info *info, int mode, int softback_lines, int fg, int bg) { struct fb_cursor cursor; struct fbcon_ops *ops = info->fbcon_par; unsigned short charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff; int w = (vc->vc_font.width + 7) >> 3, c; int y = real_y(ops->p, vc->vc_y); int attribute, use_sw = (vc->vc_cursor_type & 0x10); int err = 1, dx, dy; char *src; u32 vyres = GETVYRES(ops->p->scrollmode, info); u32 vxres = GETVXRES(ops->p->scrollmode, info); if (!ops->fontbuffer) return; cursor.set = 0; if (softback_lines) { if (y + softback_lines >= vc->vc_rows) { mode = CM_ERASE; ops->cursor_flash = 0; return; } else y += softback_lines; } c = scr_readw((u16 *) vc->vc_pos); attribute = get_attribute(info, c); src = ops->fontbuffer + ((c & charmask) * (w * vc->vc_font.height)); if (ops->cursor_state.image.data != src || ops->cursor_reset) { ops->cursor_state.image.data = src; cursor.set |= FB_CUR_SETIMAGE; } if (attribute) { u8 *dst; dst = kmalloc(w * vc->vc_font.height, GFP_ATOMIC); if (!dst) return; kfree(ops->cursor_data); ops->cursor_data = dst; ud_update_attr(dst, src, attribute, vc); src = dst; } if (ops->cursor_state.image.fg_color != fg || ops->cursor_state.image.bg_color != bg || ops->cursor_reset) { ops->cursor_state.image.fg_color = fg; ops->cursor_state.image.bg_color = bg; cursor.set |= FB_CUR_SETCMAP; } if 
(ops->cursor_state.image.height != vc->vc_font.height || ops->cursor_state.image.width != vc->vc_font.width || ops->cursor_reset) { ops->cursor_state.image.height = vc->vc_font.height; ops->cursor_state.image.width = vc->vc_font.width; cursor.set |= FB_CUR_SETSIZE; } dy = vyres - ((y * vc->vc_font.height) + vc->vc_font.height); dx = vxres - ((vc->vc_x * vc->vc_font.width) + vc->vc_font.width); if (ops->cursor_state.image.dx != dx || ops->cursor_state.image.dy != dy || ops->cursor_reset) { ops->cursor_state.image.dx = dx; ops->cursor_state.image.dy = dy; cursor.set |= FB_CUR_SETPOS; } if (ops->cursor_state.hot.x || ops->cursor_state.hot.y || ops->cursor_reset) { ops->cursor_state.hot.x = cursor.hot.y = 0; cursor.set |= FB_CUR_SETHOT; } if (cursor.set & FB_CUR_SETSIZE || vc->vc_cursor_type != ops->p->cursor_shape || ops->cursor_state.mask == NULL || ops->cursor_reset) { char *mask = kmalloc(w*vc->vc_font.height, GFP_ATOMIC); int cur_height, size, i = 0; u8 msk = 0xff; if (!mask) return; kfree(ops->cursor_state.mask); ops->cursor_state.mask = mask; ops->p->cursor_shape = vc->vc_cursor_type; cursor.set |= FB_CUR_SETSHAPE; switch (ops->p->cursor_shape & CUR_HWMASK) { case CUR_NONE: cur_height = 0; break; case CUR_UNDERLINE: cur_height = (vc->vc_font.height < 10) ? 1 : 2; break; case CUR_LOWER_THIRD: cur_height = vc->vc_font.height/3; break; case CUR_LOWER_HALF: cur_height = vc->vc_font.height >> 1; break; case CUR_TWO_THIRDS: cur_height = (vc->vc_font.height << 1)/3; break; case CUR_BLOCK: default: cur_height = vc->vc_font.height; break; } size = cur_height * w; while (size--) mask[i++] = msk; size = (vc->vc_font.height - cur_height) * w; while (size--) mask[i++] = ~msk; } switch (mode) { case CM_ERASE: ops->cursor_state.enable = 0; break; case CM_DRAW: case CM_MOVE: default: ops->cursor_state.enable = (use_sw) ? 
0 : 1;
		break;
	}

	/* Snapshot the cached cursor state into the request we hand to the driver. */
	cursor.image.data = src;
	cursor.image.fg_color = ops->cursor_state.image.fg_color;
	cursor.image.bg_color = ops->cursor_state.image.bg_color;
	cursor.image.dx = ops->cursor_state.image.dx;
	cursor.image.dy = ops->cursor_state.image.dy;
	cursor.image.height = ops->cursor_state.image.height;
	cursor.image.width = ops->cursor_state.image.width;
	cursor.hot.x = ops->cursor_state.hot.x;
	cursor.hot.y = ops->cursor_state.hot.y;
	cursor.mask = ops->cursor_state.mask;
	cursor.enable = ops->cursor_state.enable;
	cursor.image.depth = 1;
	cursor.rop = ROP_XOR;

	if (info->fbops->fb_cursor)
		err = info->fbops->fb_cursor(info, &cursor);

	/* err starts at 1, so with no fb_cursor hook (or on its failure)
	 * we fall back to the software-drawn cursor. */
	if (err)
		soft_cursor(info, &cursor);

	ops->cursor_reset = 0;
}

/*
 * Pan the display for the 180-degree-rotated console: the x/y offsets are
 * mirrored against the virtual resolution before being handed to
 * fb_pan_display(), then the actually-applied values are read back.
 */
static int ud_update_start(struct fb_info *info)
{
	struct fbcon_ops *ops = info->fbcon_par;
	int xoffset, yoffset;
	u32 vyres = GETVYRES(ops->p->scrollmode, info);
	u32 vxres = GETVXRES(ops->p->scrollmode, info);
	int err;

	xoffset = vxres - info->var.xres - ops->var.xoffset;
	yoffset = vyres - info->var.yres - ops->var.yoffset;
	if (yoffset < 0)
		yoffset += vyres;
	ops->var.xoffset = xoffset;
	ops->var.yoffset = yoffset;
	err = fb_pan_display(info, &ops->var);
	ops->var.xoffset = info->var.xoffset;
	ops->var.yoffset = info->var.yoffset;
	ops->var.vmode = info->var.vmode;
	return err;
}

/* Install the 180-degree rotation handlers into the fbcon ops vector. */
void fbcon_rotate_ud(struct fbcon_ops *ops)
{
	ops->bmove = ud_bmove;
	ops->clear = ud_clear;
	ops->putcs = ud_putcs;
	ops->clear_margins = ud_clear_margins;
	ops->cursor = ud_cursor;
	ops->update_start = ud_update_start;
}
EXPORT_SYMBOL(fbcon_rotate_ud);

MODULE_AUTHOR("Antonino Daplas <adaplas@pol.net>");
MODULE_DESCRIPTION("Console Rotation (180 degrees) Support");
MODULE_LICENSE("GPL");
gpl-2.0
mkasick/android_kernel_samsung_jfltespr
lib/crc8.c
10349
2461
/* * Copyright (c) 2011 Broadcom Corporation * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <linux/crc8.h> #include <linux/printk.h> /* * crc8_populate_msb - fill crc table for given polynomial in reverse bit order. * * table: table to be filled. * polynomial: polynomial for which table is to be filled. */ void crc8_populate_msb(u8 table[CRC8_TABLE_SIZE], u8 polynomial) { int i, j; const u8 msbit = 0x80; u8 t = msbit; table[0] = 0; for (i = 1; i < CRC8_TABLE_SIZE; i *= 2) { t = (t << 1) ^ (t & msbit ? polynomial : 0); for (j = 0; j < i; j++) table[i+j] = table[j] ^ t; } } EXPORT_SYMBOL(crc8_populate_msb); /* * crc8_populate_lsb - fill crc table for given polynomial in regular bit order. * * table: table to be filled. * polynomial: polynomial for which table is to be filled. */ void crc8_populate_lsb(u8 table[CRC8_TABLE_SIZE], u8 polynomial) { int i, j; u8 t = 1; table[0] = 0; for (i = (CRC8_TABLE_SIZE >> 1); i; i >>= 1) { t = (t >> 1) ^ (t & 1 ? polynomial : 0); for (j = 0; j < CRC8_TABLE_SIZE; j += 2*i) table[i+j] = table[j] ^ t; } } EXPORT_SYMBOL(crc8_populate_lsb); /* * crc8 - calculate a crc8 over the given input data. * * table: crc table used for calculation. * pdata: pointer to data buffer. 
* nbytes: number of bytes in data buffer. * crc: previous returned crc8 value. */ u8 crc8(const u8 table[CRC8_TABLE_SIZE], u8 *pdata, size_t nbytes, u8 crc) { /* loop over the buffer data */ while (nbytes-- > 0) crc = table[(crc ^ *pdata++) & 0xff]; return crc; } EXPORT_SYMBOL(crc8); MODULE_DESCRIPTION("CRC8 (by Williams, Ross N.) function"); MODULE_AUTHOR("Broadcom Corporation"); MODULE_LICENSE("Dual BSD/GPL");
gpl-2.0
1DeMaCr/android_kernel_samsung_codina
drivers/leds/leds-lm3530.c
110
13007
/* * Copyright (C) 2011 ST-Ericsson SA. * Copyright (C) 2009 Motorola, Inc. * * License Terms: GNU General Public License v2 * * Simple driver for National Semiconductor LM3530 Backlight driver chip * * Author: Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com> * based on leds-lm3530.c by Dan Murphy <D.Murphy@motorola.com> */ #include <linux/i2c.h> #include <linux/leds.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/input.h> #include <linux/led-lm3530.h> #include <linux/types.h> #include <linux/regulator/consumer.h> #include <linux/gpio.h> #define LM3530_LED_DEV "lcd-backlight" #define LM3530_NAME "lm3530-led" #define LM3530_GEN_CONFIG 0x10 #define LM3530_ALS_CONFIG 0x20 #define LM3530_BRT_RAMP_RATE 0x30 #define LM3530_ALS_ZONE_REG 0x40 #define LM3530_ALS_IMP_SELECT 0x41 #define LM3530_BRT_CTRL_REG 0xA0 #define LM3530_ALS_ZB0_REG 0x60 #define LM3530_ALS_ZB1_REG 0x61 #define LM3530_ALS_ZB2_REG 0x62 #define LM3530_ALS_ZB3_REG 0x63 #define LM3530_ALS_Z0T_REG 0x70 #define LM3530_ALS_Z1T_REG 0x71 #define LM3530_ALS_Z2T_REG 0x72 #define LM3530_ALS_Z3T_REG 0x73 #define LM3530_ALS_Z4T_REG 0x74 #define LM3530_REG_MAX 15 /* General Control Register */ #define LM3530_EN_I2C_SHIFT (0) #define LM3530_RAMP_LAW_SHIFT (1) #define LM3530_MAX_CURR_SHIFT (2) #define LM3530_EN_PWM_SHIFT (5) #define LM3530_PWM_POL_SHIFT (6) #define LM3530_EN_PWM_SIMPLE_SHIFT (7) #define LM3530_ENABLE_I2C (1 << LM3530_EN_I2C_SHIFT) #define LM3530_ENABLE_PWM (1 << LM3530_EN_PWM_SHIFT) #define LM3530_POL_LOW (1 << LM3530_PWM_POL_SHIFT) #define LM3530_ENABLE_PWM_SIMPLE (1 << LM3530_EN_PWM_SIMPLE_SHIFT) /* ALS Config Register Options */ #define LM3530_ALS_AVG_TIME_SHIFT (0) #define LM3530_EN_ALS_SHIFT (3) #define LM3530_ALS_SEL_SHIFT (5) #define LM3530_ENABLE_ALS (3 << LM3530_EN_ALS_SHIFT) /* Brightness Ramp Rate Register */ #define LM3530_BRT_RAMP_FALL_SHIFT (0) #define LM3530_BRT_RAMP_RISE_SHIFT (3) /* ALS Resistor Select */ #define LM3530_ALS1_IMP_SHIFT (0) #define 
LM3530_ALS2_IMP_SHIFT (4) /* Zone Boundary Register defaults */ #define LM3530_ALS_ZB_MAX (4) #define LM3530_ALS_WINDOW_mV (1000) #define LM3530_ALS_OFFSET_mV (4) /* Zone Target Register defaults */ #define LM3530_DEF_ZT_0 (0x7F) #define LM3530_DEF_ZT_1 (0x66) #define LM3530_DEF_ZT_2 (0x4C) #define LM3530_DEF_ZT_3 (0x33) #define LM3530_DEF_ZT_4 (0x19) struct lm3530_mode_map { const char *mode; enum lm3530_mode mode_val; }; static struct lm3530_mode_map mode_map[] = { { "man", LM3530_BL_MODE_MANUAL }, { "als", LM3530_BL_MODE_ALS }, { "pwm", LM3530_BL_MODE_PWM }, }; /** * struct lm3530_data * @led_dev: led class device * @client: i2c client * @pdata: LM3530 platform data * @mode: mode of operation - manual, ALS, PWM * @regulator: regulator * @brighness: previous brightness value * @hw_en_gpio: GPIO line for LM3530 HWEN * @enable: regulator is enabled */ struct lm3530_data { struct led_classdev led_dev; struct i2c_client *client; struct lm3530_platform_data *pdata; enum lm3530_mode mode; struct regulator *regulator; enum led_brightness brightness; int hw_en_gpio; bool enable; }; static const u8 lm3530_reg[LM3530_REG_MAX] = { LM3530_GEN_CONFIG, LM3530_ALS_CONFIG, LM3530_BRT_RAMP_RATE, LM3530_ALS_ZONE_REG, LM3530_ALS_IMP_SELECT, LM3530_BRT_CTRL_REG, LM3530_ALS_ZB0_REG, LM3530_ALS_ZB1_REG, LM3530_ALS_ZB2_REG, LM3530_ALS_ZB3_REG, LM3530_ALS_Z0T_REG, LM3530_ALS_Z1T_REG, LM3530_ALS_Z2T_REG, LM3530_ALS_Z3T_REG, LM3530_ALS_Z4T_REG, }; static int lm3530_get_mode_from_str(const char *str) { int i; for (i = 0; i < ARRAY_SIZE(mode_map); i++) if (sysfs_streq(str, mode_map[i].mode)) return mode_map[i].mode_val; return -1; } static int lm3530_init_registers(struct lm3530_data *drvdata) { int ret = 0; int i; u8 gen_config; u8 als_config = 0; u8 brt_ramp; u8 als_imp_sel = 0; u8 brightness; u8 reg_val[LM3530_REG_MAX]; u8 zones[LM3530_ALS_ZB_MAX] = {0}; u32 als_vmin, als_vmax, als_vstep; struct lm3530_platform_data *pltfm = drvdata->pdata; struct i2c_client *client = drvdata->client; 
gen_config = (pltfm->brt_ramp_law << LM3530_RAMP_LAW_SHIFT) | ((pltfm->max_current & 7) << LM3530_MAX_CURR_SHIFT); if (drvdata->mode == LM3530_BL_MODE_MANUAL || drvdata->mode == LM3530_BL_MODE_ALS) gen_config |= (LM3530_ENABLE_I2C); if (drvdata->mode == LM3530_BL_MODE_ALS) { if (pltfm->als_vmax == 0) { pltfm->als_vmin = als_vmin = 0; pltfm->als_vmin = als_vmax = LM3530_ALS_WINDOW_mV; } als_vmin = pltfm->als_vmin; als_vmax = pltfm->als_vmax; if ((als_vmax - als_vmin) > LM3530_ALS_WINDOW_mV) pltfm->als_vmax = als_vmax = als_vmin + LM3530_ALS_WINDOW_mV; /* n zone boundary makes n+1 zones */ als_vstep = (als_vmax - als_vmin) / (LM3530_ALS_ZB_MAX + 1); for (i = 0; i < LM3530_ALS_ZB_MAX; i++) zones[i] = (((als_vmin + LM3530_ALS_OFFSET_mV) + als_vstep + (i * als_vstep)) * LED_FULL) / 1000; als_config = (pltfm->als_avrg_time << LM3530_ALS_AVG_TIME_SHIFT) | (LM3530_ENABLE_ALS) | (pltfm->als_input_mode << LM3530_ALS_SEL_SHIFT); als_imp_sel = (pltfm->als1_resistor_sel << LM3530_ALS1_IMP_SHIFT) | (pltfm->als2_resistor_sel << LM3530_ALS2_IMP_SHIFT); } if (drvdata->mode == LM3530_BL_MODE_PWM) gen_config |= (LM3530_ENABLE_PWM) | (pltfm->pwm_pol_hi << LM3530_PWM_POL_SHIFT) | (LM3530_ENABLE_PWM_SIMPLE); brt_ramp = (pltfm->brt_ramp_fall << LM3530_BRT_RAMP_FALL_SHIFT) | (pltfm->brt_ramp_rise << LM3530_BRT_RAMP_RISE_SHIFT); if (drvdata->brightness) brightness = drvdata->brightness; else brightness = drvdata->brightness = pltfm->brt_val; reg_val[0] = gen_config; /* LM3530_GEN_CONFIG */ reg_val[1] = als_config; /* LM3530_ALS_CONFIG */ reg_val[2] = brt_ramp; /* LM3530_BRT_RAMP_RATE */ reg_val[3] = 0x00; /* LM3530_ALS_ZONE_REG */ reg_val[4] = als_imp_sel; /* LM3530_ALS_IMP_SELECT */ reg_val[5] = brightness; /* LM3530_BRT_CTRL_REG */ reg_val[6] = zones[0]; /* LM3530_ALS_ZB0_REG */ reg_val[7] = zones[1]; /* LM3530_ALS_ZB1_REG */ reg_val[8] = zones[2]; /* LM3530_ALS_ZB2_REG */ reg_val[9] = zones[3]; /* LM3530_ALS_ZB3_REG */ reg_val[10] = LM3530_DEF_ZT_0; /* LM3530_ALS_Z0T_REG */ reg_val[11] 
= LM3530_DEF_ZT_1; /* LM3530_ALS_Z1T_REG */ reg_val[12] = LM3530_DEF_ZT_2; /* LM3530_ALS_Z2T_REG */ reg_val[13] = LM3530_DEF_ZT_3; /* LM3530_ALS_Z3T_REG */ reg_val[14] = LM3530_DEF_ZT_4; /* LM3530_ALS_Z4T_REG */ if (!drvdata->enable) { if (drvdata->hw_en_gpio != LM3530_NO_HWEN_GPIO) gpio_set_value(drvdata->hw_en_gpio, 1); ret = regulator_enable(drvdata->regulator); if (ret) { dev_err(&drvdata->client->dev, "Enable regulator failed\n"); return ret; } drvdata->enable = true; } for (i = 0; i < LM3530_REG_MAX; i++) { ret = i2c_smbus_write_byte_data(client, lm3530_reg[i], reg_val[i]); if (ret) break; } return ret; } static void lm3530_brightness_set(struct led_classdev *led_cdev, enum led_brightness brt_val) { int err; struct lm3530_data *drvdata = container_of(led_cdev, struct lm3530_data, led_dev); switch (drvdata->mode) { case LM3530_BL_MODE_MANUAL: if (!drvdata->enable) { err = lm3530_init_registers(drvdata); if (err) { dev_err(&drvdata->client->dev, "Register Init failed: %d\n", err); break; } } /* set the brightness in brightness control register*/ err = i2c_smbus_write_byte_data(drvdata->client, LM3530_BRT_CTRL_REG, brt_val / 2); if (err) dev_err(&drvdata->client->dev, "Unable to set brightness: %d\n", err); else drvdata->brightness = brt_val / 2; if (brt_val == 0) { err = regulator_disable(drvdata->regulator); if (err) dev_err(&drvdata->client->dev, "Disable regulator failed\n"); if (drvdata->hw_en_gpio != LM3530_NO_HWEN_GPIO) gpio_set_value(drvdata->hw_en_gpio, 0); drvdata->enable = false; } break; case LM3530_BL_MODE_ALS: break; case LM3530_BL_MODE_PWM: break; default: break; } } static ssize_t lm3530_mode_get(struct device *dev, struct device_attribute *attr, char *buf) { struct i2c_client *client = container_of( dev->parent, struct i2c_client, dev); struct lm3530_data *drvdata = i2c_get_clientdata(client); int i, len = 0; for (i = 0; i < ARRAY_SIZE(mode_map); i++) if (drvdata->mode == mode_map[i].mode_val) len += sprintf(buf + len, "[%s] ", 
mode_map[i].mode); else len += sprintf(buf + len, "%s ", mode_map[i].mode); len += sprintf(buf + len, "\n"); return len; } static ssize_t lm3530_mode_set(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { int err; struct i2c_client *client = container_of( dev->parent, struct i2c_client, dev); struct lm3530_data *drvdata = i2c_get_clientdata(client); int mode; mode = lm3530_get_mode_from_str(buf); if (mode < 0) { dev_err(dev, "Invalid mode\n"); return -EINVAL; } if (mode == LM3530_BL_MODE_MANUAL) drvdata->mode = LM3530_BL_MODE_MANUAL; else if (mode == LM3530_BL_MODE_ALS) drvdata->mode = LM3530_BL_MODE_ALS; else if (mode == LM3530_BL_MODE_PWM) { dev_err(dev, "PWM mode not supported\n"); return -EINVAL; } err = lm3530_init_registers(drvdata); if (err) { dev_err(dev, "Setting %s Mode failed :%d\n", buf, err); return err; } return sizeof(drvdata->mode); } static DEVICE_ATTR(mode, 0644, lm3530_mode_get, lm3530_mode_set); static int __devinit lm3530_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct lm3530_platform_data *pdata = client->dev.platform_data; struct lm3530_data *drvdata; int err = 0; if (pdata == NULL) { dev_err(&client->dev, "platform data required\n"); err = -ENODEV; goto err_out; } /* BL mode */ if (pdata->mode > LM3530_BL_MODE_PWM) { dev_err(&client->dev, "Illegal Mode request\n"); err = -EINVAL; goto err_out; } if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { dev_err(&client->dev, "I2C_FUNC_I2C not supported\n"); err = -EIO; goto err_out; } drvdata = kzalloc(sizeof(struct lm3530_data), GFP_KERNEL); if (drvdata == NULL) { err = -ENOMEM; goto err_out; } drvdata->mode = pdata->mode; drvdata->client = client; drvdata->pdata = pdata; drvdata->brightness = LED_OFF; drvdata->hw_en_gpio = pdata->hw_en_gpio; drvdata->enable = false; drvdata->led_dev.name = LM3530_LED_DEV; drvdata->led_dev.brightness_set = lm3530_brightness_set; i2c_set_clientdata(client, drvdata); if 
(gpio_is_valid(drvdata->hw_en_gpio)) { err = gpio_request_one(drvdata->hw_en_gpio, GPIOF_OUT_INIT_HIGH, "lm3530_hw_en"); if (err < 0) { dev_err(&client->dev, "lm3530 hw_en gpio failed: %d\n", err); goto err_gpio_request; } } drvdata->regulator = regulator_get(&client->dev, "vin"); if (IS_ERR(drvdata->regulator)) { dev_err(&client->dev, "regulator get failed\n"); err = PTR_ERR(drvdata->regulator); drvdata->regulator = NULL; goto err_regulator_get; } if (drvdata->pdata->brt_val) { err = lm3530_init_registers(drvdata); if (err < 0) { dev_err(&client->dev, "Register Init failed: %d\n", err); err = -ENODEV; goto err_reg_init; } } err = led_classdev_register(&client->dev, &drvdata->led_dev); if (err < 0) { dev_err(&client->dev, "Register led class failed: %d\n", err); err = -ENODEV; goto err_class_register; } err = device_create_file(drvdata->led_dev.dev, &dev_attr_mode); if (err < 0) { dev_err(&client->dev, "File device creation failed: %d\n", err); err = -ENODEV; goto err_create_file; } return 0; err_create_file: led_classdev_unregister(&drvdata->led_dev); err_class_register: err_reg_init: regulator_put(drvdata->regulator); err_regulator_get: if (gpio_is_valid(drvdata->hw_en_gpio)) gpio_free(drvdata->hw_en_gpio); err_gpio_request: i2c_set_clientdata(client, NULL); kfree(drvdata); err_out: return err; } static int __devexit lm3530_remove(struct i2c_client *client) { struct lm3530_data *drvdata = i2c_get_clientdata(client); device_remove_file(drvdata->led_dev.dev, &dev_attr_mode); if (drvdata->enable) regulator_disable(drvdata->regulator); regulator_put(drvdata->regulator); if (gpio_is_valid(drvdata->hw_en_gpio)) gpio_free(drvdata->hw_en_gpio); led_classdev_unregister(&drvdata->led_dev); kfree(drvdata); return 0; } static const struct i2c_device_id lm3530_id[] = { {LM3530_NAME, 0}, {} }; MODULE_DEVICE_TABLE(i2c, lm3530_id); static struct i2c_driver lm3530_i2c_driver = { .probe = lm3530_probe, .remove = lm3530_remove, .id_table = lm3530_id, .driver = { .name = 
LM3530_NAME, .owner = THIS_MODULE, }, }; static int __init lm3530_init(void) { return i2c_add_driver(&lm3530_i2c_driver); } static void __exit lm3530_exit(void) { i2c_del_driver(&lm3530_i2c_driver); } module_init(lm3530_init); module_exit(lm3530_exit); MODULE_DESCRIPTION("Back Light driver for LM3530"); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Shreshtha Kumar SAHU <shreshthakumar.sahu@stericsson.com>");
gpl-2.0
bhree/android_kernel_jena_msm7x27a
drivers/infiniband/hw/ipath/ipath_file_ops.c
110
73449
/* * Copyright (c) 2006, 2007, 2008 QLogic Corporation. All rights reserved. * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. 
*/ #include <linux/pci.h> #include <linux/poll.h> #include <linux/cdev.h> #include <linux/swap.h> #include <linux/vmalloc.h> #include <linux/slab.h> #include <linux/highmem.h> #include <linux/io.h> #include <linux/jiffies.h> #include <asm/pgtable.h> #include "ipath_kernel.h" #include "ipath_common.h" #include "ipath_user_sdma.h" static int ipath_open(struct inode *, struct file *); static int ipath_close(struct inode *, struct file *); static ssize_t ipath_write(struct file *, const char __user *, size_t, loff_t *); static ssize_t ipath_writev(struct kiocb *, const struct iovec *, unsigned long , loff_t); static unsigned int ipath_poll(struct file *, struct poll_table_struct *); static int ipath_mmap(struct file *, struct vm_area_struct *); static const struct file_operations ipath_file_ops = { .owner = THIS_MODULE, .write = ipath_write, .aio_write = ipath_writev, .open = ipath_open, .release = ipath_close, .poll = ipath_poll, .mmap = ipath_mmap, .llseek = noop_llseek, }; /* * Convert kernel virtual addresses to physical addresses so they don't * potentially conflict with the chip addresses used as mmap offsets. * It doesn't really matter what mmap offset we use as long as we can * interpret it correctly. 
 */
static u64 cvt_kvaddr(void *p)
{
	struct page *page;
	u64 paddr = 0;

	/* returns 0 if p has no backing page (e.g. not a vmalloc address) */
	page = vmalloc_to_page(p);
	if (page)
		paddr = page_to_pfn(page) << PAGE_SHIFT;

	return paddr;
}

/*
 * Fill in a struct ipath_base_info describing this port's queues, PIO
 * buffers, and (for shared ports) the per-subport regions, then copy it
 * to the user buffer at ubase.  Returns 0 or a negative errno.
 */
static int ipath_get_base_info(struct file *fp,
			       void __user *ubase, size_t ubase_size)
{
	struct ipath_portdata *pd = port_fp(fp);
	int ret = 0;
	struct ipath_base_info *kinfo = NULL;
	struct ipath_devdata *dd = pd->port_dd;
	unsigned subport_cnt;
	int shared, master;
	size_t sz;

	subport_cnt = pd->port_subport_cnt;
	if (!subport_cnt) {
		shared = 0;
		master = 0;
		subport_cnt = 1;
	} else {
		shared = 1;
		/* subport 0 (the opener) is the master */
		master = !subport_fp(fp);
	}

	sz = sizeof(*kinfo);
	/* If port sharing is not requested, allow the old size structure */
	if (!shared)
		sz -= 7 * sizeof(u64);
	if (ubase_size < sz) {
		ipath_cdbg(PROC,
			   "Base size %zu, need %zu (version mismatch?)\n",
			   ubase_size, sz);
		ret = -EINVAL;
		goto bail;
	}

	kinfo = kzalloc(sizeof(*kinfo), GFP_KERNEL);
	if (kinfo == NULL) {
		ret = -ENOMEM;
		goto bail;
	}

	ret = dd->ipath_f_get_base_info(pd, kinfo);
	if (ret < 0)
		goto bail;

	kinfo->spi_rcvhdr_cnt = dd->ipath_rcvhdrcnt;
	kinfo->spi_rcvhdrent_size = dd->ipath_rcvhdrentsize;
	kinfo->spi_tidegrcnt = dd->ipath_rcvegrcnt;
	kinfo->spi_rcv_egrbufsize = dd->ipath_rcvegrbufsize;
	/*
	 * have to mmap whole thing
	 */
	kinfo->spi_rcv_egrbuftotlen =
		pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;
	kinfo->spi_rcv_egrperchunk = pd->port_rcvegrbufs_perchunk;
	kinfo->spi_rcv_egrchunksize = kinfo->spi_rcv_egrbuftotlen /
		pd->port_rcvegrbuf_chunks;
	/* TIDs are split evenly; the master also gets the remainder */
	kinfo->spi_tidcnt = dd->ipath_rcvtidcnt / subport_cnt;
	if (master)
		kinfo->spi_tidcnt += dd->ipath_rcvtidcnt % subport_cnt;
	/*
	 * for this use, may be ipath_cfgports summed over all chips that
	 * are configured and present
	 */
	kinfo->spi_nports = dd->ipath_cfgports;
	/* unit (chip/board) our port is on */
	kinfo->spi_unit = dd->ipath_unit;
	/* for now, only a single page */
	kinfo->spi_tid_maxsize = PAGE_SIZE;

	/*
	 * Doing this per port, and based on the skip value, etc.  This has
	 * to be the actual buffer size, since the protocol code treats it
	 * as an array.
	 *
	 * These have to be set to user addresses in the user code via mmap.
	 * These values are used on return to user code for the mmap target
	 * addresses only.  For 32 bit, same 44 bit address problem, so use
	 * the physical address, not virtual.  Before 2.6.11, using the
	 * page_address() macro worked, but in 2.6.11, even that returns the
	 * full 64 bit address (upper bits all 1's).  So far, using the
	 * physical addresses (or chip offsets, for chip mapping) works, but
	 * no doubt some future kernel release will change that, and we'll
	 * be on to yet another method of dealing with this.
	 */
	kinfo->spi_rcvhdr_base = (u64) pd->port_rcvhdrq_phys;
	kinfo->spi_rcvhdr_tailaddr = (u64) pd->port_rcvhdrqtailaddr_phys;
	kinfo->spi_rcv_egrbufs = (u64) pd->port_rcvegr_phys;
	kinfo->spi_pioavailaddr = (u64) dd->ipath_pioavailregs_phys;
	/* status word lives at a fixed offset inside the pioavail page */
	kinfo->spi_status = (u64) kinfo->spi_pioavailaddr +
		(void *) dd->ipath_statusp -
		(void *) dd->ipath_pioavailregs_dma;
	if (!shared) {
		kinfo->spi_piocnt = pd->port_piocnt;
		kinfo->spi_piobufbase = (u64) pd->port_piobufs;
		kinfo->__spi_uregbase = (u64) dd->ipath_uregbase +
			dd->ipath_ureg_align * pd->port_port;
	} else if (master) {
		kinfo->spi_piocnt = (pd->port_piocnt / subport_cnt) +
				    (pd->port_piocnt % subport_cnt);
		/* Master's PIO buffers are after all the slave's */
		kinfo->spi_piobufbase = (u64) pd->port_piobufs +
			dd->ipath_palign *
			(pd->port_piocnt - kinfo->spi_piocnt);
	} else {
		unsigned slave = subport_fp(fp) - 1;

		kinfo->spi_piocnt = pd->port_piocnt / subport_cnt;
		kinfo->spi_piobufbase = (u64) pd->port_piobufs +
			dd->ipath_palign * kinfo->spi_piocnt * slave;
	}

	if (shared) {
		/*
		 * Preserve the per-port (chip) addresses in the spi_port_*
		 * fields, then redirect the main fields to this process'
		 * slice of the vmalloc'ed subport shadow areas (kernel
		 * virtual addresses converted via cvt_kvaddr so they can
		 * serve as mmap offsets).
		 */
		kinfo->spi_port_uregbase = (u64) dd->ipath_uregbase +
			dd->ipath_ureg_align * pd->port_port;
		kinfo->spi_port_rcvegrbuf = kinfo->spi_rcv_egrbufs;
		kinfo->spi_port_rcvhdr_base = kinfo->spi_rcvhdr_base;
		kinfo->spi_port_rcvhdr_tailaddr = kinfo->spi_rcvhdr_tailaddr;

		kinfo->__spi_uregbase = cvt_kvaddr(pd->subport_uregbase +
			PAGE_SIZE * subport_fp(fp));

		kinfo->spi_rcvhdr_base = cvt_kvaddr(pd->subport_rcvhdr_base +
			pd->port_rcvhdrq_size * subport_fp(fp));
		kinfo->spi_rcvhdr_tailaddr = 0;
		kinfo->spi_rcv_egrbufs = cvt_kvaddr(pd->subport_rcvegrbuf +
			pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size *
			subport_fp(fp));

		kinfo->spi_subport_uregbase =
			cvt_kvaddr(pd->subport_uregbase);
		kinfo->spi_subport_rcvegrbuf =
			cvt_kvaddr(pd->subport_rcvegrbuf);
		kinfo->spi_subport_rcvhdr_base =
			cvt_kvaddr(pd->subport_rcvhdr_base);
		ipath_cdbg(PROC, "port %u flags %x %llx %llx %llx\n",
			kinfo->spi_port, kinfo->spi_runtime_flags,
			(unsigned long long) kinfo->spi_subport_uregbase,
			(unsigned long long) kinfo->spi_subport_rcvegrbuf,
			(unsigned long long) kinfo->spi_subport_rcvhdr_base);
	}

	/*
	 * All user buffers are 2KB buffers.  If we ever support
	 * giving 4KB buffers to user processes, this will need some
	 * work.
	 */
	kinfo->spi_pioindex = (kinfo->spi_piobufbase -
		(dd->ipath_piobufbase & 0xffffffff)) / dd->ipath_palign;
	kinfo->spi_pioalign = dd->ipath_palign;

	kinfo->spi_qpair = IPATH_KD_QP;
	/*
	 * user mode PIO buffers are always 2KB, even when 4KB can
	 * be received, and sent via the kernel; this is ibmaxlen
	 * for 2K MTU.
	 */
	kinfo->spi_piosize = dd->ipath_piosize2k - 2 * sizeof(u32);
	kinfo->spi_mtu = dd->ipath_ibmaxlen;	/* maxlen, not ibmtu */
	kinfo->spi_port = pd->port_port;
	kinfo->spi_subport = subport_fp(fp);
	kinfo->spi_sw_version = IPATH_KERN_SWVERSION;
	kinfo->spi_hw_version = dd->ipath_revision;

	if (master) {
		kinfo->spi_runtime_flags |= IPATH_RUNTIME_MASTER;
	}

	/* copy out no more than the caller said it has room for */
	sz = (ubase_size < sizeof(*kinfo)) ? ubase_size : sizeof(*kinfo);
	if (copy_to_user(ubase, kinfo, sz))
		ret = -EFAULT;

bail:
	kfree(kinfo);
	return ret;
}

/**
 * ipath_tid_update - update a port TID
 * @pd: the port
 * @fp: the ipath device file
 * @ti: the TID information
 *
 * The new implementation as of Oct 2004 is that the driver assigns
 * the tid and returns it to the caller.
To make it easier to
 * catch bugs, and to reduce search time, we keep a cursor for
 * each port, walking the shadow tid array to find one that's not
 * in use.
 *
 * For now, if we can't allocate the full list, we fail, although
 * in the long run, we'll allocate as many as we can, and the
 * caller will deal with that by trying the remaining pages later.
 * That means that when we fail, we have to mark the tids as not in
 * use again, in our shadow copy.
 *
 * It's up to the caller to free the tids when they are done.
 * We'll unlock the pages as they free them.
 *
 * Also, right now we are locking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.
 */
static int ipath_tid_update(struct ipath_portdata *pd, struct file *fp,
			    const struct ipath_tid_info *ti)
{
	int ret = 0, ntids;
	u32 tid, porttid, cnt, i, tidcnt, tidoff;
	u16 *tidlist;
	struct ipath_devdata *dd = pd->port_dd;
	u64 physaddr;
	unsigned long vaddr;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];	/* bitmap of TIDs touched here */
	struct page **pagep = NULL;
	unsigned subport = subport_fp(fp);

	if (!dd->ipath_pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	cnt = ti->tidcnt;
	if (!cnt) {
		ipath_dbg("After copyin, tidcnt 0, tidlist %llx\n",
			  (unsigned long long) ti->tidlist);
		/*
		 * Should we treat as success?  likely a bug
		 */
		ret = -EFAULT;
		goto done;
	}
	porttid = pd->port_port * dd->ipath_rcvtidcnt;
	/*
	 * Determine this caller's slice of the TID space: whole port if
	 * not shared, master gets its share plus the remainder, slaves
	 * get equal shares offset by their subport index.
	 */
	if (!pd->port_subport_cnt) {
		tidcnt = dd->ipath_rcvtidcnt;
		tid = pd->port_tidcursor;
		tidoff = 0;
	} else if (!subport) {
		tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
			 (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
		tidoff = dd->ipath_rcvtidcnt - tidcnt;
		porttid += tidoff;
		tid = tidcursor_fp(fp);
	} else {
		tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
		tidoff = tidcnt * (subport - 1);
		porttid += tidoff;
		tid = tidcursor_fp(fp);
	}
	if (cnt > tidcnt) {
		/* make sure it all fits in port_tid_pg_list */
		dev_info(&dd->pcidev->dev, "Process tried to allocate %u "
			 "TIDs, only trying max (%u)\n", cnt, tidcnt);
		cnt = tidcnt;
	}
	/* the u16 tidlist array follows the page pointers in the same
	 * port_tid_pg_list allocation */
	pagep = &((struct page **) pd->port_tid_pg_list)[tidoff];
	tidlist = &((u16 *) &pagep[dd->ipath_rcvtidcnt])[tidoff];

	memset(tidmap, 0, sizeof(tidmap));
	/* before decrement; chip actual # */
	ntids = tidcnt;
	tidbase = (u64 __iomem *) (((char __iomem *) dd->ipath_kregbase) +
				   dd->ipath_rcvtidbase +
				   porttid * sizeof(*tidbase));

	ipath_cdbg(VERBOSE, "Port%u %u tids, cursor %u, tidbase %p\n",
		   pd->port_port, cnt, tid, tidbase);

	/* virtual address of first page in transfer */
	vaddr = ti->tidvaddr;
	if (!access_ok(VERIFY_WRITE, (void __user *) vaddr,
		       cnt * PAGE_SIZE)) {
		ipath_dbg("Fail vaddr %p, %u pages, !access_ok\n",
			  (void *)vaddr, cnt);
		ret = -EFAULT;
		goto done;
	}
	ret = ipath_get_user_pages(vaddr, cnt, pagep);
	if (ret) {
		if (ret == -EBUSY) {
			ipath_dbg("Failed to lock addr %p, %u pages "
				  "(already locked)\n",
				  (void *) vaddr, cnt);
			/*
			 * for now, continue, and see what happens but with
			 * the new implementation, this should never happen,
			 * unless perhaps the user has mpin'ed the pages
			 * themselves (something we need to test)
			 */
			ret = 0;
		} else {
			dev_info(&dd->pcidev->dev,
				 "Failed to lock addr %p, %u pages: "
				 "errno %d\n", (void *) vaddr, cnt, -ret);
			goto done;
		}
	}
	for (i = 0; i < cnt; i++, vaddr += PAGE_SIZE) {
		/* walk the shadow array (with wraparound) for a free TID */
		for (; ntids--; tid++) {
			if (tid == tidcnt)
				tid = 0;
			if (!dd->ipath_pageshadow[porttid + tid])
				break;
		}
		if (ntids < 0) {
			/*
			 * oops, wrapped all the way through their TIDs,
			 * and didn't have enough free; see comments at
			 * start of routine
			 */
			ipath_dbg("Not enough free TIDs for %u pages "
				  "(index %d), failing\n", cnt, i);
			i--;	/* last tidlist[i] not filled in */
			ret = -ENOMEM;
			break;
		}
		tidlist[i] = tid + tidoff;
		ipath_cdbg(VERBOSE, "Updating idx %u to TID %u, "
			   "vaddr %lx\n", i, tid + tidoff, vaddr);
		/* we "know" system pages and TID pages are same size */
		dd->ipath_pageshadow[porttid + tid] = pagep[i];
		dd->ipath_physshadow[porttid + tid] = ipath_map_page(
			dd->pcidev, pagep[i], 0, PAGE_SIZE,
			PCI_DMA_FROMDEVICE);
		/*
		 * don't need atomic or it's overhead
		 */
		__set_bit(tid, tidmap);
		physaddr = dd->ipath_physshadow[porttid + tid];
		ipath_stats.sps_pagelocks++;
		ipath_cdbg(VERBOSE,
			   "TID %u, vaddr %lx, physaddr %llx pgp %p\n",
			   tid, vaddr, (unsigned long long) physaddr,
			   pagep[i]);
		dd->ipath_f_put_tid(dd, &tidbase[tid],
				    RCVHQ_RCV_TYPE_EXPECTED, physaddr);
		/*
		 * don't check this tid in ipath_portshadow, since we
		 * just filled it in; start with the next one.
		 */
		tid++;
	}

	if (ret) {
		u32 limit;
	cleanup:
		/*
		 * jump here if copy out of updated info failed...
		 * Undo every TID recorded in tidmap: invalidate the chip
		 * entry, unmap the DMA page, clear the shadow, and release
		 * the user pages.
		 */
		ipath_dbg("After failure (ret=%d), undo %d of %d entries\n",
			  -ret, i, cnt);
		/* same code that's in ipath_free_tid() */
		limit = sizeof(tidmap) * BITS_PER_BYTE;
		if (limit > tidcnt)
			/* just in case size changes in future */
			limit = tidcnt;
		tid = find_first_bit((const unsigned long *)tidmap, limit);
		for (; tid < limit; tid++) {
			if (!test_bit(tid, tidmap))
				continue;
			if (dd->ipath_pageshadow[porttid + tid]) {
				ipath_cdbg(VERBOSE, "Freeing TID %u\n",
					   tid);
				dd->ipath_f_put_tid(dd, &tidbase[tid],
						    RCVHQ_RCV_TYPE_EXPECTED,
						    dd->ipath_tidinvalid);
				pci_unmap_page(dd->pcidev,
					dd->ipath_physshadow[porttid + tid],
					PAGE_SIZE, PCI_DMA_FROMDEVICE);
				dd->ipath_pageshadow[porttid + tid] = NULL;
				ipath_stats.sps_pageunlocks++;
			}
		}
		ipath_release_user_pages(pagep, cnt);
	} else {
		/*
		 * Copy the updated array, with ipath_tid's filled in, back
		 * to user.  Since we did the copy in already, this "should
		 * never fail" If it does, we have to clean up...
		 */
		if (copy_to_user((void __user *)
				 (unsigned long) ti->tidlist,
				 tidlist, cnt * sizeof(*tidlist))) {
			ret = -EFAULT;
			goto cleanup;
		}
		if (copy_to_user((void __user *) (unsigned long) ti->tidmap,
				 tidmap, sizeof tidmap)) {
			ret = -EFAULT;
			goto cleanup;
		}
		/* advance the cursor for the next caller */
		if (tid == tidcnt)
			tid = 0;
		if (!pd->port_subport_cnt)
			pd->port_tidcursor = tid;
		else
			tidcursor_fp(fp) = tid;
	}

done:
	if (ret)
		ipath_dbg("Failed to map %u TID pages, failing with %d\n",
			  ti->tidcnt, -ret);
	return ret;
}

/**
 * ipath_tid_free - free a port TID
 * @pd: the port
 * @subport: the subport
 * @ti: the TID info
 *
 * right now we are unlocking one page at a time, but since
 * the intended use of this routine is for a single group of
 * virtually contiguous pages, that should change to improve
 * performance.  We check that the TID is in range for this port
 * but otherwise don't check validity; if user has an error and
 * frees the wrong tid, it's only their own data that can thereby
 * be corrupted.
We do check that the TID was in use, for sanity.
 * We always use our idea of the saved address, not the address that
 * they pass in to us.
 */
static int ipath_tid_free(struct ipath_portdata *pd, unsigned subport,
			  const struct ipath_tid_info *ti)
{
	int ret = 0;
	u32 tid, porttid, cnt, limit, tidcnt;
	struct ipath_devdata *dd = pd->port_dd;
	u64 __iomem *tidbase;
	unsigned long tidmap[8];

	if (!dd->ipath_pageshadow) {
		ret = -ENOMEM;
		goto done;
	}

	/* the caller's bitmap says which TIDs to free */
	if (copy_from_user(tidmap, (void __user *)(unsigned long)ti->tidmap,
			   sizeof tidmap)) {
		ret = -EFAULT;
		goto done;
	}

	porttid = pd->port_port * dd->ipath_rcvtidcnt;
	/* same subport slicing of the TID space as in ipath_tid_update() */
	if (!pd->port_subport_cnt)
		tidcnt = dd->ipath_rcvtidcnt;
	else if (!subport) {
		tidcnt = (dd->ipath_rcvtidcnt / pd->port_subport_cnt) +
			 (dd->ipath_rcvtidcnt % pd->port_subport_cnt);
		porttid += dd->ipath_rcvtidcnt - tidcnt;
	} else {
		tidcnt = dd->ipath_rcvtidcnt / pd->port_subport_cnt;
		porttid += tidcnt * (subport - 1);
	}
	tidbase = (u64 __iomem *) ((char __iomem *)(dd->ipath_kregbase) +
				   dd->ipath_rcvtidbase +
				   porttid * sizeof(*tidbase));

	limit = sizeof(tidmap) * BITS_PER_BYTE;
	if (limit > tidcnt)
		/* just in case size changes in future */
		limit = tidcnt;
	tid = find_first_bit(tidmap, limit);
	ipath_cdbg(VERBOSE, "Port%u free %u tids; first bit (max=%d) "
		   "set is %d, porttid %u\n", pd->port_port, ti->tidcnt,
		   limit, tid, porttid);
	for (cnt = 0; tid < limit; tid++) {
		/*
		 * small optimization; if we detect a run of 3 or so without
		 * any set, use find_first_bit again.  That's mainly to
		 * accelerate the case where we wrapped, so we have some at
		 * the beginning, and some at the end, and a big gap
		 * in the middle.
		 */
		if (!test_bit(tid, tidmap))
			continue;
		cnt++;
		if (dd->ipath_pageshadow[porttid + tid]) {
			struct page *p;
			p = dd->ipath_pageshadow[porttid + tid];
			dd->ipath_pageshadow[porttid + tid] = NULL;
			ipath_cdbg(VERBOSE, "PID %u freeing TID %u\n",
				   pid_nr(pd->port_pid), tid);
			/* invalidate the chip entry before releasing pages */
			dd->ipath_f_put_tid(dd, &tidbase[tid],
					    RCVHQ_RCV_TYPE_EXPECTED,
					    dd->ipath_tidinvalid);
			pci_unmap_page(dd->pcidev,
				dd->ipath_physshadow[porttid + tid],
				PAGE_SIZE, PCI_DMA_FROMDEVICE);
			ipath_release_user_pages(&p, 1);
			ipath_stats.sps_pageunlocks++;
		} else
			ipath_dbg("Unused tid %u, ignoring\n", tid);
	}
	if (cnt != ti->tidcnt)
		ipath_dbg("passed in tidcnt %d, only %d bits set in map\n",
			  ti->tidcnt, cnt);
done:
	if (ret)
		ipath_dbg("Failed to unmap %u TID pages, failing with %d\n",
			  ti->tidcnt, -ret);
	return ret;
}

/**
 * ipath_set_part_key - set a partition key
 * @pd: the port
 * @key: the key
 *
 * We can have up to 4 active at a time (other than the default, which is
 * always allowed).  This is somewhat tricky, since multiple ports may set
 * the same key, so we reference count them, and clean up at exit.  All 4
 * partition keys are packed into a single infinipath register.  It's an
 * error for a process to set the same pkey multiple times.  We provide no
 * mechanism to de-allocate a pkey at this time, we may eventually need to
 * do that.  I've used the atomic operations, and no locking, and only make
 * a single pass through what's available.  This should be more than
 * adequate for some time.  I'll think about spinlocks or the like if and as
 * it's necessary.
 */
static int ipath_set_part_key(struct ipath_portdata *pd, u16 key)
{
	struct ipath_devdata *dd = pd->port_dd;
	int i, any = 0, pidx = -1;
	u16 lkey = key & 0x7FFF;	/* key without the membership bit */
	int ret;

	if (lkey == (IPATH_DEFAULT_P_KEY & 0x7FFF)) {
		/* nothing to do; this key always valid */
		ret = 0;
		goto bail;
	}

	ipath_cdbg(VERBOSE, "p%u try to set pkey %hx, current keys "
		   "%hx:%x %hx:%x %hx:%x %hx:%x\n",
		   pd->port_port, key, dd->ipath_pkeys[0],
		   atomic_read(&dd->ipath_pkeyrefs[0]), dd->ipath_pkeys[1],
		   atomic_read(&dd->ipath_pkeyrefs[1]), dd->ipath_pkeys[2],
		   atomic_read(&dd->ipath_pkeyrefs[2]), dd->ipath_pkeys[3],
		   atomic_read(&dd->ipath_pkeyrefs[3]));

	if (!lkey) {
		ipath_cdbg(PROC, "p%u tries to set key 0, not allowed\n",
			   pd->port_port);
		ret = -EINVAL;
		goto bail;
	}

	/*
	 * Set the full membership bit, because it has to be
	 * set in the register or the packet, and it seems
	 * cleaner to set in the register than to force all
	 * callers to set it. (see bug 4331)
	 */
	key |= 0x8000;

	/* find a free per-port slot, and reject duplicate sets */
	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		if (!pd->port_pkeys[i] && pidx == -1)
			pidx = i;
		if (pd->port_pkeys[i] == key) {
			ipath_cdbg(VERBOSE, "p%u tries to set same pkey "
				   "(%x) more than once\n",
				   pd->port_port, key);
			ret = -EEXIST;
			goto bail;
		}
	}
	if (pidx == -1) {
		ipath_dbg("All pkeys for port %u already in use, "
			  "can't set %x\n", pd->port_port, key);
		ret = -EBUSY;
		goto bail;
	}
	/* first pass: share an existing device slot if the key matches */
	for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i]) {
			any++;
			continue;
		}
		if (dd->ipath_pkeys[i] == key) {
			atomic_t *pkrefs = &dd->ipath_pkeyrefs[i];

			if (atomic_inc_return(pkrefs) > 1) {
				pd->port_pkeys[pidx] = key;
				ipath_cdbg(VERBOSE, "p%u set key %x "
					   "matches #%d, count now %d\n",
					   pd->port_port, key, i,
					   atomic_read(pkrefs));
				ret = 0;
				goto bail;
			} else {
				/*
				 * lost race, decrement count, catch below
				 */
				atomic_dec(pkrefs);
				ipath_cdbg(VERBOSE, "Lost race, count was "
					   "0, after dec, it's %d\n",
					   atomic_read(pkrefs));
				any++;
			}
		}
		if ((dd->ipath_pkeys[i] & 0x7FFF) == lkey) {
			/*
			 * It makes no sense to have both the limited and
			 * full membership PKEY set at the same time since
			 * the unlimited one will disable the limited one.
			 */
			ret = -EEXIST;
			goto bail;
		}
	}
	if (!any) {
		ipath_dbg("port %u, all pkeys already in use, "
			  "can't set %x\n", pd->port_port, key);
		ret = -EBUSY;
		goto bail;
	}
	/* second pass: claim an empty device slot and program the chip */
	for (any = i = 0; i < ARRAY_SIZE(dd->ipath_pkeys); i++) {
		if (!dd->ipath_pkeys[i] &&
		    atomic_inc_return(&dd->ipath_pkeyrefs[i]) == 1) {
			u64 pkey;

			/* for ipathstats, etc. */
			ipath_stats.sps_pkeys[i] = lkey;
			pd->port_pkeys[pidx] = dd->ipath_pkeys[i] = key;
			/* all 4 pkeys are packed into one 64-bit register */
			pkey = (u64) dd->ipath_pkeys[0] |
				((u64) dd->ipath_pkeys[1] << 16) |
				((u64) dd->ipath_pkeys[2] << 32) |
				((u64) dd->ipath_pkeys[3] << 48);
			ipath_cdbg(PROC, "p%u set key %x in #%d, "
				   "portidx %d, new pkey reg %llx\n",
				   pd->port_port, key, i, pidx,
				   (unsigned long long) pkey);
			ipath_write_kreg(
				dd, dd->ipath_kregs->kr_partitionkey, pkey);

			ret = 0;
			goto bail;
		}
	}
	ipath_dbg("port %u, all pkeys already in use 2nd pass, "
		  "can't set %x\n", pd->port_port, key);
	ret = -EBUSY;

bail:
	return ret;
}

/**
 * ipath_manage_rcvq - manage a port's receive queue
 * @pd: the port
 * @subport: the subport
 * @start_stop: action to carry out
 *
 * start_stop == 0 disables receive on the port, for use in queue
 * overflow conditions.  start_stop==1 re-enables, to be used to
 * re-init the software copy of the head register
 */
static int ipath_manage_rcvq(struct ipath_portdata *pd, unsigned subport,
			     int start_stop)
{
	struct ipath_devdata *dd = pd->port_dd;

	ipath_cdbg(PROC, "%sabling rcv for unit %u port %u:%u\n",
		   start_stop ? "en" : "dis", dd->ipath_unit,
		   pd->port_port, subport);
	/* only the master (subport 0) touches the hardware */
	if (subport)
		goto bail;
	/* atomically clear receive enable port. */
	if (start_stop) {
		/*
		 * On enable, force in-memory copy of the tail register to
		 * 0, so that protocol code doesn't have to worry about
		 * whether or not the chip has yet updated the in-memory
		 * copy or not on return from the system call.  The chip
		 * always resets its tail register back to 0 on a
		 * transition from disabled to enabled.  This could cause a
		 * problem if software was broken, and did the enable w/o
		 * the disable, but eventually the in-memory copy will be
		 * updated and correct itself, even in the face of software
		 * bugs.
		 */
		if (pd->port_rcvhdrtail_kvaddr)
			ipath_clear_rcvhdrtail(pd);
		set_bit(dd->ipath_r_portenable_shift + pd->port_port,
			&dd->ipath_rcvctrl);
	} else
		clear_bit(dd->ipath_r_portenable_shift + pd->port_port,
			  &dd->ipath_rcvctrl);
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);
	/* now be sure chip saw it before we return */
	ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);
	if (start_stop) {
		/*
		 * And try to be sure that tail reg update has happened too.
		 * This should in theory interlock with the RXE changes to
		 * the tail register.  Don't assign it to the tail register
		 * in memory copy, since we could overwrite an update by the
		 * chip if we did.
		 */
		ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);
	}
	/* always; new head should be equal to new tail; see above */
bail:
	return 0;
}

/*
 * Drop this port's references on its partition keys: for each pkey the
 * port holds, decrement the device-wide refcount and clear the device
 * slot (and rewrite the chip register) when the count reaches zero.
 * NOTE(review): per the ipath_set_part_key comment, this is presumably
 * the "clean up at exit" path run at port close -- confirm at caller.
 */
static void ipath_clean_part_key(struct ipath_portdata *pd,
				 struct ipath_devdata *dd)
{
	int i, j, pchanged = 0;
	u64 oldpkey;

	/* for debugging only */
	oldpkey = (u64) dd->ipath_pkeys[0] |
		((u64) dd->ipath_pkeys[1] << 16) |
		((u64) dd->ipath_pkeys[2] << 32) |
		((u64) dd->ipath_pkeys[3] << 48);

	for (i = 0; i < ARRAY_SIZE(pd->port_pkeys); i++) {
		if (!pd->port_pkeys[i])
			continue;
		ipath_cdbg(VERBOSE, "look for key[%d] %hx in pkeys\n", i,
			   pd->port_pkeys[i]);
		for (j = 0; j < ARRAY_SIZE(dd->ipath_pkeys); j++) {
			/* check for match independent of the global bit */
			if ((dd->ipath_pkeys[j] & 0x7fff) !=
			    (pd->port_pkeys[i] & 0x7fff))
				continue;
			if (atomic_dec_and_test(&dd->ipath_pkeyrefs[j])) {
				ipath_cdbg(VERBOSE, "p%u clear key "
					   "%x matches #%d\n",
					   pd->port_port,
					   pd->port_pkeys[i], j);
				ipath_stats.sps_pkeys[j] =
					dd->ipath_pkeys[j] = 0;
				pchanged++;
			}
			else ipath_cdbg(
				VERBOSE, "p%u key %x matches #%d, "
				"but ref still %d\n", pd->port_port,
				pd->port_pkeys[i], j,
				atomic_read(&dd->ipath_pkeyrefs[j]));
			break;
		}
		pd->port_pkeys[i] = 0;
	}
	if (pchanged) {
		/* repack the remaining keys and update the chip register */
		u64 pkey = (u64) dd->ipath_pkeys[0] |
			((u64) dd->ipath_pkeys[1] << 16) |
			((u64) dd->ipath_pkeys[2] << 32) |
			((u64) dd->ipath_pkeys[3] << 48);
		ipath_cdbg(VERBOSE, "p%u old pkey reg %llx, "
			   "new pkey reg %llx\n", pd->port_port,
			   (unsigned long long) oldpkey,
			   (unsigned long long) pkey);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_partitionkey,
				 pkey);
	}
}

/*
 * Initialize the port data with the receive buffer sizes
 * so this can be done while the master port is locked.
 * Otherwise, there is a race with a slave opening the port
 * and seeing these fields uninitialized.
 */
static void init_user_egr_sizes(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned egrperchunk, egrcnt, size;

	/*
	 * to avoid wasting a lot of memory, we allocate 32KB chunks of
	 * physically contiguous memory, advance through it until used up
	 * and then allocate more.  Of course, we need memory to store those
	 * extra pointers, now.  Started out with 256KB, but under heavy
	 * memory pressure (creating large files and then copying them over
	 * NFS while doing lots of MPI jobs), we hit some allocation
	 * failures, even though we can sleep...  (2.6.10) Still get
	 * failures at 64K.  32K is the lowest we can go without wasting
	 * additional memory.
	 */
	size = 0x8000;
	egrperchunk = size / dd->ipath_rcvegrbufsize;
	egrcnt = dd->ipath_rcvegrcnt;
	pd->port_rcvegrbuf_chunks = (egrcnt + egrperchunk - 1) / egrperchunk;
	pd->port_rcvegrbufs_perchunk = egrperchunk;
	pd->port_rcvegrbuf_size = size;
}

/**
 * ipath_create_user_egr - allocate eager TID buffers
 * @pd: the port to allocate TID buffers for
 *
 * This routine is now quite different for user and kernel, because
 * the kernel uses skb's, for the accelerated network performance
 * This is the user port version
 *
 * Allocate the eager TID buffers and program them into infinipath
 * They are no longer completely contiguous, we do multiple allocation
 * calls.
 */
static int ipath_create_user_egr(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned e, egrcnt, egrperchunk, chunk, egrsize, egroff;
	size_t size;
	int ret;
	gfp_t gfp_flags;

	/*
	 * GFP_USER, but without GFP_FS, so buffer cache can be
	 * coalesced (we hope); otherwise, even at order 4,
	 * heavy filesystem activity makes these fail, and we can
	 * use compound pages.
	 */
	gfp_flags = __GFP_WAIT | __GFP_IO | __GFP_COMP;

	egrcnt = dd->ipath_rcvegrcnt;
	/* TID number offset for this port */
	egroff = (pd->port_port - 1) * egrcnt + dd->ipath_p0_rcvegrcnt;
	egrsize = dd->ipath_rcvegrbufsize;
	ipath_cdbg(VERBOSE, "Allocating %d egr buffers, at egrtid "
		   "offset %x, egrsize %u\n", egrcnt, egroff, egrsize);

	/* chunking parameters were set up by init_user_egr_sizes() */
	chunk = pd->port_rcvegrbuf_chunks;
	egrperchunk = pd->port_rcvegrbufs_perchunk;
	size = pd->port_rcvegrbuf_size;
	pd->port_rcvegrbuf = kmalloc(chunk * sizeof(pd->port_rcvegrbuf[0]),
				     GFP_KERNEL);
	if (!pd->port_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail;
	}
	pd->port_rcvegrbuf_phys =
		kmalloc(chunk * sizeof(pd->port_rcvegrbuf_phys[0]),
			GFP_KERNEL);
	if (!pd->port_rcvegrbuf_phys) {
		ret = -ENOMEM;
		goto bail_rcvegrbuf;
	}
	for (e = 0; e < pd->port_rcvegrbuf_chunks; e++) {
		pd->port_rcvegrbuf[e] = dma_alloc_coherent(
			&dd->pcidev->dev, size,
			&pd->port_rcvegrbuf_phys[e], gfp_flags);

		if (!pd->port_rcvegrbuf[e]) {
			ret = -ENOMEM;
			goto bail_rcvegrbuf_phys;
		}
	}

	pd->port_rcvegr_phys = pd->port_rcvegrbuf_phys[0];

	/* program each eager TID entry with its slice of a chunk */
	for (e = chunk = 0; chunk < pd->port_rcvegrbuf_chunks; chunk++) {
		dma_addr_t pa = pd->port_rcvegrbuf_phys[chunk];
		unsigned i;

		for (i = 0; e < egrcnt && i < egrperchunk; e++, i++) {
			dd->ipath_f_put_tid(dd, e + egroff +
					    (u64 __iomem *)
					    ((char __iomem *)
					     dd->ipath_kregbase +
					     dd->ipath_rcvegrbase),
					    RCVHQ_RCV_TYPE_EAGER, pa);
			pa += egrsize;
		}
		cond_resched();	/* don't hog the cpu */
	}

	ret = 0;
	goto bail;

bail_rcvegrbuf_phys:
	/* unwind any chunks that did get allocated */
	for (e = 0; e < pd->port_rcvegrbuf_chunks &&
		pd->port_rcvegrbuf[e]; e++) {
		dma_free_coherent(&dd->pcidev->dev, size,
				  pd->port_rcvegrbuf[e],
				  pd->port_rcvegrbuf_phys[e]);
	}
	kfree(pd->port_rcvegrbuf_phys);
	pd->port_rcvegrbuf_phys = NULL;
bail_rcvegrbuf:
	kfree(pd->port_rcvegrbuf);
	pd->port_rcvegrbuf = NULL;
bail:
	return ret;
}

/* common code for the mappings on dma_alloc_coherent mem */
static int ipath_mmap_mem(struct vm_area_struct *vma,
			  struct ipath_portdata *pd, unsigned len,
			  int write_ok, void *kvaddr, char *what)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned long pfn;
	int ret;

	if ((vma->vm_end - vma->vm_start) > len) {
		dev_info(&dd->pcidev->dev,
			 "FAIL on %s: len %lx > %x\n", what,
			 vma->vm_end - vma->vm_start, len);
		ret = -EFAULT;
		goto bail;
	}

	if (!write_ok) {
		if (vma->vm_flags & VM_WRITE) {
			dev_info(&dd->pcidev->dev,
				 "%s must be mapped readonly\n", what);
			ret = -EPERM;
			goto bail;
		}

		/* don't allow them to later change with mprotect */
		vma->vm_flags &= ~VM_MAYWRITE;
	}

	pfn = virt_to_phys(kvaddr) >> PAGE_SHIFT;
	ret = remap_pfn_range(vma, vma->vm_start, pfn,
			      len, vma->vm_page_prot);
	if (ret)
		dev_info(&dd->pcidev->dev, "%s port%u mmap of %lx, %x "
			 "bytes r%c failed: %d\n", what, pd->port_port,
			 pfn, len, write_ok?'w':'o', ret);
	else
		ipath_cdbg(VERBOSE, "%s port%u mmaped %lx, %x bytes "
			   "r%c\n", what, pd->port_port, pfn, len,
			   write_ok?'w':'o');
bail:
	return ret;
}

/* map the single page of per-port user registers into the caller's VMA */
static int mmap_ureg(struct vm_area_struct *vma, struct ipath_devdata *dd,
		     u64 ureg)
{
	unsigned long phys;
	int ret;

	/*
	 * This is real hardware, so use io_remap.  This is the mechanism
	 * for the user process to update the head registers for their port
	 * in the chip.
	 */
	if ((vma->vm_end - vma->vm_start) > PAGE_SIZE) {
		dev_info(&dd->pcidev->dev, "FAIL mmap userreg: reqlen "
			 "%lx > PAGE\n", vma->vm_end - vma->vm_start);
		ret = -EFAULT;
	} else {
		phys = dd->ipath_physaddr + ureg;
		/* device registers must not be cached */
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

		vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
		ret = io_remap_pfn_range(vma, vma->vm_start,
					 phys >> PAGE_SHIFT,
					 vma->vm_end - vma->vm_start,
					 vma->vm_page_prot);
	}
	return ret;
}

/* map this caller's share of chip PIO send buffers, write-only */
static int mmap_piobufs(struct vm_area_struct *vma,
			struct ipath_devdata *dd,
			struct ipath_portdata *pd,
			unsigned piobufs, unsigned piocnt)
{
	unsigned long phys;
	int ret;

	/*
	 * When we map the PIO buffers in the chip, we want to map them as
	 * writeonly, no read possible.  This prevents access to previous
	 * process data, and catches users who might try to read the i/o
	 * space due to a bug.
	 */
	if ((vma->vm_end - vma->vm_start) > (piocnt * dd->ipath_palign)) {
		dev_info(&dd->pcidev->dev, "FAIL mmap piobufs: "
			 "reqlen %lx > PAGE\n",
			 vma->vm_end - vma->vm_start);
		ret = -EINVAL;
		goto bail;
	}

	phys = dd->ipath_physaddr + piobufs;

#if defined(__powerpc__)
	/* There isn't a generic way to specify writethrough mappings */
	pgprot_val(vma->vm_page_prot) |= _PAGE_NO_CACHE;
	pgprot_val(vma->vm_page_prot) |= _PAGE_WRITETHRU;
	pgprot_val(vma->vm_page_prot) &= ~_PAGE_GUARDED;
#endif

	/*
	 * don't allow them to later change to readable with mprotect (for
	 * when not initially mapped readable, as is normally the case)
	 */
	vma->vm_flags &= ~VM_MAYREAD;
	vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;

	ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
				 vma->vm_end - vma->vm_start,
				 vma->vm_page_prot);
bail:
	return ret;
}

/* map the port's eager receive buffer chunks, read-only, contiguously */
static int mmap_rcvegrbufs(struct vm_area_struct *vma,
			   struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	unsigned long start, size;
	size_t total_size, i;
	unsigned long pfn;
	int ret;

	size = pd->port_rcvegrbuf_size;
	total_size = pd->port_rcvegrbuf_chunks * size;
	if ((vma->vm_end - vma->vm_start) > total_size) {
		dev_info(&dd->pcidev->dev, "FAIL on egr bufs: "
			 "reqlen %lx > actual %lx\n",
			 vma->vm_end - vma->vm_start,
			 (unsigned long) total_size);
		ret = -EINVAL;
		goto bail;
	}

	if (vma->vm_flags & VM_WRITE) {
		dev_info(&dd->pcidev->dev, "Can't map eager buffers as "
			 "writable (flags=%lx)\n", vma->vm_flags);
		ret = -EPERM;
		goto bail;
	}

	/* don't allow them to later change to writeable with mprotect */
	vma->vm_flags &= ~VM_MAYWRITE;

	start = vma->vm_start;

	for (i = 0; i < pd->port_rcvegrbuf_chunks; i++, start += size) {
		pfn = virt_to_phys(pd->port_rcvegrbuf[i]) >> PAGE_SHIFT;
		ret = remap_pfn_range(vma, start, pfn, size,
				      vma->vm_page_prot);
		if (ret < 0)
			goto bail;
	}
	ret = 0;

bail:
	return ret;
}

/*
 * ipath_file_vma_fault - handle a VMA page fault.
 *
 * vm_pgoff holds a kernel virtual (vmalloc) page index for these VMAs
 * (set up in mmap_kvaddr()), so resolve it via vmalloc_to_page().
 */
static int ipath_file_vma_fault(struct vm_area_struct *vma,
				struct vm_fault *vmf)
{
	struct page *page;

	page = vmalloc_to_page((void *)(vmf->pgoff << PAGE_SHIFT));
	if (!page)
		return VM_FAULT_SIGBUS;
	get_page(page);
	vmf->page = page;

	return 0;
}

static const struct vm_operations_struct ipath_file_vm_ops = {
	.fault = ipath_file_vma_fault,
};

/*
 * Handle mmap requests whose offset names one of the vmalloc'ed subport
 * shadow regions of a shared port.  Returns 1 if the request was handled
 * here, 0 if pgaddr is not a kernel-virtual target (caller should try
 * the physical/chip mappings), or a negative errno on failure.
 */
static int mmap_kvaddr(struct vm_area_struct *vma, u64 pgaddr,
		       struct ipath_portdata *pd, unsigned subport)
{
	unsigned long len;
	struct ipath_devdata *dd;
	void *addr;
	size_t size;
	int ret = 0;

	/* If the port is not shared, all addresses should be physical */
	if (!pd->port_subport_cnt)
		goto bail;

	dd = pd->port_dd;
	size = pd->port_rcvegrbuf_chunks * pd->port_rcvegrbuf_size;

	/*
	 * Each process has all the subport uregbase, rcvhdrq, and
	 * rcvegrbufs mmapped - as an array for all the processes,
	 * and also separately for this process.
	 */
	if (pgaddr == cvt_kvaddr(pd->subport_uregbase)) {
		addr = pd->subport_uregbase;
		size = PAGE_SIZE * pd->port_subport_cnt;
	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base)) {
		addr = pd->subport_rcvhdr_base;
		size = pd->port_rcvhdrq_size * pd->port_subport_cnt;
	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf)) {
		addr = pd->subport_rcvegrbuf;
		size *= pd->port_subport_cnt;
	} else if (pgaddr == cvt_kvaddr(pd->subport_uregbase +
					PAGE_SIZE * subport)) {
		addr = pd->subport_uregbase + PAGE_SIZE * subport;
		size = PAGE_SIZE;
	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvhdr_base +
					pd->port_rcvhdrq_size * subport)) {
		addr = pd->subport_rcvhdr_base +
			pd->port_rcvhdrq_size * subport;
		size = pd->port_rcvhdrq_size;
	} else if (pgaddr == cvt_kvaddr(pd->subport_rcvegrbuf +
					size * subport)) {
		addr = pd->subport_rcvegrbuf + size * subport;
		/* rcvegrbufs are read-only on the slave */
		if (vma->vm_flags & VM_WRITE) {
			dev_info(&dd->pcidev->dev,
				 "Can't map eager buffers as "
				 "writable (flags=%lx)\n", vma->vm_flags);
			ret = -EPERM;
			goto bail;
		}
		/*
		 * Don't allow permission to later change to writeable
		 * with mprotect.
		 */
		vma->vm_flags &= ~VM_MAYWRITE;
	} else {
		goto bail;
	}
	len = vma->vm_end - vma->vm_start;
	if (len > size) {
		ipath_cdbg(MM, "FAIL: reqlen %lx > %zx\n", len, size);
		ret = -EINVAL;
		goto bail;
	}

	/* fault pages in on demand via ipath_file_vma_fault() */
	vma->vm_pgoff = (unsigned long) addr >> PAGE_SHIFT;
	vma->vm_ops = &ipath_file_vm_ops;
	vma->vm_flags |= VM_RESERVED | VM_DONTEXPAND;
	ret = 1;

bail:
	return ret;
}

/**
 * ipath_mmap - mmap various structures into user space
 * @fp: the file pointer
 * @vma: the VM area
 *
 * We use this to have a shared buffer between the kernel and the user code
 * for the rcvhdr queue, egr buffers, and the per-port user regs and pio
 * buffers in the chip.  We have the open and close entries so we can bump
 * the ref count and keep the driver from being unloaded while still mapped.
 */
static int ipath_mmap(struct file *fp, struct vm_area_struct *vma)
{
	struct ipath_portdata *pd;
	struct ipath_devdata *dd;
	u64 pgaddr, ureg;
	unsigned piobufs, piocnt;
	int ret;

	pd = port_fp(fp);
	if (!pd) {
		ret = -EINVAL;
		goto bail;
	}
	dd = pd->port_dd;

	/*
	 * This is the ipath_do_user_init() code, mapping the shared buffers
	 * into the user process. The address referred to by vm_pgoff is the
	 * file offset passed via mmap().  For shared ports, this is the
	 * kernel vmalloc() address of the pages to share with the master.
	 * For non-shared or master ports, this is a physical address.
	 * We only do one mmap for each space mapped.
	 */
	pgaddr = vma->vm_pgoff << PAGE_SHIFT;

	/*
	 * Check for 0 in case one of the allocations failed, but user
	 * called mmap anyway.
	 */
	if (!pgaddr) {
		ret = -EINVAL;
		goto bail;
	}

	ipath_cdbg(MM, "pgaddr %llx vm_start=%lx len %lx port %u:%u:%u\n",
		   (unsigned long long) pgaddr, vma->vm_start,
		   vma->vm_end - vma->vm_start, dd->ipath_unit,
		   pd->port_port, subport_fp(fp));

	/*
	 * Physical addresses must fit in 40 bits for our hardware.
	 * Check for kernel virtual addresses first, anything else must
	 * match a HW or memory address.
	 */
	ret = mmap_kvaddr(vma, pgaddr, pd, subport_fp(fp));
	if (ret) {
		/* mmap_kvaddr() returns 1 for "handled"; map that to 0 */
		if (ret > 0)
			ret = 0;
		goto bail;
	}

	ureg = dd->ipath_uregbase + dd->ipath_ureg_align * pd->port_port;
	if (!pd->port_subport_cnt) {
		/* port is not shared */
		piocnt = pd->port_piocnt;
		piobufs = pd->port_piobufs;
	} else if (!subport_fp(fp)) {
		/* caller is the master; it also gets the remainder bufs */
		piocnt = (pd->port_piocnt / pd->port_subport_cnt) +
			 (pd->port_piocnt % pd->port_subport_cnt);
		piobufs = pd->port_piobufs +
			dd->ipath_palign * (pd->port_piocnt - piocnt);
	} else {
		unsigned slave = subport_fp(fp) - 1;

		/* caller is a slave */
		piocnt = pd->port_piocnt / pd->port_subport_cnt;
		piobufs = pd->port_piobufs +
			dd->ipath_palign * piocnt * slave;
	}

	/* dispatch on which shared region the offset names */
	if (pgaddr == ureg)
		ret = mmap_ureg(vma, dd, ureg);
	else if (pgaddr == piobufs)
		ret = mmap_piobufs(vma, dd, pd, piobufs, piocnt);
	else if (pgaddr == dd->ipath_pioavailregs_phys)
		/* in-memory copy of pioavail registers */
		ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
				     (void *) dd->ipath_pioavailregs_dma,
				     "pioavail registers");
	else if (pgaddr == pd->port_rcvegr_phys)
		ret = mmap_rcvegrbufs(vma, pd);
	else if (pgaddr == (u64) pd->port_rcvhdrq_phys)
		/*
		 * The rcvhdrq itself; readonly except on HT (so have
		 * to allow writable mapping), multiple pages, contiguous
		 * from an i/o perspective.
		 */
		ret = ipath_mmap_mem(vma, pd, pd->port_rcvhdrq_size, 1,
				     pd->port_rcvhdrq,
				     "rcvhdrq");
	else if (pgaddr == (u64) pd->port_rcvhdrqtailaddr_phys)
		/* in-memory copy of rcvhdrq tail register */
		ret = ipath_mmap_mem(vma, pd, PAGE_SIZE, 0,
				     pd->port_rcvhdrtail_kvaddr,
				     "rcvhdrq tail");
	else
		ret = -EINVAL;

	vma->vm_private_data = NULL;

	if (ret < 0)
		dev_info(&dd->pcidev->dev,
			 "Failure %d on off %llx len %lx\n",
			 -ret, (unsigned long long)pgaddr,
			 vma->vm_end - vma->vm_start);
bail:
	return ret;
}

/*
 * Report POLLIN if header-queue-full overflow polling is enabled and the
 * overflow count advanced since the last poll; also latches the new count.
 */
static unsigned ipath_poll_hdrqfull(struct ipath_portdata *pd)
{
	unsigned pollflag = 0;

	if ((pd->poll_type & IPATH_POLL_TYPE_OVERFLOW) &&
	    pd->port_hdrqfull != pd->port_hdrqfull_poll) {
		pollflag |= POLLIN | POLLRDNORM;
		pd->port_hdrqfull_poll = pd->port_hdrqfull;
	}

	return pollflag;
}

/* Poll for urgent-packet arrival (IPATH_POLL_TYPE_URGENT mode). */
static unsigned int ipath_poll_urgent(struct ipath_portdata *pd,
				      struct file *fp,
				      struct poll_table_struct *pt)
{
	unsigned pollflag = 0;
	struct ipath_devdata *dd;

	dd = pd->port_dd;

	/* variable access in ipath_poll_hdrqfull() needs this */
	rmb();
	pollflag = ipath_poll_hdrqfull(pd);

	if (pd->port_urgent != pd->port_urgent_poll) {
		pollflag |= POLLIN | POLLRDNORM;
		pd->port_urgent_poll = pd->port_urgent;
	}

	if (!pollflag) {
		/* this saves a spin_lock/unlock in interrupt handler... */
		set_bit(IPATH_PORT_WAITING_URG, &pd->port_flag);
		/* flush waiting flag so don't miss an event... */
		wmb();
		poll_wait(fp, &pd->port_wait, pt);
	}

	return pollflag;
}

/*
 * Poll for newly arrived packets: compare receive-header-queue head with
 * tail; if equal, arm the receive interrupt and sleep on the port wait
 * queue.
 */
static unsigned int ipath_poll_next(struct ipath_portdata *pd,
				    struct file *fp,
				    struct poll_table_struct *pt)
{
	u32 head;
	u32 tail;
	unsigned pollflag = 0;
	struct ipath_devdata *dd;

	dd = pd->port_dd;

	/* variable access in ipath_poll_hdrqfull() needs this */
	rmb();
	pollflag = ipath_poll_hdrqfull(pd);

	head = ipath_read_ureg32(dd, ur_rcvhdrhead, pd->port_port);
	if (pd->port_rcvhdrtail_kvaddr)
		tail = ipath_get_rcvhdrtail(pd);
	else
		tail = ipath_read_ureg32(dd, ur_rcvhdrtail, pd->port_port);

	if (head != tail)
		pollflag |= POLLIN | POLLRDNORM;
	else {
		/* this saves a spin_lock/unlock in interrupt handler */
		set_bit(IPATH_PORT_WAITING_RCV, &pd->port_flag);
		/* flush waiting flag so we don't miss an event */
		wmb();

		set_bit(pd->port_port + dd->ipath_r_intravail_shift,
			&dd->ipath_rcvctrl);

		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
				 dd->ipath_rcvctrl);

		if (dd->ipath_rhdrhead_intr_off) /* arm rcv interrupt */
			ipath_write_ureg(dd, ur_rcvhdrhead,
					 dd->ipath_rhdrhead_intr_off | head,
					 pd->port_port);

		poll_wait(fp, &pd->port_wait, pt);
	}

	return pollflag;
}

/* poll(2) entry point: dispatch on the port's configured poll type. */
static unsigned int ipath_poll(struct file *fp,
			       struct poll_table_struct *pt)
{
	struct ipath_portdata *pd;
	unsigned pollflag;

	pd = port_fp(fp);
	if (!pd)
		pollflag = 0;
	else if (pd->poll_type & IPATH_POLL_TYPE_URGENT)
		pollflag = ipath_poll_urgent(pd, fp, pt);
	else
		pollflag = ipath_poll_next(pd, fp, pt);

	return pollflag;
}

/* True if the given user software version has a subport implementation. */
static int ipath_supports_subports(int user_swmajor, int user_swminor)
{
	/* no subport implementation prior to software version 1.3 */
	return (user_swmajor > 1) || (user_swminor >= 3);
}

/*
 * True if a user library at user_swmajor.user_swminor can share a port
 * with this driver (compile-time version IPATH_USER_SWMAJOR/SWMINOR).
 */
static int ipath_compatible_subports(int user_swmajor, int user_swminor)
{
	/* this code is written long-hand for clarity */
	if (IPATH_USER_SWMAJOR != user_swmajor) {
		/* no promise of compatibility if major mismatch */
		return 0;
	}
	if (IPATH_USER_SWMAJOR == 1) {
		switch (IPATH_USER_SWMINOR) {
		case 0:
		case 1:
		case 2:
			/* no subport implementation so cannot be compatible
			 */
			return 0;
		case 3:
			/* 3 is only compatible with itself */
			return user_swminor == 3;
		default:
			/* >= 4 are compatible (or are expected to be) */
			return user_swminor >= 4;
		}
	}
	/* make no promises yet for future major versions */
	return 0;
}

/*
 * init_subports - allocate the shared (vmalloc'ed) per-subport regions
 * for a port the user wants to share: uregbase pages, rcvhdr queues and
 * eager buffers, one slice per subport.
 *
 * Returns 0 on success or when sharing is silently declined (zero
 * subports requested, or version mismatch — the port then behaves as
 * unshared); -EINVAL/-ENOMEM on hard failure.
 */
static int init_subports(struct ipath_devdata *dd,
			 struct ipath_portdata *pd,
			 const struct ipath_user_info *uinfo)
{
	int ret = 0;
	unsigned num_subports;
	size_t size;

	/*
	 * If the user is requesting zero subports,
	 * skip the subport allocation.
	 */
	if (uinfo->spu_subport_cnt <= 0)
		goto bail;

	/* Self-consistency check for ipath_compatible_subports() */
	if (ipath_supports_subports(IPATH_USER_SWMAJOR,
				    IPATH_USER_SWMINOR) &&
	    !ipath_compatible_subports(IPATH_USER_SWMAJOR,
				       IPATH_USER_SWMINOR)) {
		dev_info(&dd->pcidev->dev,
			 "Inconsistent ipath_compatible_subports()\n");
		goto bail;
	}

	/* Check for subport compatibility */
	if (!ipath_compatible_subports(uinfo->spu_userversion >> 16,
				       uinfo->spu_userversion & 0xffff)) {
		dev_info(&dd->pcidev->dev,
			 "Mismatched user version (%d.%d) and driver "
			 "version (%d.%d) while port sharing. Ensure "
			 "that driver and library are from the same "
			 "release.\n",
			 (int) (uinfo->spu_userversion >> 16),
			 (int) (uinfo->spu_userversion & 0xffff),
			 IPATH_USER_SWMAJOR,
			 IPATH_USER_SWMINOR);
		goto bail;
	}
	if (uinfo->spu_subport_cnt > INFINIPATH_MAX_SUBPORT) {
		ret = -EINVAL;
		goto bail;
	}

	num_subports = uinfo->spu_subport_cnt;
	pd->subport_uregbase = vzalloc(PAGE_SIZE * num_subports);
	if (!pd->subport_uregbase) {
		ret = -ENOMEM;
		goto bail;
	}
	/* Note: pd->port_rcvhdrq_size isn't initialized yet. */
	size = ALIGN(dd->ipath_rcvhdrcnt * dd->ipath_rcvhdrentsize *
		     sizeof(u32), PAGE_SIZE) * num_subports;
	pd->subport_rcvhdr_base = vzalloc(size);
	if (!pd->subport_rcvhdr_base) {
		ret = -ENOMEM;
		goto bail_ureg;
	}

	pd->subport_rcvegrbuf = vzalloc(pd->port_rcvegrbuf_chunks *
					pd->port_rcvegrbuf_size *
					num_subports);
	if (!pd->subport_rcvegrbuf) {
		ret = -ENOMEM;
		goto bail_rhdr;
	}

	pd->port_subport_cnt = uinfo->spu_subport_cnt;
	pd->port_subport_id = uinfo->spu_subport_id;
	pd->active_slaves = 1;
	/* slaves block in ipath_do_user_init() until master clears this */
	set_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
	goto bail;

bail_rhdr:
	vfree(pd->subport_rcvhdr_base);
bail_ureg:
	vfree(pd->subport_uregbase);
	pd->subport_uregbase = NULL;
bail:
	return ret;
}

/*
 * try_alloc_port - claim port 'port' on device 'dd' for the opening
 * process, allocating the portdata (and TID page-list scratch memory)
 * on first use.  Returns 0 on success, -EBUSY if the port is already
 * in use, or -ENOMEM/init_subports() errors.
 */
static int try_alloc_port(struct ipath_devdata *dd, int port,
			  struct file *fp,
			  const struct ipath_user_info *uinfo)
{
	struct ipath_portdata *pd;
	int ret;

	if (!(pd = dd->ipath_pd[port])) {
		void *ptmp;

		pd = kzalloc(sizeof(struct ipath_portdata), GFP_KERNEL);

		/*
		 * Allocate memory for use in ipath_tid_update() just once
		 * at open, not per call.  Reduces cost of expected send
		 * setup.
		 */
		ptmp = kmalloc(dd->ipath_rcvtidcnt * sizeof(u16) +
			       dd->ipath_rcvtidcnt *
			       sizeof(struct page **),
			       GFP_KERNEL);
		if (!pd || !ptmp) {
			ipath_dev_err(dd, "Unable to allocate portdata "
				      "memory, failing open\n");
			ret = -ENOMEM;
			kfree(pd);
			kfree(ptmp);
			goto bail;
		}
		dd->ipath_pd[port] = pd;
		dd->ipath_pd[port]->port_port = port;
		dd->ipath_pd[port]->port_dd = dd;
		dd->ipath_pd[port]->port_tid_pg_list = ptmp;
		init_waitqueue_head(&dd->ipath_pd[port]->port_wait);
	}
	if (!pd->port_cnt) {
		pd->userversion = uinfo->spu_userversion;
		init_user_egr_sizes(pd);
		if ((ret = init_subports(dd, pd, uinfo)) != 0)
			goto bail;
		ipath_cdbg(PROC, "%s[%u] opened unit:port %u:%u\n",
			   current->comm, current->pid, dd->ipath_unit,
			   port);
		pd->port_cnt = 1;
		port_fp(fp) = pd;
		pd->port_pid = get_pid(task_pid(current));
		strlcpy(pd->port_comm, current->comm,
			sizeof(pd->port_comm));
		ipath_stats.sps_ports++;
		ret = 0;
	} else
		ret = -EBUSY;

bail:
	return ret;
}

/* True if the unit is present, initialized, and has a usable link. */
static inline int usable(struct ipath_devdata *dd)
{
	return dd &&
		(dd->ipath_flags & IPATH_PRESENT) &&
		dd->ipath_kregbase &&
		dd->ipath_lid &&
		!(dd->ipath_flags & (IPATH_LINKDOWN | IPATH_DISABLED
				     | IPATH_LINKUNK));
}

/*
 * find_free_port - grab the first free user port (ports start at 1;
 * port 0 is the kernel's) on the given unit.
 */
static int find_free_port(int unit, struct file *fp,
			  const struct ipath_user_info *uinfo)
{
	struct ipath_devdata *dd = ipath_lookup(unit);
	int ret, i;

	if (!dd) {
		ret = -ENODEV;
		goto bail;
	}

	if (!usable(dd)) {
		ret = -ENETDOWN;
		goto bail;
	}

	for (i = 1; i < dd->ipath_cfgports; i++) {
		ret = try_alloc_port(dd, i, fp, uinfo);
		if (ret != -EBUSY)
			goto bail;
	}
	ret = -EBUSY;

bail:
	return ret;
}

/*
 * find_best_unit - pick a unit for an open that did not name a specific
 * device, preferring a chip "close" to the caller's CPU affinity.
 */
static int find_best_unit(struct file *fp,
			  const struct ipath_user_info *uinfo)
{
	int ret = 0, i, prefunit = -1, devmax;
	int maxofallports, npresent, nup;
	int ndev;

	devmax = ipath_count_units(&npresent, &nup, &maxofallports);

	/*
	 * This code is present to allow a knowledgeable person to
	 * specify the layout of processes to processors before opening
	 * this driver, and then we'll assign the process to the "closest"
	 * InfiniPath chip to that processor (we assume
	 * reasonable connectivity, for now).  This code assumes that if
	 * affinity has been set before this point, that at most one cpu
	 * is set; for now this is reasonable.  I check for both
	 * cpumask_empty() and cpumask_full(), in case some kernel variant
	 * sets none of the bits when no affinity is set.  2.6.11 and 12
	 * kernels have all present cpus set.  Some day we'll have to fix
	 * it up further to handle a cpu subset.  This algorithm fails for
	 * two HT chips connected in tunnel fashion.  Eventually this
	 * needs real topology information.  There may be some issues with
	 * dual core numbering as well.  This needs more work prior to
	 * release.
	 */
	if (!cpumask_empty(&current->cpus_allowed) &&
	    !cpumask_full(&current->cpus_allowed)) {
		int ncpus = num_online_cpus(), curcpu = -1, nset = 0;
		for (i = 0; i < ncpus; i++)
			if (cpumask_test_cpu(i, &current->cpus_allowed)) {
				ipath_cdbg(PROC, "%s[%u] affinity set for "
					   "cpu %d/%d\n", current->comm,
					   current->pid, i, ncpus);
				curcpu = i;
				nset++;
			}
		if (curcpu != -1 && nset != ncpus) {
			if (npresent) {
				/* map cpu number to a preferred unit */
				prefunit = curcpu / (ncpus / npresent);
				ipath_cdbg(PROC,"%s[%u] %d chips, %d cpus, "
					   "%d cpus/chip, select unit %d\n",
					   current->comm, current->pid,
					   npresent, ncpus,
					   ncpus / npresent, prefunit);
			}
		}
	}

	/*
	 * user ports start at 1, kernel port is 0
	 * For now, we do round-robin access across all chips
	 */

	if (prefunit != -1)
		devmax = prefunit + 1;
recheck:
	for (i = 1; i < maxofallports; i++) {
		for (ndev = prefunit != -1 ? prefunit : 0; ndev < devmax;
		     ndev++) {
			struct ipath_devdata *dd = ipath_lookup(ndev);

			if (!usable(dd))
				continue; /* can't use this unit */
			if (i >= dd->ipath_cfgports)
				/*
				 * Maxed out on users of this unit. Try
				 * next.
				 */
				continue;
			ret = try_alloc_port(dd, i, fp, uinfo);
			if (!ret)
				goto done;
		}
	}

	if (npresent) {
		if (nup == 0) {
			ret = -ENETDOWN;
			ipath_dbg("No ports available (none initialized "
				  "and ready)\n");
		} else {
			if (prefunit > 0) {
				/* if started above 0, retry from 0 */
				ipath_cdbg(PROC,
					   "%s[%u] no ports on prefunit "
					   "%d, clear and re-check\n",
					   current->comm, current->pid,
					   prefunit);
				devmax = ipath_count_units(NULL, NULL,
							   NULL);
				prefunit = -1;
				goto recheck;
			}
			ret = -EBUSY;
			ipath_dbg("No ports available\n");
		}
	} else {
		ret = -ENXIO;
		ipath_dbg("No boards found\n");
	}

done:
	return ret;
}

/*
 * find_shared_port - join an already-open shared port whose subport id,
 * subport count, and user version match the caller's request.
 *
 * Returns 1 when a port was joined, 0 when none matched (caller opens
 * a port of its own), or -EINVAL on a mismatched/full master.
 */
static int find_shared_port(struct file *fp,
			    const struct ipath_user_info *uinfo)
{
	int devmax, ndev, i;
	int ret = 0;

	devmax = ipath_count_units(NULL, NULL, NULL);

	for (ndev = 0; ndev < devmax; ndev++) {
		struct ipath_devdata *dd = ipath_lookup(ndev);

		if (!usable(dd))
			continue;
		for (i = 1; i < dd->ipath_cfgports; i++) {
			struct ipath_portdata *pd = dd->ipath_pd[i];

			/* Skip ports which are not yet open */
			if (!pd || !pd->port_cnt)
				continue;
			/* Skip port if it doesn't match the requested one */
			if (pd->port_subport_id != uinfo->spu_subport_id)
				continue;
			/* Verify the sharing process matches the master */
			if (pd->port_subport_cnt != uinfo->spu_subport_cnt ||
			    pd->userversion != uinfo->spu_userversion ||
			    pd->port_cnt >= pd->port_subport_cnt) {
				ret = -EINVAL;
				goto done;
			}
			port_fp(fp) = pd;
			subport_fp(fp) = pd->port_cnt++;
			pd->port_subpid[subport_fp(fp)] =
				get_pid(task_pid(current));
			tidcursor_fp(fp) = 0;
			pd->active_slaves |= 1 << subport_fp(fp);
			ipath_cdbg(PROC,
				   "%s[%u] %u sharing %s[%u] unit:port %u:%u\n",
				   current->comm, current->pid,
				   subport_fp(fp),
				   pd->port_comm, pid_nr(pd->port_pid),
				   dd->ipath_unit, pd->port_port);
			ret = 1;
			goto done;
		}
	}

done:
	return ret;
}

static int ipath_open(struct inode *in, struct file *fp)
{
	/* The real work is performed later in ipath_assign_port() */
	fp->private_data = kzalloc(sizeof(struct ipath_filedata),
				   GFP_KERNEL);
	return fp->private_data ? 0 : -ENOMEM;
}

/* Get port early, so can set affinity prior to memory allocation */
static int ipath_assign_port(struct file *fp,
			     const struct ipath_user_info *uinfo)
{
	int ret;
	int i_minor;
	unsigned swmajor, swminor;

	/* Check to be sure we haven't already initialized this file */
	if (port_fp(fp)) {
		ret = -EINVAL;
		goto done;
	}

	/* for now, if major version is different, bail */
	swmajor = uinfo->spu_userversion >> 16;
	if (swmajor != IPATH_USER_SWMAJOR) {
		ipath_dbg("User major version %d not same as driver "
			  "major %d\n", uinfo->spu_userversion >> 16,
			  IPATH_USER_SWMAJOR);
		ret = -ENODEV;
		goto done;
	}

	swminor = uinfo->spu_userversion & 0xffff;
	if (swminor != IPATH_USER_SWMINOR)
		ipath_dbg("User minor version %d not same as driver "
			  "minor %d\n", swminor, IPATH_USER_SWMINOR);

	mutex_lock(&ipath_mutex);

	/* try sharing first; a find_shared_port() result of 1 is success */
	if (ipath_compatible_subports(swmajor, swminor) &&
	    uinfo->spu_subport_cnt &&
	    (ret = find_shared_port(fp, uinfo))) {
		if (ret > 0)
			ret = 0;
		goto done_chk_sdma;
	}

	/* minor 0 means "any unit"; otherwise unit = minor - 1 */
	i_minor = iminor(fp->f_path.dentry->d_inode) -
		IPATH_USER_MINOR_BASE;
	ipath_cdbg(VERBOSE, "open on dev %lx (minor %d)\n",
		   (long)fp->f_path.dentry->d_inode->i_rdev, i_minor);

	if (i_minor)
		ret = find_free_port(i_minor - 1, fp, uinfo);
	else
		ret = find_best_unit(fp, uinfo);

done_chk_sdma:
	if (!ret) {
		struct ipath_filedata *fd = fp->private_data;
		const struct ipath_portdata *pd = fd->pd;
		const struct ipath_devdata *dd = pd->port_dd;

		fd->pq = ipath_user_sdma_queue_create(&dd->pcidev->dev,
						      dd->ipath_unit,
						      pd->port_port,
						      fd->subport);

		if (!fd->pq)
			ret = -ENOMEM;
	}

	mutex_unlock(&ipath_mutex);

done:
	return ret;
}

/*
 * ipath_do_user_init - second-stage init for a newly assigned port:
 * carve out PIO buffers, allocate the rcvhdr queue and eager buffers,
 * and enable receive on the port.
 */
static int ipath_do_user_init(struct file *fp,
			      const struct ipath_user_info *uinfo)
{
	int ret;
	struct ipath_portdata *pd = port_fp(fp);
	struct ipath_devdata *dd;
	u32 head32;

	/* Subports don't need to initialize anything since master did it.
	 */
	if (subport_fp(fp)) {
		/* just wait for the master to finish initializing */
		ret = wait_event_interruptible(pd->port_wait,
			!test_bit(IPATH_PORT_MASTER_UNINIT,
				  &pd->port_flag));
		goto done;
	}

	dd = pd->port_dd;

	if (uinfo->spu_rcvhdrsize) {
		ret = ipath_setrcvhdrsize(dd, uinfo->spu_rcvhdrsize);
		if (ret)
			goto done;
	}

	/* for now we do nothing with rcvhdrcnt: uinfo->spu_rcvhdrcnt */

	/* some ports may get extra buffers, calculate that here */
	if (pd->port_port <= dd->ipath_ports_extrabuf)
		pd->port_piocnt = dd->ipath_pbufsport + 1;
	else
		pd->port_piocnt = dd->ipath_pbufsport;

	/* for right now, kernel piobufs are at end, so port 1 is at 0 */
	if (pd->port_port <= dd->ipath_ports_extrabuf)
		pd->port_pio_base = (dd->ipath_pbufsport + 1)
			* (pd->port_port - 1);
	else
		pd->port_pio_base = dd->ipath_ports_extrabuf +
			dd->ipath_pbufsport * (pd->port_port - 1);
	pd->port_piobufs = dd->ipath_piobufbase +
		pd->port_pio_base * dd->ipath_palign;
	ipath_cdbg(VERBOSE, "piobuf base for port %u is 0x%x, piocnt %u,"
		   " first pio %u\n", pd->port_port, pd->port_piobufs,
		   pd->port_piocnt, pd->port_pio_base);
	/* hand this port's buffers from the kernel to the user */
	ipath_chg_pioavailkernel(dd, pd->port_pio_base, pd->port_piocnt, 0);

	/*
	 * Now allocate the rcvhdr Q and eager TIDs; skip the TID
	 * array for time being.  If pd->port_port > chip-supported,
	 * we need to do extra stuff here to handle by handling overflow
	 * through port 0, someday
	 */
	ret = ipath_create_rcvhdrq(dd, pd);
	if (!ret)
		ret = ipath_create_user_egr(pd);
	if (ret)
		goto done;

	/*
	 * set the eager head register for this port to the current values
	 * of the tail pointers, since we don't know if they were
	 * updated on last use of the port.
	 */
	head32 = ipath_read_ureg32(dd, ur_rcvegrindextail, pd->port_port);
	ipath_write_ureg(dd, ur_rcvegrindexhead, head32, pd->port_port);
	pd->port_lastrcvhdrqtail = -1;
	ipath_cdbg(VERBOSE, "Wrote port%d egrhead %x from tail regs\n",
		   pd->port_port, head32);
	pd->port_tidcursor = 0; /* start at beginning after open */

	/* initialize poll variables... */
	pd->port_urgent = 0;
	pd->port_urgent_poll = 0;
	pd->port_hdrqfull_poll = pd->port_hdrqfull;

	/*
	 * Now enable the port for receive.
	 * For chips that are set to DMA the tail register to memory
	 * when they change (and when the update bit transitions from
	 * 0 to 1.  So for those chips, we turn it off and then back on.
	 * This will (very briefly) affect any other open ports, but the
	 * duration is very short, and therefore isn't an issue.  We
	 * explicitly set the in-memory tail copy to 0 beforehand, so we
	 * don't have to wait to be sure the DMA update has happened
	 * (chip resets head/tail to 0 on transition to enable).
	 */
	set_bit(dd->ipath_r_portenable_shift + pd->port_port,
		&dd->ipath_rcvctrl);
	if (!(dd->ipath_flags & IPATH_NODMA_RTAIL)) {
		if (pd->port_rcvhdrtail_kvaddr)
			ipath_clear_rcvhdrtail(pd);
		ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
				 dd->ipath_rcvctrl &
				 ~(1ULL << dd->ipath_r_tailupd_shift));
	}
	ipath_write_kreg(dd, dd->ipath_kregs->kr_rcvctrl,
			 dd->ipath_rcvctrl);
	/* Notify any waiting slaves */
	if (pd->port_subport_cnt) {
		clear_bit(IPATH_PORT_MASTER_UNINIT, &pd->port_flag);
		wake_up(&pd->port_wait);
	}
done:
	return ret;
}

/**
 * unlock_exptid - unlock any expected TID entries port still had in use
 * @pd: port
 *
 * We don't actually update the chip here, because we do a bulk update
 * below, using ipath_f_clear_tids.
 */
static void unlock_expected_tids(struct ipath_portdata *pd)
{
	struct ipath_devdata *dd = pd->port_dd;
	int port_tidbase = pd->port_port * dd->ipath_rcvtidcnt;
	int i, cnt = 0, maxtid = port_tidbase + dd->ipath_rcvtidcnt;

	ipath_cdbg(VERBOSE, "Port %u unlocking any locked expTID pages\n",
		   pd->port_port);
	for (i = port_tidbase; i < maxtid; i++) {
		struct page *ps = dd->ipath_pageshadow[i];

		if (!ps)
			continue;

		dd->ipath_pageshadow[i] = NULL;
		pci_unmap_page(dd->pcidev, dd->ipath_physshadow[i],
			       PAGE_SIZE, PCI_DMA_FROMDEVICE);
		ipath_release_user_pages_on_close(&ps, 1);
		cnt++;
		ipath_stats.sps_pageunlocks++;
	}
	if (cnt)
		ipath_cdbg(VERBOSE, "Port %u locked %u expTID entries\n",
			   pd->port_port, cnt);
	if (ipath_stats.sps_pagelocks || ipath_stats.sps_pageunlocks)
		ipath_cdbg(VERBOSE, "%llu pages locked, %llu unlocked\n",
			   (unsigned long long) ipath_stats.sps_pagelocks,
			   (unsigned long long)
			   ipath_stats.sps_pageunlocks);
}

/*
 * ipath_close - release a port (or a slave's share of it): drain the
 * sdma queue, and for the last closer, disable receive, disarm pio
 * buffers, release expected-TID pages and free the port data.
 */
static int ipath_close(struct inode *in, struct file *fp)
{
	int ret = 0;
	struct ipath_filedata *fd;
	struct ipath_portdata *pd;
	struct ipath_devdata *dd;
	unsigned long flags;
	unsigned port;
	struct pid *pid;

	ipath_cdbg(VERBOSE, "close on dev %lx, private data %p\n",
		   (long)in->i_rdev, fp->private_data);

	mutex_lock(&ipath_mutex);

	fd = fp->private_data;
	fp->private_data = NULL;
	pd = fd->pd;
	if (!pd) {
		mutex_unlock(&ipath_mutex);
		goto bail;
	}

	dd = pd->port_dd;

	/* drain user sdma queue */
	ipath_user_sdma_queue_drain(dd, fd->pq);
	ipath_user_sdma_queue_destroy(fd->pq);

	if (--pd->port_cnt) {
		/*
		 * XXX If the master closes the port before the slave(s),
		 * revoke the mmap for the eager receive queue so
		 * the slave(s) don't wait for receive data forever.
		 */
		pd->active_slaves &= ~(1 << fd->subport);
		put_pid(pd->port_subpid[fd->subport]);
		pd->port_subpid[fd->subport] = NULL;
		mutex_unlock(&ipath_mutex);
		goto bail;
	}
	/* early; no interrupt users after this */
	spin_lock_irqsave(&dd->ipath_uctxt_lock, flags);
	port = pd->port_port;
	dd->ipath_pd[port] = NULL;
	pid = pd->port_pid;
	pd->port_pid = NULL;
	spin_unlock_irqrestore(&dd->ipath_uctxt_lock, flags);

	if (pd->port_rcvwait_to || pd->port_piowait_to
	    || pd->port_rcvnowait || pd->port_pionowait) {
		ipath_cdbg(VERBOSE, "port%u, %u rcv, %u pio wait timeo; "
			   "%u rcv %u, pio already\n",
			   pd->port_port, pd->port_rcvwait_to,
			   pd->port_piowait_to, pd->port_rcvnowait,
			   pd->port_pionowait);
		pd->port_rcvwait_to = pd->port_piowait_to =
			pd->port_rcvnowait = pd->port_pionowait = 0;
	}
	if (pd->port_flag) {
		ipath_cdbg(PROC, "port %u port_flag set: 0x%lx\n",
			   pd->port_port, pd->port_flag);
		pd->port_flag = 0;
	}

	if (dd->ipath_kregbase) {
		/* atomically clear receive enable port and intr avail. */
		clear_bit(dd->ipath_r_portenable_shift + port,
			  &dd->ipath_rcvctrl);
		clear_bit(pd->port_port + dd->ipath_r_intravail_shift,
			  &dd->ipath_rcvctrl);
		ipath_write_kreg( dd, dd->ipath_kregs->kr_rcvctrl,
			dd->ipath_rcvctrl);
		/* and read back from chip to be sure that nothing
		 * else is in flight when we do the rest */
		(void)ipath_read_kreg64(dd, dd->ipath_kregs->kr_scratch);

		/* clean up the pkeys for this port user */
		ipath_clean_part_key(pd, dd);
		/*
		 * be paranoid, and never write 0's to these, just use an
		 * unused part of the port 0 tail page.  Of course,
		 * rcvhdraddr points to a large chunk of memory, so this
		 * could still trash things, but at least it won't trash
		 * page 0, and by disabling the port, it should stop "soon",
		 * even if a packet or two is in already in flight after we
		 * disabled the port.
		 */
		ipath_write_kreg_port(dd,
			dd->ipath_kregs->kr_rcvhdrtailaddr, port,
			dd->ipath_dummy_hdrq_phys);
		ipath_write_kreg_port(dd, dd->ipath_kregs->kr_rcvhdraddr,
			pd->port_port, dd->ipath_dummy_hdrq_phys);

		ipath_disarm_piobufs(dd, pd->port_pio_base,
				     pd->port_piocnt);
		/* return this port's pio buffers to the kernel */
		ipath_chg_pioavailkernel(dd, pd->port_pio_base,
					 pd->port_piocnt, 1);

		dd->ipath_f_clear_tids(dd, pd->port_port);

		if (dd->ipath_pageshadow)
			unlock_expected_tids(pd);
		ipath_stats.sps_ports--;
		ipath_cdbg(PROC, "%s[%u] closed port %u:%u\n",
			   pd->port_comm, pid_nr(pid),
			   dd->ipath_unit, port);
	}

	put_pid(pid);
	mutex_unlock(&ipath_mutex);
	ipath_free_pddata(dd, pd); /* after releasing the mutex */

bail:
	kfree(fd);
	return ret;
}

/*
 * ipath_port_info - copy unit/port/subport info out to the user;
 * trims the struct for old libraries that predate subports.
 */
static int ipath_port_info(struct ipath_portdata *pd, u16 subport,
			   struct ipath_port_info __user *uinfo)
{
	struct ipath_port_info info;
	int nup;
	int ret;
	size_t sz;

	(void) ipath_count_units(NULL, &nup, NULL);
	info.num_active = nup;
	info.unit = pd->port_dd->ipath_unit;
	info.port = pd->port_port;
	info.subport = subport;
	/* Don't return new fields if old library opened the port. */
	if (ipath_supports_subports(pd->userversion >> 16,
				    pd->userversion & 0xffff)) {
		/* Number of user ports available for this device.
		 */
		info.num_ports = pd->port_dd->ipath_cfgports - 1;
		info.num_subports = pd->port_subport_cnt;
		sz = sizeof(info);
	} else
		/* omit the two trailing u16 subport fields */
		sz = sizeof(info) - 2 * sizeof(u16);

	if (copy_to_user(uinfo, &info, sz)) {
		ret = -EFAULT;
		goto bail;
	}
	ret = 0;

bail:
	return ret;
}

/* Copy the port's active-slave bitmask to a user-supplied address. */
static int ipath_get_slave_info(struct ipath_portdata *pd,
				void __user *slave_mask_addr)
{
	int ret = 0;

	if (copy_to_user(slave_mask_addr, &pd->active_slaves,
			 sizeof(u32)))
		ret = -EFAULT;
	return ret;
}

/* Report the user-sdma in-flight counter to user space. */
static int ipath_sdma_get_inflight(struct ipath_user_sdma_queue *pq,
				   u32 __user *inflightp)
{
	const u32 val = ipath_user_sdma_inflight_counter(pq);

	if (put_user(val, inflightp))
		return -EFAULT;

	return 0;
}

/* Advance the sdma queue, then report the completion counter. */
static int ipath_sdma_get_complete(struct ipath_devdata *dd,
				   struct ipath_user_sdma_queue *pq,
				   u32 __user *completep)
{
	u32 val;
	int err;

	err = ipath_user_sdma_make_progress(dd, pq);
	if (err < 0)
		return err;

	val = ipath_user_sdma_complete_counter(pq);
	if (put_user(val, completep))
		return -EFAULT;

	return 0;
}

/*
 * ipath_write - command channel for user space: the first bytes select
 * a command type, the rest carry that command's argument struct.
 * Returns the number of bytes consumed on success, or a negative errno.
 */
static ssize_t ipath_write(struct file *fp, const char __user *data,
			   size_t count, loff_t *off)
{
	const struct ipath_cmd __user *ucmd;
	struct ipath_portdata *pd;
	const void __user *src;
	size_t consumed, copy;
	struct ipath_cmd cmd;
	ssize_t ret = 0;
	void *dest;

	if (count < sizeof(cmd.type)) {
		ret = -EINVAL;
		goto bail;
	}

	ucmd = (const struct ipath_cmd __user *) data;

	if (copy_from_user(&cmd.type, &ucmd->type, sizeof(cmd.type))) {
		ret = -EFAULT;
		goto bail;
	}

	consumed = sizeof(cmd.type);

	/* first pass: pick the size and location of the argument struct */
	switch (cmd.type) {
	case IPATH_CMD_ASSIGN_PORT:
	case __IPATH_CMD_USER_INIT:
	case IPATH_CMD_USER_INIT:
		copy = sizeof(cmd.cmd.user_info);
		dest = &cmd.cmd.user_info;
		src = &ucmd->cmd.user_info;
		break;
	case IPATH_CMD_RECV_CTRL:
		copy = sizeof(cmd.cmd.recv_ctrl);
		dest = &cmd.cmd.recv_ctrl;
		src = &ucmd->cmd.recv_ctrl;
		break;
	case IPATH_CMD_PORT_INFO:
		copy = sizeof(cmd.cmd.port_info);
		dest = &cmd.cmd.port_info;
		src = &ucmd->cmd.port_info;
		break;
	case IPATH_CMD_TID_UPDATE:
	case IPATH_CMD_TID_FREE:
		copy = sizeof(cmd.cmd.tid_info);
		dest = &cmd.cmd.tid_info;
		src = &ucmd->cmd.tid_info;
		break;
	case IPATH_CMD_SET_PART_KEY:
		copy = sizeof(cmd.cmd.part_key);
		dest = &cmd.cmd.part_key;
		src = &ucmd->cmd.part_key;
		break;
	case __IPATH_CMD_SLAVE_INFO:
		copy = sizeof(cmd.cmd.slave_mask_addr);
		dest = &cmd.cmd.slave_mask_addr;
		src = &ucmd->cmd.slave_mask_addr;
		break;
	case IPATH_CMD_PIOAVAILUPD:	// force an update of PIOAvail reg
		copy = 0;
		src = NULL;
		dest = NULL;
		break;
	case IPATH_CMD_POLL_TYPE:
		copy = sizeof(cmd.cmd.poll_type);
		dest = &cmd.cmd.poll_type;
		src = &ucmd->cmd.poll_type;
		break;
	case IPATH_CMD_ARMLAUNCH_CTRL:
		copy = sizeof(cmd.cmd.armlaunch_ctrl);
		dest = &cmd.cmd.armlaunch_ctrl;
		src = &ucmd->cmd.armlaunch_ctrl;
		break;
	case IPATH_CMD_SDMA_INFLIGHT:
		copy = sizeof(cmd.cmd.sdma_inflight);
		dest = &cmd.cmd.sdma_inflight;
		src = &ucmd->cmd.sdma_inflight;
		break;
	case IPATH_CMD_SDMA_COMPLETE:
		copy = sizeof(cmd.cmd.sdma_complete);
		dest = &cmd.cmd.sdma_complete;
		src = &ucmd->cmd.sdma_complete;
		break;
	default:
		ret = -EINVAL;
		goto bail;
	}

	if (copy) {
		if ((count - consumed) < copy) {
			ret = -EINVAL;
			goto bail;
		}

		if (copy_from_user(dest, src, copy)) {
			ret = -EFAULT;
			goto bail;
		}

		consumed += copy;
	}

	/* only the two port-assignment commands may run without a port */
	pd = port_fp(fp);
	if (!pd && cmd.type != __IPATH_CMD_USER_INIT &&
	    cmd.type != IPATH_CMD_ASSIGN_PORT) {
		ret = -EINVAL;
		goto bail;
	}

	/* second pass: dispatch the command */
	switch (cmd.type) {
	case IPATH_CMD_ASSIGN_PORT:
		ret = ipath_assign_port(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		break;
	case __IPATH_CMD_USER_INIT:
		/* backwards compatibility, get port first */
		ret = ipath_assign_port(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		/* and fall through to current version. */
	case IPATH_CMD_USER_INIT:
		ret = ipath_do_user_init(fp, &cmd.cmd.user_info);
		if (ret)
			goto bail;
		ret = ipath_get_base_info(
			fp, (void __user *) (unsigned long)
			cmd.cmd.user_info.spu_base_info,
			cmd.cmd.user_info.spu_base_info_size);
		break;
	case IPATH_CMD_RECV_CTRL:
		ret = ipath_manage_rcvq(pd, subport_fp(fp),
					cmd.cmd.recv_ctrl);
		break;
	case IPATH_CMD_PORT_INFO:
		ret = ipath_port_info(pd, subport_fp(fp),
				      (struct ipath_port_info __user *)
				      (unsigned long) cmd.cmd.port_info);
		break;
	case IPATH_CMD_TID_UPDATE:
		ret = ipath_tid_update(pd, fp, &cmd.cmd.tid_info);
		break;
	case IPATH_CMD_TID_FREE:
		ret = ipath_tid_free(pd, subport_fp(fp),
				     &cmd.cmd.tid_info);
		break;
	case IPATH_CMD_SET_PART_KEY:
		ret = ipath_set_part_key(pd, cmd.cmd.part_key);
		break;
	case __IPATH_CMD_SLAVE_INFO:
		ret = ipath_get_slave_info(pd,
					   (void __user *) (unsigned long)
					   cmd.cmd.slave_mask_addr);
		break;
	case IPATH_CMD_PIOAVAILUPD:
		ipath_force_pio_avail_update(pd->port_dd);
		break;
	case IPATH_CMD_POLL_TYPE:
		pd->poll_type = cmd.cmd.poll_type;
		break;
	case IPATH_CMD_ARMLAUNCH_CTRL:
		if (cmd.cmd.armlaunch_ctrl)
			ipath_enable_armlaunch(pd->port_dd);
		else
			ipath_disable_armlaunch(pd->port_dd);
		break;
	case IPATH_CMD_SDMA_INFLIGHT:
		ret = ipath_sdma_get_inflight(user_sdma_queue_fp(fp),
					      (u32 __user *) (unsigned long)
					      cmd.cmd.sdma_inflight);
		break;
	case IPATH_CMD_SDMA_COMPLETE:
		ret = ipath_sdma_get_complete(pd->port_dd,
					      user_sdma_queue_fp(fp),
					      (u32 __user *) (unsigned long)
					      cmd.cmd.sdma_complete);
		break;
	}

	if (ret >= 0)
		ret = consumed;

bail:
	return ret;
}

/* aio_write entry: hand the iovec to the user-sdma engine. */
static ssize_t ipath_writev(struct kiocb *iocb, const struct iovec *iov,
			    unsigned long dim, loff_t off)
{
	struct file *filp = iocb->ki_filp;
	struct ipath_filedata *fp = filp->private_data;
	struct ipath_portdata *pd = port_fp(filp);
	struct ipath_user_sdma_queue *pq = fp->pq;

	if (!dim)
		return -EINVAL;

	return ipath_user_sdma_writev(pd->port_dd, pq, iov, dim);
}

static struct class *ipath_class;

static int init_cdev(int minor, char *name,
		     const struct file_operations *fops,
struct cdev **cdevp, struct device **devp)
{
	const dev_t dev = MKDEV(IPATH_MAJOR, minor);
	struct cdev *cdev = NULL;
	struct device *device = NULL;
	int ret;

	cdev = cdev_alloc();
	if (!cdev) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not allocate cdev for minor %d, %s\n",
		       minor, name);
		ret = -ENOMEM;
		goto done;
	}

	cdev->owner = THIS_MODULE;
	cdev->ops = fops;
	kobject_set_name(&cdev->kobj, name);

	/* make the device live: visible to open() from here on */
	ret = cdev_add(cdev, dev, 1);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME
		       ": Could not add cdev for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		goto err_cdev;
	}

	device = device_create(ipath_class, NULL, dev, NULL, name);

	if (IS_ERR(device)) {
		ret = PTR_ERR(device);
		printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
		       "device for minor %d, %s (err %d)\n",
		       minor, name, -ret);
		goto err_cdev;
	}

	goto done;

err_cdev:
	cdev_del(cdev);
	cdev = NULL;

done:
	/* publish results only on success; clear on any failure */
	if (ret >= 0) {
		*cdevp = cdev;
		*devp = device;
	} else {
		*cdevp = NULL;
		*devp = NULL;
	}

	return ret;
}

/* Public wrapper around init_cdev() for other parts of the driver. */
int ipath_cdev_init(int minor, char *name, const struct file_operations *fops,
		    struct cdev **cdevp, struct device **devp)
{
	return init_cdev(minor, name, fops, cdevp, devp);
}

/* Tear down the device/cdev pair created by init_cdev(); NULLs both
 * pointers so a double cleanup is harmless. */
static void cleanup_cdev(struct cdev **cdevp,
			 struct device **devp)
{
	struct device *dev = *devp;

	if (dev) {
		device_unregister(dev);
		*devp = NULL;
	}

	if (*cdevp) {
		cdev_del(*cdevp);
		*cdevp = NULL;
	}
}

/* Public wrapper around cleanup_cdev(). */
void ipath_cdev_cleanup(struct cdev **cdevp,
			struct device **devp)
{
	cleanup_cdev(cdevp, devp);
}

/* The "wildcard" minor 0 device ("ipath") that matches any unit. */
static struct cdev *wildcard_cdev;
static struct device *wildcard_dev;

static const dev_t dev = MKDEV(IPATH_MAJOR, 0);

/*
 * One-time setup of the chrdev region and device class, done when the
 * first unit registers user support. Undone by user_cleanup().
 */
static int user_init(void)
{
	int ret;

	ret = register_chrdev_region(dev, IPATH_NMINORS, IPATH_DRV_NAME);
	if (ret < 0) {
		printk(KERN_ERR IPATH_DRV_NAME ": Could not register "
		       "chrdev region (err %d)\n", -ret);
		goto done;
	}

	ipath_class = class_create(THIS_MODULE, IPATH_DRV_NAME);

	if (IS_ERR(ipath_class)) {
		ret = PTR_ERR(ipath_class);
		printk(KERN_ERR IPATH_DRV_NAME ": Could not create "
		       "device class (err %d)\n", -ret);
		goto bail;
	}

	goto done;
bail:
unregister_chrdev_region(dev, IPATH_NMINORS);
done:
	return ret;
}

/* Undo user_init(): destroy the class (if created) and release the
 * chrdev region. Safe to call after a partial user_init(). */
static void user_cleanup(void)
{
	if (ipath_class) {
		class_destroy(ipath_class);
		ipath_class = NULL;
	}

	unregister_chrdev_region(dev, IPATH_NMINORS);
}

/* user_count: number of units with user support; user_setup: whether the
 * shared wildcard device/class infrastructure was successfully created. */
static atomic_t user_count = ATOMIC_INIT(0);
static atomic_t user_setup = ATOMIC_INIT(0);

/*
 * Register userspace access for one unit: the first caller also sets up
 * the shared class and wildcard "ipath" device, then every caller gets a
 * per-unit "ipath<N>" device at minor N+1. Returns 0 or negative errno.
 *
 * NOTE(review): on failure user_count is left incremented, so a later
 * successful add will skip the shared setup — confirm callers treat a
 * failed add as fatal for the unit.
 */
int ipath_user_add(struct ipath_devdata *dd)
{
	char name[10];
	int ret;

	if (atomic_inc_return(&user_count) == 1) {
		/* first unit: bring up shared infrastructure */
		ret = user_init();
		if (ret < 0) {
			ipath_dev_err(dd, "Unable to set up user support: "
				      "error %d\n", -ret);
			goto bail;
		}
		ret = init_cdev(0, "ipath", &ipath_file_ops, &wildcard_cdev,
				&wildcard_dev);
		if (ret < 0) {
			ipath_dev_err(dd, "Could not create wildcard "
				      "minor: error %d\n", -ret);
			goto bail_user;
		}

		atomic_set(&user_setup, 1);
	}

	snprintf(name, sizeof(name), "ipath%d", dd->ipath_unit);

	ret = init_cdev(dd->ipath_unit + 1, name, &ipath_file_ops,
			&dd->user_cdev, &dd->user_dev);
	if (ret < 0)
		ipath_dev_err(dd, "Could not create user minor %d, %s\n",
			      dd->ipath_unit + 1, name);

	goto bail;

bail_user:
	user_cleanup();
bail:
	return ret;
}

/*
 * Remove one unit's user device; the last unit out also tears down the
 * wildcard device and the shared class/chrdev region (but only if they
 * were actually set up, per user_setup).
 */
void ipath_user_remove(struct ipath_devdata *dd)
{
	cleanup_cdev(&dd->user_cdev, &dd->user_dev);

	if (atomic_dec_return(&user_count) == 0) {
		if (atomic_read(&user_setup) == 0)
			goto bail;

		cleanup_cdev(&wildcard_cdev, &wildcard_dev);
		user_cleanup();

		atomic_set(&user_setup, 0);
	}
bail:
	return;
}
gpl-2.0
InnerFire/thunderzap_sprout
kernel/trace/ring_buffer.c
110
132011
/*
 * Generic ring buffer
 *
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/ftrace_event.h>
#include <linux/ring_buffer.h>
#include <linux/trace_clock.h>
#include <linux/trace_seq.h>
#include <linux/spinlock.h>
#include <linux/irq_work.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kthread.h>	/* for self test */
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/list.h>
#include <linux/cpu.h>
#include <linux/fs.h>

#include <asm/local.h>

static void update_pages_handler(struct work_struct *work);

/*
 * The ring buffer header is special. We must manually up keep it.
 *
 * Emit a human-readable description of the compressed event header
 * layout into the trace_seq @s (consumed by the tracing debugfs files).
 *
 * NOTE(review): ret is overwritten by every trace_seq_printf() call, so
 * only the final call's result is returned; earlier failures are lost.
 */
int ring_buffer_print_entry_header(struct trace_seq *s)
{
	int ret;

	ret = trace_seq_printf(s, "# compressed entry header\n");
	ret = trace_seq_printf(s, "\ttype_len : 5 bits\n");
	ret = trace_seq_printf(s, "\ttime_delta : 27 bits\n");
	ret = trace_seq_printf(s, "\tarray : 32 bits\n");
	ret = trace_seq_printf(s, "\n");
	ret = trace_seq_printf(s, "\tpadding : type == %d\n",
			       RINGBUF_TYPE_PADDING);
	ret = trace_seq_printf(s, "\ttime_extend : type == %d\n",
			       RINGBUF_TYPE_TIME_EXTEND);
	ret = trace_seq_printf(s, "\tdata max type_len == %d\n",
			       RINGBUF_TYPE_DATA_TYPE_LEN_MAX);

	return ret;
}

/*
 * The ring buffer is made up of a list of pages. A separate list of pages is
 * allocated for each CPU. A writer may only write to a buffer that is
 * associated with the CPU it is currently executing on. A reader may read
 * from any per cpu buffer.
 *
 * The reader is special. For each per cpu buffer, the reader has its own
 * reader page. When a reader has read the entire reader page, this reader
 * page is swapped with another page in the ring buffer.
 *
 * Now, as long as the writer is off the reader page, the reader can do what
 * ever it wants with that page.
The writer will never write to that page * again (as long as it is out of the ring buffer). * * Here's some silly ASCII art. * * +------+ * |reader| RING BUFFER * |page | * +------+ +---+ +---+ +---+ * | |-->| |-->| | * +---+ +---+ +---+ * ^ | * | | * +---------------+ * * * +------+ * |reader| RING BUFFER * |page |------------------v * +------+ +---+ +---+ +---+ * | |-->| |-->| | * +---+ +---+ +---+ * ^ | * | | * +---------------+ * * * +------+ * |reader| RING BUFFER * |page |------------------v * +------+ +---+ +---+ +---+ * ^ | |-->| |-->| | * | +---+ +---+ +---+ * | | * | | * +------------------------------+ * * * +------+ * |buffer| RING BUFFER * |page |------------------v * +------+ +---+ +---+ +---+ * ^ | | | |-->| | * | New +---+ +---+ +---+ * | Reader------^ | * | page | * +------------------------------+ * * * After we make this swap, the reader can hand this page off to the splice * code and be done with it. It can even allocate a new page if it needs to * and swap that into the ring buffer. * * We will be using cmpxchg soon to make all this lockless. * */ /* * A fast way to enable or disable all ring buffers is to * call tracing_on or tracing_off. Turning off the ring buffers * prevents all ring buffers from being recorded to. * Turning this switch on, makes it OK to write to the * ring buffer, if the ring buffer is enabled itself. * * There's three layers that must be on in order to write * to the ring buffer. * * 1) This global flag must be set. * 2) The ring buffer must be enabled for recording. * 3) The per cpu buffer must be enabled for recording. * * In case of an anomaly, this global flag has a bit set that * will permantly disable all ring buffers. 
*/

/*
 * Global flag to disable all recording to ring buffers
 *  This has two bits: ON, DISABLED
 *
 *  ON   DISABLED
 * ---- ----------
 *   0      0        : ring buffers are off
 *   1      0        : ring buffers are on
 *   X      1        : ring buffers are permanently disabled
 */
enum {
	RB_BUFFERS_ON_BIT	= 0,
	RB_BUFFERS_DISABLED_BIT	= 1,
};

enum {
	RB_BUFFERS_ON		= 1 << RB_BUFFERS_ON_BIT,
	RB_BUFFERS_DISABLED	= 1 << RB_BUFFERS_DISABLED_BIT,
};

static unsigned long ring_buffer_flags __read_mostly = RB_BUFFERS_ON;

/* Used for individual buffers (after the counter) */
#define RB_BUFFER_OFF		(1 << 20)

/* bytes of struct buffer_data_page before its data[] payload */
#define BUF_PAGE_HDR_SIZE offsetof(struct buffer_data_page, data)

/**
 * tracing_off_permanent - permanently disable ring buffers
 *
 * This function, once called, will disable all ring buffers
 * permanently.
 */
void tracing_off_permanent(void)
{
	set_bit(RB_BUFFERS_DISABLED_BIT, &ring_buffer_flags);
}

/* event header size and alignment of event payloads */
#define RB_EVNT_HDR_SIZE (offsetof(struct ring_buffer_event, array))
#define RB_ALIGNMENT		4U
#define RB_MAX_SMALL_DATA	(RB_ALIGNMENT * RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
#define RB_EVNT_MIN_SIZE	8U	/* two 32bit words */

/* on arches without efficient unaligned 64-bit access, force 8-byte
 * alignment of event data */
#ifndef CONFIG_HAVE_64BIT_ALIGNED_ACCESS
# define RB_FORCE_8BYTE_ALIGNMENT	0
# define RB_ARCH_ALIGNMENT		RB_ALIGNMENT
#else
# define RB_FORCE_8BYTE_ALIGNMENT	1
# define RB_ARCH_ALIGNMENT		8U
#endif

#define RB_ALIGN_DATA		__aligned(RB_ARCH_ALIGNMENT)

/* define RINGBUF_TYPE_DATA for 'case RINGBUF_TYPE_DATA:' */
#define RINGBUF_TYPE_DATA 0 ...
RINGBUF_TYPE_DATA_TYPE_LEN_MAX

/* fixed sizes of the two time-keeping event types */
enum {
	RB_LEN_TIME_EXTEND = 8,
	RB_LEN_TIME_STAMP = 16,
};

/* step over a TIME_EXTEND event to the data event that follows it */
#define skip_time_extend(event) \
	((struct ring_buffer_event *)((char *)event + RB_LEN_TIME_EXTEND))

/* a "null" event is padding with a zero time_delta (end-of-page filler) */
static inline int rb_null_event(struct ring_buffer_event *event)
{
	return event->type_len == RINGBUF_TYPE_PADDING && !event->time_delta;
}

static void rb_event_set_padding(struct ring_buffer_event *event)
{
	/* padding has a NULL time_delta */
	event->type_len = RINGBUF_TYPE_PADDING;
	event->time_delta = 0;
}

/* total size of a DATA event: payload length (either encoded in
 * type_len or stored in array[0]) plus the event header */
static unsigned
rb_event_data_length(struct ring_buffer_event *event)
{
	unsigned length;

	if (event->type_len)
		length = event->type_len * RB_ALIGNMENT;
	else
		length = event->array[0];
	return length + RB_EVNT_HDR_SIZE;
}

/*
 * Return the length of the given event. Will return
 * the length of the time extend if the event is a
 * time extend.
 *
 * NOTE(review): the "undefined" padding case returns -1 through an
 * unsigned return type, i.e. UINT_MAX — callers must treat it as an
 * error sentinel, not a size.
 */
static inline unsigned
rb_event_length(struct ring_buffer_event *event)
{
	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			/* undefined */
			return -1;
		return event->array[0] + RB_EVNT_HDR_SIZE;

	case RINGBUF_TYPE_TIME_EXTEND:
		return RB_LEN_TIME_EXTEND;

	case RINGBUF_TYPE_TIME_STAMP:
		return RB_LEN_TIME_STAMP;

	case RINGBUF_TYPE_DATA:
		return rb_event_data_length(event);
	default:
		BUG();
	}
	/* not hit */
	return 0;
}

/*
 * Return total length of time extend and data,
 *   or just the event length for all other events.
 */
static inline unsigned
rb_event_ts_length(struct ring_buffer_event *event)
{
	unsigned len = 0;

	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
		/* time extends include the data event after it */
		len = RB_LEN_TIME_EXTEND;
		event = skip_time_extend(event);
	}
	return len + rb_event_length(event);
}

/**
 * ring_buffer_event_length - return the length of the event
 * @event: the event to get the length of
 *
 * Returns the size of the data load of a data event.
 * If the event is something other than a data event, it
 * returns the size of the event itself.
With the exception * of a TIME EXTEND, where it still returns the size of the * data load of the data event after it. */ unsigned ring_buffer_event_length(struct ring_buffer_event *event) { unsigned length; if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) event = skip_time_extend(event); length = rb_event_length(event); if (event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX) return length; length -= RB_EVNT_HDR_SIZE; if (length > RB_MAX_SMALL_DATA + sizeof(event->array[0])) length -= sizeof(event->array[0]); return length; } EXPORT_SYMBOL_GPL(ring_buffer_event_length); /* inline for ring buffer fast paths */ static void * rb_event_data(struct ring_buffer_event *event) { if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) event = skip_time_extend(event); BUG_ON(event->type_len > RINGBUF_TYPE_DATA_TYPE_LEN_MAX); /* If length is in len field, then array[0] has the data */ if (event->type_len) return (void *)&event->array[0]; /* Otherwise length is in array[0] and array[1] has the data */ return (void *)&event->array[1]; } /** * ring_buffer_event_data - return the data of the event * @event: the event to get the data from */ void *ring_buffer_event_data(struct ring_buffer_event *event) { return rb_event_data(event); } EXPORT_SYMBOL_GPL(ring_buffer_event_data); #define for_each_buffer_cpu(buffer, cpu) \ for_each_cpu(cpu, buffer->cpumask) #define TS_SHIFT 27 #define TS_MASK ((1ULL << TS_SHIFT) - 1) #define TS_DELTA_TEST (~TS_MASK) /* Flag when events were overwritten */ #define RB_MISSED_EVENTS (1 << 31) /* Missed count stored at end */ #define RB_MISSED_STORED (1 << 30) struct buffer_data_page { u64 time_stamp; /* page time stamp */ local_t commit; /* write committed index */ unsigned char data[] RB_ALIGN_DATA; /* data of buffer page */ }; /* * Note, the buffer_page list must be first. The buffer pages * are allocated in cache lines, which means that each buffer * page will be at the beginning of a cache line, and thus * the least significant bits will be zero. 
We use this to * add flags in the list struct pointers, to make the ring buffer * lockless. */ struct buffer_page { struct list_head list; /* list of buffer pages */ local_t write; /* index for next write */ unsigned read; /* index for next read */ local_t entries; /* entries on this page */ unsigned long real_end; /* real end of data */ struct buffer_data_page *page; /* Actual data page */ }; /* * The buffer page counters, write and entries, must be reset * atomically when crossing page boundaries. To synchronize this * update, two counters are inserted into the number. One is * the actual counter for the write position or count on the page. * * The other is a counter of updaters. Before an update happens * the update partition of the counter is incremented. This will * allow the updater to update the counter atomically. * * The counter is 20 bits, and the state data is 12. */ #define RB_WRITE_MASK 0xfffff #define RB_WRITE_INTCNT (1 << 20) static void rb_init_page(struct buffer_data_page *bpage) { local_set(&bpage->commit, 0); } /** * ring_buffer_page_len - the size of data on the page. * @page: The page to read * * Returns the amount of data on the page, including buffer page header. */ size_t ring_buffer_page_len(void *page) { return local_read(&((struct buffer_data_page *)page)->commit) + BUF_PAGE_HDR_SIZE; } /* * Also stolen from mm/slob.c. Thanks to Mathieu Desnoyers for pointing * this issue out. */ static void free_buffer_page(struct buffer_page *bpage) { free_page((unsigned long)bpage->page); kfree(bpage); } /* * We need to fit the time_stamp delta into 27 bits. 
*/
/* Return non-zero if @delta does not fit in the 27-bit time_delta
 * field (i.e. a TIME_EXTEND event is needed). */
static inline int test_time_stamp(u64 delta)
{
	if (delta & TS_DELTA_TEST)
		return 1;
	return 0;
}

/* usable bytes per buffer page, after the buffer_data_page header */
#define BUF_PAGE_SIZE (PAGE_SIZE - BUF_PAGE_HDR_SIZE)

/* Max payload is BUF_PAGE_SIZE - header (8bytes) */
#define BUF_MAX_DATA_SIZE (BUF_PAGE_SIZE - (sizeof(u32) * 2))

/*
 * Describe the buffer_data_page layout in the debugfs "header_page"
 * format. Like ring_buffer_print_entry_header(), only the last
 * trace_seq_printf() result is returned.
 *
 * NOTE(review): the "overwrite" field is reported at the same offset as
 * "commit" with size 1 — presumably the overwrite/missed-events flag is
 * packed into commit's high bits on the reader page; confirm against
 * the reader-page consumers before relying on this.
 */
int ring_buffer_print_page_header(struct trace_seq *s)
{
	struct buffer_data_page field;
	int ret;

	ret = trace_seq_printf(s, "\tfield: u64 timestamp;\t"
			       "offset:0;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)sizeof(field.time_stamp),
			       (unsigned int)is_signed_type(u64));

	ret = trace_seq_printf(s, "\tfield: local_t commit;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       (unsigned int)sizeof(field.commit),
			       (unsigned int)is_signed_type(long));

	ret = trace_seq_printf(s, "\tfield: int overwrite;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), commit),
			       1,
			       (unsigned int)is_signed_type(long));

	ret = trace_seq_printf(s, "\tfield: char data;\t"
			       "offset:%u;\tsize:%u;\tsigned:%u;\n",
			       (unsigned int)offsetof(typeof(field), data),
			       (unsigned int)BUF_PAGE_SIZE,
			       (unsigned int)is_signed_type(char));

	return ret;
}

/* per-buffer (or per-cpu-buffer) deferred reader wakeup state */
struct rb_irq_work {
	struct irq_work			work;
	wait_queue_head_t		waiters;
	bool				waiters_pending;
};

/*
 * head_page == tail_page && head == tail then buffer is empty.
*/ struct ring_buffer_per_cpu { int cpu; atomic_t record_disabled; struct ring_buffer *buffer; raw_spinlock_t reader_lock; /* serialize readers */ arch_spinlock_t lock; struct lock_class_key lock_key; unsigned int nr_pages; struct list_head *pages; struct buffer_page *head_page; /* read from head */ struct buffer_page *tail_page; /* write to tail */ struct buffer_page *commit_page; /* committed pages */ struct buffer_page *reader_page; unsigned long lost_events; unsigned long last_overrun; local_t entries_bytes; local_t entries; local_t overrun; local_t commit_overrun; local_t dropped_events; local_t committing; local_t commits; unsigned long read; unsigned long read_bytes; u64 write_stamp; u64 read_stamp; /* ring buffer pages to update, > 0 to add, < 0 to remove */ int nr_pages_to_update; struct list_head new_pages; /* new pages to add */ struct work_struct update_pages_work; struct completion update_done; struct rb_irq_work irq_work; }; struct ring_buffer { unsigned flags; int cpus; atomic_t record_disabled; atomic_t resize_disabled; cpumask_var_t cpumask; struct lock_class_key *reader_lock_key; struct mutex mutex; struct ring_buffer_per_cpu **buffers; #ifdef CONFIG_HOTPLUG_CPU struct notifier_block cpu_notify; #endif u64 (*clock)(void); struct rb_irq_work irq_work; }; struct ring_buffer_iter { struct ring_buffer_per_cpu *cpu_buffer; unsigned long head; struct buffer_page *head_page; struct buffer_page *cache_reader_page; unsigned long cache_read; u64 read_stamp; }; /* * rb_wake_up_waiters - wake up tasks waiting for ring buffer input * * Schedules a delayed work to wake up any task that is blocked on the * ring buffer waiters queue. 
*/ static void rb_wake_up_waiters(struct irq_work *work) { struct rb_irq_work *rbwork = container_of(work, struct rb_irq_work, work); wake_up_all(&rbwork->waiters); } /** * ring_buffer_wait - wait for input to the ring buffer * @buffer: buffer to wait on * @cpu: the cpu buffer to wait on * * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon * as data is added to any of the @buffer's cpu buffers. Otherwise * it will wait for data to be added to a specific cpu buffer. */ int ring_buffer_wait(struct ring_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; DEFINE_WAIT(wait); struct rb_irq_work *work; /* * Depending on what the caller is waiting for, either any * data in any cpu buffer, or a specific buffer, put the * caller on the appropriate wait queue. */ if (cpu == RING_BUFFER_ALL_CPUS) work = &buffer->irq_work; else { if (!cpumask_test_cpu(cpu, buffer->cpumask)) return -ENODEV; cpu_buffer = buffer->buffers[cpu]; work = &cpu_buffer->irq_work; } prepare_to_wait(&work->waiters, &wait, TASK_INTERRUPTIBLE); /* * The events can happen in critical sections where * checking a work queue can cause deadlocks. * After adding a task to the queue, this flag is set * only to notify events to try to wake up the queue * using irq_work. * * We don't clear it even if the buffer is no longer * empty. The flag only causes the next event to run * irq_work to do the work queue wake up. The worse * that can happen if we race with !trace_empty() is that * an event will cause an irq_work to try to wake up * an empty queue. * * There's no reason to protect this flag either, as * the work queue and irq_work logic will do the necessary * synchronization for the wake ups. The only thing * that is necessary is that the wake up happens after * a task has been queued. It's OK for spurious wake ups. 
*/ work->waiters_pending = true; if ((cpu == RING_BUFFER_ALL_CPUS && ring_buffer_empty(buffer)) || (cpu != RING_BUFFER_ALL_CPUS && ring_buffer_empty_cpu(buffer, cpu))) schedule(); finish_wait(&work->waiters, &wait); return 0; } /** * ring_buffer_poll_wait - poll on buffer input * @buffer: buffer to wait on * @cpu: the cpu buffer to wait on * @filp: the file descriptor * @poll_table: The poll descriptor * * If @cpu == RING_BUFFER_ALL_CPUS then the task will wake up as soon * as data is added to any of the @buffer's cpu buffers. Otherwise * it will wait for data to be added to a specific cpu buffer. * * Returns POLLIN | POLLRDNORM if data exists in the buffers, * zero otherwise. */ int ring_buffer_poll_wait(struct ring_buffer *buffer, int cpu, struct file *filp, poll_table *poll_table) { struct ring_buffer_per_cpu *cpu_buffer; struct rb_irq_work *work; if (cpu == RING_BUFFER_ALL_CPUS) work = &buffer->irq_work; else { if (!cpumask_test_cpu(cpu, buffer->cpumask)) return -EINVAL; cpu_buffer = buffer->buffers[cpu]; work = &cpu_buffer->irq_work; } poll_wait(filp, &work->waiters, poll_table); work->waiters_pending = true; /* * There's a tight race between setting the waiters_pending and * checking if the ring buffer is empty. Once the waiters_pending bit * is set, the next event will wake the task up, but we can get stuck * if there's only a single event in. * * FIXME: Ideally, we need a memory barrier on the writer side as well, * but adding a memory barrier to all events will cause too much of a * performance hit in the fast path. We only need a memory barrier when * the buffer goes from empty to having content. But as this race is * extremely small, and it's not a problem if another event comes in, we * will fix it later. 
*/ smp_mb(); if ((cpu == RING_BUFFER_ALL_CPUS && !ring_buffer_empty(buffer)) || (cpu != RING_BUFFER_ALL_CPUS && !ring_buffer_empty_cpu(buffer, cpu))) return POLLIN | POLLRDNORM; return 0; } /* buffer may be either ring_buffer or ring_buffer_per_cpu */ #define RB_WARN_ON(b, cond) \ ({ \ int _____ret = unlikely(cond); \ if (_____ret) { \ if (__same_type(*(b), struct ring_buffer_per_cpu)) { \ struct ring_buffer_per_cpu *__b = \ (void *)b; \ atomic_inc(&__b->buffer->record_disabled); \ } else \ atomic_inc(&b->record_disabled); \ WARN_ON(1); \ } \ _____ret; \ }) /* Up this if you want to test the TIME_EXTENTS and normalization */ #define DEBUG_SHIFT 0 static inline u64 rb_time_stamp(struct ring_buffer *buffer) { /* shift to debug/test normalization and TIME_EXTENTS */ return buffer->clock() << DEBUG_SHIFT; } u64 ring_buffer_time_stamp(struct ring_buffer *buffer, int cpu) { u64 time; preempt_disable_notrace(); time = rb_time_stamp(buffer); preempt_enable_no_resched_notrace(); return time; } EXPORT_SYMBOL_GPL(ring_buffer_time_stamp); void ring_buffer_normalize_time_stamp(struct ring_buffer *buffer, int cpu, u64 *ts) { /* Just stupid testing the normalize function and deltas */ *ts >>= DEBUG_SHIFT; } EXPORT_SYMBOL_GPL(ring_buffer_normalize_time_stamp); /* * Making the ring buffer lockless makes things tricky. * Although writes only happen on the CPU that they are on, * and they only need to worry about interrupts. Reads can * happen on any CPU. * * The reader page is always off the ring buffer, but when the * reader finishes with a page, it needs to swap its page with * a new one from the buffer. The reader needs to take from * the head (writes go to the tail). But if a writer is in overwrite * mode and wraps, it must push the head page forward. * * Here lies the problem. * * The reader must be careful to replace only the head page, and * not another one. 
As described at the top of the file in the * ASCII art, the reader sets its old page to point to the next * page after head. It then sets the page after head to point to * the old reader page. But if the writer moves the head page * during this operation, the reader could end up with the tail. * * We use cmpxchg to help prevent this race. We also do something * special with the page before head. We set the LSB to 1. * * When the writer must push the page forward, it will clear the * bit that points to the head page, move the head, and then set * the bit that points to the new head page. * * We also don't want an interrupt coming in and moving the head * page on another writer. Thus we use the second LSB to catch * that too. Thus: * * head->list->prev->next bit 1 bit 0 * ------- ------- * Normal page 0 0 * Points to head page 0 1 * New head page 1 0 * * Note we can not trust the prev pointer of the head page, because: * * +----+ +-----+ +-----+ * | |------>| T |---X--->| N | * | |<------| | | | * +----+ +-----+ +-----+ * ^ ^ | * | +-----+ | | * +----------| R |----------+ | * | |<-----------+ * +-----+ * * Key: ---X--> HEAD flag set in pointer * T Tail page * R Reader page * N Next page * * (see __rb_reserve_next() to see where this happens) * * What the above shows is that the reader just swapped out * the reader page with a page in the buffer, but before it * could make the new header point back to the new page added * it was preempted by a writer. The writer moved forward onto * the new page added by the reader and is about to move forward * again. * * You can see, it is legitimate for the previous pointer of * the head (or any page) not to point back to itself. But only * temporarially. 
*/ #define RB_PAGE_NORMAL 0UL #define RB_PAGE_HEAD 1UL #define RB_PAGE_UPDATE 2UL #define RB_FLAG_MASK 3UL /* PAGE_MOVED is not part of the mask */ #define RB_PAGE_MOVED 4UL /* * rb_list_head - remove any bit */ static struct list_head *rb_list_head(struct list_head *list) { unsigned long val = (unsigned long)list; return (struct list_head *)(val & ~RB_FLAG_MASK); } /* * rb_is_head_page - test if the given page is the head page * * Because the reader may move the head_page pointer, we can * not trust what the head page is (it may be pointing to * the reader page). But if the next page is a header page, * its flags will be non zero. */ static inline int rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer, struct buffer_page *page, struct list_head *list) { unsigned long val; val = (unsigned long)list->next; if ((val & ~RB_FLAG_MASK) != (unsigned long)&page->list) return RB_PAGE_MOVED; return val & RB_FLAG_MASK; } /* * rb_is_reader_page * * The unique thing about the reader page, is that, if the * writer is ever on it, the previous pointer never points * back to the reader page. */ static int rb_is_reader_page(struct buffer_page *page) { struct list_head *list = page->list.prev; return rb_list_head(list->next) != &page->list; } /* * rb_set_list_to_head - set a list_head to be pointing to head. */ static void rb_set_list_to_head(struct ring_buffer_per_cpu *cpu_buffer, struct list_head *list) { unsigned long *ptr; ptr = (unsigned long *)&list->next; *ptr |= RB_PAGE_HEAD; *ptr &= ~RB_PAGE_UPDATE; } /* * rb_head_page_activate - sets up head page */ static void rb_head_page_activate(struct ring_buffer_per_cpu *cpu_buffer) { struct buffer_page *head; head = cpu_buffer->head_page; if (!head) return; /* * Set the previous list pointer to have the HEAD flag. 
*/ rb_set_list_to_head(cpu_buffer, head->list.prev); } static void rb_list_head_clear(struct list_head *list) { unsigned long *ptr = (unsigned long *)&list->next; *ptr &= ~RB_FLAG_MASK; } /* * rb_head_page_dactivate - clears head page ptr (for free list) */ static void rb_head_page_deactivate(struct ring_buffer_per_cpu *cpu_buffer) { struct list_head *hd; /* Go through the whole list and clear any pointers found. */ rb_list_head_clear(cpu_buffer->pages); list_for_each(hd, cpu_buffer->pages) rb_list_head_clear(hd); } static int rb_head_page_set(struct ring_buffer_per_cpu *cpu_buffer, struct buffer_page *head, struct buffer_page *prev, int old_flag, int new_flag) { struct list_head *list; unsigned long val = (unsigned long)&head->list; unsigned long ret; list = &prev->list; val &= ~RB_FLAG_MASK; ret = cmpxchg((unsigned long *)&list->next, val | old_flag, val | new_flag); /* check if the reader took the page */ if ((ret & ~RB_FLAG_MASK) != val) return RB_PAGE_MOVED; return ret & RB_FLAG_MASK; } static int rb_head_page_set_update(struct ring_buffer_per_cpu *cpu_buffer, struct buffer_page *head, struct buffer_page *prev, int old_flag) { return rb_head_page_set(cpu_buffer, head, prev, old_flag, RB_PAGE_UPDATE); } static int rb_head_page_set_head(struct ring_buffer_per_cpu *cpu_buffer, struct buffer_page *head, struct buffer_page *prev, int old_flag) { return rb_head_page_set(cpu_buffer, head, prev, old_flag, RB_PAGE_HEAD); } static int rb_head_page_set_normal(struct ring_buffer_per_cpu *cpu_buffer, struct buffer_page *head, struct buffer_page *prev, int old_flag) { return rb_head_page_set(cpu_buffer, head, prev, old_flag, RB_PAGE_NORMAL); } static inline void rb_inc_page(struct ring_buffer_per_cpu *cpu_buffer, struct buffer_page **bpage) { struct list_head *p = rb_list_head((*bpage)->list.next); *bpage = list_entry(p, struct buffer_page, list); } static struct buffer_page * rb_set_head_page(struct ring_buffer_per_cpu *cpu_buffer) { struct buffer_page *head; struct 
buffer_page *page; struct list_head *list; int i; if (RB_WARN_ON(cpu_buffer, !cpu_buffer->head_page)) return NULL; /* sanity check */ list = cpu_buffer->pages; if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev->next) != list)) return NULL; page = head = cpu_buffer->head_page; /* * It is possible that the writer moves the header behind * where we started, and we miss in one loop. * A second loop should grab the header, but we'll do * three loops just because I'm paranoid. */ for (i = 0; i < 3; i++) { do { if (rb_is_head_page(cpu_buffer, page, page->list.prev)) { cpu_buffer->head_page = page; return page; } rb_inc_page(cpu_buffer, &page); } while (page != head); } RB_WARN_ON(cpu_buffer, 1); return NULL; } static int rb_head_page_replace(struct buffer_page *old, struct buffer_page *new) { unsigned long *ptr = (unsigned long *)&old->list.prev->next; unsigned long val; unsigned long ret; val = *ptr & ~RB_FLAG_MASK; val |= RB_PAGE_HEAD; ret = cmpxchg(ptr, val, (unsigned long)&new->list); return ret == val; } /* * rb_tail_page_update - move the tail page forward * * Returns 1 if moved tail page, 0 if someone else did. */ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer, struct buffer_page *tail_page, struct buffer_page *next_page) { struct buffer_page *old_tail; unsigned long old_entries; unsigned long old_write; int ret = 0; /* * The tail page now needs to be moved forward. * * We need to reset the tail page, but without messing * with possible erasing of data brought in by interrupts * that have moved the tail page and are currently on it. * * We add a counter to the write field to denote this. */ old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write); old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries); /* * Just make sure we have seen our old_write and synchronize * with any interrupts that come in. 
*/ barrier(); /* * If the tail page is still the same as what we think * it is, then it is up to us to update the tail * pointer. */ if (tail_page == cpu_buffer->tail_page) { /* Zero the write counter */ unsigned long val = old_write & ~RB_WRITE_MASK; unsigned long eval = old_entries & ~RB_WRITE_MASK; /* * This will only succeed if an interrupt did * not come in and change it. In which case, we * do not want to modify it. * * We add (void) to let the compiler know that we do not care * about the return value of these functions. We use the * cmpxchg to only update if an interrupt did not already * do it for us. If the cmpxchg fails, we don't care. */ (void)local_cmpxchg(&next_page->write, old_write, val); (void)local_cmpxchg(&next_page->entries, old_entries, eval); /* * No need to worry about races with clearing out the commit. * it only can increment when a commit takes place. But that * only happens in the outer most nested commit. */ local_set(&next_page->page->commit, 0); old_tail = cmpxchg(&cpu_buffer->tail_page, tail_page, next_page); if (old_tail == tail_page) ret = 1; } return ret; } static int rb_check_bpage(struct ring_buffer_per_cpu *cpu_buffer, struct buffer_page *bpage) { unsigned long val = (unsigned long)bpage; if (RB_WARN_ON(cpu_buffer, val & RB_FLAG_MASK)) return 1; return 0; } /** * rb_check_list - make sure a pointer to a list has the last bits zero */ static int rb_check_list(struct ring_buffer_per_cpu *cpu_buffer, struct list_head *list) { if (RB_WARN_ON(cpu_buffer, rb_list_head(list->prev) != list->prev)) return 1; if (RB_WARN_ON(cpu_buffer, rb_list_head(list->next) != list->next)) return 1; return 0; } /** * check_pages - integrity check of buffer pages * @cpu_buffer: CPU buffer with pages to test * * As a safety measure we check to make sure the data pages have not * been corrupted. 
 */
static int rb_check_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	/* Reset the head page if it exists */
	if (cpu_buffer->head_page)
		rb_set_head_page(cpu_buffer);

	rb_head_page_deactivate(cpu_buffer);

	if (RB_WARN_ON(cpu_buffer, head->next->prev != head))
		return -1;
	if (RB_WARN_ON(cpu_buffer, head->prev->next != head))
		return -1;

	if (rb_check_list(cpu_buffer, head))
		return -1;

	list_for_each_entry_safe(bpage, tmp, head, list) {
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.next->prev != &bpage->list))
			return -1;
		if (RB_WARN_ON(cpu_buffer,
			       bpage->list.prev->next != &bpage->list))
			return -1;
		if (rb_check_list(cpu_buffer, &bpage->list))
			return -1;
	}

	rb_head_page_activate(cpu_buffer);

	return 0;
}

/*
 * Allocate @nr_pages buffer pages (a buffer_page descriptor plus one
 * data page each) on the memory node of @cpu and link them onto @pages.
 * On failure everything allocated so far is freed and -ENOMEM returned,
 * leaving @pages empty.
 */
static int __rb_allocate_pages(int nr_pages, struct list_head *pages, int cpu)
{
	int i;
	struct buffer_page *bpage, *tmp;

	for (i = 0; i < nr_pages; i++) {
		struct page *page;
		/*
		 * __GFP_NORETRY flag makes sure that the allocation fails
		 * gracefully without invoking oom-killer and the system is
		 * not destabilized.
		 */
		bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
				    GFP_KERNEL | __GFP_NORETRY,
				    cpu_to_node(cpu));
		if (!bpage)
			goto free_pages;

		list_add(&bpage->list, pages);

		page = alloc_pages_node(cpu_to_node(cpu),
					GFP_KERNEL | __GFP_NORETRY, 0);
		if (!page)
			goto free_pages;
		bpage->page = page_address(page);
		rb_init_page(bpage->page);
	}

	return 0;

free_pages:
	list_for_each_entry_safe(bpage, tmp, pages, list) {
		list_del_init(&bpage->list);
		free_buffer_page(bpage);
	}

	return -ENOMEM;
}

/*
 * Allocate the initial page list for a per-cpu buffer and install it
 * as cpu_buffer->pages. Returns 0 on success, -ENOMEM on failure.
 */
static int rb_allocate_pages(struct ring_buffer_per_cpu *cpu_buffer,
			     unsigned nr_pages)
{
	LIST_HEAD(pages);

	WARN_ON(!nr_pages);

	if (__rb_allocate_pages(nr_pages, &pages, cpu_buffer->cpu))
		return -ENOMEM;

	/*
	 * The ring buffer page list is a circular list that does not
	 * start and end with a list head. All page list items point to
	 * other pages. Dropping the temporary list head here makes the
	 * ring fully circular.
	 */
	cpu_buffer->pages = pages.next;
	list_del(&pages);

	cpu_buffer->nr_pages = nr_pages;

	rb_check_pages(cpu_buffer);

	return 0;
}

/*
 * Allocate and initialize one per-cpu buffer: the descriptor, its
 * dedicated reader page, and nr_pages data pages, with head, tail and
 * commit all starting on the first page. Returns NULL on failure.
 */
static struct ring_buffer_per_cpu *
rb_allocate_cpu_buffer(struct ring_buffer *buffer, int nr_pages, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct buffer_page *bpage;
	struct page *page;
	int ret;

	cpu_buffer = kzalloc_node(ALIGN(sizeof(*cpu_buffer), cache_line_size()),
				  GFP_KERNEL, cpu_to_node(cpu));
	if (!cpu_buffer)
		return NULL;

	cpu_buffer->cpu = cpu;
	cpu_buffer->buffer = buffer;
	raw_spin_lock_init(&cpu_buffer->reader_lock);
	lockdep_set_class(&cpu_buffer->reader_lock, buffer->reader_lock_key);
	cpu_buffer->lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
	INIT_WORK(&cpu_buffer->update_pages_work, update_pages_handler);
	init_completion(&cpu_buffer->update_done);
	init_irq_work(&cpu_buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&cpu_buffer->irq_work.waiters);

	bpage = kzalloc_node(ALIGN(sizeof(*bpage), cache_line_size()),
			    GFP_KERNEL, cpu_to_node(cpu));
	if (!bpage)
		goto fail_free_buffer;

	rb_check_bpage(cpu_buffer, bpage);

	cpu_buffer->reader_page = bpage;
	page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, 0);
	if (!page)
		goto fail_free_reader;
	bpage->page = page_address(page);
	rb_init_page(bpage->page);

	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
	INIT_LIST_HEAD(&cpu_buffer->new_pages);

	ret = rb_allocate_pages(cpu_buffer, nr_pages);
	if (ret < 0)
		goto fail_free_reader;

	cpu_buffer->head_page
		= list_entry(cpu_buffer->pages, struct buffer_page, list);
	cpu_buffer->tail_page = cpu_buffer->commit_page = cpu_buffer->head_page;

	rb_head_page_activate(cpu_buffer);

	return cpu_buffer;

 fail_free_reader:
	free_buffer_page(cpu_buffer->reader_page);

 fail_free_buffer:
	kfree(cpu_buffer);
	return NULL;
}

/*
 * Free a per-cpu buffer: the reader page, every page on the circular
 * page list (if one was allocated), and the descriptor itself.
 */
static void rb_free_cpu_buffer(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *head = cpu_buffer->pages;
	struct buffer_page *bpage, *tmp;

	free_buffer_page(cpu_buffer->reader_page);

	rb_head_page_deactivate(cpu_buffer);

	if (head) {
		list_for_each_entry_safe(bpage, tmp, head, list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
		/*
		 * The list is circular with no list head, so the entry
		 * 'head' itself is a buffer page and must be freed too.
		 */
		bpage = list_entry(head, struct buffer_page, list);
		free_buffer_page(bpage);
	}

	kfree(cpu_buffer);
}

#ifdef CONFIG_HOTPLUG_CPU
static int rb_cpu_notify(struct notifier_block *self,
			 unsigned long action, void *hcpu);
#endif

/**
 * ring_buffer_alloc - allocate a new ring_buffer
 * @size: the size in bytes per cpu that is needed.
 * @flags: attributes to set for the ring buffer.
 *
 * Currently the only flag that is available is the RB_FL_OVERWRITE
 * flag. This flag means that the buffer will overwrite old data
 * when the buffer wraps. If this flag is not set, the buffer will
 * drop data when the tail hits the head.
 */
struct ring_buffer *__ring_buffer_alloc(unsigned long size, unsigned flags,
					struct lock_class_key *key)
{
	struct ring_buffer *buffer;
	int bsize;
	int cpu, nr_pages;

	/* keep it in its own cache line */
	buffer = kzalloc(ALIGN(sizeof(*buffer), cache_line_size()),
			 GFP_KERNEL);
	if (!buffer)
		return NULL;

	if (!alloc_cpumask_var(&buffer->cpumask, GFP_KERNEL))
		goto fail_free_buffer;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	buffer->flags = flags;
	buffer->clock = trace_clock_local;
	buffer->reader_lock_key = key;

	init_irq_work(&buffer->irq_work.work, rb_wake_up_waiters);
	init_waitqueue_head(&buffer->irq_work.waiters);

	/* need at least two pages */
	if (nr_pages < 2)
		nr_pages = 2;

	/*
	 * In case of non-hotplug cpu, if the ring-buffer is allocated
	 * in early initcall, it will not be notified of secondary cpus.
	 * In that off case, we need to allocate for all possible cpus.
	 */
#ifdef CONFIG_HOTPLUG_CPU
	get_online_cpus();
	cpumask_copy(buffer->cpumask, cpu_online_mask);
#else
	cpumask_copy(buffer->cpumask, cpu_possible_mask);
#endif
	buffer->cpus = nr_cpu_ids;

	bsize = sizeof(void *) * nr_cpu_ids;
	buffer->buffers = kzalloc(ALIGN(bsize, cache_line_size()),
				  GFP_KERNEL);
	if (!buffer->buffers)
		goto fail_free_cpumask;

	for_each_buffer_cpu(buffer, cpu) {
		buffer->buffers[cpu] =
			rb_allocate_cpu_buffer(buffer, nr_pages, cpu);
		if (!buffer->buffers[cpu])
			goto fail_free_buffers;
	}

#ifdef CONFIG_HOTPLUG_CPU
	buffer->cpu_notify.notifier_call = rb_cpu_notify;
	buffer->cpu_notify.priority = 0;
	register_cpu_notifier(&buffer->cpu_notify);
#endif

	/* NB: a no-op when !CONFIG_HOTPLUG_CPU, so calling it unconditionally
	 * is safe even though get_online_cpus() above is inside the #ifdef. */
	put_online_cpus();
	mutex_init(&buffer->mutex);

	return buffer;

 fail_free_buffers:
	for_each_buffer_cpu(buffer, cpu) {
		if (buffer->buffers[cpu])
			rb_free_cpu_buffer(buffer->buffers[cpu]);
	}
	kfree(buffer->buffers);

 fail_free_cpumask:
	free_cpumask_var(buffer->cpumask);
	put_online_cpus();

 fail_free_buffer:
	kfree(buffer);
	return NULL;
}
EXPORT_SYMBOL_GPL(__ring_buffer_alloc);

/**
 * ring_buffer_free - free a ring buffer.
 * @buffer: the buffer to free.
 */
void
ring_buffer_free(struct ring_buffer *buffer)
{
	int cpu;

	get_online_cpus();

#ifdef CONFIG_HOTPLUG_CPU
	unregister_cpu_notifier(&buffer->cpu_notify);
#endif

	for_each_buffer_cpu(buffer, cpu)
		rb_free_cpu_buffer(buffer->buffers[cpu]);

	put_online_cpus();

	kfree(buffer->buffers);
	free_cpumask_var(buffer->cpumask);

	kfree(buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_free);

/* Override the default (trace_clock_local) timestamp source. */
void ring_buffer_set_clock(struct ring_buffer *buffer,
			   u64 (*clock)(void))
{
	buffer->clock = clock;
}

static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);

/* Entry count of a page; the high bits of ->entries carry flag state. */
static inline unsigned long rb_page_entries(struct buffer_page *bpage)
{
	return local_read(&bpage->entries) & RB_WRITE_MASK;
}

/* Write offset into a page; the high bits of ->write carry flag state. */
static inline unsigned long rb_page_write(struct buffer_page *bpage)
{
	return local_read(&bpage->write) & RB_WRITE_MASK;
}

/*
 * Unlink and free @nr_pages pages following the tail page.
 * Returns 1 when exactly nr_pages were removed, 0 otherwise.
 */
static int
rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
{
	struct list_head *tail_page, *to_remove, *next_page;
	struct buffer_page *to_remove_page, *tmp_iter_page;
	struct buffer_page *last_page, *first_page;
	unsigned int nr_removed;
	unsigned long head_bit;
	int page_entries;

	head_bit = 0;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	atomic_inc(&cpu_buffer->record_disabled);
	/*
	 * We don't race with the readers since we have acquired the reader
	 * lock. We also don't race with writers after disabling recording.
	 * This makes it easy to figure out the first and the last page to be
	 * removed from the list. We unlink all the pages in between including
	 * the first and last pages. This is done in a busy loop so that we
	 * lose the least number of traces.
	 * The pages are freed after we restart recording and unlock readers.
	 */
	tail_page = &cpu_buffer->tail_page->list;

	/*
	 * tail page might be on reader page, we remove the next page
	 * from the ring buffer
	 */
	if (cpu_buffer->tail_page == cpu_buffer->reader_page)
		tail_page = rb_list_head(tail_page->next);
	to_remove = tail_page;

	/* start of pages to remove */
	first_page = list_entry(rb_list_head(to_remove->next),
				struct buffer_page, list);

	for (nr_removed = 0; nr_removed < nr_pages; nr_removed++) {
		to_remove = rb_list_head(to_remove)->next;
		/* remember whether the HEAD flag lived on a removed page */
		head_bit |= (unsigned long)to_remove & RB_PAGE_HEAD;
	}

	next_page = rb_list_head(to_remove)->next;

	/*
	 * Now we remove all pages between tail_page and next_page.
	 * Make sure that we have head_bit value preserved for the
	 * next page
	 */
	tail_page->next = (struct list_head *)((unsigned long)next_page |
						head_bit);
	next_page = rb_list_head(next_page);
	next_page->prev = tail_page;

	/* make sure pages points to a valid page in the ring buffer */
	cpu_buffer->pages = next_page;

	/* update head page */
	if (head_bit)
		cpu_buffer->head_page = list_entry(next_page,
						struct buffer_page, list);

	/*
	 * change read pointer to make sure any read iterators reset
	 * themselves
	 */
	cpu_buffer->read = 0;

	/* pages are removed, resume tracing and then free the pages */
	atomic_dec(&cpu_buffer->record_disabled);
	raw_spin_unlock_irq(&cpu_buffer->reader_lock);

	RB_WARN_ON(cpu_buffer, list_empty(cpu_buffer->pages));

	/* last buffer page to remove */
	last_page = list_entry(rb_list_head(to_remove), struct buffer_page,
				list);
	tmp_iter_page = first_page;

	do {
		to_remove_page = tmp_iter_page;
		rb_inc_page(cpu_buffer, &tmp_iter_page);

		/* update the counters */
		page_entries = rb_page_entries(to_remove_page);
		if (page_entries) {
			/*
			 * If something was added to this page, it was full
			 * since it is not the tail page. So we deduct the
			 * bytes consumed in ring buffer from here.
			 * Increment overrun to account for the lost events.
			 */
			local_add(page_entries, &cpu_buffer->overrun);
			local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
		}

		/*
		 * We have already removed references to this list item, just
		 * free up the buffer_page and its page
		 */
		free_buffer_page(to_remove_page);
		nr_removed--;
	} while (to_remove_page != last_page);

	RB_WARN_ON(cpu_buffer, nr_removed);

	return nr_removed == 0;
}

/*
 * Splice cpu_buffer->new_pages into the ring in front of the head page.
 * Returns 1 on success, 0 on failure (the new pages are then freed).
 */
static int
rb_insert_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct list_head *pages = &cpu_buffer->new_pages;
	int retries, success;

	raw_spin_lock_irq(&cpu_buffer->reader_lock);
	/*
	 * We are holding the reader lock, so the reader page won't be swapped
	 * in the ring buffer. Now we are racing with the writer trying to
	 * move head page and the tail page.
	 * We are going to adapt the reader page update process where:
	 * 1. We first splice the start and end of list of new pages between
	 *    the head page and its previous page.
	 * 2. We cmpxchg the prev_page->next to point from head page to the
	 *    start of new pages list.
	 * 3. Finally, we update the head->prev to the end of new list.
	 *
	 * We will try this process 10 times, to make sure that we don't keep
	 * spinning.
	 */
	retries = 10;
	success = 0;
	while (retries--) {
		struct list_head *head_page, *prev_page, *r;
		struct list_head *last_page, *first_page;
		struct list_head *head_page_with_bit;

		head_page = &rb_set_head_page(cpu_buffer)->list;
		if (!head_page)
			break;
		prev_page = head_page->prev;

		first_page = pages->next;
		last_page  = pages->prev;

		head_page_with_bit = (struct list_head *)
				     ((unsigned long)head_page | RB_PAGE_HEAD);

		last_page->next = head_page_with_bit;
		first_page->prev = prev_page;

		r = cmpxchg(&prev_page->next, head_page_with_bit, first_page);

		if (r == head_page_with_bit) {
			/*
			 * yay, we replaced the page pointer to our new list,
			 * now, we just have to update to head page's prev
			 * pointer to point to end of list
			 */
			head_page->prev = last_page;
			success = 1;
			break;
		}
	}

	if (success)
		INIT_LIST_HEAD(pages);
	/*
	 * If we weren't successful in adding in new pages, warn and stop
	 * tracing
	 */
	RB_WARN_ON(cpu_buffer, !success);
	raw_spin_unlock_irq(&cpu_buffer->reader_lock);

	/* free pages if they weren't inserted */
	if (!success) {
		struct buffer_page *bpage, *tmp;
		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
					 list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
	}
	return success;
}

/* Apply a pending resize (nr_pages_to_update) to one per-cpu buffer. */
static void rb_update_pages(struct ring_buffer_per_cpu *cpu_buffer)
{
	int success;

	if (cpu_buffer->nr_pages_to_update > 0)
		success = rb_insert_pages(cpu_buffer);
	else
		success = rb_remove_pages(cpu_buffer,
					-cpu_buffer->nr_pages_to_update);

	if (success)
		cpu_buffer->nr_pages += cpu_buffer->nr_pages_to_update;
}

/* Work handler so the resize runs on the CPU that owns the buffer. */
static void update_pages_handler(struct work_struct *work)
{
	struct ring_buffer_per_cpu *cpu_buffer = container_of(work,
			struct ring_buffer_per_cpu, update_pages_work);
	rb_update_pages(cpu_buffer);
	complete(&cpu_buffer->update_done);
}

/**
 * ring_buffer_resize - resize the ring buffer
 * @buffer: the buffer to resize.
 * @size: the new size.
 *
 * Minimum size is 2 * BUF_PAGE_SIZE.
 *
 * Returns 0 on success and < 0 on failure.
 */
int ring_buffer_resize(struct ring_buffer *buffer, unsigned long size,
			int cpu_id)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned nr_pages;
	int cpu, err = 0;

	/*
	 * Always succeed at resizing a non-existent buffer:
	 */
	if (!buffer)
		return size;

	/* Make sure the requested buffer exists */
	if (cpu_id != RING_BUFFER_ALL_CPUS &&
	    !cpumask_test_cpu(cpu_id, buffer->cpumask))
		return size;

	/* round the requested size up to whole pages */
	size = DIV_ROUND_UP(size, BUF_PAGE_SIZE);
	size *= BUF_PAGE_SIZE;

	/* we need a minimum of two pages */
	if (size < BUF_PAGE_SIZE * 2)
		size = BUF_PAGE_SIZE * 2;

	nr_pages = DIV_ROUND_UP(size, BUF_PAGE_SIZE);

	/*
	 * Don't succeed if resizing is disabled, as a reader might be
	 * manipulating the ring buffer and is expecting a sane state while
	 * this is true.
	 */
	if (atomic_read(&buffer->resize_disabled))
		return -EBUSY;

	/* prevent another thread from changing buffer sizes */
	mutex_lock(&buffer->mutex);

	if (cpu_id == RING_BUFFER_ALL_CPUS) {
		/* calculate the pages to update */
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];

			cpu_buffer->nr_pages_to_update = nr_pages -
							cpu_buffer->nr_pages;
			/*
			 * nothing more to do for removing pages or no update
			 */
			if (cpu_buffer->nr_pages_to_update <= 0)
				continue;
			/*
			 * to add pages, make sure all new pages can be
			 * allocated without receiving ENOMEM
			 */
			INIT_LIST_HEAD(&cpu_buffer->new_pages);
			if (__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
						&cpu_buffer->new_pages, cpu)) {
				/* not enough memory for new pages */
				err = -ENOMEM;
				goto out_err;
			}
		}

		get_online_cpus();
		/*
		 * Fire off all the required work handlers
		 * We can't schedule on offline CPUs, but it's not necessary
		 * since we can change their buffer sizes without any race.
		 */
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			if (!cpu_buffer->nr_pages_to_update)
				continue;

			/* The update must run on the CPU that is being updated. */
			preempt_disable();
			if (cpu == smp_processor_id() || !cpu_online(cpu)) {
				rb_update_pages(cpu_buffer);
				cpu_buffer->nr_pages_to_update = 0;
			} else {
				/*
				 * Can not disable preemption for schedule_work_on()
				 * on PREEMPT_RT.
				 */
				preempt_enable();
				schedule_work_on(cpu,
						&cpu_buffer->update_pages_work);
				preempt_disable();
			}
			preempt_enable();
		}

		/* wait for all the updates to complete */
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			if (!cpu_buffer->nr_pages_to_update)
				continue;

			if (cpu_online(cpu))
				wait_for_completion(&cpu_buffer->update_done);
			cpu_buffer->nr_pages_to_update = 0;
		}

		put_online_cpus();
	} else {
		/* Make sure this CPU has been initialized */
		if (!cpumask_test_cpu(cpu_id, buffer->cpumask))
			goto out;

		cpu_buffer = buffer->buffers[cpu_id];

		if (nr_pages == cpu_buffer->nr_pages)
			goto out;

		cpu_buffer->nr_pages_to_update = nr_pages -
						cpu_buffer->nr_pages;

		INIT_LIST_HEAD(&cpu_buffer->new_pages);
		if (cpu_buffer->nr_pages_to_update > 0 &&
			__rb_allocate_pages(cpu_buffer->nr_pages_to_update,
					    &cpu_buffer->new_pages, cpu_id)) {
			err = -ENOMEM;
			goto out_err;
		}

		get_online_cpus();

		preempt_disable();
		/* The update must run on the CPU that is being updated. */
		if (cpu_id == smp_processor_id() || !cpu_online(cpu_id))
			rb_update_pages(cpu_buffer);
		else {
			/*
			 * Can not disable preemption for schedule_work_on()
			 * on PREEMPT_RT.
			 */
			preempt_enable();
			schedule_work_on(cpu_id,
					 &cpu_buffer->update_pages_work);
			wait_for_completion(&cpu_buffer->update_done);
			preempt_disable();
		}
		preempt_enable();

		cpu_buffer->nr_pages_to_update = 0;
		put_online_cpus();
	}

 out:
	/*
	 * The ring buffer resize can happen with the ring buffer
	 * enabled, so that the update disturbs the tracing as little
	 * as possible. But if the buffer is disabled, we do not need
	 * to worry about that, and we can take the time to verify
	 * that the buffer is not corrupt.
	 */
	if (atomic_read(&buffer->record_disabled)) {
		atomic_inc(&buffer->record_disabled);
		/*
		 * Even though the buffer was disabled, we must make sure
		 * that it is truly disabled before calling rb_check_pages.
		 * There could have been a race between checking
		 * record_disable and incrementing it.
		 */
		synchronize_sched();
		for_each_buffer_cpu(buffer, cpu) {
			cpu_buffer = buffer->buffers[cpu];
			rb_check_pages(cpu_buffer);
		}
		atomic_dec(&buffer->record_disabled);
	}

	mutex_unlock(&buffer->mutex);
	return size;

 out_err:
	/* roll back: free any pre-allocated pages on every cpu */
	for_each_buffer_cpu(buffer, cpu) {
		struct buffer_page *bpage, *tmp;

		cpu_buffer = buffer->buffers[cpu];
		cpu_buffer->nr_pages_to_update = 0;

		if (list_empty(&cpu_buffer->new_pages))
			continue;

		list_for_each_entry_safe(bpage, tmp, &cpu_buffer->new_pages,
					list) {
			list_del_init(&bpage->list);
			free_buffer_page(bpage);
		}
	}
	mutex_unlock(&buffer->mutex);
	return err;
}
EXPORT_SYMBOL_GPL(ring_buffer_resize);

/* Toggle RB_FL_OVERWRITE on an existing buffer. */
void ring_buffer_change_overwrite(struct ring_buffer *buffer, int val)
{
	mutex_lock(&buffer->mutex);
	if (val)
		buffer->flags |= RB_FL_OVERWRITE;
	else
		buffer->flags &= ~RB_FL_OVERWRITE;
	mutex_unlock(&buffer->mutex);
}
EXPORT_SYMBOL_GPL(ring_buffer_change_overwrite);

static inline void *
__rb_data_page_index(struct buffer_data_page *bpage, unsigned index)
{
	return bpage->data + index;
}

static inline void *__rb_page_index(struct buffer_page *bpage, unsigned index)
{
	return bpage->page->data + index;
}

/* Event at the current read position of the reader page. */
static inline struct ring_buffer_event *
rb_reader_event(struct ring_buffer_per_cpu *cpu_buffer)
{
	return __rb_page_index(cpu_buffer->reader_page,
			       cpu_buffer->reader_page->read);
}

/* Event at the iterator's current position. */
static inline struct ring_buffer_event *
rb_iter_head_event(struct ring_buffer_iter *iter)
{
	return __rb_page_index(iter->head_page, iter->head);
}

static inline unsigned rb_page_commit(struct buffer_page *bpage)
{
	return local_read(&bpage->page->commit);
}

/* Size is determined by what has been committed */
static inline unsigned rb_page_size(struct buffer_page *bpage)
{
	return rb_page_commit(bpage);
}
/* Committed size of the commit page. */
static inline unsigned
rb_commit_index(struct ring_buffer_per_cpu *cpu_buffer)
{
	return rb_page_commit(cpu_buffer->commit_page);
}

/* Byte offset of an event within its data page. */
static inline unsigned
rb_event_index(struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;

	return (addr & ~PAGE_MASK) - BUF_PAGE_HDR_SIZE;
}

/* True iff @event sits exactly at the current commit position. */
static inline int
rb_event_is_commit(struct ring_buffer_per_cpu *cpu_buffer,
		   struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	unsigned long index;

	index = rb_event_index(event);
	addr &= PAGE_MASK;

	return cpu_buffer->commit_page->page == (void *)addr &&
		rb_commit_index(cpu_buffer) == index;
}

/* Advance the commit page/index to catch up with the tail. */
static void
rb_set_commit_to_write(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long max_count;

	/*
	 * We only race with interrupts and NMIs on this CPU.
	 * If we own the commit event, then we can commit
	 * all others that interrupted us, since the interruptions
	 * are in stack format (they finish before they come
	 * back to us). This allows us to do a simple loop to
	 * assign the commit to the tail.
	 */
 again:
	max_count = cpu_buffer->nr_pages * 100;

	while (cpu_buffer->commit_page != cpu_buffer->tail_page) {
		if (RB_WARN_ON(cpu_buffer, !(--max_count)))
			return;
		if (RB_WARN_ON(cpu_buffer,
			       rb_is_reader_page(cpu_buffer->tail_page)))
			return;
		local_set(&cpu_buffer->commit_page->page->commit,
			  rb_page_write(cpu_buffer->commit_page));
		rb_inc_page(cpu_buffer, &cpu_buffer->commit_page);
		cpu_buffer->write_stamp =
			cpu_buffer->commit_page->page->time_stamp;
		/* add barrier to keep gcc from optimizing too much */
		barrier();
	}
	while (rb_commit_index(cpu_buffer) !=
	       rb_page_write(cpu_buffer->commit_page)) {

		local_set(&cpu_buffer->commit_page->page->commit,
			  rb_page_write(cpu_buffer->commit_page));
		RB_WARN_ON(cpu_buffer,
			   local_read(&cpu_buffer->commit_page->page->commit) &
			   ~RB_WRITE_MASK);
		barrier();
	}

	/* again, keep gcc from optimizing */
	barrier();

	/*
	 * If an interrupt came in just after the first while loop
	 * and pushed the tail page forward, we will be left with
	 * a dangling commit that will never go forward.
	 */
	if (unlikely(cpu_buffer->commit_page != cpu_buffer->tail_page))
		goto again;
}

/* Restart reading from the beginning of the reader page. */
static void rb_reset_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	cpu_buffer->read_stamp = cpu_buffer->reader_page->page->time_stamp;
	cpu_buffer->reader_page->read = 0;
}

/* Move an iterator to the next page of the buffer. */
static void rb_inc_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/*
	 * The iterator could be on the reader page (it starts there).
	 * But the head could have moved, since the reader was
	 * found. Check for this case and assign the iterator
	 * to the head page instead of next.
	 */
	if (iter->head_page == cpu_buffer->reader_page)
		iter->head_page = rb_set_head_page(cpu_buffer);
	else
		rb_inc_page(cpu_buffer, &iter->head_page);

	iter->read_stamp = iter->head_page->page->time_stamp;
	iter->head = 0;
}

/* Slow path, do not inline */
static noinline struct ring_buffer_event *
rb_add_time_stamp(struct ring_buffer_event *event, u64 delta)
{
	event->type_len = RINGBUF_TYPE_TIME_EXTEND;

	/* Not the first event on the page? */
	if (rb_event_index(event)) {
		event->time_delta = delta & TS_MASK;
		event->array[0] = delta >> TS_SHIFT;
	} else {
		/* nope, just zero it */
		event->time_delta = 0;
		event->array[0] = 0;
	}

	return skip_time_extend(event);
}

/**
 * rb_update_event - update event type and data
 * @event: the event to update
 * @type: the type of event
 * @length: the size of the event field in the ring buffer
 *
 * Update the type and data fields of the event. The length
 * is the actual size that is written to the ring buffer,
 * and with this, we can determine what to place into the
 * data field.
 */
static void
rb_update_event(struct ring_buffer_per_cpu *cpu_buffer,
		struct ring_buffer_event *event, unsigned length,
		int add_timestamp, u64 delta)
{
	/* Only a commit updates the timestamp */
	if (unlikely(!rb_event_is_commit(cpu_buffer, event)))
		delta = 0;

	/*
	 * If we need to add a timestamp, then we
	 * add it to the start of the reserved space.
	 */
	if (unlikely(add_timestamp)) {
		event = rb_add_time_stamp(event, delta);
		length -= RB_LEN_TIME_EXTEND;
		delta = 0;
	}

	event->time_delta = delta;
	length -= RB_EVNT_HDR_SIZE;
	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT) {
		event->type_len = 0;
		event->array[0] = length;
	} else
		event->type_len = DIV_ROUND_UP(length, RB_ALIGNMENT);
}

/*
 * rb_handle_head_page - writer hit the head page
 *
 * Returns: +1 to retry page
 *           0 to continue
 *          -1 on error
 */
static int
rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
		    struct buffer_page *tail_page,
		    struct buffer_page *next_page)
{
	struct buffer_page *new_head;
	int entries;
	int type;
	int ret;

	entries = rb_page_entries(next_page);

	/*
	 * The hard part is here. We need to move the head
	 * forward, and protect against both readers on
	 * other CPUs and writers coming in via interrupts.
	 */
	type = rb_head_page_set_update(cpu_buffer, next_page, tail_page,
				       RB_PAGE_HEAD);

	/*
	 * type can be one of four:
	 *  NORMAL - an interrupt already moved it for us
	 *  HEAD   - we are the first to get here.
	 *  UPDATE - we are the interrupt interrupting
	 *           a current move.
	 *  MOVED  - a reader on another CPU moved the next
	 *           pointer to its reader page. Give up
	 *           and try again.
	 */

	switch (type) {
	case RB_PAGE_HEAD:
		/*
		 * We changed the head to UPDATE, thus
		 * it is our responsibility to update
		 * the counters.
		 */
		local_add(entries, &cpu_buffer->overrun);
		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);

		/*
		 * The entries will be zeroed out when we move the
		 * tail page.
		 */

		/* still more to do */
		break;

	case RB_PAGE_UPDATE:
		/*
		 * This is an interrupt that interrupted the
		 * previous update. Still more to do.
		 */
		break;
	case RB_PAGE_NORMAL:
		/*
		 * An interrupt came in before the update
		 * and processed this for us.
		 * Nothing left to do.
		 */
		return 1;
	case RB_PAGE_MOVED:
		/*
		 * The reader is on another CPU and just did
		 * a swap with our next_page.
		 * Try again.
		 */
		return 1;
	default:
		RB_WARN_ON(cpu_buffer, 1); /* WTF??? */
		return -1;
	}

	/*
	 * Now that we are here, the old head pointer is
	 * set to UPDATE. This will keep the reader from
	 * swapping the head page with the reader page.
	 * The reader (on another CPU) will spin till
	 * we are finished.
	 *
	 * We just need to protect against interrupts
	 * doing the job. We will set the next pointer
	 * to HEAD. After that, we set the old pointer
	 * to NORMAL, but only if it was HEAD before.
	 * otherwise we are an interrupt, and only
	 * want the outer most commit to reset it.
	 */
	new_head = next_page;
	rb_inc_page(cpu_buffer, &new_head);

	ret = rb_head_page_set_head(cpu_buffer, new_head, next_page,
				    RB_PAGE_NORMAL);

	/*
	 * Valid returns are:
	 *  HEAD   - an interrupt came in and already set it.
	 *  NORMAL - One of two things:
	 *            1) We really set it.
	 *            2) A bunch of interrupts came in and moved
	 *               the page forward again.
	 */
	switch (ret) {
	case RB_PAGE_HEAD:
	case RB_PAGE_NORMAL:
		/* OK */
		break;
	default:
		RB_WARN_ON(cpu_buffer, 1);
		return -1;
	}

	/*
	 * It is possible that an interrupt came in,
	 * set the head up, then more interrupts came in
	 * and moved it again. When we get back here,
	 * the page would have been set to NORMAL but we
	 * just set it back to HEAD.
	 *
	 * How do you detect this? Well, if that happened
	 * the tail page would have moved.
	 */
	if (ret == RB_PAGE_NORMAL) {
		/*
		 * If the tail had moved passed next, then we need
		 * to reset the pointer.
		 */
		if (cpu_buffer->tail_page != tail_page &&
		    cpu_buffer->tail_page != next_page)
			rb_head_page_set_normal(cpu_buffer, new_head,
						next_page,
						RB_PAGE_HEAD);
	}

	/*
	 * If this was the outer most commit (the one that
	 * changed the original pointer from HEAD to UPDATE),
	 * then it is up to us to reset it to NORMAL.
	 */
	if (type == RB_PAGE_HEAD) {
		ret = rb_head_page_set_normal(cpu_buffer, next_page,
					      tail_page,
					      RB_PAGE_UPDATE);
		if (RB_WARN_ON(cpu_buffer,
			       ret != RB_PAGE_UPDATE))
			return -1;
	}

	return 0;
}

/*
 * Turn a payload length into the total, aligned on-buffer size of
 * the event (header included).
 */
static unsigned rb_calculate_event_length(unsigned length)
{
	struct ring_buffer_event event; /* Used only for sizeof array */

	/* zero length can cause confusions */
	if (!length)
		length = 1;

	if (length > RB_MAX_SMALL_DATA || RB_FORCE_8BYTE_ALIGNMENT)
		length += sizeof(event.array[0]);

	length += RB_EVNT_HDR_SIZE;
	length = ALIGN(length, RB_ARCH_ALIGNMENT);

	return length;
}

/*
 * Undo a reservation that ran past the end of the page, padding the
 * remainder of the old tail page as needed.
 */
static inline void
rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
	      struct buffer_page *tail_page,
	      unsigned long tail, unsigned long length)
{
	struct ring_buffer_event *event;

	/*
	 * Only the event that crossed the page boundary
	 * must fill the old tail_page with padding.
	 */
	if (tail >= BUF_PAGE_SIZE) {
		/*
		 * If the page was filled, then we still need
		 * to update the real_end. Reset it to zero
		 * and the reader will ignore it.
		 */
		if (tail == BUF_PAGE_SIZE)
			tail_page->real_end = 0;

		local_sub(length, &tail_page->write);
		return;
	}

	event = __rb_page_index(tail_page, tail);
	kmemcheck_annotate_bitfield(event, bitfield);

	/* account for padding bytes */
	local_add(BUF_PAGE_SIZE - tail, &cpu_buffer->entries_bytes);

	/*
	 * Save the original length to the meta data.
	 * This will be used by the reader to add lost event
	 * counter.
	 */
	tail_page->real_end = tail;

	/*
	 * If this event is bigger than the minimum size, then
	 * we need to be careful that we don't subtract the
	 * write counter enough to allow another writer to slip
	 * in on this page.
	 * We put in a discarded commit instead, to make sure
	 * that this space is not used again.
	 *
	 * If we are less than the minimum size, we don't need to
	 * worry about it.
	 */
	if (tail > (BUF_PAGE_SIZE - RB_EVNT_MIN_SIZE)) {
		/* No room for any events */

		/* Mark the rest of the page with padding */
		rb_event_set_padding(event);

		/* Set the write back to the previous setting */
		local_sub(length, &tail_page->write);
		return;
	}

	/* Put in a discarded event */
	event->array[0] = (BUF_PAGE_SIZE - tail) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	event->time_delta = 1;

	/* Set write to end of buffer */
	length = (tail + length) - BUF_PAGE_SIZE;
	local_sub(length, &tail_page->write);
}

/*
 * This is the slow path, force gcc not to inline it.
 */
static noinline struct ring_buffer_event *
rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
	     unsigned long length, unsigned long tail,
	     struct buffer_page *tail_page, u64 ts)
{
	struct buffer_page *commit_page = cpu_buffer->commit_page;
	struct ring_buffer *buffer = cpu_buffer->buffer;
	struct buffer_page *next_page;
	int ret;

	next_page = tail_page;

	rb_inc_page(cpu_buffer, &next_page);

	/*
	 * If for some reason, we had an interrupt storm that made
	 * it all the way around the buffer, bail, and warn
	 * about it.
	 */
	if (unlikely(next_page == commit_page)) {
		local_inc(&cpu_buffer->commit_overrun);
		goto out_reset;
	}

	/*
	 * This is where the fun begins!
	 *
	 * We are fighting against races between a reader that
	 * could be on another CPU trying to swap its reader
	 * page with the buffer head.
	 *
	 * We are also fighting against interrupts coming in and
	 * moving the head or tail on us as well.
	 *
	 * If the next page is the head page then we have filled
	 * the buffer, unless the commit page is still on the
	 * reader page.
	 */
	if (rb_is_head_page(cpu_buffer, next_page, &tail_page->list)) {

		/*
		 * If the commit is not on the reader page, then
		 * move the header page.
		 */
		if (!rb_is_reader_page(cpu_buffer->commit_page)) {
			/*
			 * If we are not in overwrite mode,
			 * this is easy, just stop here.
			 */
			if (!(buffer->flags & RB_FL_OVERWRITE)) {
				local_inc(&cpu_buffer->dropped_events);
				goto out_reset;
			}

			ret = rb_handle_head_page(cpu_buffer,
						  tail_page,
						  next_page);
			if (ret < 0)
				goto out_reset;
			if (ret)
				goto out_again;
		} else {
			/*
			 * We need to be careful here too. The
			 * commit page could still be on the reader
			 * page. We could have a small buffer, and
			 * have filled up the buffer with events
			 * from interrupts and such, and wrapped.
			 *
			 * Note, if the tail page is also on the
			 * reader_page, we let it move out.
			 */
			if (unlikely((cpu_buffer->commit_page !=
				      cpu_buffer->tail_page) &&
				     (cpu_buffer->commit_page ==
				      cpu_buffer->reader_page))) {
				local_inc(&cpu_buffer->commit_overrun);
				goto out_reset;
			}
		}
	}

	ret = rb_tail_page_update(cpu_buffer, tail_page, next_page);
	if (ret) {
		/*
		 * Nested commits always have zero deltas, so
		 * just reread the time stamp
		 */
		ts = rb_time_stamp(buffer);
		next_page->page->time_stamp = ts;
	}

 out_again:

	rb_reset_tail(cpu_buffer, tail_page, tail, length);

	/* fail and let the caller try again */
	return ERR_PTR(-EAGAIN);

 out_reset:
	/* reset write */
	rb_reset_tail(cpu_buffer, tail_page, tail, length);

	return NULL;
}

/*
 * Reserve @length bytes on the tail page via a lockless
 * local_add_return on the write counter. Falls back to rb_move_tail
 * when the reservation crosses the end of the page.
 */
static struct ring_buffer_event *
__rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
		  unsigned long length, u64 ts,
		  u64 delta, int add_timestamp)
{
	struct buffer_page *tail_page;
	struct ring_buffer_event *event;
	unsigned long tail, write;

	/*
	 * If the time delta since the last event is too big to
	 * hold in the time field of the event, then we append a
	 * TIME EXTEND event ahead of the data event.
	 */
	if (unlikely(add_timestamp))
		length += RB_LEN_TIME_EXTEND;

	tail_page = cpu_buffer->tail_page;
	write = local_add_return(length, &tail_page->write);

	/* set write to only the index of the write */
	write &= RB_WRITE_MASK;
	tail = write - length;

	/*
	 * If this is the first commit on the page, then it has the same
	 * timestamp as the page itself.
	 */
	if (!tail)
		delta = 0;

	/* See if we shot pass the end of this buffer page */
	if (unlikely(write > BUF_PAGE_SIZE))
		return rb_move_tail(cpu_buffer, length, tail,
				    tail_page, ts);

	/* We reserved something on the buffer */

	event = __rb_page_index(tail_page, tail);
	kmemcheck_annotate_bitfield(event, bitfield);
	rb_update_event(cpu_buffer, event, length, add_timestamp, delta);

	local_inc(&tail_page->entries);

	/*
	 * If this is the first commit on the page, then update
	 * its timestamp.
	 */
	if (!tail)
		tail_page->page->time_stamp = ts;

	/* account for these added bytes */
	local_add(length, &cpu_buffer->entries_bytes);

	return event;
}

/*
 * Try to give back a just-reserved event by rolling the write counter
 * back with local_cmpxchg. Returns 1 when the space was reclaimed,
 * 0 when a nested write got in the way.
 */
static inline int
rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
		  struct ring_buffer_event *event)
{
	unsigned long new_index, old_index;
	struct buffer_page *bpage;
	unsigned long index;
	unsigned long addr;

	new_index = rb_event_index(event);
	old_index = new_index + rb_event_ts_length(event);
	addr = (unsigned long)event;
	addr &= PAGE_MASK;

	bpage = cpu_buffer->tail_page;

	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
		unsigned long write_mask =
			local_read(&bpage->write) & ~RB_WRITE_MASK;
		unsigned long event_length = rb_event_length(event);
		/*
		 * This is on the tail page. It is possible that
		 * a write could come in and move the tail page
		 * and write to the next page. That is fine
		 * because we just shorten what is on this page.
		 */
		old_index += write_mask;
		new_index += write_mask;
		index = local_cmpxchg(&bpage->write, old_index, new_index);
		if (index == old_index) {
			/* update counters */
			local_sub(event_length, &cpu_buffer->entries_bytes);
			return 1;
		}
	}

	/* could not discard */
	return 0;
}

/* Open a (possibly nested) commit section on this cpu buffer. */
static void rb_start_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
	local_inc(&cpu_buffer->committing);
	local_inc(&cpu_buffer->commits);
}

/*
 * Close a commit section; the outermost closer pushes the commit
 * pointer forward for every nested commit that happened meanwhile.
 */
static inline void rb_end_commit(struct ring_buffer_per_cpu *cpu_buffer)
{
	unsigned long commits;

	if (RB_WARN_ON(cpu_buffer,
		       !local_read(&cpu_buffer->committing)))
		return;

 again:
	commits = local_read(&cpu_buffer->commits);
	/* synchronize with interrupts */
	barrier();
	if (local_read(&cpu_buffer->committing) == 1)
		rb_set_commit_to_write(cpu_buffer);

	local_dec(&cpu_buffer->committing);

	/* synchronize with interrupts */
	barrier();

	/*
	 * Need to account for interrupts coming in between the
	 * updating of the commit page and the clearing of the
	 * committing counter.
	 */
	if (unlikely(local_read(&cpu_buffer->commits) != commits) &&
	    !local_read(&cpu_buffer->committing)) {
		local_inc(&cpu_buffer->committing);
		goto again;
	}
}

/*
 * Reserve space for one event on @cpu_buffer, adding a TIME_EXTEND
 * event first when the timestamp delta overflows the event's field.
 * Returns the reserved event, or NULL on failure (commit is then
 * already ended).
 */
static struct ring_buffer_event *
rb_reserve_next_event(struct ring_buffer *buffer,
		      struct ring_buffer_per_cpu *cpu_buffer,
		      unsigned long length)
{
	struct ring_buffer_event *event;
	u64 ts, delta;
	int nr_loops = 0;
	int add_timestamp;
	u64 diff;

	rb_start_commit(cpu_buffer);

#ifdef CONFIG_RING_BUFFER_ALLOW_SWAP
	/*
	 * Due to the ability to swap a cpu buffer from a buffer
	 * it is possible it was swapped before we committed.
	 * (committing stops a swap). We check for it here and
	 * if it happened, we have to fail the write.
	 */
	barrier();
	if (unlikely(ACCESS_ONCE(cpu_buffer->buffer) != buffer)) {
		local_dec(&cpu_buffer->committing);
		local_dec(&cpu_buffer->commits);
		return NULL;
	}
#endif

	length = rb_calculate_event_length(length);
 again:
	add_timestamp = 0;
	delta = 0;

	/*
	 * We allow for interrupts to reenter here and do a trace.
	 * If one does, it will cause this original code to loop
	 * back here. Even with heavy interrupts happening, this
	 * should only happen a few times in a row. If this happens
	 * 1000 times in a row, there must be either an interrupt
	 * storm or we have something buggy.
	 * Bail!
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 1000))
		goto out_fail;

	ts = rb_time_stamp(cpu_buffer->buffer);
	diff = ts - cpu_buffer->write_stamp;

	/* make sure this diff is calculated here */
	barrier();

	/* Did the write stamp get updated already? */
	if (likely(ts >= cpu_buffer->write_stamp)) {
		delta = diff;
		if (unlikely(test_time_stamp(delta))) {
			int local_clock_stable = 1;
#ifdef CONFIG_HAVE_UNSTABLE_SCHED_CLOCK
			local_clock_stable = sched_clock_stable;
#endif
			WARN_ONCE(delta > (1ULL << 59),
				  KERN_WARNING "Delta way too big! %llu ts=%llu write stamp = %llu\n%s",
				  (unsigned long long)delta,
				  (unsigned long long)ts,
				  (unsigned long long)cpu_buffer->write_stamp,
				  local_clock_stable ? "" :
				  "If you just came from a suspend/resume,\n"
				  "please switch to the trace global clock:\n"
				  " echo global > /sys/kernel/debug/tracing/trace_clock\n");
			add_timestamp = 1;
		}
	}

	event = __rb_reserve_next(cpu_buffer, length, ts,
				  delta, add_timestamp);
	if (unlikely(PTR_ERR(event) == -EAGAIN))
		goto again;

	if (!event)
		goto out_fail;

	return event;

 out_fail:
	rb_end_commit(cpu_buffer);
	return NULL;
}

#ifdef CONFIG_TRACING

/*
 * The lock and unlock are done within a preempt disable section.
 * The current_context per_cpu variable can only be modified
 * by the current task between lock and unlock. But it can
 * be modified more than once via an interrupt. To pass this
 * information from the lock to the unlock without having to
 * access the 'in_interrupt()' functions again (which do show
 * a bit of overhead in something as critical as function tracing,
 * we use a bitmask trick.
 *
 *  bit 0 =  NMI context
 *  bit 1 =  IRQ context
 *  bit 2 =  SoftIRQ context
 *  bit 3 =  normal context.
 *
 * This works because this is the order of contexts that can
 * preempt other contexts. A SoftIRQ never preempts an IRQ
 * context.
 *
 * When the context is determined, the corresponding bit is
 * checked and set (if it was set, then a recursion of that context
 * happened).
 *
 * On unlock, we need to clear this bit. To do so, just subtract
 * 1 from the current_context and AND it to itself.
 *
 * (binary)
 *  101 - 1 = 100
 *  101 & 100 = 100 (clearing bit zero)
 *
 *  1010 - 1 = 1001
 *  1010 & 1001 = 1000 (clearing bit 1)
 *
 * The least significant bit can be cleared this way, and it
 * just so happens that it is the same bit corresponding to
 * the current context.
 */
static DEFINE_PER_CPU(unsigned int, current_context);

static __always_inline int trace_recursive_lock(void)
{
	unsigned int val = __this_cpu_read(current_context);
	int bit;

	if (in_interrupt()) {
		if (in_nmi())
			bit = 0;
		else if (in_irq())
			bit = 1;
		else
			bit = 2;
	} else
		bit = 3;

	if (unlikely(val & (1 << bit)))
		return 1;

	val |= (1 << bit);
	__this_cpu_write(current_context, val);

	return 0;
}

static __always_inline void trace_recursive_unlock(void)
{
	unsigned int val = __this_cpu_read(current_context);

	/*
	 * Clear the most significant set bit (the current context's bit)
	 * by ANDing with val - 1, per the comment block above.
	 * NOTE(review): the extra "val &" is redundant ("val &= val - 1"
	 * would be identical) but harmless — confirm against upstream
	 * before simplifying.
	 */
	val &= val & (val - 1);
	__this_cpu_write(current_context, val);
}

#else

#define trace_recursive_lock()		(0)
#define trace_recursive_unlock()	do { } while (0)

#endif

/**
 * ring_buffer_lock_reserve - reserve a part of the buffer
 * @buffer: the ring buffer to reserve from
 * @length: the length of the data to reserve (excluding event header)
 *
 * Returns a reserved event on the ring buffer to copy directly to.
 * The user of this interface will need to get the body to write into
 * and can use the ring_buffer_event_data() interface.
 *
 * The length is the length of the data needed, not the event length
 * which also includes the event header.
 *
 * Must be paired with ring_buffer_unlock_commit, unless NULL is returned.
 * If NULL is returned, then nothing has been allocated or locked.
 */
struct ring_buffer_event *
ring_buffer_lock_reserve(struct ring_buffer *buffer, unsigned long length)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int cpu;

	if (ring_buffer_flags != RB_BUFFERS_ON)
		return NULL;

	/* If we are tracing schedule, we don't want to recurse */
	preempt_disable_notrace();

	if (atomic_read(&buffer->record_disabled))
		goto out_nocheck;

	/* recursion guard; released in ring_buffer_unlock_commit() */
	if (trace_recursive_lock())
		goto out_nocheck;

	cpu = raw_smp_processor_id();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];

	if (atomic_read(&cpu_buffer->record_disabled))
		goto out;

	if (length > BUF_MAX_DATA_SIZE)
		goto out;

	event = rb_reserve_next_event(buffer, cpu_buffer, length);
	if (!event)
		goto out;

	/* preemption stays disabled until the matching unlock_commit */
	return event;

 out:
	trace_recursive_unlock();

 out_nocheck:
	preempt_enable_notrace();
	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_lock_reserve);

/* Fold a just-committed event's time delta into the CPU's write stamp. */
static void
rb_update_write_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	u64 delta;

	/*
	 * The event first in the commit queue updates the
	 * time stamp.
	 */
	if (rb_event_is_commit(cpu_buffer, event)) {
		/*
		 * A commit event that is first on a page
		 * updates the write timestamp with the page stamp
		 */
		if (!rb_event_index(event))
			cpu_buffer->write_stamp =
				cpu_buffer->commit_page->page->time_stamp;
		else if (event->type_len == RINGBUF_TYPE_TIME_EXTEND) {
			/* extended timestamp: delta spans array[0] and time_delta */
			delta = event->array[0];
			delta <<= TS_SHIFT;
			delta += event->time_delta;
			cpu_buffer->write_stamp += delta;
		} else
			cpu_buffer->write_stamp += event->time_delta;
	}
}

/* Account the event, update the write stamp and close the commit window. */
static void rb_commit(struct ring_buffer_per_cpu *cpu_buffer,
		      struct ring_buffer_event *event)
{
	local_inc(&cpu_buffer->entries);
	rb_update_write_stamp(cpu_buffer, event);
	rb_end_commit(cpu_buffer);
}

/* Kick pending waiters on both the global and the per-cpu buffer. */
static __always_inline void
rb_wakeups(struct ring_buffer *buffer, struct ring_buffer_per_cpu *cpu_buffer)
{
	if (buffer->irq_work.waiters_pending) {
		buffer->irq_work.waiters_pending = false;
		/* irq_work_queue() supplies its own memory barriers */
		irq_work_queue(&buffer->irq_work.work);
	}

	if (cpu_buffer->irq_work.waiters_pending) {
		cpu_buffer->irq_work.waiters_pending = false;
		/* irq_work_queue() supplies its own memory barriers */
		irq_work_queue(&cpu_buffer->irq_work.work);
	}
}

/**
 * ring_buffer_unlock_commit - commit a reserved event
 * @buffer: The buffer to commit to
 * @event: The event pointer to commit.
 *
 * This commits the data to the ring buffer, and releases any locks held.
 *
 * Must be paired with ring_buffer_lock_reserve.
 */
int ring_buffer_unlock_commit(struct ring_buffer *buffer,
			      struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu = raw_smp_processor_id();

	cpu_buffer = buffer->buffers[cpu];

	rb_commit(cpu_buffer, event);

	rb_wakeups(buffer, cpu_buffer);

	/* release the guards taken in ring_buffer_lock_reserve() */
	trace_recursive_unlock();

	preempt_enable_notrace();

	return 0;
}
EXPORT_SYMBOL_GPL(ring_buffer_unlock_commit);

/* Turn an event into padding so readers skip over it. */
static inline void rb_event_discard(struct ring_buffer_event *event)
{
	if (event->type_len == RINGBUF_TYPE_TIME_EXTEND)
		event = skip_time_extend(event);

	/* array[0] holds the actual length for the discarded event */
	event->array[0] = rb_event_data_length(event) - RB_EVNT_HDR_SIZE;
	event->type_len = RINGBUF_TYPE_PADDING;
	/* time delta must be non zero */
	if (!event->time_delta)
		event->time_delta = 1;
}

/*
 * Decrement the entries to the page that an event is on.
 * The event does not even need to exist, only the pointer
 * to the page it is on. This may only be called before the commit
 * takes place.
 */
static inline void
rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
		   struct ring_buffer_event *event)
{
	unsigned long addr = (unsigned long)event;
	struct buffer_page *bpage = cpu_buffer->commit_page;
	struct buffer_page *start;

	addr &= PAGE_MASK;

	/* Do the likely case first */
	if (likely(bpage->page == (void *)addr)) {
		local_dec(&bpage->entries);
		return;
	}

	/*
	 * Because the commit page may be on the reader page we
	 * start with the next page and check the end loop there.
	 */
	rb_inc_page(cpu_buffer, &bpage);
	start = bpage;
	do {
		if (bpage->page == (void *)addr) {
			local_dec(&bpage->entries);
			return;
		}
		rb_inc_page(cpu_buffer, &bpage);
	} while (bpage != start);

	/* commit not part of this buffer?? */
	RB_WARN_ON(cpu_buffer, 1);
}

/**
 * ring_buffer_commit_discard - discard an event that has not been committed
 * @buffer: the ring buffer
 * @event: non committed event to discard
 *
 * Sometimes an event that is in the ring buffer needs to be ignored.
 * This function lets the user discard an event in the ring buffer
 * and then that event will not be read later.
 *
 * This function only works if it is called before the item has been
 * committed. It will try to free the event from the ring buffer
 * if another event has not been added behind it.
 *
 * If another event has been added behind it, it will set the event
 * up as discarded, and perform the commit.
 *
 * If this function is called, do not call ring_buffer_unlock_commit on
 * the event.
 */
void ring_buffer_discard_commit(struct ring_buffer *buffer,
				struct ring_buffer_event *event)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	int cpu;

	/* The event is discarded regardless */
	rb_event_discard(event);

	cpu = smp_processor_id();
	cpu_buffer = buffer->buffers[cpu];

	/*
	 * This must only be called if the event has not been
	 * committed yet. Thus we can assume that preemption
	 * is still disabled.
	 */
	RB_WARN_ON(buffer, !local_read(&cpu_buffer->committing));

	rb_decrement_entry(cpu_buffer, event);
	if (rb_try_to_discard(cpu_buffer, event))
		goto out;

	/*
	 * The commit is still visible by the reader, so we
	 * must still update the timestamp.
	 */
	rb_update_write_stamp(cpu_buffer, event);
 out:
	rb_end_commit(cpu_buffer);

	trace_recursive_unlock();

	preempt_enable_notrace();

}
EXPORT_SYMBOL_GPL(ring_buffer_discard_commit);

/**
 * ring_buffer_write - write data to the buffer without reserving
 * @buffer: The ring buffer to write to.
 * @length: The length of the data being written (excluding the event header)
 * @data: The data to write to the buffer.
 *
 * This is like ring_buffer_lock_reserve and ring_buffer_unlock_commit as
 * one function. If you already have the data to write to the buffer, it
 * may be easier to simply call this function.
 *
 * Note, like ring_buffer_lock_reserve, the length is the length of the data
 * and not the length of the event which would hold the header.
*/ int ring_buffer_write(struct ring_buffer *buffer, unsigned long length, void *data) { struct ring_buffer_per_cpu *cpu_buffer; struct ring_buffer_event *event; void *body; int ret = -EBUSY; int cpu; if (ring_buffer_flags != RB_BUFFERS_ON) return -EBUSY; preempt_disable_notrace(); if (atomic_read(&buffer->record_disabled)) goto out; cpu = raw_smp_processor_id(); if (!cpumask_test_cpu(cpu, buffer->cpumask)) goto out; cpu_buffer = buffer->buffers[cpu]; if (atomic_read(&cpu_buffer->record_disabled)) goto out; if (length > BUF_MAX_DATA_SIZE) goto out; event = rb_reserve_next_event(buffer, cpu_buffer, length); if (!event) goto out; body = rb_event_data(event); memcpy(body, data, length); rb_commit(cpu_buffer, event); rb_wakeups(buffer, cpu_buffer); ret = 0; out: preempt_enable_notrace(); return ret; } EXPORT_SYMBOL_GPL(ring_buffer_write); static int rb_per_cpu_empty(struct ring_buffer_per_cpu *cpu_buffer) { struct buffer_page *reader = cpu_buffer->reader_page; struct buffer_page *head = rb_set_head_page(cpu_buffer); struct buffer_page *commit = cpu_buffer->commit_page; /* In case of error, head will be NULL */ if (unlikely(!head)) return 1; return reader->read == rb_page_commit(reader) && (commit == reader || (commit == head && head->read == rb_page_commit(commit))); } /** * ring_buffer_record_disable - stop all writes into the buffer * @buffer: The ring buffer to stop writes to. * * This prevents all writes to the buffer. Any attempt to write * to the buffer after this will fail and return NULL. * * The caller should call synchronize_sched() after this. */ void ring_buffer_record_disable(struct ring_buffer *buffer) { atomic_inc(&buffer->record_disabled); } EXPORT_SYMBOL_GPL(ring_buffer_record_disable); /** * ring_buffer_record_enable - enable writes to the buffer * @buffer: The ring buffer to enable writes * * Note, multiple disables will need the same number of enables * to truly enable the writing (much like preempt_disable). 
*/ void ring_buffer_record_enable(struct ring_buffer *buffer) { atomic_dec(&buffer->record_disabled); } EXPORT_SYMBOL_GPL(ring_buffer_record_enable); /** * ring_buffer_record_off - stop all writes into the buffer * @buffer: The ring buffer to stop writes to. * * This prevents all writes to the buffer. Any attempt to write * to the buffer after this will fail and return NULL. * * This is different than ring_buffer_record_disable() as * it works like an on/off switch, where as the disable() version * must be paired with a enable(). */ void ring_buffer_record_off(struct ring_buffer *buffer) { unsigned int rd; unsigned int new_rd; do { rd = atomic_read(&buffer->record_disabled); new_rd = rd | RB_BUFFER_OFF; } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); } EXPORT_SYMBOL_GPL(ring_buffer_record_off); /** * ring_buffer_record_on - restart writes into the buffer * @buffer: The ring buffer to start writes to. * * This enables all writes to the buffer that was disabled by * ring_buffer_record_off(). * * This is different than ring_buffer_record_enable() as * it works like an on/off switch, where as the enable() version * must be paired with a disable(). */ void ring_buffer_record_on(struct ring_buffer *buffer) { unsigned int rd; unsigned int new_rd; do { rd = atomic_read(&buffer->record_disabled); new_rd = rd & ~RB_BUFFER_OFF; } while (atomic_cmpxchg(&buffer->record_disabled, rd, new_rd) != rd); } EXPORT_SYMBOL_GPL(ring_buffer_record_on); /** * ring_buffer_record_is_on - return true if the ring buffer can write * @buffer: The ring buffer to see if write is enabled * * Returns true if the ring buffer is in a state that it accepts writes. */ int ring_buffer_record_is_on(struct ring_buffer *buffer) { return !atomic_read(&buffer->record_disabled); } /** * ring_buffer_record_disable_cpu - stop all writes into the cpu_buffer * @buffer: The ring buffer to stop writes to. * @cpu: The CPU buffer to stop * * This prevents all writes to the buffer. 
Any attempt to write * to the buffer after this will fail and return NULL. * * The caller should call synchronize_sched() after this. */ void ring_buffer_record_disable_cpu(struct ring_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; if (!cpumask_test_cpu(cpu, buffer->cpumask)) return; cpu_buffer = buffer->buffers[cpu]; atomic_inc(&cpu_buffer->record_disabled); } EXPORT_SYMBOL_GPL(ring_buffer_record_disable_cpu); /** * ring_buffer_record_enable_cpu - enable writes to the buffer * @buffer: The ring buffer to enable writes * @cpu: The CPU to enable. * * Note, multiple disables will need the same number of enables * to truly enable the writing (much like preempt_disable). */ void ring_buffer_record_enable_cpu(struct ring_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; if (!cpumask_test_cpu(cpu, buffer->cpumask)) return; cpu_buffer = buffer->buffers[cpu]; atomic_dec(&cpu_buffer->record_disabled); } EXPORT_SYMBOL_GPL(ring_buffer_record_enable_cpu); /* * The total entries in the ring buffer is the running counter * of entries entered into the ring buffer, minus the sum of * the entries read from the ring buffer and the number of * entries that were overwritten. */ static inline unsigned long rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer) { return local_read(&cpu_buffer->entries) - (local_read(&cpu_buffer->overrun) + cpu_buffer->read); } /** * ring_buffer_oldest_event_ts - get the oldest event timestamp from the buffer * @buffer: The ring buffer * @cpu: The per CPU buffer to read from. 
*/ u64 ring_buffer_oldest_event_ts(struct ring_buffer *buffer, int cpu) { unsigned long flags; struct ring_buffer_per_cpu *cpu_buffer; struct buffer_page *bpage; u64 ret = 0; if (!cpumask_test_cpu(cpu, buffer->cpumask)) return 0; cpu_buffer = buffer->buffers[cpu]; raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); /* * if the tail is on reader_page, oldest time stamp is on the reader * page */ if (cpu_buffer->tail_page == cpu_buffer->reader_page) bpage = cpu_buffer->reader_page; else bpage = rb_set_head_page(cpu_buffer); if (bpage) ret = bpage->page->time_stamp; raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); return ret; } EXPORT_SYMBOL_GPL(ring_buffer_oldest_event_ts); /** * ring_buffer_bytes_cpu - get the number of bytes consumed in a cpu buffer * @buffer: The ring buffer * @cpu: The per CPU buffer to read from. */ unsigned long ring_buffer_bytes_cpu(struct ring_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; unsigned long ret; if (!cpumask_test_cpu(cpu, buffer->cpumask)) return 0; cpu_buffer = buffer->buffers[cpu]; ret = local_read(&cpu_buffer->entries_bytes) - cpu_buffer->read_bytes; return ret; } EXPORT_SYMBOL_GPL(ring_buffer_bytes_cpu); /** * ring_buffer_entries_cpu - get the number of entries in a cpu buffer * @buffer: The ring buffer * @cpu: The per CPU buffer to get the entries from. */ unsigned long ring_buffer_entries_cpu(struct ring_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; if (!cpumask_test_cpu(cpu, buffer->cpumask)) return 0; cpu_buffer = buffer->buffers[cpu]; return rb_num_of_entries(cpu_buffer); } EXPORT_SYMBOL_GPL(ring_buffer_entries_cpu); /** * ring_buffer_overrun_cpu - get the number of overruns caused by the ring * buffer wrapping around (only if RB_FL_OVERWRITE is on). 
* @buffer: The ring buffer * @cpu: The per CPU buffer to get the number of overruns from */ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; unsigned long ret; if (!cpumask_test_cpu(cpu, buffer->cpumask)) return 0; cpu_buffer = buffer->buffers[cpu]; ret = local_read(&cpu_buffer->overrun); return ret; } EXPORT_SYMBOL_GPL(ring_buffer_overrun_cpu); /** * ring_buffer_commit_overrun_cpu - get the number of overruns caused by * commits failing due to the buffer wrapping around while there are uncommitted * events, such as during an interrupt storm. * @buffer: The ring buffer * @cpu: The per CPU buffer to get the number of overruns from */ unsigned long ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; unsigned long ret; if (!cpumask_test_cpu(cpu, buffer->cpumask)) return 0; cpu_buffer = buffer->buffers[cpu]; ret = local_read(&cpu_buffer->commit_overrun); return ret; } EXPORT_SYMBOL_GPL(ring_buffer_commit_overrun_cpu); /** * ring_buffer_dropped_events_cpu - get the number of dropped events caused by * the ring buffer filling up (only if RB_FL_OVERWRITE is off). 
* @buffer: The ring buffer * @cpu: The per CPU buffer to get the number of overruns from */ unsigned long ring_buffer_dropped_events_cpu(struct ring_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; unsigned long ret; if (!cpumask_test_cpu(cpu, buffer->cpumask)) return 0; cpu_buffer = buffer->buffers[cpu]; ret = local_read(&cpu_buffer->dropped_events); return ret; } EXPORT_SYMBOL_GPL(ring_buffer_dropped_events_cpu); /** * ring_buffer_read_events_cpu - get the number of events successfully read * @buffer: The ring buffer * @cpu: The per CPU buffer to get the number of events read */ unsigned long ring_buffer_read_events_cpu(struct ring_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; if (!cpumask_test_cpu(cpu, buffer->cpumask)) return 0; cpu_buffer = buffer->buffers[cpu]; return cpu_buffer->read; } EXPORT_SYMBOL_GPL(ring_buffer_read_events_cpu); /** * ring_buffer_entries - get the number of entries in a buffer * @buffer: The ring buffer * * Returns the total number of entries in the ring buffer * (all CPU entries) */ unsigned long ring_buffer_entries(struct ring_buffer *buffer) { struct ring_buffer_per_cpu *cpu_buffer; unsigned long entries = 0; int cpu; /* if you care about this being correct, lock the buffer */ for_each_buffer_cpu(buffer, cpu) { cpu_buffer = buffer->buffers[cpu]; entries += rb_num_of_entries(cpu_buffer); } return entries; } EXPORT_SYMBOL_GPL(ring_buffer_entries); /** * ring_buffer_overruns - get the number of overruns in buffer * @buffer: The ring buffer * * Returns the total number of overruns in the ring buffer * (all CPU entries) */ unsigned long ring_buffer_overruns(struct ring_buffer *buffer) { struct ring_buffer_per_cpu *cpu_buffer; unsigned long overruns = 0; int cpu; /* if you care about this being correct, lock the buffer */ for_each_buffer_cpu(buffer, cpu) { cpu_buffer = buffer->buffers[cpu]; overruns += local_read(&cpu_buffer->overrun); } return overruns; } EXPORT_SYMBOL_GPL(ring_buffer_overruns); 
/* Point an iterator back at the start of the unread data. */
static void rb_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;

	/* Iterator usage is expected to have record disabled */
	iter->head_page = cpu_buffer->reader_page;
	iter->head = cpu_buffer->reader_page->read;

	/* cached to detect consuming reads invalidating the iterator */
	iter->cache_reader_page = iter->head_page;
	iter->cache_read = cpu_buffer->read;

	if (iter->head)
		iter->read_stamp = cpu_buffer->read_stamp;
	else
		iter->read_stamp = iter->head_page->page->time_stamp;
}

/**
 * ring_buffer_iter_reset - reset an iterator
 * @iter: The iterator to reset
 *
 * Resets the iterator, so that it will start from the beginning
 * again.
 */
void ring_buffer_iter_reset(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;

	if (!iter)
		return;

	cpu_buffer = iter->cpu_buffer;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	rb_iter_reset(iter);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_reset);

/**
 * ring_buffer_iter_empty - check if an iterator has no more to read
 * @iter: The iterator to check
 */
int ring_buffer_iter_empty(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;

	cpu_buffer = iter->cpu_buffer;

	return iter->head_page == cpu_buffer->commit_page &&
		iter->head == rb_commit_index(cpu_buffer);
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_empty);

/* Advance the consuming read_stamp past @event's time delta. */
static void
rb_update_read_stamp(struct ring_buffer_per_cpu *cpu_buffer,
		     struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		cpu_buffer->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		cpu_buffer->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}

/* Same as rb_update_read_stamp() but for a non-consuming iterator. */
static void
rb_update_iter_read_stamp(struct ring_buffer_iter *iter,
			  struct ring_buffer_event *event)
{
	u64 delta;

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		return;

	case RINGBUF_TYPE_TIME_EXTEND:
		delta = event->array[0];
		delta <<= TS_SHIFT;
		delta += event->time_delta;
		iter->read_stamp += delta;
		return;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		return;

	case RINGBUF_TYPE_DATA:
		iter->read_stamp += event->time_delta;
		return;

	default:
		BUG();
	}
	return;
}

/*
 * Return the page the reader should consume next, swapping the empty
 * reader page into the writer's list when the current one is exhausted.
 * Returns NULL when there is nothing to read.
 */
static struct buffer_page *
rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct buffer_page *reader = NULL;
	unsigned long overwrite;
	unsigned long flags;
	int nr_loops = 0;
	int ret;

	local_irq_save(flags);
	arch_spin_lock(&cpu_buffer->lock);

 again:
	/*
	 * This should normally only loop twice. But because the
	 * start of the reader inserts an empty page, it causes
	 * a case where we will loop three times. There should be no
	 * reason to loop four times (that I know of).
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3)) {
		reader = NULL;
		goto out;
	}

	reader = cpu_buffer->reader_page;

	/* If there's more to read, return this page */
	if (cpu_buffer->reader_page->read < rb_page_size(reader))
		goto out;

	/* Never should we have an index greater than the size */
	if (RB_WARN_ON(cpu_buffer,
		       cpu_buffer->reader_page->read > rb_page_size(reader)))
		goto out;

	/* check if we caught up to the tail */
	reader = NULL;
	if (cpu_buffer->commit_page == cpu_buffer->reader_page)
		goto out;

	/* Don't bother swapping if the ring buffer is empty */
	if (rb_num_of_entries(cpu_buffer) == 0)
		goto out;

	/*
	 * Reset the reader page to size zero.
	 */
	local_set(&cpu_buffer->reader_page->write, 0);
	local_set(&cpu_buffer->reader_page->entries, 0);
	local_set(&cpu_buffer->reader_page->page->commit, 0);
	cpu_buffer->reader_page->real_end = 0;

 spin:
	/*
	 * Splice the empty reader page into the list around the head.
	 */
	reader = rb_set_head_page(cpu_buffer);
	if (!reader)
		goto out;
	cpu_buffer->reader_page->list.next = rb_list_head(reader->list.next);
	cpu_buffer->reader_page->list.prev = reader->list.prev;

	/*
	 * cpu_buffer->pages just needs to point to the buffer, it
	 * has no specific buffer page to point to. Lets move it out
	 * of our way so we don't accidentally swap it.
	 */
	cpu_buffer->pages = reader->list.prev;

	/* The reader page will be pointing to the new head */
	rb_set_list_to_head(cpu_buffer, &cpu_buffer->reader_page->list);

	/*
	 * We want to make sure we read the overruns after we set up our
	 * pointers to the next object. The writer side does a
	 * cmpxchg to cross pages which acts as the mb on the writer
	 * side. Note, the reader will constantly fail the swap
	 * while the writer is updating the pointers, so this
	 * guarantees that the overwrite recorded here is the one we
	 * want to compare with the last_overrun.
	 */
	smp_mb();
	overwrite = local_read(&(cpu_buffer->overrun));

	/*
	 * Here's the tricky part.
	 *
	 * We need to move the pointer past the header page.
	 * But we can only do that if a writer is not currently
	 * moving it. The page before the header page has the
	 * flag bit '1' set if it is pointing to the page we want.
	 * but if the writer is in the process of moving it
	 * than it will be '2' or already moved '0'.
	 */

	ret = rb_head_page_replace(reader, cpu_buffer->reader_page);

	/*
	 * If we did not convert it, then we must try again.
	 */
	if (!ret)
		goto spin;

	/*
	 * Yeah! We succeeded in replacing the page.
	 *
	 * Now make the new head point back to the reader page.
	 */
	rb_list_head(reader->list.next)->prev = &cpu_buffer->reader_page->list;
	rb_inc_page(cpu_buffer, &cpu_buffer->head_page);

	/* Finally update the reader page to the new head */
	cpu_buffer->reader_page = reader;
	rb_reset_reader_page(cpu_buffer);

	if (overwrite != cpu_buffer->last_overrun) {
		cpu_buffer->lost_events = overwrite - cpu_buffer->last_overrun;
		cpu_buffer->last_overrun = overwrite;
	}

	goto again;

 out:
	arch_spin_unlock(&cpu_buffer->lock);
	local_irq_restore(flags);

	return reader;
}

/* Consume the current event: bump counters and move the read cursor. */
static void rb_advance_reader(struct ring_buffer_per_cpu *cpu_buffer)
{
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	unsigned length;

	reader = rb_get_reader_page(cpu_buffer);

	/* This function should not be called when buffer is empty */
	if (RB_WARN_ON(cpu_buffer, !reader))
		return;

	event = rb_reader_event(cpu_buffer);

	if (event->type_len <= RINGBUF_TYPE_DATA_TYPE_LEN_MAX)
		cpu_buffer->read++;

	rb_update_read_stamp(cpu_buffer, event);

	length = rb_event_length(event);
	cpu_buffer->reader_page->read += length;
}

/* Step a non-consuming iterator past its current event. */
static void rb_advance_iter(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	unsigned length;

	cpu_buffer = iter->cpu_buffer;

	/*
	 * Check if we are at the end of the buffer.
	 */
	if (iter->head >= rb_page_size(iter->head_page)) {
		/* discarded commits can make the page empty */
		if (iter->head_page == cpu_buffer->commit_page)
			return;
		rb_inc_iter(iter);
		return;
	}

	event = rb_iter_head_event(iter);

	length = rb_event_length(event);

	/*
	 * This should not be called to advance the header if we are
	 * at the tail of the buffer.
	 */
	if (RB_WARN_ON(cpu_buffer,
		       (iter->head_page == cpu_buffer->commit_page) &&
		       (iter->head + length > rb_commit_index(cpu_buffer))))
		return;

	rb_update_iter_read_stamp(iter, event);

	iter->head += length;

	/* check for end of page padding */
	if ((iter->head >= rb_page_size(iter->head_page)) &&
	    (iter->head_page != cpu_buffer->commit_page))
		rb_inc_iter(iter);
}

/* Number of events overwritten since the reader last caught up. */
static int rb_lost_events(struct ring_buffer_per_cpu *cpu_buffer)
{
	return cpu_buffer->lost_events;
}

/*
 * Core of the consuming peek: returns the next readable event (or the
 * padding event for the caller to skip), without moving the read cursor
 * for data events. Caller holds the reader lock as appropriate.
 */
static struct ring_buffer_event *
rb_buffer_peek(struct ring_buffer_per_cpu *cpu_buffer, u64 *ts,
	       unsigned long *lost_events)
{
	struct ring_buffer_event *event;
	struct buffer_page *reader;
	int nr_loops = 0;

 again:
	/*
	 * We repeat when a time extend is encountered.
	 * Since the time extend is always attached to a data event,
	 * we should never loop more than once.
	 * (We never hit the following condition more than twice).
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 2))
		return NULL;

	reader = rb_get_reader_page(cpu_buffer);
	if (!reader)
		return NULL;

	event = rb_reader_event(cpu_buffer);

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event))
			RB_WARN_ON(cpu_buffer, 1);
		/*
		 * Because the writer could be discarding every
		 * event it creates (which would probably be bad)
		 * if we were to go back to "again" then we may never
		 * catch up, and will trigger the warn on, or lock
		 * the box. Return the padding, and we will release
		 * the current locks, and try again.
		 */
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_reader(cpu_buffer);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = cpu_buffer->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(cpu_buffer->buffer,
							 cpu_buffer->cpu, ts);
		}
		if (lost_events)
			*lost_events = rb_lost_events(cpu_buffer);
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_peek);

/* Core of the non-consuming (iterator) peek; mirrors rb_buffer_peek(). */
static struct ring_buffer_event *
rb_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer *buffer;
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event;
	int nr_loops = 0;

	cpu_buffer = iter->cpu_buffer;
	buffer = cpu_buffer->buffer;

	/*
	 * Check if someone performed a consuming read to
	 * the buffer. A consuming read invalidates the iterator
	 * and we need to reset the iterator in this case.
	 */
	if (unlikely(iter->cache_read != cpu_buffer->read ||
		     iter->cache_reader_page != cpu_buffer->reader_page))
		rb_iter_reset(iter);

 again:
	if (ring_buffer_iter_empty(iter))
		return NULL;

	/*
	 * We repeat when a time extend is encountered or we hit
	 * the end of the page. Since the time extend is always attached
	 * to a data event, we should never loop more than three times.
	 * Once for going to next page, once on time extend, and
	 * finally once to get the event.
	 * (We never hit the following condition more than thrice).
	 */
	if (RB_WARN_ON(cpu_buffer, ++nr_loops > 3))
		return NULL;

	if (rb_per_cpu_empty(cpu_buffer))
		return NULL;

	if (iter->head >= local_read(&iter->head_page->page->commit)) {
		rb_inc_iter(iter);
		goto again;
	}

	event = rb_iter_head_event(iter);

	switch (event->type_len) {
	case RINGBUF_TYPE_PADDING:
		if (rb_null_event(event)) {
			rb_inc_iter(iter);
			goto again;
		}
		rb_advance_iter(iter);
		return event;

	case RINGBUF_TYPE_TIME_EXTEND:
		/* Internal data, OK to advance */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_TIME_STAMP:
		/* FIXME: not implemented */
		rb_advance_iter(iter);
		goto again;

	case RINGBUF_TYPE_DATA:
		if (ts) {
			*ts = iter->read_stamp + event->time_delta;
			ring_buffer_normalize_time_stamp(buffer,
							 cpu_buffer->cpu, ts);
		}
		return event;

	default:
		BUG();
	}

	return NULL;
}
EXPORT_SYMBOL_GPL(ring_buffer_iter_peek);

/* Returns 0 when called from NMI context, where taking locks is unsafe. */
static inline int rb_ok_to_lock(void)
{
	/*
	 * If an NMI die dumps out the content of the ring buffer
	 * do not grab locks. We also permanently disable the ring
	 * buffer too. A one time deal is all you get from reading
	 * the ring buffer from an NMI.
	 */
	if (likely(!in_nmi()))
		return 1;

	tracing_off_permanent();
	return 0;
}

/**
 * ring_buffer_peek - peek at the next event to be read
 * @buffer: The ring buffer to read
 * @cpu: The cpu to peek at
 * @ts: The timestamp counter of this event.
 * @lost_events: a variable to store if events were lost (may be NULL)
 *
 * This will return the event that will be read next, but does
 * not consume the data.
 */
struct ring_buffer_event *
ring_buffer_peek(struct ring_buffer *buffer, int cpu, u64 *ts,
		 unsigned long *lost_events)
{
	struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu];
	struct ring_buffer_event *event;
	unsigned long flags;
	int dolock;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	dolock = rb_ok_to_lock();
 again:
	local_irq_save(flags);
	if (dolock)
		raw_spin_lock(&cpu_buffer->reader_lock);
	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
	/* padding is internal; consume it while still holding the lock */
	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		rb_advance_reader(cpu_buffer);
	if (dolock)
		raw_spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}

/**
 * ring_buffer_iter_peek - peek at the next event to be read
 * @iter: The ring buffer iterator
 * @ts: The timestamp counter of this event.
 *
 * This will return the event that will be read next, but does
 * not increment the iterator.
 */
struct ring_buffer_event *
ring_buffer_iter_peek(struct ring_buffer_iter *iter, u64 *ts)
{
	struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer;
	struct ring_buffer_event *event;
	unsigned long flags;

 again:
	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	event = rb_iter_peek(iter, ts);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}

/**
 * ring_buffer_consume - return an event and consume it
 * @buffer: The ring buffer to get the next event from
 * @cpu: the cpu to read the buffer from
 * @ts: a variable to store the timestamp (may be NULL)
 * @lost_events: a variable to store if events were lost (may be NULL)
 *
 * Returns the next event in the ring buffer, and that event is consumed.
 * Meaning, that sequential reads will keep returning a different event,
 * and eventually empty the ring buffer if the producer is slower.
 */
struct ring_buffer_event *
ring_buffer_consume(struct ring_buffer *buffer, int cpu, u64 *ts,
		    unsigned long *lost_events)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_event *event = NULL;
	unsigned long flags;
	int dolock;

	dolock = rb_ok_to_lock();

 again:
	/* might be called in atomic */
	preempt_disable();

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		goto out;

	cpu_buffer = buffer->buffers[cpu];
	local_irq_save(flags);
	if (dolock)
		raw_spin_lock(&cpu_buffer->reader_lock);

	event = rb_buffer_peek(cpu_buffer, ts, lost_events);
	if (event) {
		cpu_buffer->lost_events = 0;
		rb_advance_reader(cpu_buffer);
	}

	if (dolock)
		raw_spin_unlock(&cpu_buffer->reader_lock);
	local_irq_restore(flags);

 out:
	preempt_enable();

	if (event && event->type_len == RINGBUF_TYPE_PADDING)
		goto again;

	return event;
}
EXPORT_SYMBOL_GPL(ring_buffer_consume);

/**
 * ring_buffer_read_prepare - Prepare for a non consuming read of the buffer
 * @buffer: The ring buffer to read from
 * @cpu: The cpu buffer to iterate over
 *
 * This performs the initial preparations necessary to iterate
 * through the buffer.  Memory is allocated, buffer recording
 * is disabled, and the iterator pointer is returned to the caller.
 *
 * Disabling buffer recording prevents the reading from being
 * corrupted. This is not a consuming read, so a producer is not
 * expected.
 *
 * After a sequence of ring_buffer_read_prepare calls, the user is
 * expected to make at least one call to ring_buffer_prepare_sync.
 * Afterwards, ring_buffer_read_start is invoked to get things going
 * for real.
 *
 * This overall must be paired with ring_buffer_finish.
 */
struct ring_buffer_iter *
ring_buffer_read_prepare(struct ring_buffer *buffer, int cpu)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	struct ring_buffer_iter *iter;

	if (!cpumask_test_cpu(cpu, buffer->cpumask))
		return NULL;

	iter = kmalloc(sizeof(*iter), GFP_KERNEL);
	if (!iter)
		return NULL;

	cpu_buffer = buffer->buffers[cpu];

	iter->cpu_buffer = cpu_buffer;

	/* both are undone by ring_buffer_finish */
	atomic_inc(&buffer->resize_disabled);
	atomic_inc(&cpu_buffer->record_disabled);

	return iter;
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare);

/**
 * ring_buffer_read_prepare_sync - Synchronize a set of prepare calls
 *
 * All previously invoked ring_buffer_read_prepare calls to prepare
 * iterators will be synchronized.  Afterwards, read_buffer_read_start
 * calls on those iterators are allowed.
 */
void
ring_buffer_read_prepare_sync(void)
{
	synchronize_sched();
}
EXPORT_SYMBOL_GPL(ring_buffer_read_prepare_sync);

/**
 * ring_buffer_read_start - start a non consuming read of the buffer
 * @iter: The iterator returned by ring_buffer_read_prepare
 *
 * This finalizes the startup of an iteration through the buffer.
 * The iterator comes from a call to ring_buffer_read_prepare and
 * an intervening ring_buffer_read_prepare_sync must have been
 * performed.
 *
 * Must be paired with ring_buffer_finish.
 */
void
ring_buffer_read_start(struct ring_buffer_iter *iter)
{
	struct ring_buffer_per_cpu *cpu_buffer;
	unsigned long flags;

	if (!iter)
		return;

	cpu_buffer = iter->cpu_buffer;

	raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags);
	arch_spin_lock(&cpu_buffer->lock);
	rb_iter_reset(iter);
	arch_spin_unlock(&cpu_buffer->lock);
	raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags);
}
EXPORT_SYMBOL_GPL(ring_buffer_read_start);

/**
 * ring_buffer_finish - finish reading the iterator of the buffer
 * @iter: The iterator retrieved by ring_buffer_start
 *
 * This re-enables the recording to the buffer, and frees the
 * iterator.
*/ void ring_buffer_read_finish(struct ring_buffer_iter *iter) { struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; unsigned long flags; /* * Ring buffer is disabled from recording, here's a good place * to check the integrity of the ring buffer. * Must prevent readers from trying to read, as the check * clears the HEAD page and readers require it. */ raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); rb_check_pages(cpu_buffer); raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); atomic_dec(&cpu_buffer->record_disabled); atomic_dec(&cpu_buffer->buffer->resize_disabled); kfree(iter); } EXPORT_SYMBOL_GPL(ring_buffer_read_finish); /** * ring_buffer_read - read the next item in the ring buffer by the iterator * @iter: The ring buffer iterator * @ts: The time stamp of the event read. * * This reads the next event in the ring buffer and increments the iterator. */ struct ring_buffer_event * ring_buffer_read(struct ring_buffer_iter *iter, u64 *ts) { struct ring_buffer_event *event; struct ring_buffer_per_cpu *cpu_buffer = iter->cpu_buffer; unsigned long flags; raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); again: event = rb_iter_peek(iter, ts); if (!event) goto out; if (event->type_len == RINGBUF_TYPE_PADDING) goto again; rb_advance_iter(iter); out: raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); return event; } EXPORT_SYMBOL_GPL(ring_buffer_read); /** * ring_buffer_size - return the size of the ring buffer (in bytes) * @buffer: The ring buffer. */ unsigned long ring_buffer_size(struct ring_buffer *buffer, int cpu) { /* * Earlier, this method returned * BUF_PAGE_SIZE * buffer->nr_pages * Since the nr_pages field is now removed, we have converted this to * return the per cpu buffer value. 
*/ if (!cpumask_test_cpu(cpu, buffer->cpumask)) return 0; return BUF_PAGE_SIZE * buffer->buffers[cpu]->nr_pages; } EXPORT_SYMBOL_GPL(ring_buffer_size); static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer) { rb_head_page_deactivate(cpu_buffer); cpu_buffer->head_page = list_entry(cpu_buffer->pages, struct buffer_page, list); local_set(&cpu_buffer->head_page->write, 0); local_set(&cpu_buffer->head_page->entries, 0); local_set(&cpu_buffer->head_page->page->commit, 0); cpu_buffer->head_page->read = 0; cpu_buffer->tail_page = cpu_buffer->head_page; cpu_buffer->commit_page = cpu_buffer->head_page; INIT_LIST_HEAD(&cpu_buffer->reader_page->list); INIT_LIST_HEAD(&cpu_buffer->new_pages); local_set(&cpu_buffer->reader_page->write, 0); local_set(&cpu_buffer->reader_page->entries, 0); local_set(&cpu_buffer->reader_page->page->commit, 0); cpu_buffer->reader_page->read = 0; local_set(&cpu_buffer->entries_bytes, 0); local_set(&cpu_buffer->overrun, 0); local_set(&cpu_buffer->commit_overrun, 0); local_set(&cpu_buffer->dropped_events, 0); local_set(&cpu_buffer->entries, 0); local_set(&cpu_buffer->committing, 0); local_set(&cpu_buffer->commits, 0); cpu_buffer->read = 0; cpu_buffer->read_bytes = 0; cpu_buffer->write_stamp = 0; cpu_buffer->read_stamp = 0; cpu_buffer->lost_events = 0; cpu_buffer->last_overrun = 0; rb_head_page_activate(cpu_buffer); } /** * ring_buffer_reset_cpu - reset a ring buffer per CPU buffer * @buffer: The ring buffer to reset a per cpu buffer of * @cpu: The CPU buffer to be reset */ void ring_buffer_reset_cpu(struct ring_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; unsigned long flags; if (!cpumask_test_cpu(cpu, buffer->cpumask)) return; atomic_inc(&buffer->resize_disabled); atomic_inc(&cpu_buffer->record_disabled); /* Make sure all commits have finished */ synchronize_sched(); raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); if (RB_WARN_ON(cpu_buffer, local_read(&cpu_buffer->committing))) goto out; 
arch_spin_lock(&cpu_buffer->lock); rb_reset_cpu(cpu_buffer); arch_spin_unlock(&cpu_buffer->lock); out: raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); atomic_dec(&cpu_buffer->record_disabled); atomic_dec(&buffer->resize_disabled); } EXPORT_SYMBOL_GPL(ring_buffer_reset_cpu); /** * ring_buffer_reset - reset a ring buffer * @buffer: The ring buffer to reset all cpu buffers */ void ring_buffer_reset(struct ring_buffer *buffer) { int cpu; for_each_buffer_cpu(buffer, cpu) ring_buffer_reset_cpu(buffer, cpu); } EXPORT_SYMBOL_GPL(ring_buffer_reset); /** * rind_buffer_empty - is the ring buffer empty? * @buffer: The ring buffer to test */ int ring_buffer_empty(struct ring_buffer *buffer) { struct ring_buffer_per_cpu *cpu_buffer; unsigned long flags; int dolock; int cpu; int ret; dolock = rb_ok_to_lock(); /* yes this is racy, but if you don't like the race, lock the buffer */ for_each_buffer_cpu(buffer, cpu) { cpu_buffer = buffer->buffers[cpu]; local_irq_save(flags); if (dolock) raw_spin_lock(&cpu_buffer->reader_lock); ret = rb_per_cpu_empty(cpu_buffer); if (dolock) raw_spin_unlock(&cpu_buffer->reader_lock); local_irq_restore(flags); if (!ret) return 0; } return 1; } EXPORT_SYMBOL_GPL(ring_buffer_empty); /** * ring_buffer_empty_cpu - is a cpu buffer of a ring buffer empty? 
* @buffer: The ring buffer * @cpu: The CPU buffer to test */ int ring_buffer_empty_cpu(struct ring_buffer *buffer, int cpu) { struct ring_buffer_per_cpu *cpu_buffer; unsigned long flags; int dolock; int ret; if (!cpumask_test_cpu(cpu, buffer->cpumask)) return 1; dolock = rb_ok_to_lock(); cpu_buffer = buffer->buffers[cpu]; local_irq_save(flags); if (dolock) raw_spin_lock(&cpu_buffer->reader_lock); ret = rb_per_cpu_empty(cpu_buffer); if (dolock) raw_spin_unlock(&cpu_buffer->reader_lock); local_irq_restore(flags); return ret; } EXPORT_SYMBOL_GPL(ring_buffer_empty_cpu); #ifdef CONFIG_RING_BUFFER_ALLOW_SWAP /** * ring_buffer_swap_cpu - swap a CPU buffer between two ring buffers * @buffer_a: One buffer to swap with * @buffer_b: The other buffer to swap with * * This function is useful for tracers that want to take a "snapshot" * of a CPU buffer and has another back up buffer lying around. * it is expected that the tracer handles the cpu buffer not being * used at the moment. */ int ring_buffer_swap_cpu(struct ring_buffer *buffer_a, struct ring_buffer *buffer_b, int cpu) { struct ring_buffer_per_cpu *cpu_buffer_a; struct ring_buffer_per_cpu *cpu_buffer_b; int ret = -EINVAL; if (!cpumask_test_cpu(cpu, buffer_a->cpumask) || !cpumask_test_cpu(cpu, buffer_b->cpumask)) goto out; cpu_buffer_a = buffer_a->buffers[cpu]; cpu_buffer_b = buffer_b->buffers[cpu]; /* At least make sure the two buffers are somewhat the same */ if (cpu_buffer_a->nr_pages != cpu_buffer_b->nr_pages) goto out; ret = -EAGAIN; if (ring_buffer_flags != RB_BUFFERS_ON) goto out; if (atomic_read(&buffer_a->record_disabled)) goto out; if (atomic_read(&buffer_b->record_disabled)) goto out; if (atomic_read(&cpu_buffer_a->record_disabled)) goto out; if (atomic_read(&cpu_buffer_b->record_disabled)) goto out; /* * We can't do a synchronize_sched here because this * function can be called in atomic context. * Normally this will be called from the same CPU as cpu. * If not it's up to the caller to protect this. 
*/ atomic_inc(&cpu_buffer_a->record_disabled); atomic_inc(&cpu_buffer_b->record_disabled); ret = -EBUSY; if (local_read(&cpu_buffer_a->committing)) goto out_dec; if (local_read(&cpu_buffer_b->committing)) goto out_dec; buffer_a->buffers[cpu] = cpu_buffer_b; buffer_b->buffers[cpu] = cpu_buffer_a; cpu_buffer_b->buffer = buffer_a; cpu_buffer_a->buffer = buffer_b; ret = 0; out_dec: atomic_dec(&cpu_buffer_a->record_disabled); atomic_dec(&cpu_buffer_b->record_disabled); out: return ret; } EXPORT_SYMBOL_GPL(ring_buffer_swap_cpu); #endif /* CONFIG_RING_BUFFER_ALLOW_SWAP */ /** * ring_buffer_alloc_read_page - allocate a page to read from buffer * @buffer: the buffer to allocate for. * * This function is used in conjunction with ring_buffer_read_page. * When reading a full page from the ring buffer, these functions * can be used to speed up the process. The calling function should * allocate a few pages first with this function. Then when it * needs to get pages from the ring buffer, it passes the result * of this function into ring_buffer_read_page, which will swap * the page that was allocated, with the read page of the buffer. * * Returns: * The page allocated, or NULL on error. */ void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu) { struct buffer_data_page *bpage; struct page *page; page = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL | __GFP_NORETRY, 0); if (!page) return NULL; bpage = page_address(page); rb_init_page(bpage); return bpage; } EXPORT_SYMBOL_GPL(ring_buffer_alloc_read_page); /** * ring_buffer_free_read_page - free an allocated read page * @buffer: the buffer the page was allocate for * @data: the page to free * * Free a page allocated from ring_buffer_alloc_read_page. 
*/ void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data) { free_page((unsigned long)data); } EXPORT_SYMBOL_GPL(ring_buffer_free_read_page); /** * ring_buffer_read_page - extract a page from the ring buffer * @buffer: buffer to extract from * @data_page: the page to use allocated from ring_buffer_alloc_read_page * @len: amount to extract * @cpu: the cpu of the buffer to extract * @full: should the extraction only happen when the page is full. * * This function will pull out a page from the ring buffer and consume it. * @data_page must be the address of the variable that was returned * from ring_buffer_alloc_read_page. This is because the page might be used * to swap with a page in the ring buffer. * * for example: * rpage = ring_buffer_alloc_read_page(buffer); * if (!rpage) * return error; * ret = ring_buffer_read_page(buffer, &rpage, len, cpu, 0); * if (ret >= 0) * process_page(rpage, ret); * * When @full is set, the function will not return true unless * the writer is off the reader page. * * Note: it is up to the calling functions to handle sleeps and wakeups. * The ring buffer can be used anywhere in the kernel and can not * blindly call wake_up. The layer that uses the ring buffer must be * responsible for that. * * Returns: * >=0 if data has been transferred, returns the offset of consumed data. * <0 if no data has been transferred. */ int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page, size_t len, int cpu, int full) { struct ring_buffer_per_cpu *cpu_buffer = buffer->buffers[cpu]; struct ring_buffer_event *event; struct buffer_data_page *bpage; struct buffer_page *reader; unsigned long missed_events; unsigned long flags; unsigned int commit; unsigned int read; u64 save_timestamp; int ret = -1; if (!cpumask_test_cpu(cpu, buffer->cpumask)) goto out; /* * If len is not big enough to hold the page header, then * we can not copy anything. 
*/ if (len <= BUF_PAGE_HDR_SIZE) goto out; len -= BUF_PAGE_HDR_SIZE; if (!data_page) goto out; bpage = *data_page; if (!bpage) goto out; raw_spin_lock_irqsave(&cpu_buffer->reader_lock, flags); reader = rb_get_reader_page(cpu_buffer); if (!reader) goto out_unlock; event = rb_reader_event(cpu_buffer); read = reader->read; commit = rb_page_commit(reader); /* Check if any events were dropped */ missed_events = cpu_buffer->lost_events; /* * If this page has been partially read or * if len is not big enough to read the rest of the page or * a writer is still on the page, then * we must copy the data from the page to the buffer. * Otherwise, we can simply swap the page with the one passed in. */ if (read || (len < (commit - read)) || cpu_buffer->reader_page == cpu_buffer->commit_page) { struct buffer_data_page *rpage = cpu_buffer->reader_page->page; unsigned int rpos = read; unsigned int pos = 0; unsigned int size; if (full) goto out_unlock; if (len > (commit - read)) len = (commit - read); /* Always keep the time extend and data together */ size = rb_event_ts_length(event); if (len < size) goto out_unlock; /* save the current timestamp, since the user will need it */ save_timestamp = cpu_buffer->read_stamp; /* Need to copy one event at a time */ do { /* We need the size of one event, because * rb_advance_reader only advances by one event, * whereas rb_event_ts_length may include the size of * one or two events. * We have already ensured there's enough space if this * is a time extend. 
*/ size = rb_event_length(event); memcpy(bpage->data + pos, rpage->data + rpos, size); len -= size; rb_advance_reader(cpu_buffer); rpos = reader->read; pos += size; if (rpos >= commit) break; event = rb_reader_event(cpu_buffer); /* Always keep the time extend and data together */ size = rb_event_ts_length(event); } while (len >= size); /* update bpage */ local_set(&bpage->commit, pos); bpage->time_stamp = save_timestamp; /* we copied everything to the beginning */ read = 0; } else { /* update the entry counter */ cpu_buffer->read += rb_page_entries(reader); cpu_buffer->read_bytes += BUF_PAGE_SIZE; /* swap the pages */ rb_init_page(bpage); bpage = reader->page; reader->page = *data_page; local_set(&reader->write, 0); local_set(&reader->entries, 0); reader->read = 0; *data_page = bpage; /* * Use the real_end for the data size, * This gives us a chance to store the lost events * on the page. */ if (reader->real_end) local_set(&bpage->commit, reader->real_end); } ret = read; cpu_buffer->lost_events = 0; commit = local_read(&bpage->commit); /* * Set a flag in the commit field if we lost events */ if (missed_events) { /* If there is room at the end of the page to save the * missed events, then record it there. */ if (BUF_PAGE_SIZE - commit >= sizeof(missed_events)) { memcpy(&bpage->data[commit], &missed_events, sizeof(missed_events)); local_add(RB_MISSED_STORED, &bpage->commit); commit += sizeof(missed_events); } local_add(RB_MISSED_EVENTS, &bpage->commit); } /* * This page may be off to user land. Zero it out here. 
*/ if (commit < BUF_PAGE_SIZE) memset(&bpage->data[commit], 0, BUF_PAGE_SIZE - commit); out_unlock: raw_spin_unlock_irqrestore(&cpu_buffer->reader_lock, flags); out: return ret; } EXPORT_SYMBOL_GPL(ring_buffer_read_page); #ifdef CONFIG_HOTPLUG_CPU static int rb_cpu_notify(struct notifier_block *self, unsigned long action, void *hcpu) { struct ring_buffer *buffer = container_of(self, struct ring_buffer, cpu_notify); long cpu = (long)hcpu; int cpu_i, nr_pages_same; unsigned int nr_pages; switch (action) { case CPU_UP_PREPARE: case CPU_UP_PREPARE_FROZEN: if (cpumask_test_cpu(cpu, buffer->cpumask)) return NOTIFY_OK; nr_pages = 0; nr_pages_same = 1; /* check if all cpu sizes are same */ for_each_buffer_cpu(buffer, cpu_i) { /* fill in the size from first enabled cpu */ if (nr_pages == 0) nr_pages = buffer->buffers[cpu_i]->nr_pages; if (nr_pages != buffer->buffers[cpu_i]->nr_pages) { nr_pages_same = 0; break; } } /* allocate minimum pages, user can later expand it */ if (!nr_pages_same) nr_pages = 2; buffer->buffers[cpu] = rb_allocate_cpu_buffer(buffer, nr_pages, cpu); if (!buffer->buffers[cpu]) { WARN(1, "failed to allocate ring buffer on CPU %ld\n", cpu); return NOTIFY_OK; } smp_wmb(); cpumask_set_cpu(cpu, buffer->cpumask); break; case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE_FROZEN: /* * Do nothing. * If we were to free the buffer, then the user would * lose any trace that was in the buffer. */ break; default: break; } return NOTIFY_OK; } #endif #ifdef CONFIG_RING_BUFFER_STARTUP_TEST /* * This is a basic integrity check of the ring buffer. * Late in the boot cycle this test will run when configured in. * It will kick off a thread per CPU that will go into a loop * writing to the per cpu ring buffer various sizes of data. * Some of the data will be large items, some small. * * Another thread is created that goes into a spin, sending out * IPIs to the other CPUs to also write into the ring buffer. * this is to test the nesting ability of the buffer. 
* * Basic stats are recorded and reported. If something in the * ring buffer should happen that's not expected, a big warning * is displayed and all ring buffers are disabled. */ static struct task_struct *rb_threads[NR_CPUS] __initdata; struct rb_test_data { struct ring_buffer *buffer; unsigned long events; unsigned long bytes_written; unsigned long bytes_alloc; unsigned long bytes_dropped; unsigned long events_nested; unsigned long bytes_written_nested; unsigned long bytes_alloc_nested; unsigned long bytes_dropped_nested; int min_size_nested; int max_size_nested; int max_size; int min_size; int cpu; int cnt; }; static struct rb_test_data rb_data[NR_CPUS] __initdata; /* 1 meg per cpu */ #define RB_TEST_BUFFER_SIZE 1048576 static char rb_string[] __initdata = "abcdefghijklmnopqrstuvwxyz1234567890!@#$%^&*()?+\\" "?+|:';\",.<>/?abcdefghijklmnopqrstuvwxyz1234567890" "!@#$%^&*()?+\\?+|:';\",.<>/?abcdefghijklmnopqrstuv"; static bool rb_test_started __initdata; struct rb_item { int size; char str[]; }; static __init int rb_write_something(struct rb_test_data *data, bool nested) { struct ring_buffer_event *event; struct rb_item *item; bool started; int event_len; int size; int len; int cnt; /* Have nested writes different that what is written */ cnt = data->cnt + (nested ? 27 : 0); /* Multiply cnt by ~e, to make some unique increment */ size = (data->cnt * 68 / 25) % (sizeof(rb_string) - 1); len = size + sizeof(struct rb_item); started = rb_test_started; /* read rb_test_started before checking buffer enabled */ smp_rmb(); event = ring_buffer_lock_reserve(data->buffer, len); if (!event) { /* Ignore dropped events before test starts. 
*/ if (started) { if (nested) data->bytes_dropped += len; else data->bytes_dropped_nested += len; } return len; } event_len = ring_buffer_event_length(event); if (RB_WARN_ON(data->buffer, event_len < len)) goto out; item = ring_buffer_event_data(event); item->size = size; memcpy(item->str, rb_string, size); if (nested) { data->bytes_alloc_nested += event_len; data->bytes_written_nested += len; data->events_nested++; if (!data->min_size_nested || len < data->min_size_nested) data->min_size_nested = len; if (len > data->max_size_nested) data->max_size_nested = len; } else { data->bytes_alloc += event_len; data->bytes_written += len; data->events++; if (!data->min_size || len < data->min_size) data->max_size = len; if (len > data->max_size) data->max_size = len; } out: ring_buffer_unlock_commit(data->buffer, event); return 0; } static __init int rb_test(void *arg) { struct rb_test_data *data = arg; while (!kthread_should_stop()) { rb_write_something(data, false); data->cnt++; set_current_state(TASK_INTERRUPTIBLE); /* Now sleep between a min of 100-300us and a max of 1ms */ usleep_range(((data->cnt % 3) + 1) * 100, 1000); } return 0; } static __init void rb_ipi(void *ignore) { struct rb_test_data *data; int cpu = smp_processor_id(); data = &rb_data[cpu]; rb_write_something(data, true); } static __init int rb_hammer_test(void *arg) { while (!kthread_should_stop()) { /* Send an IPI to all cpus to write data! 
*/ smp_call_function(rb_ipi, NULL, 1); /* No sleep, but for non preempt, let others run */ schedule(); } return 0; } static __init int test_ringbuffer(void) { struct task_struct *rb_hammer; struct ring_buffer *buffer; int cpu; int ret = 0; pr_info("Running ring buffer tests...\n"); buffer = ring_buffer_alloc(RB_TEST_BUFFER_SIZE, RB_FL_OVERWRITE); if (WARN_ON(!buffer)) return 0; /* Disable buffer so that threads can't write to it yet */ ring_buffer_record_off(buffer); for_each_online_cpu(cpu) { rb_data[cpu].buffer = buffer; rb_data[cpu].cpu = cpu; rb_data[cpu].cnt = cpu; rb_threads[cpu] = kthread_create(rb_test, &rb_data[cpu], "rbtester/%d", cpu); if (WARN_ON(!rb_threads[cpu])) { pr_cont("FAILED\n"); ret = -1; goto out_free; } kthread_bind(rb_threads[cpu], cpu); wake_up_process(rb_threads[cpu]); } /* Now create the rb hammer! */ rb_hammer = kthread_run(rb_hammer_test, NULL, "rbhammer"); if (WARN_ON(!rb_hammer)) { pr_cont("FAILED\n"); ret = -1; goto out_free; } ring_buffer_record_on(buffer); /* * Show buffer is enabled before setting rb_test_started. * Yes there's a small race window where events could be * dropped and the thread wont catch it. But when a ring * buffer gets enabled, there will always be some kind of * delay before other CPUs see it. Thus, we don't care about * those dropped events. We care about events dropped after * the threads see that the buffer is active. */ smp_wmb(); rb_test_started = true; set_current_state(TASK_INTERRUPTIBLE); /* Just run for 10 seconds */; schedule_timeout(10 * HZ); kthread_stop(rb_hammer); out_free: for_each_online_cpu(cpu) { if (!rb_threads[cpu]) break; kthread_stop(rb_threads[cpu]); } if (ret) { ring_buffer_free(buffer); return ret; } /* Report! 
*/ pr_info("finished\n"); for_each_online_cpu(cpu) { struct ring_buffer_event *event; struct rb_test_data *data = &rb_data[cpu]; struct rb_item *item; unsigned long total_events; unsigned long total_dropped; unsigned long total_written; unsigned long total_alloc; unsigned long total_read = 0; unsigned long total_size = 0; unsigned long total_len = 0; unsigned long total_lost = 0; unsigned long lost; int big_event_size; int small_event_size; ret = -1; total_events = data->events + data->events_nested; total_written = data->bytes_written + data->bytes_written_nested; total_alloc = data->bytes_alloc + data->bytes_alloc_nested; total_dropped = data->bytes_dropped + data->bytes_dropped_nested; big_event_size = data->max_size + data->max_size_nested; small_event_size = data->min_size + data->min_size_nested; pr_info("CPU %d:\n", cpu); pr_info(" events: %ld\n", total_events); pr_info(" dropped bytes: %ld\n", total_dropped); pr_info(" alloced bytes: %ld\n", total_alloc); pr_info(" written bytes: %ld\n", total_written); pr_info(" biggest event: %d\n", big_event_size); pr_info(" smallest event: %d\n", small_event_size); if (RB_WARN_ON(buffer, total_dropped)) break; ret = 0; while ((event = ring_buffer_consume(buffer, cpu, NULL, &lost))) { total_lost += lost; item = ring_buffer_event_data(event); total_len += ring_buffer_event_length(event); total_size += item->size + sizeof(struct rb_item); if (memcmp(&item->str[0], rb_string, item->size) != 0) { pr_info("FAILED!\n"); pr_info("buffer had: %.*s\n", item->size, item->str); pr_info("expected: %.*s\n", item->size, rb_string); RB_WARN_ON(buffer, 1); ret = -1; break; } total_read++; } if (ret) break; ret = -1; pr_info(" read events: %ld\n", total_read); pr_info(" lost events: %ld\n", total_lost); pr_info(" total events: %ld\n", total_lost + total_read); pr_info(" recorded len bytes: %ld\n", total_len); pr_info(" recorded size bytes: %ld\n", total_size); if (total_lost) pr_info(" With dropped events, record len and size may not 
match\n" " alloced and written from above\n"); if (!total_lost) { if (RB_WARN_ON(buffer, total_len != total_alloc || total_size != total_written)) break; } if (RB_WARN_ON(buffer, total_lost + total_read != total_events)) break; ret = 0; } if (!ret) pr_info("Ring buffer PASSED!\n"); ring_buffer_free(buffer); return 0; } late_initcall(test_ringbuffer); #endif /* CONFIG_RING_BUFFER_STARTUP_TEST */
gpl-2.0
dpuyosa/android_kernel_wiko_l5460
arch/arm/kernel/time.c
622
2896
/* * linux/arch/arm/kernel/time.c * * Copyright (C) 1991, 1992, 1995 Linus Torvalds * Modifications for ARM (C) 1994-2001 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This file contains the ARM-specific time handling details: * reading the RTC at bootup, etc... */ #include <linux/clk-provider.h> #include <linux/clocksource.h> #include <linux/errno.h> #include <linux/export.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/kernel.h> #include <linux/profile.h> #include <linux/sched.h> #include <linux/sched_clock.h> #include <linux/smp.h> #include <linux/time.h> #include <linux/timex.h> #include <linux/timer.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include <asm/stacktrace.h> #include <asm/thread_info.h> #if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE) || \ defined(CONFIG_NVRAM) || defined(CONFIG_NVRAM_MODULE) /* this needs a better home */ DEFINE_SPINLOCK(rtc_lock); EXPORT_SYMBOL(rtc_lock); #endif /* pc-style 'CMOS' RTC support */ /* change this if you have some constant time drift */ #define USECS_PER_JIFFY (1000000/HZ) #ifdef CONFIG_SMP unsigned long profile_pc(struct pt_regs *regs) { struct stackframe frame; if (!in_lock_functions(regs->ARM_pc)) return regs->ARM_pc; arm_get_current_stackframe(regs, &frame); do { int ret = unwind_frame(&frame); if (ret < 0) return 0; } while (in_lock_functions(frame.pc)); return frame.pc; } EXPORT_SYMBOL(profile_pc); #endif #ifndef CONFIG_GENERIC_CLOCKEVENTS /* * Kernel system timer support. 
*/ void timer_tick(void) { profile_tick(CPU_PROFILING); xtime_update(1); #ifndef CONFIG_SMP update_process_times(user_mode(get_irq_regs())); #endif } #endif static void dummy_clock_access(struct timespec *ts) { ts->tv_sec = 0; ts->tv_nsec = 0; } static clock_access_fn __read_persistent_clock = dummy_clock_access; static clock_access_fn __read_boot_clock = dummy_clock_access;; void read_persistent_clock(struct timespec *ts) { __read_persistent_clock(ts); } void read_boot_clock(struct timespec *ts) { __read_boot_clock(ts); } int __init register_persistent_clock(clock_access_fn read_boot, clock_access_fn read_persistent) { /* Only allow the clockaccess functions to be registered once */ if (__read_persistent_clock == dummy_clock_access && __read_boot_clock == dummy_clock_access) { if (read_boot) __read_boot_clock = read_boot; if (read_persistent) __read_persistent_clock = read_persistent; return 0; } return -EINVAL; } void __init time_init(void) { if (machine_desc->init_time) { machine_desc->init_time(); } else { #ifdef CONFIG_COMMON_CLK of_clk_init(NULL); #endif clocksource_of_init(); } }
gpl-2.0
cfriedt/bluetooth-next
drivers/pinctrl/mvebu/pinctrl-armada-370.c
622
14166
/*
 * Marvell Armada 370 pinctrl driver based on mvebu pinctrl core
 *
 * Copyright (C) 2012 Marvell
 *
 * Thomas Petazzoni <thomas.petazzoni@free-electrons.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/err.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/clk.h> /* NOTE(review): no clk API is called below - confirm this include is needed */
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/pinctrl/pinctrl.h>

#include "pinctrl-mvebu.h"

/* MMIO base of the MPP (multi-purpose pin) control registers; mapped in probe */
static void __iomem *mpp_base;

/*
 * Thin wrappers routing per-pin config reads/writes through the common
 * mvebu helpers, bound to this SoC's register base.
 */
static int armada_370_mpp_ctrl_get(unsigned pid, unsigned long *config)
{
	return default_mpp_ctrl_get(mpp_base, pid, config);
}

static int armada_370_mpp_ctrl_set(unsigned pid, unsigned long config)
{
	return default_mpp_ctrl_set(mpp_base, pid, config);
}

/*
 * Per-pin mux table for the MV88F6710 (Armada 370). For each MPP the
 * first MPP_FUNCTION argument is the selector value programmed into the
 * pin's register field; the following strings are the function and
 * sub-function names exposed to the pinctrl core. Gaps in selector
 * values (e.g. a missing 0x3) are intentional: those settings are not
 * exposed for that pin.
 */
static struct mvebu_mpp_mode mv88f6710_mpp_modes[] = {
	MPP_MODE(0, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "uart0", "rxd")),
	MPP_MODE(1, MPP_FUNCTION(0x0, "gpo", NULL), MPP_FUNCTION(0x1, "uart0", "txd")),
	MPP_MODE(2, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "i2c0", "sck"), MPP_FUNCTION(0x2, "uart0", "txd")),
	MPP_MODE(3, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "i2c0", "sda"), MPP_FUNCTION(0x2, "uart0", "rxd")),
	MPP_MODE(4, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "vdd", "cpu-pd")),
	MPP_MODE(5, MPP_FUNCTION(0x0, "gpo", NULL), MPP_FUNCTION(0x1, "ge0", "txclkout"), MPP_FUNCTION(0x2, "uart1", "txd"), MPP_FUNCTION(0x4, "spi1", "sck"), MPP_FUNCTION(0x5, "audio", "mclk")),
	MPP_MODE(6, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "ge0", "txd0"), MPP_FUNCTION(0x2, "sata0", "prsnt"), MPP_FUNCTION(0x4, "tdm", "rst"), MPP_FUNCTION(0x5, "audio", "sdo")),
	MPP_MODE(7, MPP_FUNCTION(0x0, "gpo", NULL), MPP_FUNCTION(0x1, "ge0", "txd1"), MPP_FUNCTION(0x4, "tdm", "dtx"), MPP_FUNCTION(0x5, "audio", "lrclk")),
	MPP_MODE(8, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "ge0", "txd2"), MPP_FUNCTION(0x2, "uart0", "rts"), MPP_FUNCTION(0x4, "tdm", "drx"), MPP_FUNCTION(0x5, "audio", "bclk")),
	MPP_MODE(9, MPP_FUNCTION(0x0, "gpo", NULL), MPP_FUNCTION(0x1, "ge0", "txd3"), MPP_FUNCTION(0x2, "uart1", "txd"), MPP_FUNCTION(0x3, "sd0", "clk"), MPP_FUNCTION(0x5, "audio", "spdifo")),
	MPP_MODE(10, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "ge0", "txctl"), MPP_FUNCTION(0x2, "uart0", "cts"), MPP_FUNCTION(0x4, "tdm", "fsync"), MPP_FUNCTION(0x5, "audio", "sdi")),
	MPP_MODE(11, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "ge0", "rxd0"), MPP_FUNCTION(0x2, "uart1", "rxd"), MPP_FUNCTION(0x3, "sd0", "cmd"), MPP_FUNCTION(0x4, "spi0", "cs1"), MPP_FUNCTION(0x5, "sata1", "prsnt"), MPP_FUNCTION(0x6, "spi1", "cs1")),
	MPP_MODE(12, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "ge0", "rxd1"), MPP_FUNCTION(0x2, "i2c1", "sda"), MPP_FUNCTION(0x3, "sd0", "d0"), MPP_FUNCTION(0x4, "spi1", "cs0"), MPP_FUNCTION(0x5, "audio", "spdifi")),
	MPP_MODE(13, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "ge0", "rxd2"), MPP_FUNCTION(0x2, "i2c1", "sck"), MPP_FUNCTION(0x3, "sd0", "d1"), MPP_FUNCTION(0x4, "tdm", "pclk"), MPP_FUNCTION(0x5, "audio", "rmclk")),
	MPP_MODE(14, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "ge0", "rxd3"), MPP_FUNCTION(0x2, "pcie", "clkreq0"), MPP_FUNCTION(0x3, "sd0", "d2"), MPP_FUNCTION(0x4, "spi1", "mosi"), MPP_FUNCTION(0x5, "spi0", "cs2")),
	MPP_MODE(15, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "ge0", "rxctl"), MPP_FUNCTION(0x2, "pcie", "clkreq1"), MPP_FUNCTION(0x3, "sd0", "d3"), MPP_FUNCTION(0x4, "spi1", "miso"), MPP_FUNCTION(0x5, "spi0", "cs3")),
	MPP_MODE(16, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "ge0", "rxclk"), MPP_FUNCTION(0x2, "uart1", "rxd"), MPP_FUNCTION(0x4, "tdm", "int"), MPP_FUNCTION(0x5, "audio", "extclk")),
	MPP_MODE(17, MPP_FUNCTION(0x0, "gpo", NULL), MPP_FUNCTION(0x1, "ge", "mdc")),
	MPP_MODE(18, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "ge", "mdio")),
	MPP_MODE(19, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "ge0", "txclk"), MPP_FUNCTION(0x2, "ge1", "txclkout"), MPP_FUNCTION(0x4, "tdm", "pclk")),
	MPP_MODE(20, MPP_FUNCTION(0x0, "gpo", NULL), MPP_FUNCTION(0x1, "ge0", "txd4"), MPP_FUNCTION(0x2, "ge1", "txd0")),
	MPP_MODE(21, MPP_FUNCTION(0x0, "gpo", NULL), MPP_FUNCTION(0x1, "ge0", "txd5"), MPP_FUNCTION(0x2, "ge1", "txd1"), MPP_FUNCTION(0x4, "uart1", "txd")),
	MPP_MODE(22, MPP_FUNCTION(0x0, "gpo", NULL), MPP_FUNCTION(0x1, "ge0", "txd6"), MPP_FUNCTION(0x2, "ge1", "txd2"), MPP_FUNCTION(0x4, "uart0", "rts")),
	MPP_MODE(23, MPP_FUNCTION(0x0, "gpo", NULL), MPP_FUNCTION(0x1, "ge0", "txd7"), MPP_FUNCTION(0x2, "ge1", "txd3"), MPP_FUNCTION(0x4, "spi1", "mosi")),
	MPP_MODE(24, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "ge0", "col"), MPP_FUNCTION(0x2, "ge1", "txctl"), MPP_FUNCTION(0x4, "spi1", "cs0")),
	MPP_MODE(25, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "ge0", "rxerr"), MPP_FUNCTION(0x2, "ge1", "rxd0"), MPP_FUNCTION(0x4, "uart1", "rxd")),
	MPP_MODE(26, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "ge0", "crs"), MPP_FUNCTION(0x2, "ge1", "rxd1"), MPP_FUNCTION(0x4, "spi1", "miso")),
	MPP_MODE(27, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "ge0", "rxd4"), MPP_FUNCTION(0x2, "ge1", "rxd2"), MPP_FUNCTION(0x4, "uart0", "cts")),
	MPP_MODE(28, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "ge0", "rxd5"), MPP_FUNCTION(0x2, "ge1", "rxd3")),
	MPP_MODE(29, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "ge0", "rxd6"), MPP_FUNCTION(0x2, "ge1", "rxctl"), MPP_FUNCTION(0x4, "i2c1", "sda")),
	MPP_MODE(30, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "ge0", "rxd7"), MPP_FUNCTION(0x2, "ge1", "rxclk"), MPP_FUNCTION(0x4, "i2c1", "sck")),
	MPP_MODE(31, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x3, "tclk", NULL), MPP_FUNCTION(0x4, "ge0", "txerr")),
	MPP_MODE(32, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "spi0", "cs0")),
	MPP_MODE(33, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "dev", "bootcs"), MPP_FUNCTION(0x2, "spi0", "cs0")),
	MPP_MODE(34, MPP_FUNCTION(0x0, "gpo", NULL), MPP_FUNCTION(0x1, "dev", "we0"), MPP_FUNCTION(0x2, "spi0", "mosi")),
	MPP_MODE(35, MPP_FUNCTION(0x0, "gpo", NULL), MPP_FUNCTION(0x1, "dev", "oe"), MPP_FUNCTION(0x2, "spi0", "sck")),
	MPP_MODE(36, MPP_FUNCTION(0x0, "gpo", NULL), MPP_FUNCTION(0x1, "dev", "a1"), MPP_FUNCTION(0x2, "spi0", "miso")),
	MPP_MODE(37, MPP_FUNCTION(0x0, "gpo", NULL), MPP_FUNCTION(0x1, "dev", "a0"), MPP_FUNCTION(0x2, "sata0", "prsnt")),
	MPP_MODE(38, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "dev", "ready"), MPP_FUNCTION(0x2, "uart1", "cts"), MPP_FUNCTION(0x3, "uart0", "cts")),
	MPP_MODE(39, MPP_FUNCTION(0x0, "gpo", NULL), MPP_FUNCTION(0x1, "dev", "ad0"), MPP_FUNCTION(0x2, "audio", "spdifo")),
	MPP_MODE(40, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "dev", "ad1"), MPP_FUNCTION(0x2, "uart1", "rts"), MPP_FUNCTION(0x3, "uart0", "rts")),
	MPP_MODE(41, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "dev", "ad2"), MPP_FUNCTION(0x2, "uart1", "rxd")),
	MPP_MODE(42, MPP_FUNCTION(0x0, "gpo", NULL), MPP_FUNCTION(0x1, "dev", "ad3"), MPP_FUNCTION(0x2, "uart1", "txd")),
	MPP_MODE(43, MPP_FUNCTION(0x0, "gpo", NULL), MPP_FUNCTION(0x1, "dev", "ad4"), MPP_FUNCTION(0x2, "audio", "bclk")),
	MPP_MODE(44, MPP_FUNCTION(0x0, "gpo", NULL), MPP_FUNCTION(0x1, "dev", "ad5"), MPP_FUNCTION(0x2, "audio", "mclk")),
	MPP_MODE(45, MPP_FUNCTION(0x0, "gpo", NULL), MPP_FUNCTION(0x1, "dev", "ad6"), MPP_FUNCTION(0x2, "audio", "lrclk")),
	MPP_MODE(46, MPP_FUNCTION(0x0, "gpo", NULL), MPP_FUNCTION(0x1, "dev", "ad7"), MPP_FUNCTION(0x2, "audio", "sdo")),
	MPP_MODE(47, MPP_FUNCTION(0x0, "gpo", NULL), MPP_FUNCTION(0x1, "dev", "ad8"), MPP_FUNCTION(0x3, "sd0", "clk"), MPP_FUNCTION(0x5, "audio", "spdifo")),
	MPP_MODE(48, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "dev", "ad9"), MPP_FUNCTION(0x2, "uart0", "rts"), MPP_FUNCTION(0x3, "sd0", "cmd"), MPP_FUNCTION(0x4, "sata1", "prsnt"), MPP_FUNCTION(0x5, "spi0", "cs1")),
	MPP_MODE(49, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "dev", "ad10"), MPP_FUNCTION(0x2, "pcie", "clkreq1"), MPP_FUNCTION(0x3, "sd0", "d0"), MPP_FUNCTION(0x4, "spi1", "cs0"), MPP_FUNCTION(0x5, "audio", "spdifi")),
	MPP_MODE(50, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "dev", "ad11"), MPP_FUNCTION(0x2, "uart0", "cts"), MPP_FUNCTION(0x3, "sd0", "d1"), MPP_FUNCTION(0x4, "spi1", "miso"), MPP_FUNCTION(0x5, "audio", "rmclk")),
	MPP_MODE(51, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "dev", "ad12"), MPP_FUNCTION(0x2, "i2c1", "sda"), MPP_FUNCTION(0x3, "sd0", "d2"), MPP_FUNCTION(0x4, "spi1", "mosi")),
	MPP_MODE(52, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "dev", "ad13"), MPP_FUNCTION(0x2, "i2c1", "sck"), MPP_FUNCTION(0x3, "sd0", "d3"), MPP_FUNCTION(0x4, "spi1", "sck")),
	MPP_MODE(53, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "dev", "ad14"), MPP_FUNCTION(0x2, "sd0", "clk"), MPP_FUNCTION(0x3, "tdm", "pclk"), MPP_FUNCTION(0x4, "spi0", "cs2"), MPP_FUNCTION(0x5, "pcie", "clkreq1")),
	MPP_MODE(54, MPP_FUNCTION(0x0, "gpo", NULL), MPP_FUNCTION(0x1, "dev", "ad15"), MPP_FUNCTION(0x3, "tdm", "dtx")),
	MPP_MODE(55, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "dev", "cs1"), MPP_FUNCTION(0x2, "uart1", "txd"), MPP_FUNCTION(0x3, "tdm", "rst"), MPP_FUNCTION(0x4, "sata1", "prsnt"), MPP_FUNCTION(0x5, "sata0", "prsnt")),
	MPP_MODE(56, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "dev", "cs2"), MPP_FUNCTION(0x2, "uart1", "cts"), MPP_FUNCTION(0x3, "uart0", "cts"), MPP_FUNCTION(0x4, "spi0", "cs3"), MPP_FUNCTION(0x5, "pcie", "clkreq0"), MPP_FUNCTION(0x6, "spi1", "cs1")),
	MPP_MODE(57, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "dev", "cs3"), MPP_FUNCTION(0x2, "uart1", "rxd"), MPP_FUNCTION(0x3, "tdm", "fsync"), MPP_FUNCTION(0x4, "sata0", "prsnt"), MPP_FUNCTION(0x5, "audio", "sdo")),
	MPP_MODE(58, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "dev", "cs0"), MPP_FUNCTION(0x2, "uart1", "rts"), MPP_FUNCTION(0x3, "tdm", "int"), MPP_FUNCTION(0x5, "audio", "extclk"), MPP_FUNCTION(0x6, "uart0", "rts")),
	MPP_MODE(59, MPP_FUNCTION(0x0, "gpo", NULL), MPP_FUNCTION(0x1, "dev", "ale0"), MPP_FUNCTION(0x2, "uart1", "rts"), MPP_FUNCTION(0x3, "uart0", "rts"), MPP_FUNCTION(0x5, "audio", "bclk")),
	MPP_MODE(60, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "dev", "ale1"), MPP_FUNCTION(0x2, "uart1", "rxd"), MPP_FUNCTION(0x3, "sata0", "prsnt"), MPP_FUNCTION(0x4, "pcie", "rstout"), MPP_FUNCTION(0x5, "audio", "sdi")),
	MPP_MODE(61, MPP_FUNCTION(0x0, "gpo", NULL), MPP_FUNCTION(0x1, "dev", "we1"), MPP_FUNCTION(0x2, "uart1", "txd"), MPP_FUNCTION(0x5, "audio", "lrclk")),
	MPP_MODE(62, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "dev", "a2"), MPP_FUNCTION(0x2, "uart1", "cts"), MPP_FUNCTION(0x3, "tdm", "drx"), MPP_FUNCTION(0x4, "pcie", "clkreq0"), MPP_FUNCTION(0x5, "audio", "mclk"), MPP_FUNCTION(0x6, "uart0", "cts")),
	MPP_MODE(63, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "spi0", "sck"), MPP_FUNCTION(0x2, "tclk", NULL)),
	MPP_MODE(64, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "spi0", "miso"), MPP_FUNCTION(0x2, "spi0", "cs1")),
	MPP_MODE(65, MPP_FUNCTION(0x0, "gpio", NULL), MPP_FUNCTION(0x1, "spi0", "mosi"), MPP_FUNCTION(0x2, "spi0", "cs2")),
};

/* Filled in at probe time and handed to the mvebu pinctrl core */
static struct mvebu_pinctrl_soc_info armada_370_pinctrl_info;

static const struct of_device_id armada_370_pinctrl_of_match[] = {
	{ .compatible = "marvell,mv88f6710-pinctrl" },
	{ },
};

/* A single control covering every MPP (pins 0-65) */
static struct mvebu_mpp_ctrl mv88f6710_mpp_controls[] = {
	MPP_FUNC_CTRL(0, 65, NULL, armada_370_mpp_ctrl),
};

/* GPIO banks: two full 32-pin banks plus a 2-pin third bank (pins 64-65) */
static struct pinctrl_gpio_range mv88f6710_mpp_gpio_ranges[] = {
	MPP_GPIO_RANGE(0, 0, 0, 32),
	MPP_GPIO_RANGE(1, 32, 32, 32),
	MPP_GPIO_RANGE(2, 64, 64, 2),
};

/*
 * Map the MPP register block, populate the SoC description and defer
 * the actual pinctrl registration to the common mvebu probe.
 * Returns 0 on success or a negative errno (e.g. from ioremap failure).
 */
static int armada_370_pinctrl_probe(struct platform_device *pdev)
{
	struct mvebu_pinctrl_soc_info *soc = &armada_370_pinctrl_info;
	struct resource *res;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	mpp_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(mpp_base))
		return PTR_ERR(mpp_base);

	soc->variant = 0; /* no variants for Armada 370 */
	soc->controls = mv88f6710_mpp_controls;
	soc->ncontrols = ARRAY_SIZE(mv88f6710_mpp_controls);
	soc->modes = mv88f6710_mpp_modes;
	soc->nmodes = ARRAY_SIZE(mv88f6710_mpp_modes);
	soc->gpioranges = mv88f6710_mpp_gpio_ranges;
	soc->ngpioranges = ARRAY_SIZE(mv88f6710_mpp_gpio_ranges);

	pdev->dev.platform_data = soc;

	return mvebu_pinctrl_probe(pdev);
}

/* Teardown is entirely handled by the common mvebu remove path */
static int armada_370_pinctrl_remove(struct platform_device *pdev)
{
	return mvebu_pinctrl_remove(pdev);
}

static struct platform_driver armada_370_pinctrl_driver = {
	.driver = {
		.name = "armada-370-pinctrl",
		.of_match_table = armada_370_pinctrl_of_match,
	},
	.probe = armada_370_pinctrl_probe,
	.remove = armada_370_pinctrl_remove,
};

module_platform_driver(armada_370_pinctrl_driver);

MODULE_AUTHOR("Thomas Petazzoni <thomas.petazzoni@free-electrons.com>");
MODULE_DESCRIPTION("Marvell Armada 370 pinctrl driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
GalaxyTab4/android_kernel_samsung_degas
arch/arm/mach-shmobile/setup-sh73a0.c
1390
25055
/*
 * sh73a0 processor support
 *
 * Copyright (C) 2010  Takashi Yoshii
 * Copyright (C) 2010  Magnus Damm
 * Copyright (C) 2008  Yoshihiro Shimoda
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 */
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/platform_device.h>
#include <linux/of_platform.h>
#include <linux/delay.h>
#include <linux/input.h>
#include <linux/io.h>
#include <linux/serial_sci.h>
#include <linux/sh_dma.h>
#include <linux/sh_intc.h>
#include <linux/sh_timer.h>
#include <linux/platform_data/sh_ipmmu.h>
#include <linux/platform_data/irq-renesas-intc-irqpin.h>
#include <mach/dma-register.h>
#include <mach/hardware.h>
#include <mach/irqs.h>
#include <mach/sh73a0.h>
#include <mach/common.h>
#include <asm/mach-types.h>
#include <asm/mach/map.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>

static struct map_desc sh73a0_io_desc[] __initdata = {
	/* create a 1:1 entity map for 0xe6xxxxxx
	 * used by CPGA, INTC and PFC.
	 */
	{
		.virtual	= 0xe6000000,
		.pfn		= __phys_to_pfn(0xe6000000),
		.length		= 256 << 20,	/* 256 MiB, covers the whole 0xe6xxxxxx block */
		.type		= MT_DEVICE_NONSHARED
	},
};

/* Install the static 1:1 I/O mapping above; called from the machine's map_io hook */
void __init sh73a0_map_io(void)
{
	iotable_init(sh73a0_io_desc, ARRAY_SIZE(sh73a0_io_desc));
}

/* Pin function controller (PFC): main register window plus a second small window */
static struct resource sh73a0_pfc_resources[] = {
	[0] = { .start = 0xe6050000, .end = 0xe6057fff, .flags = IORESOURCE_MEM, },
	[1] = { .start = 0xe605801c, .end = 0xe6058027, .flags = IORESOURCE_MEM, }
};

static struct platform_device sh73a0_pfc_device = {
	.name		= "pfc-sh73a0",
	.id		= -1,
	.resource	= sh73a0_pfc_resources,
	.num_resources	= ARRAY_SIZE(sh73a0_pfc_resources),
};

/* Register the PFC device; boards call this before requesting pin functions */
void __init sh73a0_pinmux_init(void)
{
	platform_device_register(&sh73a0_pfc_device);
}

/*
 * On-chip serial ports SCIF0..SCIF8. Ports 0-7 are SCIFA, port 8 is
 * SCIFB; each port has a single SPI interrupt, so all four irq slots
 * of plat_sci_port point at the same gic_spi() number.
 */
static struct plat_sci_port scif0_platform_data = {
	.mapbase	= 0xe6c40000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE,
	.scbrr_algo_id	= SCBRR_ALGO_4,
	.type		= PORT_SCIFA,
	.irqs		= { gic_spi(72), gic_spi(72), gic_spi(72), gic_spi(72) },
};

static struct platform_device scif0_device = {
	.name		= "sh-sci",
	.id		= 0,
	.dev		= { .platform_data = &scif0_platform_data, },
};

static struct plat_sci_port scif1_platform_data = {
	.mapbase	= 0xe6c50000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE,
	.scbrr_algo_id	= SCBRR_ALGO_4,
	.type		= PORT_SCIFA,
	.irqs		= { gic_spi(73), gic_spi(73), gic_spi(73), gic_spi(73) },
};

static struct platform_device scif1_device = {
	.name		= "sh-sci",
	.id		= 1,
	.dev		= { .platform_data = &scif1_platform_data, },
};

static struct plat_sci_port scif2_platform_data = {
	.mapbase	= 0xe6c60000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE,
	.scbrr_algo_id	= SCBRR_ALGO_4,
	.type		= PORT_SCIFA,
	.irqs		= { gic_spi(74), gic_spi(74), gic_spi(74), gic_spi(74) },
};

static struct platform_device scif2_device = {
	.name		= "sh-sci",
	.id		= 2,
	.dev		= { .platform_data = &scif2_platform_data, },
};

static struct plat_sci_port scif3_platform_data = {
	.mapbase	= 0xe6c70000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE,
	.scbrr_algo_id	= SCBRR_ALGO_4,
	.type		= PORT_SCIFA,
	.irqs		= { gic_spi(75), gic_spi(75), gic_spi(75), gic_spi(75) },
};

static struct platform_device scif3_device = {
	.name		= "sh-sci",
	.id		= 3,
	.dev		= { .platform_data = &scif3_platform_data, },
};

static struct plat_sci_port scif4_platform_data = {
	.mapbase	= 0xe6c80000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE,
	.scbrr_algo_id	= SCBRR_ALGO_4,
	.type		= PORT_SCIFA,
	.irqs		= { gic_spi(78), gic_spi(78), gic_spi(78), gic_spi(78) },
};

static struct platform_device scif4_device = {
	.name		= "sh-sci",
	.id		= 4,
	.dev		= { .platform_data = &scif4_platform_data, },
};

static struct plat_sci_port scif5_platform_data = {
	.mapbase	= 0xe6cb0000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE,
	.scbrr_algo_id	= SCBRR_ALGO_4,
	.type		= PORT_SCIFA,
	.irqs		= { gic_spi(79), gic_spi(79), gic_spi(79), gic_spi(79) },
};

static struct platform_device scif5_device = {
	.name		= "sh-sci",
	.id		= 5,
	.dev		= { .platform_data = &scif5_platform_data, },
};

static struct plat_sci_port scif6_platform_data = {
	.mapbase	= 0xe6cc0000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE,
	.scbrr_algo_id	= SCBRR_ALGO_4,
	.type		= PORT_SCIFA,
	.irqs		= { gic_spi(156), gic_spi(156), gic_spi(156), gic_spi(156) },
};

static struct platform_device scif6_device = {
	.name		= "sh-sci",
	.id		= 6,
	.dev		= { .platform_data = &scif6_platform_data, },
};

static struct plat_sci_port scif7_platform_data = {
	.mapbase	= 0xe6cd0000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE,
	.scbrr_algo_id	= SCBRR_ALGO_4,
	.type		= PORT_SCIFA,
	.irqs		= { gic_spi(143), gic_spi(143), gic_spi(143), gic_spi(143) },
};

static struct platform_device scif7_device = {
	.name		= "sh-sci",
	.id		= 7,
	.dev		= { .platform_data = &scif7_platform_data, },
};

static struct plat_sci_port scif8_platform_data = {
	.mapbase	= 0xe6c30000,
	.flags		= UPF_BOOT_AUTOCONF,
	.scscr		= SCSCR_RE | SCSCR_TE,
	.scbrr_algo_id	= SCBRR_ALGO_4,
	.type		= PORT_SCIFB,	/* port 8 is the SCIFB variant */
	.irqs		= { gic_spi(80), gic_spi(80), gic_spi(80), gic_spi(80) },
};

static struct platform_device scif8_device = {
	.name		= "sh-sci",
	.id		= 8,
	.dev		= { .platform_data = &scif8_platform_data, },
};

/* CMT1 channel 0: used as both clockevent and clocksource */
static struct sh_timer_config cmt10_platform_data = {
	.name = "CMT10",
	.channel_offset = 0x10,
	.timer_bit = 0,
	.clockevent_rating = 80,
	.clocksource_rating = 125,
};

static struct resource cmt10_resources[] = {
	[0] = { .name = "CMT10", .start = 0xe6138010, .end = 0xe613801b, .flags = IORESOURCE_MEM, },
	[1] = { .start = gic_spi(65), .flags = IORESOURCE_IRQ, },
};

static struct platform_device cmt10_device = {
	.name		= "sh_cmt",
	.id		= 10,
	.dev		= { .platform_data = &cmt10_platform_data, },
	.resource	= cmt10_resources,
	.num_resources	= ARRAY_SIZE(cmt10_resources),
};

/* TMU */
/* TMU0 channel 0: clockevent; IRQ comes via the INTCS (not the GIC directly) */
static struct sh_timer_config tmu00_platform_data = {
	.name = "TMU00",
	.channel_offset = 0x4,
	.timer_bit = 0,
	.clockevent_rating = 200,
};

static struct resource tmu00_resources[] = {
	[0] = { .name = "TMU00", .start = 0xfff60008, .end = 0xfff60013, .flags = IORESOURCE_MEM, },
	[1] = { .start = intcs_evt2irq(0x0e80), /* TMU0_TUNI00 */ .flags = IORESOURCE_IRQ, },
};

static struct platform_device tmu00_device = {
	.name		= "sh_tmu",
	.id		= 0,
	.dev		= { .platform_data = &tmu00_platform_data, },
	.resource	= tmu00_resources,
	.num_resources	= ARRAY_SIZE(tmu00_resources),
};

/* TMU0 channel 1: clocksource */
static struct sh_timer_config tmu01_platform_data = {
	.name = "TMU01",
	.channel_offset = 0x10,
	.timer_bit = 1,
	.clocksource_rating = 200,
};

static struct resource tmu01_resources[] = {
	[0] = { .name = "TMU01", .start = 0xfff60014, .end = 0xfff6001f, .flags = IORESOURCE_MEM, },
	[1] = { .start = intcs_evt2irq(0x0ea0), /* TMU0_TUNI01 */ .flags = IORESOURCE_IRQ, },
};

static struct platform_device tmu01_device = {
	.name		= "sh_tmu",
	.id		= 1,
	.dev		= { .platform_data = &tmu01_platform_data, },
	.resource	= tmu01_resources,
	.num_resources	= ARRAY_SIZE(tmu01_resources),
};

/*
 * IIC0..IIC4: each controller has a four-interrupt range; the IRQ
 * resource spans the first..last gic_spi() of that range.
 */
static struct resource i2c0_resources[] = {
	[0] = { .name = "IIC0", .start = 0xe6820000, .end = 0xe6820425 - 1, .flags = IORESOURCE_MEM, },
	[1] = { .start = gic_spi(167), .end = gic_spi(170), .flags = IORESOURCE_IRQ, },
};

static struct resource i2c1_resources[] = {
	[0] = { .name = "IIC1", .start = 0xe6822000, .end = 0xe6822425 - 1, .flags = IORESOURCE_MEM, },
	[1] = { .start = gic_spi(51), .end = gic_spi(54), .flags = IORESOURCE_IRQ, },
};

static struct resource i2c2_resources[] = {
	[0] = { .name = "IIC2", .start = 0xe6824000, .end = 0xe6824425 - 1, .flags = IORESOURCE_MEM, },
	[1] = { .start = gic_spi(171), .end = gic_spi(174), .flags = IORESOURCE_IRQ, },
};

static struct resource i2c3_resources[] = {
	[0] = { .name = "IIC3", .start = 0xe6826000, .end = 0xe6826425 - 1, .flags = IORESOURCE_MEM, },
	[1] = { .start = gic_spi(183), .end = gic_spi(186), .flags = IORESOURCE_IRQ, },
};

static struct resource i2c4_resources[] = {
	[0] = { .name = "IIC4", .start = 0xe6828000, .end = 0xe6828425 - 1, .flags = IORESOURCE_MEM, },
	[1] = { .start = gic_spi(187), .end = gic_spi(190), .flags = IORESOURCE_IRQ, },
};

static struct platform_device i2c0_device = {
	.name		= "i2c-sh_mobile",
	.id		= 0,
	.resource	= i2c0_resources,
	.num_resources	= ARRAY_SIZE(i2c0_resources),
};

static struct platform_device i2c1_device = {
	.name		= "i2c-sh_mobile",
	.id		= 1,
	.resource	= i2c1_resources,
	.num_resources	= ARRAY_SIZE(i2c1_resources),
};

static struct platform_device i2c2_device = {
	.name		= "i2c-sh_mobile",
	.id		= 2,
	.resource	= i2c2_resources,
	.num_resources	= ARRAY_SIZE(i2c2_resources),
};

static struct platform_device i2c3_device = {
	.name		= "i2c-sh_mobile",
	.id		= 3,
	.resource	= i2c3_resources,
	.num_resources	= ARRAY_SIZE(i2c3_resources),
};

static struct platform_device i2c4_device = {
	.name		= "i2c-sh_mobile",
	.id		= 4,
	.resource	= i2c4_resources,
	.num_resources	= ARRAY_SIZE(i2c4_resources),
};

/*
 * SY-DMAC slave descriptors: FIFO address, channel control (direction +
 * transfer size) and the MID/RID request line for each peripheral.
 */
static const struct sh_dmae_slave_config sh73a0_dmae_slaves[] = {
	{ .slave_id = SHDMA_SLAVE_SCIF0_TX, .addr = 0xe6c40020, .chcr = CHCR_TX(XMIT_SZ_8BIT), .mid_rid = 0x21, },
	{ .slave_id = SHDMA_SLAVE_SCIF0_RX, .addr = 0xe6c40024, .chcr = CHCR_RX(XMIT_SZ_8BIT), .mid_rid = 0x22, },
	{ .slave_id = SHDMA_SLAVE_SCIF1_TX, .addr = 0xe6c50020, .chcr = CHCR_TX(XMIT_SZ_8BIT), .mid_rid = 0x25, },
	{ .slave_id = SHDMA_SLAVE_SCIF1_RX, .addr = 0xe6c50024, .chcr = CHCR_RX(XMIT_SZ_8BIT), .mid_rid = 0x26, },
	{ .slave_id = SHDMA_SLAVE_SCIF2_TX, .addr = 0xe6c60020, .chcr = CHCR_TX(XMIT_SZ_8BIT), .mid_rid = 0x29, },
	{ .slave_id = SHDMA_SLAVE_SCIF2_RX, .addr = 0xe6c60024, .chcr = CHCR_RX(XMIT_SZ_8BIT), .mid_rid = 0x2a, },
	{ .slave_id = SHDMA_SLAVE_SCIF3_TX, .addr = 0xe6c70020, .chcr = CHCR_TX(XMIT_SZ_8BIT), .mid_rid = 0x2d, },
	{ .slave_id = SHDMA_SLAVE_SCIF3_RX, .addr = 0xe6c70024, .chcr = CHCR_RX(XMIT_SZ_8BIT), .mid_rid = 0x2e, },
	{ .slave_id = SHDMA_SLAVE_SCIF4_TX, .addr = 0xe6c80020, .chcr = CHCR_TX(XMIT_SZ_8BIT), .mid_rid = 0x39, },
	{ .slave_id = SHDMA_SLAVE_SCIF4_RX, .addr = 0xe6c80024, .chcr = CHCR_RX(XMIT_SZ_8BIT), .mid_rid = 0x3a, },
	{ .slave_id = SHDMA_SLAVE_SCIF5_TX, .addr = 0xe6cb0020, .chcr = CHCR_TX(XMIT_SZ_8BIT), .mid_rid = 0x35, },
	{ .slave_id = SHDMA_SLAVE_SCIF5_RX, .addr = 0xe6cb0024, .chcr = CHCR_RX(XMIT_SZ_8BIT), .mid_rid = 0x36, },
	{ .slave_id = SHDMA_SLAVE_SCIF6_TX, .addr = 0xe6cc0020, .chcr = CHCR_TX(XMIT_SZ_8BIT), .mid_rid = 0x1d, },
	{ .slave_id = SHDMA_SLAVE_SCIF6_RX, .addr = 0xe6cc0024, .chcr = CHCR_RX(XMIT_SZ_8BIT), .mid_rid = 0x1e, },
	{ .slave_id = SHDMA_SLAVE_SCIF7_TX, .addr = 0xe6cd0020, .chcr = CHCR_TX(XMIT_SZ_8BIT), .mid_rid = 0x19, },
	{ .slave_id = SHDMA_SLAVE_SCIF7_RX, .addr = 0xe6cd0024, .chcr = CHCR_RX(XMIT_SZ_8BIT), .mid_rid = 0x1a, },
	{ .slave_id = SHDMA_SLAVE_SCIF8_TX, .addr = 0xe6c30040, .chcr = CHCR_TX(XMIT_SZ_8BIT), .mid_rid = 0x3d, },
	{ .slave_id = SHDMA_SLAVE_SCIF8_RX, .addr = 0xe6c30060, .chcr = CHCR_RX(XMIT_SZ_8BIT), .mid_rid = 0x3e, },
	{ .slave_id = SHDMA_SLAVE_SDHI0_TX, .addr = 0xee100030, .chcr = CHCR_TX(XMIT_SZ_16BIT), .mid_rid = 0xc1, },
	{ .slave_id = SHDMA_SLAVE_SDHI0_RX, .addr = 0xee100030, .chcr = CHCR_RX(XMIT_SZ_16BIT), .mid_rid = 0xc2, },
	{ .slave_id = SHDMA_SLAVE_SDHI1_TX, .addr = 0xee120030, .chcr = CHCR_TX(XMIT_SZ_16BIT), .mid_rid = 0xc9, },
	{ .slave_id = SHDMA_SLAVE_SDHI1_RX, .addr = 0xee120030, .chcr = CHCR_RX(XMIT_SZ_16BIT), .mid_rid = 0xca, },
	{ .slave_id = SHDMA_SLAVE_SDHI2_TX, .addr = 0xee140030, .chcr = CHCR_TX(XMIT_SZ_16BIT), .mid_rid = 0xcd, },
	{ .slave_id = SHDMA_SLAVE_SDHI2_RX, .addr = 0xee140030, .chcr = CHCR_RX(XMIT_SZ_16BIT), .mid_rid = 0xce, },
	{ .slave_id = SHDMA_SLAVE_MMCIF_TX, .addr = 0xe6bd0034, .chcr = CHCR_TX(XMIT_SZ_32BIT), .mid_rid = 0xd1, },
	{ .slave_id = SHDMA_SLAVE_MMCIF_RX, .addr = 0xe6bd0034, .chcr = CHCR_RX(XMIT_SZ_32BIT), .mid_rid = 0xd2, },
};

/*
 * SY-DMAC channels: register offsets are relative to the MEM resource,
 * which itself starts 0x20 into the block - hence the "- 0x20" below.
 */
#define DMAE_CHANNEL(_offset)			\
	{					\
		.offset = _offset - 0x20,	\
		.dmars = _offset - 0x20 + 0x40,	\
	}

static const struct sh_dmae_channel sh73a0_dmae_channels[] = {
	DMAE_CHANNEL(0x8000),
	DMAE_CHANNEL(0x8080),
	DMAE_CHANNEL(0x8100),
	DMAE_CHANNEL(0x8180),
	DMAE_CHANNEL(0x8200),
	DMAE_CHANNEL(0x8280),
	DMAE_CHANNEL(0x8300),
	DMAE_CHANNEL(0x8380),
	DMAE_CHANNEL(0x8400),
	DMAE_CHANNEL(0x8480),
	DMAE_CHANNEL(0x8500),
	DMAE_CHANNEL(0x8580),
	DMAE_CHANNEL(0x8600),
	DMAE_CHANNEL(0x8680),
	DMAE_CHANNEL(0x8700),
	DMAE_CHANNEL(0x8780),
	DMAE_CHANNEL(0x8800),
	DMAE_CHANNEL(0x8880),
	DMAE_CHANNEL(0x8900),
	DMAE_CHANNEL(0x8980),
};

static struct sh_dmae_pdata sh73a0_dmae_platform_data = {
	.slave		= sh73a0_dmae_slaves,
	.slave_num	= ARRAY_SIZE(sh73a0_dmae_slaves),
	.channel	= sh73a0_dmae_channels,
	.channel_num	= ARRAY_SIZE(sh73a0_dmae_channels),
	.ts_low_shift	= TS_LOW_SHIFT,
	.ts_low_mask	= TS_LOW_BIT << TS_LOW_SHIFT,
	.ts_high_shift	= TS_HI_SHIFT,
	.ts_high_mask	= TS_HI_BIT << TS_HI_SHIFT,
	.ts_shift	= dma_ts_shift,
	.ts_shift_num	= ARRAY_SIZE(dma_ts_shift),
	.dmaor_init	= DMAOR_DME,
};

static struct resource sh73a0_dmae_resources[] = {
	{
		/* Registers including DMAOR and channels including DMARSx */
		.start	= 0xfe000020,
		.end	= 0xfe008a00 - 1,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "error_irq",
		.start	= gic_spi(129),
		.end	= gic_spi(129),
		.flags	= IORESOURCE_IRQ,
	},
	{
		/* IRQ for channels 0-19 */
		.start	= gic_spi(109),
		.end	= gic_spi(128),
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device dma0_device = {
	.name		= "sh-dma-engine",
	.id		= 0,
	.resource	= sh73a0_dmae_resources,
	.num_resources	= ARRAY_SIZE(sh73a0_dmae_resources),
	.dev		= { .platform_data = &sh73a0_dmae_platform_data, },
};

/* MPDMAC */
static const struct sh_dmae_slave_config sh73a0_mpdma_slaves[] = {
	{ .slave_id = SHDMA_SLAVE_FSI2A_RX, .addr = 0xec230020, .chcr = CHCR_RX(XMIT_SZ_32BIT), .mid_rid = 0xd6, /* CHECK ME */ },
	{ .slave_id = SHDMA_SLAVE_FSI2A_TX, .addr = 0xec230024, .chcr = CHCR_TX(XMIT_SZ_32BIT), .mid_rid = 0xd5, /* CHECK ME */ },
	{ .slave_id = SHDMA_SLAVE_FSI2C_RX, .addr = 0xec230060, .chcr = CHCR_RX(XMIT_SZ_32BIT), .mid_rid = 0xda, /* CHECK ME */ },
	{ .slave_id = SHDMA_SLAVE_FSI2C_TX, .addr = 0xec230064, .chcr = CHCR_TX(XMIT_SZ_32BIT), .mid_rid = 0xd9, /* CHECK ME */ },
	{ .slave_id = SHDMA_SLAVE_FSI2B_RX, .addr = 0xec240020, .chcr = CHCR_RX(XMIT_SZ_32BIT), .mid_rid = 0x8e, /* CHECK ME */ },
	/* NOTE(review): a TX slave with CHCR_RX() looks suspicious - confirm against the AG5 manual */
	{ .slave_id = SHDMA_SLAVE_FSI2B_TX, .addr = 0xec240024, .chcr = CHCR_RX(XMIT_SZ_32BIT), .mid_rid = 0x8d, /* CHECK ME */ },
	{ .slave_id = SHDMA_SLAVE_FSI2D_RX, .addr = 0xec240060, .chcr = CHCR_RX(XMIT_SZ_32BIT), .mid_rid = 0x9a, /* CHECK ME */ },
};

/* MP-DMAC channels also carry a per-channel clear register (chclr_offset) */
#define MPDMA_CHANNEL(a, b, c)			\
	{					\
		.offset = a,			\
		.dmars = b,			\
		.dmars_bit = c,			\
		.chclr_offset = (0x220 - 0x20) + a	\
	}

static const struct sh_dmae_channel sh73a0_mpdma_channels[] = {
	MPDMA_CHANNEL(0x00, 0, 0),
	MPDMA_CHANNEL(0x10, 0, 8),
	MPDMA_CHANNEL(0x20, 4, 0),
	MPDMA_CHANNEL(0x30, 4, 8),
	MPDMA_CHANNEL(0x50, 8, 0),
	MPDMA_CHANNEL(0x70, 8, 8),
};

static struct sh_dmae_pdata sh73a0_mpdma_platform_data = {
	.slave		= sh73a0_mpdma_slaves,
	.slave_num	= ARRAY_SIZE(sh73a0_mpdma_slaves),
	.channel	= sh73a0_mpdma_channels,
	.channel_num	= ARRAY_SIZE(sh73a0_mpdma_channels),
	.ts_low_shift	= TS_LOW_SHIFT,
	.ts_low_mask	= TS_LOW_BIT << TS_LOW_SHIFT,
	.ts_high_shift	= TS_HI_SHIFT,
	.ts_high_mask	= TS_HI_BIT << TS_HI_SHIFT,
	.ts_shift	= dma_ts_shift,
	.ts_shift_num	= ARRAY_SIZE(dma_ts_shift),
	.dmaor_init	= DMAOR_DME,
	.chclr_present	= 1,
};

/* Resource order important! */
static struct resource sh73a0_mpdma_resources[] = {
	{
		/* Channel registers and DMAOR */
		.start	= 0xec618020,
		.end	= 0xec61828f,
		.flags	= IORESOURCE_MEM,
	},
	{
		/* DMARSx */
		.start	= 0xec619000,
		.end	= 0xec61900b,
		.flags	= IORESOURCE_MEM,
	},
	{
		.name	= "error_irq",
		.start	= gic_spi(181),
		.end	= gic_spi(181),
		.flags	= IORESOURCE_IRQ,
	},
	{
		/* IRQ for channels 0-5 */
		.start	= gic_spi(175),
		.end	= gic_spi(180),
		.flags	= IORESOURCE_IRQ,
	},
};

static struct platform_device mpdma0_device = {
	.name		= "sh-dma-engine",
	.id		= 1,
	.resource	= sh73a0_mpdma_resources,
	.num_resources	= ARRAY_SIZE(sh73a0_mpdma_resources),
	.dev		= { .platform_data = &sh73a0_mpdma_platform_data, },
};

/* ARM performance monitoring unit: one IRQ per CPU core */
static struct resource pmu_resources[] = {
	[0] = { .start = gic_spi(55), .end = gic_spi(55), .flags = IORESOURCE_IRQ, },
	[1] = { .start = gic_spi(56), .end = gic_spi(56), .flags = IORESOURCE_IRQ, },
};

static struct platform_device pmu_device = {
	.name		= "arm-pmu",
	.id		= -1,
	.num_resources	= ARRAY_SIZE(pmu_resources),
	.resource	= pmu_resources,
};

/* an IPMMU module for ICB */
static struct resource ipmmu_resources[] = {
	[0] = { .name = "IPMMU", .start = 0xfe951000, .end = 0xfe9510ff, .flags = IORESOURCE_MEM, },
};

/* Devices whose DMA goes through the IPMMU */
static const char * const ipmmu_dev_names[] = {
	"sh_mobile_lcdc_fb.0",
};

static struct shmobile_ipmmu_platform_data ipmmu_platform_data = {
	.dev_names	= ipmmu_dev_names,
	.num_dev_names	= ARRAY_SIZE(ipmmu_dev_names),
};

static struct platform_device ipmmu_device = {
	.name		= "ipmmu",
	.id		= -1,
	.dev		= { .platform_data = &ipmmu_platform_data, },
	.resource	= ipmmu_resources,
	.num_resources	= ARRAY_SIZE(ipmmu_resources),
};

/*
 * External IRQ pins IRQ0..IRQ31 are handled by four INTC irqpin
 * instances, eight pins each. Each instance takes five register
 * resources (ICR, INTPRI, INTREQ, INTMSK, INTMSKCLR) followed by
 * the eight upstream GIC SPIs.
 */
static struct renesas_intc_irqpin_config irqpin0_platform_data = {
	.irq_base = irq_pin(0), /* IRQ0 -> IRQ7 */
};

static struct resource irqpin0_resources[] = {
	DEFINE_RES_MEM(0xe6900000, 4), /* ICR1A */
	DEFINE_RES_MEM(0xe6900010, 4), /* INTPRI00A */
	DEFINE_RES_MEM(0xe6900020, 1), /* INTREQ00A */
	DEFINE_RES_MEM(0xe6900040, 1), /* INTMSK00A */
	DEFINE_RES_MEM(0xe6900060, 1), /* INTMSKCLR00A */
	DEFINE_RES_IRQ(gic_spi(1)), /* IRQ0 */
	DEFINE_RES_IRQ(gic_spi(2)), /* IRQ1 */
	DEFINE_RES_IRQ(gic_spi(3)), /* IRQ2 */
	DEFINE_RES_IRQ(gic_spi(4)), /* IRQ3 */
	DEFINE_RES_IRQ(gic_spi(5)), /* IRQ4 */
	DEFINE_RES_IRQ(gic_spi(6)), /* IRQ5 */
	DEFINE_RES_IRQ(gic_spi(7)), /* IRQ6 */
	DEFINE_RES_IRQ(gic_spi(8)), /* IRQ7 */
};

static struct platform_device irqpin0_device = {
	.name		= "renesas_intc_irqpin",
	.id		= 0,
	.resource	= irqpin0_resources,
	.num_resources	= ARRAY_SIZE(irqpin0_resources),
	.dev		= { .platform_data = &irqpin0_platform_data, },
};

static struct renesas_intc_irqpin_config irqpin1_platform_data = {
	.irq_base = irq_pin(8), /* IRQ8 -> IRQ15 */
	.control_parent = true, /* Disable spurious IRQ10 */
};

static struct resource irqpin1_resources[] = {
	DEFINE_RES_MEM(0xe6900004, 4), /* ICR2A */
	DEFINE_RES_MEM(0xe6900014, 4), /* INTPRI10A */
	DEFINE_RES_MEM(0xe6900024, 1), /* INTREQ10A */
	DEFINE_RES_MEM(0xe6900044, 1), /* INTMSK10A */
	DEFINE_RES_MEM(0xe6900064, 1), /* INTMSKCLR10A */
	DEFINE_RES_IRQ(gic_spi(9)), /* IRQ8 */
	DEFINE_RES_IRQ(gic_spi(10)), /* IRQ9 */
	DEFINE_RES_IRQ(gic_spi(11)), /* IRQ10 */
	DEFINE_RES_IRQ(gic_spi(12)), /* IRQ11 */
	DEFINE_RES_IRQ(gic_spi(13)), /* IRQ12 */
	DEFINE_RES_IRQ(gic_spi(14)), /* IRQ13 */
	DEFINE_RES_IRQ(gic_spi(15)), /* IRQ14 */
	DEFINE_RES_IRQ(gic_spi(16)), /* IRQ15 */
};

static struct platform_device irqpin1_device = {
	.name		= "renesas_intc_irqpin",
	.id		= 1,
	.resource	= irqpin1_resources,
	.num_resources	= ARRAY_SIZE(irqpin1_resources),
	.dev		= { .platform_data = &irqpin1_platform_data, },
};

static struct renesas_intc_irqpin_config irqpin2_platform_data = {
	.irq_base = irq_pin(16), /* IRQ16 -> IRQ23 */
};

static struct resource irqpin2_resources[] = {
	DEFINE_RES_MEM(0xe6900008, 4), /* ICR3A */
	DEFINE_RES_MEM(0xe6900018, 4), /* INTPRI20A */
	DEFINE_RES_MEM(0xe6900028, 1), /* INTREQ20A */
	DEFINE_RES_MEM(0xe6900048, 1), /* INTMSK20A */
	DEFINE_RES_MEM(0xe6900068, 1), /* INTMSKCLR20A */
	DEFINE_RES_IRQ(gic_spi(17)), /* IRQ16 */
	DEFINE_RES_IRQ(gic_spi(18)), /* IRQ17 */
	DEFINE_RES_IRQ(gic_spi(19)), /* IRQ18 */
	DEFINE_RES_IRQ(gic_spi(20)), /* IRQ19 */
	DEFINE_RES_IRQ(gic_spi(21)), /* IRQ20 */
	DEFINE_RES_IRQ(gic_spi(22)), /* IRQ21 */
	DEFINE_RES_IRQ(gic_spi(23)), /* IRQ22 */
	DEFINE_RES_IRQ(gic_spi(24)), /* IRQ23 */
};

static struct platform_device irqpin2_device = {
	.name		= "renesas_intc_irqpin",
	.id		= 2,
	.resource	= irqpin2_resources,
	.num_resources	= ARRAY_SIZE(irqpin2_resources),
	.dev		= { .platform_data = &irqpin2_platform_data, },
};

static struct renesas_intc_irqpin_config irqpin3_platform_data = {
	.irq_base = irq_pin(24), /* IRQ24 -> IRQ31 */
};

static struct resource irqpin3_resources[] = {
	DEFINE_RES_MEM(0xe690000c, 4), /* ICR4A */
	DEFINE_RES_MEM(0xe690001c, 4), /* INTPRI30A */
	DEFINE_RES_MEM(0xe690002c, 1), /* INTREQ30A */
	DEFINE_RES_MEM(0xe690004c, 1), /* INTMSK30A */
	DEFINE_RES_MEM(0xe690006c, 1), /* INTMSKCLR30A */
	DEFINE_RES_IRQ(gic_spi(25)), /* IRQ24 */
	DEFINE_RES_IRQ(gic_spi(26)), /* IRQ25 */
	DEFINE_RES_IRQ(gic_spi(27)), /* IRQ26 */
	DEFINE_RES_IRQ(gic_spi(28)), /* IRQ27 */
	DEFINE_RES_IRQ(gic_spi(29)), /* IRQ28 */
	DEFINE_RES_IRQ(gic_spi(30)), /* IRQ29 */
	DEFINE_RES_IRQ(gic_spi(31)), /* IRQ30 */
	DEFINE_RES_IRQ(gic_spi(32)), /* IRQ31 */
};

static struct platform_device irqpin3_device = {
	.name		= "renesas_intc_irqpin",
	.id		= 3,
	.resource	= irqpin3_resources,
	.num_resources	= ARRAY_SIZE(irqpin3_resources),
	.dev		= { .platform_data = &irqpin3_platform_data, },
};

/* Devices shared between legacy board code and DT boot */
static struct platform_device *sh73a0_devices_dt[] __initdata = {
	&scif0_device,
	&scif1_device,
	&scif2_device,
	&scif3_device,
	&scif4_device,
	&scif5_device,
	&scif6_device,
	&scif7_device,
	&scif8_device,
	&cmt10_device,
};

/* Devices needed early (timers, IPMMU) */
static struct platform_device *sh73a0_early_devices[] __initdata = {
	&tmu00_device,
	&tmu01_device,
	&ipmmu_device,
};

/* Remaining devices, registered in the normal device-model phase */
static struct platform_device *sh73a0_late_devices[] __initdata = {
	&i2c0_device,
	&i2c1_device,
	&i2c2_device,
	&i2c3_device,
	&i2c4_device,
	&dma0_device,
	&mpdma0_device,
	&pmu_device,
	&irqpin0_device,
	&irqpin1_device,
	&irqpin2_device,
	&irqpin3_device,
};

#define SRCR2 IOMEM(0xe61580b0)

/* Register all on-chip devices (legacy, non-DT boot path) */
void __init sh73a0_add_standard_devices(void)
{
	/* Clear software reset bit on SY-DMAC module */
	__raw_writel(__raw_readl(SRCR2) & ~(1 << 18), SRCR2);

	platform_add_devices(sh73a0_devices_dt,
			    ARRAY_SIZE(sh73a0_devices_dt));
	platform_add_devices(sh73a0_early_devices,
			    ARRAY_SIZE(sh73a0_early_devices));
	platform_add_devices(sh73a0_late_devices,
			    ARRAY_SIZE(sh73a0_late_devices));
}

/* do nothing for !CONFIG_SMP or !CONFIG_HAVE_TWD */
void __init __weak sh73a0_register_twd(void) { }

/* Clock and early-timer bring-up, including the (optional) TWD local timer */
void __init sh73a0_earlytimer_init(void)
{
	sh73a0_clock_init();
	shmobile_earlytimer_init();
	sh73a0_register_twd();
}

/* Early platform devices + early console, before the driver model is up */
void __init sh73a0_add_early_devices(void)
{
	early_platform_add_devices(sh73a0_devices_dt,
				   ARRAY_SIZE(sh73a0_devices_dt));
	early_platform_add_devices(sh73a0_early_devices,
				   ARRAY_SIZE(sh73a0_early_devices));

	/* setup early console here as well */
	shmobile_setup_console();
}

#ifdef CONFIG_USE_OF

void __init sh73a0_init_delay(void)
{
	shmobile_setup_delay(1196, 44, 46); /* Cortex-A9 @ 1196MHz */
}

static const struct of_dev_auxdata sh73a0_auxdata_lookup[] __initconst = {
	{},
};

/* DT boot path: clocks are setup late during boot in the case of DT */
void __init sh73a0_add_standard_devices_dt(void)
{
	sh73a0_clock_init();
	platform_add_devices(sh73a0_devices_dt,
			     ARRAY_SIZE(sh73a0_devices_dt));
	of_platform_populate(NULL, of_default_bus_match_table,
			     sh73a0_auxdata_lookup, NULL);
}

static const char *sh73a0_boards_compat_dt[] __initdata = {
	"renesas,sh73a0",
	NULL,
};

DT_MACHINE_START(SH73A0_DT, "Generic SH73A0 (Flattened Device Tree)")
	.smp		= smp_ops(sh73a0_smp_ops),
	.map_io		= sh73a0_map_io,
	.init_early	= sh73a0_init_delay,
	.nr_irqs	= NR_IRQS_LEGACY,
	.init_irq	= irqchip_init,
	.init_machine	= sh73a0_add_standard_devices_dt,
	.dt_compat	= sh73a0_boards_compat_dt,
MACHINE_END
#endif /* CONFIG_USE_OF */
gpl-2.0
KylinMod/android_kernel_motorola_msm8960-common
drivers/mmc/host/sdhci-esdhc-imx.c
1390
9366
/* * Freescale eSDHC i.MX controller driver for the platform bus. * * derived from the OF-version. * * Copyright (c) 2010 Pengutronix e.K. * Author: Wolfram Sang <w.sang@pengutronix.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License. */ #include <linux/io.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/gpio.h> #include <linux/slab.h> #include <linux/mmc/host.h> #include <linux/mmc/sdhci-pltfm.h> #include <linux/mmc/mmc.h> #include <linux/mmc/sdio.h> #include <mach/hardware.h> #include <mach/esdhc.h> #include "sdhci.h" #include "sdhci-pltfm.h" #include "sdhci-esdhc.h" /* VENDOR SPEC register */ #define SDHCI_VENDOR_SPEC 0xC0 #define SDHCI_VENDOR_SPEC_SDIO_QUIRK 0x00000002 #define ESDHC_FLAG_GPIO_FOR_CD_WP (1 << 0) /* * The CMDTYPE of the CMD register (offset 0xE) should be set to * "11" when the STOP CMD12 is issued on imx53 to abort one * open ended multi-blk IO. Otherwise the TC INT wouldn't * be generated. * In exact block transfer, the controller doesn't complete the * operations automatically as required at the end of the * transfer and remains on hold if the abort command is not sent. * As a result, the TC flag is not asserted and SW received timeout * exeception. Bit1 of Vendor Spec registor is used to fix it. 
*/ #define ESDHC_FLAG_MULTIBLK_NO_INT (1 << 1) struct pltfm_imx_data { int flags; u32 scratchpad; }; static inline void esdhc_clrset_le(struct sdhci_host *host, u32 mask, u32 val, int reg) { void __iomem *base = host->ioaddr + (reg & ~0x3); u32 shift = (reg & 0x3) * 8; writel(((readl(base) & ~(mask << shift)) | (val << shift)), base); } static u32 esdhc_readl_le(struct sdhci_host *host, int reg) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = pltfm_host->priv; /* fake CARD_PRESENT flag on mx25/35 */ u32 val = readl(host->ioaddr + reg); if (unlikely((reg == SDHCI_PRESENT_STATE) && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) { struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; if (boarddata && gpio_is_valid(boarddata->cd_gpio) && gpio_get_value(boarddata->cd_gpio)) /* no card, if a valid gpio says so... */ val &= ~SDHCI_CARD_PRESENT; else /* ... in all other cases assume card is present */ val |= SDHCI_CARD_PRESENT; } return val; } static void esdhc_writel_le(struct sdhci_host *host, u32 val, int reg) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = pltfm_host->priv; if (unlikely((reg == SDHCI_INT_ENABLE || reg == SDHCI_SIGNAL_ENABLE) && (imx_data->flags & ESDHC_FLAG_GPIO_FOR_CD_WP))) /* * these interrupts won't work with a custom card_detect gpio * (only applied to mx25/35) */ val &= ~(SDHCI_INT_CARD_REMOVE | SDHCI_INT_CARD_INSERT); if (unlikely((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) && (reg == SDHCI_INT_STATUS) && (val & SDHCI_INT_DATA_END))) { u32 v; v = readl(host->ioaddr + SDHCI_VENDOR_SPEC); v &= ~SDHCI_VENDOR_SPEC_SDIO_QUIRK; writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); } writel(val, host->ioaddr + reg); } static u16 esdhc_readw_le(struct sdhci_host *host, int reg) { if (unlikely(reg == SDHCI_HOST_VERSION)) reg ^= 2; return readw(host->ioaddr + reg); } static void esdhc_writew_le(struct sdhci_host *host, u16 val, int reg) { struct 
sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct pltfm_imx_data *imx_data = pltfm_host->priv; switch (reg) { case SDHCI_TRANSFER_MODE: /* * Postpone this write, we must do it together with a * command write that is down below. */ if ((imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT) && (host->cmd->opcode == SD_IO_RW_EXTENDED) && (host->cmd->data->blocks > 1) && (host->cmd->data->flags & MMC_DATA_READ)) { u32 v; v = readl(host->ioaddr + SDHCI_VENDOR_SPEC); v |= SDHCI_VENDOR_SPEC_SDIO_QUIRK; writel(v, host->ioaddr + SDHCI_VENDOR_SPEC); } imx_data->scratchpad = val; return; case SDHCI_COMMAND: if ((host->cmd->opcode == MMC_STOP_TRANSMISSION || host->cmd->opcode == MMC_SET_BLOCK_COUNT) && (imx_data->flags & ESDHC_FLAG_MULTIBLK_NO_INT)) val |= SDHCI_CMD_ABORTCMD; writel(val << 16 | imx_data->scratchpad, host->ioaddr + SDHCI_TRANSFER_MODE); return; case SDHCI_BLOCK_SIZE: val &= ~SDHCI_MAKE_BLKSZ(0x7, 0); break; } esdhc_clrset_le(host, 0xffff, val, reg); } static void esdhc_writeb_le(struct sdhci_host *host, u8 val, int reg) { u32 new_val; switch (reg) { case SDHCI_POWER_CONTROL: /* * FSL put some DMA bits here * If your board has a regulator, code should be here */ return; case SDHCI_HOST_CONTROL: /* FSL messed up here, so we can just keep those two */ new_val = val & (SDHCI_CTRL_LED | SDHCI_CTRL_4BITBUS); /* ensure the endianess */ new_val |= ESDHC_HOST_CONTROL_LE; /* DMA mode bits are shifted */ new_val |= (val & SDHCI_CTRL_DMA_MASK) << 5; esdhc_clrset_le(host, 0xffff, new_val, reg); return; } esdhc_clrset_le(host, 0xff, val, reg); } static unsigned int esdhc_pltfm_get_max_clock(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); return clk_get_rate(pltfm_host->clk); } static unsigned int esdhc_pltfm_get_min_clock(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); return clk_get_rate(pltfm_host->clk) / 256 / 16; } static unsigned int esdhc_pltfm_get_ro(struct sdhci_host *host) { struct 
esdhc_platform_data *boarddata = host->mmc->parent->platform_data; if (boarddata && gpio_is_valid(boarddata->wp_gpio)) return gpio_get_value(boarddata->wp_gpio); else return -ENOSYS; } static struct sdhci_ops sdhci_esdhc_ops = { .read_l = esdhc_readl_le, .read_w = esdhc_readw_le, .write_l = esdhc_writel_le, .write_w = esdhc_writew_le, .write_b = esdhc_writeb_le, .set_clock = esdhc_set_clock, .get_max_clock = esdhc_pltfm_get_max_clock, .get_min_clock = esdhc_pltfm_get_min_clock, }; static irqreturn_t cd_irq(int irq, void *data) { struct sdhci_host *sdhost = (struct sdhci_host *)data; tasklet_schedule(&sdhost->card_tasklet); return IRQ_HANDLED; }; static int esdhc_pltfm_init(struct sdhci_host *host, struct sdhci_pltfm_data *pdata) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; struct clk *clk; int err; struct pltfm_imx_data *imx_data; clk = clk_get(mmc_dev(host->mmc), NULL); if (IS_ERR(clk)) { dev_err(mmc_dev(host->mmc), "clk err\n"); return PTR_ERR(clk); } clk_enable(clk); pltfm_host->clk = clk; imx_data = kzalloc(sizeof(struct pltfm_imx_data), GFP_KERNEL); if (!imx_data) { clk_disable(pltfm_host->clk); clk_put(pltfm_host->clk); return -ENOMEM; } pltfm_host->priv = imx_data; host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; if (cpu_is_mx25() || cpu_is_mx35()) { /* Fix errata ENGcm07207 present on i.MX25 and i.MX35 */ host->quirks |= SDHCI_QUIRK_NO_MULTIBLOCK; /* write_protect can't be routed to controller, use gpio */ sdhci_esdhc_ops.get_ro = esdhc_pltfm_get_ro; } if (!(cpu_is_mx25() || cpu_is_mx35() || cpu_is_mx51())) imx_data->flags |= ESDHC_FLAG_MULTIBLK_NO_INT; if (boarddata) { err = gpio_request_one(boarddata->wp_gpio, GPIOF_IN, "ESDHC_WP"); if (err) { dev_warn(mmc_dev(host->mmc), "no write-protect pin available!\n"); boarddata->wp_gpio = err; } err = gpio_request_one(boarddata->cd_gpio, GPIOF_IN, "ESDHC_CD"); if (err) { dev_warn(mmc_dev(host->mmc), "no card-detect pin 
available!\n"); goto no_card_detect_pin; } /* i.MX5x has issues to be researched */ if (!cpu_is_mx25() && !cpu_is_mx35()) goto not_supported; err = request_irq(gpio_to_irq(boarddata->cd_gpio), cd_irq, IRQF_TRIGGER_FALLING | IRQF_TRIGGER_RISING, mmc_hostname(host->mmc), host); if (err) { dev_warn(mmc_dev(host->mmc), "request irq error\n"); goto no_card_detect_irq; } imx_data->flags |= ESDHC_FLAG_GPIO_FOR_CD_WP; /* Now we have a working card_detect again */ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; } return 0; no_card_detect_irq: gpio_free(boarddata->cd_gpio); no_card_detect_pin: boarddata->cd_gpio = err; not_supported: kfree(imx_data); return 0; } static void esdhc_pltfm_exit(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct esdhc_platform_data *boarddata = host->mmc->parent->platform_data; struct pltfm_imx_data *imx_data = pltfm_host->priv; if (boarddata && gpio_is_valid(boarddata->wp_gpio)) gpio_free(boarddata->wp_gpio); if (boarddata && gpio_is_valid(boarddata->cd_gpio)) { gpio_free(boarddata->cd_gpio); if (!(host->quirks & SDHCI_QUIRK_BROKEN_CARD_DETECTION)) free_irq(gpio_to_irq(boarddata->cd_gpio), host); } clk_disable(pltfm_host->clk); clk_put(pltfm_host->clk); kfree(imx_data); } struct sdhci_pltfm_data sdhci_esdhc_imx_pdata = { .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_BROKEN_CARD_DETECTION, /* ADMA has issues. Might be fixable */ .ops = &sdhci_esdhc_ops, .init = esdhc_pltfm_init, .exit = esdhc_pltfm_exit, };
gpl-2.0
mydongistiny/kernel_huawei_angler-ak
fs/lockd/svclock.c
1646
26489
/* * linux/fs/lockd/svclock.c * * Handling of server-side locks, mostly of the blocked variety. * This is the ugliest part of lockd because we tread on very thin ice. * GRANT and CANCEL calls may get stuck, meet in mid-flight, etc. * IMNSHO introducing the grant callback into the NLM protocol was one * of the worst ideas Sun ever had. Except maybe for the idea of doing * NFS file locking at all. * * I'm trying hard to avoid race conditions by protecting most accesses * to a file's list of blocked locks through a semaphore. The global * list of blocked locks is not protected in this fashion however. * Therefore, some functions (such as the RPC callback for the async grant * call) move blocked locks towards the head of the list *while some other * process might be traversing it*. This should not be a problem in * practice, because this will only cause functions traversing the list * to visit some blocks twice. * * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> */ #include <linux/types.h> #include <linux/slab.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/svc_xprt.h> #include <linux/lockd/nlm.h> #include <linux/lockd/lockd.h> #include <linux/kthread.h> #define NLMDBG_FACILITY NLMDBG_SVCLOCK #ifdef CONFIG_LOCKD_V4 #define nlm_deadlock nlm4_deadlock #else #define nlm_deadlock nlm_lck_denied #endif static void nlmsvc_release_block(struct nlm_block *block); static void nlmsvc_insert_block(struct nlm_block *block, unsigned long); static void nlmsvc_remove_block(struct nlm_block *block); static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock); static void nlmsvc_freegrantargs(struct nlm_rqst *call); static const struct rpc_call_ops nlmsvc_grant_ops; /* * The list of blocked locks to retry */ static LIST_HEAD(nlm_blocked); static DEFINE_SPINLOCK(nlm_blocked_lock); #ifdef LOCKD_DEBUG static const char *nlmdbg_cookie2a(const struct nlm_cookie *cookie) { /* * We can 
get away with a static buffer because we're only * called with BKL held. */ static char buf[2*NLM_MAXCOOKIELEN+1]; unsigned int i, len = sizeof(buf); char *p = buf; len--; /* allow for trailing \0 */ if (len < 3) return "???"; for (i = 0 ; i < cookie->len ; i++) { if (len < 2) { strcpy(p-3, "..."); break; } sprintf(p, "%02x", cookie->data[i]); p += 2; len -= 2; } *p = '\0'; return buf; } #endif /* * Insert a blocked lock into the global list */ static void nlmsvc_insert_block_locked(struct nlm_block *block, unsigned long when) { struct nlm_block *b; struct list_head *pos; dprintk("lockd: nlmsvc_insert_block(%p, %ld)\n", block, when); if (list_empty(&block->b_list)) { kref_get(&block->b_count); } else { list_del_init(&block->b_list); } pos = &nlm_blocked; if (when != NLM_NEVER) { if ((when += jiffies) == NLM_NEVER) when ++; list_for_each(pos, &nlm_blocked) { b = list_entry(pos, struct nlm_block, b_list); if (time_after(b->b_when,when) || b->b_when == NLM_NEVER) break; } /* On normal exit from the loop, pos == &nlm_blocked, * so we will be adding to the end of the list - good */ } list_add_tail(&block->b_list, pos); block->b_when = when; } static void nlmsvc_insert_block(struct nlm_block *block, unsigned long when) { spin_lock(&nlm_blocked_lock); nlmsvc_insert_block_locked(block, when); spin_unlock(&nlm_blocked_lock); } /* * Remove a block from the global list */ static inline void nlmsvc_remove_block(struct nlm_block *block) { if (!list_empty(&block->b_list)) { spin_lock(&nlm_blocked_lock); list_del_init(&block->b_list); spin_unlock(&nlm_blocked_lock); nlmsvc_release_block(block); } } /* * Find a block for a given lock */ static struct nlm_block * nlmsvc_lookup_block(struct nlm_file *file, struct nlm_lock *lock) { struct nlm_block *block; struct file_lock *fl; dprintk("lockd: nlmsvc_lookup_block f=%p pd=%d %Ld-%Ld ty=%d\n", file, lock->fl.fl_pid, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end, lock->fl.fl_type); list_for_each_entry(block, &nlm_blocked, 
b_list) { fl = &block->b_call->a_args.lock.fl; dprintk("lockd: check f=%p pd=%d %Ld-%Ld ty=%d cookie=%s\n", block->b_file, fl->fl_pid, (long long)fl->fl_start, (long long)fl->fl_end, fl->fl_type, nlmdbg_cookie2a(&block->b_call->a_args.cookie)); if (block->b_file == file && nlm_compare_locks(fl, &lock->fl)) { kref_get(&block->b_count); return block; } } return NULL; } static inline int nlm_cookie_match(struct nlm_cookie *a, struct nlm_cookie *b) { if (a->len != b->len) return 0; if (memcmp(a->data, b->data, a->len)) return 0; return 1; } /* * Find a block with a given NLM cookie. */ static inline struct nlm_block * nlmsvc_find_block(struct nlm_cookie *cookie) { struct nlm_block *block; list_for_each_entry(block, &nlm_blocked, b_list) { if (nlm_cookie_match(&block->b_call->a_args.cookie,cookie)) goto found; } return NULL; found: dprintk("nlmsvc_find_block(%s): block=%p\n", nlmdbg_cookie2a(cookie), block); kref_get(&block->b_count); return block; } /* * Create a block and initialize it. * * Note: we explicitly set the cookie of the grant reply to that of * the blocked lock request. The spec explicitly mentions that the client * should _not_ rely on the callback containing the same cookie as the * request, but (as I found out later) that's because some implementations * do just this. Never mind the standards comittees, they support our * logging industries. * * 10 years later: I hope we can safely ignore these old and broken * clients by now. Let's fix this so we can uniquely identify an incoming * GRANTED_RES message by cookie, without having to rely on the client's IP * address. 
--okir */ static struct nlm_block * nlmsvc_create_block(struct svc_rqst *rqstp, struct nlm_host *host, struct nlm_file *file, struct nlm_lock *lock, struct nlm_cookie *cookie) { struct nlm_block *block; struct nlm_rqst *call = NULL; call = nlm_alloc_call(host); if (call == NULL) return NULL; /* Allocate memory for block, and initialize arguments */ block = kzalloc(sizeof(*block), GFP_KERNEL); if (block == NULL) goto failed; kref_init(&block->b_count); INIT_LIST_HEAD(&block->b_list); INIT_LIST_HEAD(&block->b_flist); if (!nlmsvc_setgrantargs(call, lock)) goto failed_free; /* Set notifier function for VFS, and init args */ call->a_args.lock.fl.fl_flags |= FL_SLEEP; call->a_args.lock.fl.fl_lmops = &nlmsvc_lock_operations; nlmclnt_next_cookie(&call->a_args.cookie); dprintk("lockd: created block %p...\n", block); /* Create and initialize the block */ block->b_daemon = rqstp->rq_server; block->b_host = host; block->b_file = file; block->b_fl = NULL; file->f_count++; /* Add to file's list of blocks */ list_add(&block->b_flist, &file->f_blocks); /* Set up RPC arguments for callback */ block->b_call = call; call->a_flags = RPC_TASK_ASYNC; call->a_block = block; return block; failed_free: kfree(block); failed: nlmsvc_release_call(call); return NULL; } /* * Delete a block. * It is the caller's responsibility to check whether the file * can be closed hereafter. 
*/ static int nlmsvc_unlink_block(struct nlm_block *block) { int status; dprintk("lockd: unlinking block %p...\n", block); /* Remove block from list */ status = posix_unblock_lock(block->b_file->f_file, &block->b_call->a_args.lock.fl); nlmsvc_remove_block(block); return status; } static void nlmsvc_free_block(struct kref *kref) { struct nlm_block *block = container_of(kref, struct nlm_block, b_count); struct nlm_file *file = block->b_file; dprintk("lockd: freeing block %p...\n", block); /* Remove block from file's list of blocks */ list_del_init(&block->b_flist); mutex_unlock(&file->f_mutex); nlmsvc_freegrantargs(block->b_call); nlmsvc_release_call(block->b_call); nlm_release_file(block->b_file); kfree(block->b_fl); kfree(block); } static void nlmsvc_release_block(struct nlm_block *block) { if (block != NULL) kref_put_mutex(&block->b_count, nlmsvc_free_block, &block->b_file->f_mutex); } /* * Loop over all blocks and delete blocks held by * a matching host. */ void nlmsvc_traverse_blocks(struct nlm_host *host, struct nlm_file *file, nlm_host_match_fn_t match) { struct nlm_block *block, *next; restart: mutex_lock(&file->f_mutex); list_for_each_entry_safe(block, next, &file->f_blocks, b_flist) { if (!match(block->b_host, host)) continue; /* Do not destroy blocks that are not on * the global retry list - why? */ if (list_empty(&block->b_list)) continue; kref_get(&block->b_count); mutex_unlock(&file->f_mutex); nlmsvc_unlink_block(block); nlmsvc_release_block(block); goto restart; } mutex_unlock(&file->f_mutex); } /* * Initialize arguments for GRANTED call. The nlm_rqst structure * has been cleared already. 
*/ static int nlmsvc_setgrantargs(struct nlm_rqst *call, struct nlm_lock *lock) { locks_copy_lock(&call->a_args.lock.fl, &lock->fl); memcpy(&call->a_args.lock.fh, &lock->fh, sizeof(call->a_args.lock.fh)); call->a_args.lock.caller = utsname()->nodename; call->a_args.lock.oh.len = lock->oh.len; /* set default data area */ call->a_args.lock.oh.data = call->a_owner; call->a_args.lock.svid = lock->fl.fl_pid; if (lock->oh.len > NLMCLNT_OHSIZE) { void *data = kmalloc(lock->oh.len, GFP_KERNEL); if (!data) return 0; call->a_args.lock.oh.data = (u8 *) data; } memcpy(call->a_args.lock.oh.data, lock->oh.data, lock->oh.len); return 1; } static void nlmsvc_freegrantargs(struct nlm_rqst *call) { if (call->a_args.lock.oh.data != call->a_owner) kfree(call->a_args.lock.oh.data); locks_release_private(&call->a_args.lock.fl); } /* * Deferred lock request handling for non-blocking lock */ static __be32 nlmsvc_defer_lock_rqst(struct svc_rqst *rqstp, struct nlm_block *block) { __be32 status = nlm_lck_denied_nolocks; block->b_flags |= B_QUEUED; nlmsvc_insert_block(block, NLM_TIMEOUT); block->b_cache_req = &rqstp->rq_chandle; if (rqstp->rq_chandle.defer) { block->b_deferred_req = rqstp->rq_chandle.defer(block->b_cache_req); if (block->b_deferred_req != NULL) status = nlm_drop_reply; } dprintk("lockd: nlmsvc_defer_lock_rqst block %p flags %d status %d\n", block, block->b_flags, ntohl(status)); return status; } /* * Attempt to establish a lock, and if it can't be granted, block it * if required. 
*/ __be32 nlmsvc_lock(struct svc_rqst *rqstp, struct nlm_file *file, struct nlm_host *host, struct nlm_lock *lock, int wait, struct nlm_cookie *cookie, int reclaim) { struct nlm_block *block = NULL; int error; __be32 ret; dprintk("lockd: nlmsvc_lock(%s/%ld, ty=%d, pi=%d, %Ld-%Ld, bl=%d)\n", file_inode(file->f_file)->i_sb->s_id, file_inode(file->f_file)->i_ino, lock->fl.fl_type, lock->fl.fl_pid, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end, wait); /* Lock file against concurrent access */ mutex_lock(&file->f_mutex); /* Get existing block (in case client is busy-waiting) * or create new block */ block = nlmsvc_lookup_block(file, lock); if (block == NULL) { block = nlmsvc_create_block(rqstp, host, file, lock, cookie); ret = nlm_lck_denied_nolocks; if (block == NULL) goto out; lock = &block->b_call->a_args.lock; } else lock->fl.fl_flags &= ~FL_SLEEP; if (block->b_flags & B_QUEUED) { dprintk("lockd: nlmsvc_lock deferred block %p flags %d\n", block, block->b_flags); if (block->b_granted) { nlmsvc_unlink_block(block); ret = nlm_granted; goto out; } if (block->b_flags & B_TIMED_OUT) { nlmsvc_unlink_block(block); ret = nlm_lck_denied; goto out; } ret = nlm_drop_reply; goto out; } if (locks_in_grace(SVC_NET(rqstp)) && !reclaim) { ret = nlm_lck_denied_grace_period; goto out; } if (reclaim && !locks_in_grace(SVC_NET(rqstp))) { ret = nlm_lck_denied_grace_period; goto out; } if (!wait) lock->fl.fl_flags &= ~FL_SLEEP; error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL); lock->fl.fl_flags &= ~FL_SLEEP; dprintk("lockd: vfs_lock_file returned %d\n", error); switch (error) { case 0: ret = nlm_granted; goto out; case -EAGAIN: /* * If this is a blocking request for an * already pending lock request then we need * to put it back on lockd's block list */ if (wait) break; ret = nlm_lck_denied; goto out; case FILE_LOCK_DEFERRED: if (wait) break; /* Filesystem lock operation is in progress Add it to the queue waiting for callback */ ret = nlmsvc_defer_lock_rqst(rqstp, 
block); goto out; case -EDEADLK: ret = nlm_deadlock; goto out; default: /* includes ENOLCK */ ret = nlm_lck_denied_nolocks; goto out; } ret = nlm_lck_blocked; /* Append to list of blocked */ nlmsvc_insert_block(block, NLM_NEVER); out: mutex_unlock(&file->f_mutex); nlmsvc_release_block(block); dprintk("lockd: nlmsvc_lock returned %u\n", ret); return ret; } /* * Test for presence of a conflicting lock. */ __be32 nlmsvc_testlock(struct svc_rqst *rqstp, struct nlm_file *file, struct nlm_host *host, struct nlm_lock *lock, struct nlm_lock *conflock, struct nlm_cookie *cookie) { struct nlm_block *block = NULL; int error; __be32 ret; dprintk("lockd: nlmsvc_testlock(%s/%ld, ty=%d, %Ld-%Ld)\n", file_inode(file->f_file)->i_sb->s_id, file_inode(file->f_file)->i_ino, lock->fl.fl_type, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end); /* Get existing block (in case client is busy-waiting) */ block = nlmsvc_lookup_block(file, lock); if (block == NULL) { struct file_lock *conf = kzalloc(sizeof(*conf), GFP_KERNEL); if (conf == NULL) return nlm_granted; block = nlmsvc_create_block(rqstp, host, file, lock, cookie); if (block == NULL) { kfree(conf); return nlm_granted; } block->b_fl = conf; } if (block->b_flags & B_QUEUED) { dprintk("lockd: nlmsvc_testlock deferred block %p flags %d fl %p\n", block, block->b_flags, block->b_fl); if (block->b_flags & B_TIMED_OUT) { nlmsvc_unlink_block(block); ret = nlm_lck_denied; goto out; } if (block->b_flags & B_GOT_CALLBACK) { nlmsvc_unlink_block(block); if (block->b_fl != NULL && block->b_fl->fl_type != F_UNLCK) { lock->fl = *block->b_fl; goto conf_lock; } else { ret = nlm_granted; goto out; } } ret = nlm_drop_reply; goto out; } if (locks_in_grace(SVC_NET(rqstp))) { ret = nlm_lck_denied_grace_period; goto out; } error = vfs_test_lock(file->f_file, &lock->fl); if (error == FILE_LOCK_DEFERRED) { ret = nlmsvc_defer_lock_rqst(rqstp, block); goto out; } if (error) { ret = nlm_lck_denied_nolocks; goto out; } if (lock->fl.fl_type == F_UNLCK) { 
ret = nlm_granted; goto out; } conf_lock: dprintk("lockd: conflicting lock(ty=%d, %Ld-%Ld)\n", lock->fl.fl_type, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end); conflock->caller = "somehost"; /* FIXME */ conflock->len = strlen(conflock->caller); conflock->oh.len = 0; /* don't return OH info */ conflock->svid = lock->fl.fl_pid; conflock->fl.fl_type = lock->fl.fl_type; conflock->fl.fl_start = lock->fl.fl_start; conflock->fl.fl_end = lock->fl.fl_end; ret = nlm_lck_denied; out: if (block) nlmsvc_release_block(block); return ret; } /* * Remove a lock. * This implies a CANCEL call: We send a GRANT_MSG, the client replies * with a GRANT_RES call which gets lost, and calls UNLOCK immediately * afterwards. In this case the block will still be there, and hence * must be removed. */ __be32 nlmsvc_unlock(struct net *net, struct nlm_file *file, struct nlm_lock *lock) { int error; dprintk("lockd: nlmsvc_unlock(%s/%ld, pi=%d, %Ld-%Ld)\n", file_inode(file->f_file)->i_sb->s_id, file_inode(file->f_file)->i_ino, lock->fl.fl_pid, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end); /* First, cancel any lock that might be there */ nlmsvc_cancel_blocked(net, file, lock); lock->fl.fl_type = F_UNLCK; error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL); return (error < 0)? nlm_lck_denied_nolocks : nlm_granted; } /* * Cancel a previously blocked request. * * A cancel request always overrides any grant that may currently * be in progress. * The calling procedure must check whether the file can be closed. 
*/ __be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *file, struct nlm_lock *lock) { struct nlm_block *block; int status = 0; dprintk("lockd: nlmsvc_cancel(%s/%ld, pi=%d, %Ld-%Ld)\n", file_inode(file->f_file)->i_sb->s_id, file_inode(file->f_file)->i_ino, lock->fl.fl_pid, (long long)lock->fl.fl_start, (long long)lock->fl.fl_end); if (locks_in_grace(net)) return nlm_lck_denied_grace_period; mutex_lock(&file->f_mutex); block = nlmsvc_lookup_block(file, lock); mutex_unlock(&file->f_mutex); if (block != NULL) { vfs_cancel_lock(block->b_file->f_file, &block->b_call->a_args.lock.fl); status = nlmsvc_unlink_block(block); nlmsvc_release_block(block); } return status ? nlm_lck_denied : nlm_granted; } /* * This is a callback from the filesystem for VFS file lock requests. * It will be used if lm_grant is defined and the filesystem can not * respond to the request immediately. * For GETLK request it will copy the reply to the nlm_block. * For SETLK or SETLKW request it will get the local posix lock. * In all cases it will move the block to the head of nlm_blocked q where * nlmsvc_retry_blocked() can send back a reply for SETLKW or revisit the * deferred rpc for GETLK and SETLK. 
*/ static void nlmsvc_update_deferred_block(struct nlm_block *block, struct file_lock *conf, int result) { block->b_flags |= B_GOT_CALLBACK; if (result == 0) block->b_granted = 1; else block->b_flags |= B_TIMED_OUT; if (conf) { if (block->b_fl) __locks_copy_lock(block->b_fl, conf); } } static int nlmsvc_grant_deferred(struct file_lock *fl, struct file_lock *conf, int result) { struct nlm_block *block; int rc = -ENOENT; spin_lock(&nlm_blocked_lock); list_for_each_entry(block, &nlm_blocked, b_list) { if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) { dprintk("lockd: nlmsvc_notify_blocked block %p flags %d\n", block, block->b_flags); if (block->b_flags & B_QUEUED) { if (block->b_flags & B_TIMED_OUT) { rc = -ENOLCK; break; } nlmsvc_update_deferred_block(block, conf, result); } else if (result == 0) block->b_granted = 1; nlmsvc_insert_block_locked(block, 0); svc_wake_up(block->b_daemon); rc = 0; break; } } spin_unlock(&nlm_blocked_lock); if (rc == -ENOENT) printk(KERN_WARNING "lockd: grant for unknown block\n"); return rc; } /* * Unblock a blocked lock request. This is a callback invoked from the * VFS layer when a lock on which we blocked is removed. * * This function doesn't grant the blocked lock instantly, but rather moves * the block to the head of nlm_blocked where it can be picked up by lockd. 
*/ static void nlmsvc_notify_blocked(struct file_lock *fl) { struct nlm_block *block; dprintk("lockd: VFS unblock notification for block %p\n", fl); spin_lock(&nlm_blocked_lock); list_for_each_entry(block, &nlm_blocked, b_list) { if (nlm_compare_locks(&block->b_call->a_args.lock.fl, fl)) { nlmsvc_insert_block_locked(block, 0); spin_unlock(&nlm_blocked_lock); svc_wake_up(block->b_daemon); return; } } spin_unlock(&nlm_blocked_lock); printk(KERN_WARNING "lockd: notification for unknown block!\n"); } static int nlmsvc_same_owner(struct file_lock *fl1, struct file_lock *fl2) { return fl1->fl_owner == fl2->fl_owner && fl1->fl_pid == fl2->fl_pid; } const struct lock_manager_operations nlmsvc_lock_operations = { .lm_compare_owner = nlmsvc_same_owner, .lm_notify = nlmsvc_notify_blocked, .lm_grant = nlmsvc_grant_deferred, }; /* * Try to claim a lock that was previously blocked. * * Note that we use both the RPC_GRANTED_MSG call _and_ an async * RPC thread when notifying the client. This seems like overkill... * Here's why: * - we don't want to use a synchronous RPC thread, otherwise * we might find ourselves hanging on a dead portmapper. * - Some lockd implementations (e.g. HP) don't react to * RPC_GRANTED calls; they seem to insist on RPC_GRANTED_MSG calls. */ static void nlmsvc_grant_blocked(struct nlm_block *block) { struct nlm_file *file = block->b_file; struct nlm_lock *lock = &block->b_call->a_args.lock; int error; loff_t fl_start, fl_end; dprintk("lockd: grant blocked lock %p\n", block); kref_get(&block->b_count); /* Unlink block request from list */ nlmsvc_unlink_block(block); /* If b_granted is true this means we've been here before. 
* Just retry the grant callback, possibly refreshing the RPC * binding */ if (block->b_granted) { nlm_rebind_host(block->b_host); goto callback; } /* Try the lock operation again */ /* vfs_lock_file() can mangle fl_start and fl_end, but we need * them unchanged for the GRANT_MSG */ lock->fl.fl_flags |= FL_SLEEP; fl_start = lock->fl.fl_start; fl_end = lock->fl.fl_end; error = vfs_lock_file(file->f_file, F_SETLK, &lock->fl, NULL); lock->fl.fl_flags &= ~FL_SLEEP; lock->fl.fl_start = fl_start; lock->fl.fl_end = fl_end; switch (error) { case 0: break; case FILE_LOCK_DEFERRED: dprintk("lockd: lock still blocked error %d\n", error); nlmsvc_insert_block(block, NLM_NEVER); nlmsvc_release_block(block); return; default: printk(KERN_WARNING "lockd: unexpected error %d in %s!\n", -error, __func__); nlmsvc_insert_block(block, 10 * HZ); nlmsvc_release_block(block); return; } callback: /* Lock was granted by VFS. */ dprintk("lockd: GRANTing blocked lock.\n"); block->b_granted = 1; /* keep block on the list, but don't reattempt until the RPC * completes or the submission fails */ nlmsvc_insert_block(block, NLM_NEVER); /* Call the client -- use a soft RPC task since nlmsvc_retry_blocked * will queue up a new one if this one times out */ error = nlm_async_call(block->b_call, NLMPROC_GRANTED_MSG, &nlmsvc_grant_ops); /* RPC submission failed, wait a bit and retry */ if (error < 0) nlmsvc_insert_block(block, 10 * HZ); } /* * This is the callback from the RPC layer when the NLM_GRANTED_MSG * RPC call has succeeded or timed out. * Like all RPC callbacks, it is invoked by the rpciod process, so it * better not sleep. Therefore, we put the blocked lock on the nlm_blocked * chain once more in order to have it removed by lockd itself (which can * then sleep on the file semaphore without disrupting e.g. the nfs client). 
*/ static void nlmsvc_grant_callback(struct rpc_task *task, void *data) { struct nlm_rqst *call = data; struct nlm_block *block = call->a_block; unsigned long timeout; dprintk("lockd: GRANT_MSG RPC callback\n"); spin_lock(&nlm_blocked_lock); /* if the block is not on a list at this point then it has * been invalidated. Don't try to requeue it. * * FIXME: it's possible that the block is removed from the list * after this check but before the nlmsvc_insert_block. In that * case it will be added back. Perhaps we need better locking * for nlm_blocked? */ if (list_empty(&block->b_list)) goto out; /* Technically, we should down the file semaphore here. Since we * move the block towards the head of the queue only, no harm * can be done, though. */ if (task->tk_status < 0) { /* RPC error: Re-insert for retransmission */ timeout = 10 * HZ; } else { /* Call was successful, now wait for client callback */ timeout = 60 * HZ; } nlmsvc_insert_block_locked(block, timeout); svc_wake_up(block->b_daemon); out: spin_unlock(&nlm_blocked_lock); } /* * FIXME: nlmsvc_release_block() grabs a mutex. This is not allowed for an * .rpc_release rpc_call_op */ static void nlmsvc_grant_release(void *data) { struct nlm_rqst *call = data; nlmsvc_release_block(call->a_block); } static const struct rpc_call_ops nlmsvc_grant_ops = { .rpc_call_done = nlmsvc_grant_callback, .rpc_release = nlmsvc_grant_release, }; /* * We received a GRANT_RES callback. Try to find the corresponding * block. */ void nlmsvc_grant_reply(struct nlm_cookie *cookie, __be32 status) { struct nlm_block *block; dprintk("grant_reply: looking for cookie %x, s=%d \n", *(unsigned int *)(cookie->data), status); if (!(block = nlmsvc_find_block(cookie))) return; if (block) { if (status == nlm_lck_denied_grace_period) { /* Try again in a couple of seconds */ nlmsvc_insert_block(block, 10 * HZ); } else { /* Lock is now held by client, or has been rejected. * In both cases, the block should be removed. 
*/ nlmsvc_unlink_block(block); } } nlmsvc_release_block(block); } /* Helper function to handle retry of a deferred block. * If it is a blocking lock, call grant_blocked. * For a non-blocking lock or test lock, revisit the request. */ static void retry_deferred_block(struct nlm_block *block) { if (!(block->b_flags & B_GOT_CALLBACK)) block->b_flags |= B_TIMED_OUT; nlmsvc_insert_block(block, NLM_TIMEOUT); dprintk("revisit block %p flags %d\n", block, block->b_flags); if (block->b_deferred_req) { block->b_deferred_req->revisit(block->b_deferred_req, 0); block->b_deferred_req = NULL; } } /* * Retry all blocked locks that have been notified. This is where lockd * picks up locks that can be granted, or grant notifications that must * be retransmitted. */ unsigned long nlmsvc_retry_blocked(void) { unsigned long timeout = MAX_SCHEDULE_TIMEOUT; struct nlm_block *block; spin_lock(&nlm_blocked_lock); while (!list_empty(&nlm_blocked) && !kthread_should_stop()) { block = list_entry(nlm_blocked.next, struct nlm_block, b_list); if (block->b_when == NLM_NEVER) break; if (time_after(block->b_when, jiffies)) { timeout = block->b_when - jiffies; break; } spin_unlock(&nlm_blocked_lock); dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n", block, block->b_when); if (block->b_flags & B_QUEUED) { dprintk("nlmsvc_retry_blocked delete block (%p, granted=%d, flags=%d)\n", block, block->b_granted, block->b_flags); retry_deferred_block(block); } else nlmsvc_grant_blocked(block); spin_lock(&nlm_blocked_lock); } spin_unlock(&nlm_blocked_lock); return timeout; }
gpl-2.0