repo_name
string
path
string
copies
string
size
string
content
string
license
string
semdoc/kernel_htc_msm8960
arch/arm/plat-mxc/devices/platform-fec.c
5558
2034
/* * Copyright (C) 2010 Pengutronix * Uwe Kleine-Koenig <u.kleine-koenig@pengutronix.de> * * This program is free software; you can redistribute it and/or modify it under * the terms of the GNU General Public License version 2 as published by the * Free Software Foundation. */ #include <linux/dma-mapping.h> #include <asm/sizes.h> #include <mach/hardware.h> #include <mach/devices-common.h> #define imx_fec_data_entry_single(soc, _devid) \ { \ .devid = _devid, \ .iobase = soc ## _FEC_BASE_ADDR, \ .irq = soc ## _INT_FEC, \ } #ifdef CONFIG_SOC_IMX25 const struct imx_fec_data imx25_fec_data __initconst = imx_fec_data_entry_single(MX25, "imx25-fec"); #endif /* ifdef CONFIG_SOC_IMX25 */ #ifdef CONFIG_SOC_IMX27 const struct imx_fec_data imx27_fec_data __initconst = imx_fec_data_entry_single(MX27, "imx27-fec"); #endif /* ifdef CONFIG_SOC_IMX27 */ #ifdef CONFIG_SOC_IMX35 /* i.mx35 has the i.mx27 type fec */ const struct imx_fec_data imx35_fec_data __initconst = imx_fec_data_entry_single(MX35, "imx27-fec"); #endif #ifdef CONFIG_SOC_IMX50 /* i.mx50 has the i.mx25 type fec */ const struct imx_fec_data imx50_fec_data __initconst = imx_fec_data_entry_single(MX50, "imx25-fec"); #endif #ifdef CONFIG_SOC_IMX51 /* i.mx51 has the i.mx27 type fec */ const struct imx_fec_data imx51_fec_data __initconst = imx_fec_data_entry_single(MX51, "imx27-fec"); #endif #ifdef CONFIG_SOC_IMX53 /* i.mx53 has the i.mx25 type fec */ const struct imx_fec_data imx53_fec_data __initconst = imx_fec_data_entry_single(MX53, "imx25-fec"); #endif struct platform_device *__init imx_add_fec( const struct imx_fec_data *data, const struct fec_platform_data *pdata) { struct resource res[] = { { .start = data->iobase, .end = data->iobase + SZ_4K - 1, .flags = IORESOURCE_MEM, }, { .start = data->irq, .end = data->irq, .flags = IORESOURCE_IRQ, }, }; return imx_add_platform_device_dmamask(data->devid, 0, res, ARRAY_SIZE(res), pdata, sizeof(*pdata), DMA_BIT_MASK(32)); }
gpl-2.0
EPDCenter/android_kernel_rikomagic_mk808
fs/quota/quota_v2.c
8118
10350
/* * vfsv0 quota IO operations on file */ #include <linux/errno.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/dqblk_v2.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/quotaops.h> #include <asm/byteorder.h> #include "quota_tree.h" #include "quotaio_v2.h" MODULE_AUTHOR("Jan Kara"); MODULE_DESCRIPTION("Quota format v2 support"); MODULE_LICENSE("GPL"); #define __QUOTA_V2_PARANOIA static void v2r0_mem2diskdqb(void *dp, struct dquot *dquot); static void v2r0_disk2memdqb(struct dquot *dquot, void *dp); static int v2r0_is_id(void *dp, struct dquot *dquot); static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot); static void v2r1_disk2memdqb(struct dquot *dquot, void *dp); static int v2r1_is_id(void *dp, struct dquot *dquot); static struct qtree_fmt_operations v2r0_qtree_ops = { .mem2disk_dqblk = v2r0_mem2diskdqb, .disk2mem_dqblk = v2r0_disk2memdqb, .is_id = v2r0_is_id, }; static struct qtree_fmt_operations v2r1_qtree_ops = { .mem2disk_dqblk = v2r1_mem2diskdqb, .disk2mem_dqblk = v2r1_disk2memdqb, .is_id = v2r1_is_id, }; #define QUOTABLOCK_BITS 10 #define QUOTABLOCK_SIZE (1 << QUOTABLOCK_BITS) static inline qsize_t v2_stoqb(qsize_t space) { return (space + QUOTABLOCK_SIZE - 1) >> QUOTABLOCK_BITS; } static inline qsize_t v2_qbtos(qsize_t blocks) { return blocks << QUOTABLOCK_BITS; } static int v2_read_header(struct super_block *sb, int type, struct v2_disk_dqheader *dqhead) { ssize_t size; size = sb->s_op->quota_read(sb, type, (char *)dqhead, sizeof(struct v2_disk_dqheader), 0); if (size != sizeof(struct v2_disk_dqheader)) { quota_error(sb, "Failed header read: expected=%zd got=%zd", sizeof(struct v2_disk_dqheader), size); return 0; } return 1; } /* Check whether given file is really vfsv0 quotafile */ static int v2_check_quota_file(struct super_block *sb, int type) { struct v2_disk_dqheader dqhead; static const uint quota_magics[] = V2_INITQMAGICS; static const uint 
quota_versions[] = V2_INITQVERSIONS; if (!v2_read_header(sb, type, &dqhead)) return 0; if (le32_to_cpu(dqhead.dqh_magic) != quota_magics[type] || le32_to_cpu(dqhead.dqh_version) > quota_versions[type]) return 0; return 1; } /* Read information header from quota file */ static int v2_read_file_info(struct super_block *sb, int type) { struct v2_disk_dqinfo dinfo; struct v2_disk_dqheader dqhead; struct mem_dqinfo *info = sb_dqinfo(sb, type); struct qtree_mem_dqinfo *qinfo; ssize_t size; unsigned int version; if (!v2_read_header(sb, type, &dqhead)) return -1; version = le32_to_cpu(dqhead.dqh_version); if ((info->dqi_fmt_id == QFMT_VFS_V0 && version != 0) || (info->dqi_fmt_id == QFMT_VFS_V1 && version != 1)) return -1; size = sb->s_op->quota_read(sb, type, (char *)&dinfo, sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); if (size != sizeof(struct v2_disk_dqinfo)) { quota_error(sb, "Can't read info structure"); return -1; } info->dqi_priv = kmalloc(sizeof(struct qtree_mem_dqinfo), GFP_NOFS); if (!info->dqi_priv) { printk(KERN_WARNING "Not enough memory for quota information structure.\n"); return -ENOMEM; } qinfo = info->dqi_priv; if (version == 0) { /* limits are stored as unsigned 32-bit data */ info->dqi_maxblimit = 0xffffffff; info->dqi_maxilimit = 0xffffffff; } else { /* used space is stored as unsigned 64-bit value */ info->dqi_maxblimit = 0xffffffffffffffffULL; /* 2^64-1 */ info->dqi_maxilimit = 0xffffffffffffffffULL; } info->dqi_bgrace = le32_to_cpu(dinfo.dqi_bgrace); info->dqi_igrace = le32_to_cpu(dinfo.dqi_igrace); info->dqi_flags = le32_to_cpu(dinfo.dqi_flags); qinfo->dqi_sb = sb; qinfo->dqi_type = type; qinfo->dqi_blocks = le32_to_cpu(dinfo.dqi_blocks); qinfo->dqi_free_blk = le32_to_cpu(dinfo.dqi_free_blk); qinfo->dqi_free_entry = le32_to_cpu(dinfo.dqi_free_entry); qinfo->dqi_blocksize_bits = V2_DQBLKSIZE_BITS; qinfo->dqi_usable_bs = 1 << V2_DQBLKSIZE_BITS; qinfo->dqi_qtree_depth = qtree_depth(qinfo); if (version == 0) { qinfo->dqi_entry_size = sizeof(struct 
v2r0_disk_dqblk); qinfo->dqi_ops = &v2r0_qtree_ops; } else { qinfo->dqi_entry_size = sizeof(struct v2r1_disk_dqblk); qinfo->dqi_ops = &v2r1_qtree_ops; } return 0; } /* Write information header to quota file */ static int v2_write_file_info(struct super_block *sb, int type) { struct v2_disk_dqinfo dinfo; struct mem_dqinfo *info = sb_dqinfo(sb, type); struct qtree_mem_dqinfo *qinfo = info->dqi_priv; ssize_t size; spin_lock(&dq_data_lock); info->dqi_flags &= ~DQF_INFO_DIRTY; dinfo.dqi_bgrace = cpu_to_le32(info->dqi_bgrace); dinfo.dqi_igrace = cpu_to_le32(info->dqi_igrace); dinfo.dqi_flags = cpu_to_le32(info->dqi_flags & DQF_MASK); spin_unlock(&dq_data_lock); dinfo.dqi_blocks = cpu_to_le32(qinfo->dqi_blocks); dinfo.dqi_free_blk = cpu_to_le32(qinfo->dqi_free_blk); dinfo.dqi_free_entry = cpu_to_le32(qinfo->dqi_free_entry); size = sb->s_op->quota_write(sb, type, (char *)&dinfo, sizeof(struct v2_disk_dqinfo), V2_DQINFOOFF); if (size != sizeof(struct v2_disk_dqinfo)) { quota_error(sb, "Can't write info structure"); return -1; } return 0; } static void v2r0_disk2memdqb(struct dquot *dquot, void *dp) { struct v2r0_disk_dqblk *d = dp, empty; struct mem_dqblk *m = &dquot->dq_dqb; m->dqb_ihardlimit = le32_to_cpu(d->dqb_ihardlimit); m->dqb_isoftlimit = le32_to_cpu(d->dqb_isoftlimit); m->dqb_curinodes = le32_to_cpu(d->dqb_curinodes); m->dqb_itime = le64_to_cpu(d->dqb_itime); m->dqb_bhardlimit = v2_qbtos(le32_to_cpu(d->dqb_bhardlimit)); m->dqb_bsoftlimit = v2_qbtos(le32_to_cpu(d->dqb_bsoftlimit)); m->dqb_curspace = le64_to_cpu(d->dqb_curspace); m->dqb_btime = le64_to_cpu(d->dqb_btime); /* We need to escape back all-zero structure */ memset(&empty, 0, sizeof(struct v2r0_disk_dqblk)); empty.dqb_itime = cpu_to_le64(1); if (!memcmp(&empty, dp, sizeof(struct v2r0_disk_dqblk))) m->dqb_itime = 0; } static void v2r0_mem2diskdqb(void *dp, struct dquot *dquot) { struct v2r0_disk_dqblk *d = dp; struct mem_dqblk *m = &dquot->dq_dqb; struct qtree_mem_dqinfo *info = sb_dqinfo(dquot->dq_sb, 
dquot->dq_type)->dqi_priv; d->dqb_ihardlimit = cpu_to_le32(m->dqb_ihardlimit); d->dqb_isoftlimit = cpu_to_le32(m->dqb_isoftlimit); d->dqb_curinodes = cpu_to_le32(m->dqb_curinodes); d->dqb_itime = cpu_to_le64(m->dqb_itime); d->dqb_bhardlimit = cpu_to_le32(v2_stoqb(m->dqb_bhardlimit)); d->dqb_bsoftlimit = cpu_to_le32(v2_stoqb(m->dqb_bsoftlimit)); d->dqb_curspace = cpu_to_le64(m->dqb_curspace); d->dqb_btime = cpu_to_le64(m->dqb_btime); d->dqb_id = cpu_to_le32(dquot->dq_id); if (qtree_entry_unused(info, dp)) d->dqb_itime = cpu_to_le64(1); } static int v2r0_is_id(void *dp, struct dquot *dquot) { struct v2r0_disk_dqblk *d = dp; struct qtree_mem_dqinfo *info = sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; if (qtree_entry_unused(info, dp)) return 0; return le32_to_cpu(d->dqb_id) == dquot->dq_id; } static void v2r1_disk2memdqb(struct dquot *dquot, void *dp) { struct v2r1_disk_dqblk *d = dp, empty; struct mem_dqblk *m = &dquot->dq_dqb; m->dqb_ihardlimit = le64_to_cpu(d->dqb_ihardlimit); m->dqb_isoftlimit = le64_to_cpu(d->dqb_isoftlimit); m->dqb_curinodes = le64_to_cpu(d->dqb_curinodes); m->dqb_itime = le64_to_cpu(d->dqb_itime); m->dqb_bhardlimit = v2_qbtos(le64_to_cpu(d->dqb_bhardlimit)); m->dqb_bsoftlimit = v2_qbtos(le64_to_cpu(d->dqb_bsoftlimit)); m->dqb_curspace = le64_to_cpu(d->dqb_curspace); m->dqb_btime = le64_to_cpu(d->dqb_btime); /* We need to escape back all-zero structure */ memset(&empty, 0, sizeof(struct v2r1_disk_dqblk)); empty.dqb_itime = cpu_to_le64(1); if (!memcmp(&empty, dp, sizeof(struct v2r1_disk_dqblk))) m->dqb_itime = 0; } static void v2r1_mem2diskdqb(void *dp, struct dquot *dquot) { struct v2r1_disk_dqblk *d = dp; struct mem_dqblk *m = &dquot->dq_dqb; struct qtree_mem_dqinfo *info = sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; d->dqb_ihardlimit = cpu_to_le64(m->dqb_ihardlimit); d->dqb_isoftlimit = cpu_to_le64(m->dqb_isoftlimit); d->dqb_curinodes = cpu_to_le64(m->dqb_curinodes); d->dqb_itime = cpu_to_le64(m->dqb_itime); d->dqb_bhardlimit = 
cpu_to_le64(v2_stoqb(m->dqb_bhardlimit)); d->dqb_bsoftlimit = cpu_to_le64(v2_stoqb(m->dqb_bsoftlimit)); d->dqb_curspace = cpu_to_le64(m->dqb_curspace); d->dqb_btime = cpu_to_le64(m->dqb_btime); d->dqb_id = cpu_to_le32(dquot->dq_id); if (qtree_entry_unused(info, dp)) d->dqb_itime = cpu_to_le64(1); } static int v2r1_is_id(void *dp, struct dquot *dquot) { struct v2r1_disk_dqblk *d = dp; struct qtree_mem_dqinfo *info = sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv; if (qtree_entry_unused(info, dp)) return 0; return le32_to_cpu(d->dqb_id) == dquot->dq_id; } static int v2_read_dquot(struct dquot *dquot) { return qtree_read_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); } static int v2_write_dquot(struct dquot *dquot) { return qtree_write_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); } static int v2_release_dquot(struct dquot *dquot) { return qtree_release_dquot(sb_dqinfo(dquot->dq_sb, dquot->dq_type)->dqi_priv, dquot); } static int v2_free_file_info(struct super_block *sb, int type) { kfree(sb_dqinfo(sb, type)->dqi_priv); return 0; } static const struct quota_format_ops v2_format_ops = { .check_quota_file = v2_check_quota_file, .read_file_info = v2_read_file_info, .write_file_info = v2_write_file_info, .free_file_info = v2_free_file_info, .read_dqblk = v2_read_dquot, .commit_dqblk = v2_write_dquot, .release_dqblk = v2_release_dquot, }; static struct quota_format_type v2r0_quota_format = { .qf_fmt_id = QFMT_VFS_V0, .qf_ops = &v2_format_ops, .qf_owner = THIS_MODULE }; static struct quota_format_type v2r1_quota_format = { .qf_fmt_id = QFMT_VFS_V1, .qf_ops = &v2_format_ops, .qf_owner = THIS_MODULE }; static int __init init_v2_quota_format(void) { int ret; ret = register_quota_format(&v2r0_quota_format); if (ret) return ret; return register_quota_format(&v2r1_quota_format); } static void __exit exit_v2_quota_format(void) { unregister_quota_format(&v2r0_quota_format); unregister_quota_format(&v2r1_quota_format); } 
module_init(init_v2_quota_format); module_exit(exit_v2_quota_format);
gpl-2.0
nok07635/UnleaZhed_XTZ
arch/ia64/uv/kernel/setup.c
13238
3403
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * SGI UV Core Functions * * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved. */ #include <linux/module.h> #include <linux/percpu.h> #include <asm/sn/simulator.h> #include <asm/uv/uv_mmrs.h> #include <asm/uv/uv_hub.h> DEFINE_PER_CPU(struct uv_hub_info_s, __uv_hub_info); EXPORT_PER_CPU_SYMBOL_GPL(__uv_hub_info); #ifdef CONFIG_IA64_SGI_UV int sn_prom_type; long sn_partition_id; EXPORT_SYMBOL(sn_partition_id); long sn_coherency_id; EXPORT_SYMBOL_GPL(sn_coherency_id); long sn_region_size; EXPORT_SYMBOL(sn_region_size); #endif struct redir_addr { unsigned long redirect; unsigned long alias; }; #define DEST_SHIFT UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR_DEST_BASE_SHFT static __initdata struct redir_addr redir_addrs[] = { {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_0_MMR, UVH_SI_ALIAS0_OVERLAY_CONFIG}, {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_1_MMR, UVH_SI_ALIAS1_OVERLAY_CONFIG}, {UVH_RH_GAM_ALIAS210_REDIRECT_CONFIG_2_MMR, UVH_SI_ALIAS2_OVERLAY_CONFIG}, }; static __init void get_lowmem_redirect(unsigned long *base, unsigned long *size) { union uvh_si_alias0_overlay_config_u alias; union uvh_rh_gam_alias210_redirect_config_2_mmr_u redirect; int i; for (i = 0; i < ARRAY_SIZE(redir_addrs); i++) { alias.v = uv_read_local_mmr(redir_addrs[i].alias); if (alias.s.base == 0) { *size = (1UL << alias.s.m_alias); redirect.v = uv_read_local_mmr(redir_addrs[i].redirect); *base = (unsigned long)redirect.s.dest_base << DEST_SHIFT; return; } } BUG(); } void __init uv_setup(char **cmdline_p) { union uvh_si_addr_map_config_u m_n_config; union uvh_node_id_u node_id; unsigned long gnode_upper; int nid, cpu, m_val, n_val; unsigned long mmr_base, lowmem_redir_base, lowmem_redir_size; if (IS_MEDUSA()) { lowmem_redir_base = 0; lowmem_redir_size = 0; node_id.v = 0; m_n_config.s.m_skt = 37; m_n_config.s.n_skt = 0; mmr_base = 
0; #if 0 /* Need BIOS calls - TDB */ if (!ia64_sn_is_fake_prom()) sn_prom_type = 1; else #endif sn_prom_type = 2; printk(KERN_INFO "Running on medusa with %s PROM\n", (sn_prom_type == 1) ? "real" : "fake"); } else { get_lowmem_redirect(&lowmem_redir_base, &lowmem_redir_size); node_id.v = uv_read_local_mmr(UVH_NODE_ID); m_n_config.v = uv_read_local_mmr(UVH_SI_ADDR_MAP_CONFIG); mmr_base = uv_read_local_mmr(UVH_RH_GAM_MMR_OVERLAY_CONFIG_MMR) & ~UV_MMR_ENABLE; } m_val = m_n_config.s.m_skt; n_val = m_n_config.s.n_skt; printk(KERN_DEBUG "UV: global MMR base 0x%lx\n", mmr_base); gnode_upper = (((unsigned long)node_id.s.node_id) & ~((1 << n_val) - 1)) << m_val; for_each_present_cpu(cpu) { nid = cpu_to_node(cpu); uv_cpu_hub_info(cpu)->lowmem_remap_base = lowmem_redir_base; uv_cpu_hub_info(cpu)->lowmem_remap_top = lowmem_redir_base + lowmem_redir_size; uv_cpu_hub_info(cpu)->m_val = m_val; uv_cpu_hub_info(cpu)->n_val = n_val; uv_cpu_hub_info(cpu)->pnode_mask = (1 << n_val) -1; uv_cpu_hub_info(cpu)->gpa_mask = (1 << (m_val + n_val)) - 1; uv_cpu_hub_info(cpu)->gnode_upper = gnode_upper; uv_cpu_hub_info(cpu)->global_mmr_base = mmr_base; uv_cpu_hub_info(cpu)->coherency_domain_number = 0;/* ZZZ */ printk(KERN_DEBUG "UV cpu %d, nid %d\n", cpu, nid); } }
gpl-2.0
trader418/android_kernel_samsung_hlte_N
arch/score/lib/ashldi3.c
13750
1254
/* * arch/score/lib/ashldi3.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see the file COPYING, or write * to the Free Software Foundation, Inc., * 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/module.h> #include "libgcc.h" long long __ashldi3(long long u, word_type b) { DWunion uu, w; word_type bm; if (b == 0) return u; uu.ll = u; bm = 32 - b; if (bm <= 0) { w.s.low = 0; w.s.high = (unsigned int) uu.s.low << -bm; } else { const unsigned int carries = (unsigned int) uu.s.low >> bm; w.s.low = (unsigned int) uu.s.low << b; w.s.high = ((unsigned int) uu.s.high << b) | carries; } return w.ll; } EXPORT_SYMBOL(__ashldi3);
gpl-2.0
Chad0989/recovery-kernel
arch/m68knommu/platform/coldfire/timers.c
183
5208
/***************************************************************************/ /* * timers.c -- generic ColdFire hardware timer support. * * Copyright (C) 1999-2008, Greg Ungerer <gerg@snapgear.com> */ /***************************************************************************/ #include <linux/kernel.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/profile.h> #include <linux/clocksource.h> #include <asm/io.h> #include <asm/traps.h> #include <asm/machdep.h> #include <asm/coldfire.h> #include <asm/mcftimer.h> #include <asm/mcfsim.h> /***************************************************************************/ /* * By default use timer1 as the system clock timer. */ #define FREQ (MCF_BUSCLK / 16) #define TA(a) (MCF_MBAR + MCFTIMER_BASE1 + (a)) /* * Default the timer and vector to use for ColdFire. Some ColdFire * CPU's and some boards may want different. Their sub-architecture * startup code (in config.c) can change these if they want. */ unsigned int mcf_timervector = 29; unsigned int mcf_profilevector = 31; unsigned int mcf_timerlevel = 5; /* * These provide the underlying interrupt vector support. * Unfortunately it is a little different on each ColdFire. 
*/ extern void mcf_settimericr(int timer, int level); void coldfire_profile_init(void); #if defined(CONFIG_M532x) #define __raw_readtrr __raw_readl #define __raw_writetrr __raw_writel #else #define __raw_readtrr __raw_readw #define __raw_writetrr __raw_writew #endif static u32 mcftmr_cycles_per_jiffy; static u32 mcftmr_cnt; /***************************************************************************/ static irqreturn_t mcftmr_tick(int irq, void *dummy) { /* Reset the ColdFire timer */ __raw_writeb(MCFTIMER_TER_CAP | MCFTIMER_TER_REF, TA(MCFTIMER_TER)); mcftmr_cnt += mcftmr_cycles_per_jiffy; return arch_timer_interrupt(irq, dummy); } /***************************************************************************/ static struct irqaction mcftmr_timer_irq = { .name = "timer", .flags = IRQF_DISABLED | IRQF_TIMER, .handler = mcftmr_tick, }; /***************************************************************************/ static cycle_t mcftmr_read_clk(void) { unsigned long flags; u32 cycles; u16 tcn; local_irq_save(flags); tcn = __raw_readw(TA(MCFTIMER_TCN)); cycles = mcftmr_cnt; local_irq_restore(flags); return cycles + tcn; } /***************************************************************************/ static struct clocksource mcftmr_clk = { .name = "tmr", .rating = 250, .read = mcftmr_read_clk, .shift = 20, .mask = CLOCKSOURCE_MASK(32), .flags = CLOCK_SOURCE_IS_CONTINUOUS, }; /***************************************************************************/ void hw_timer_init(void) { setup_irq(mcf_timervector, &mcftmr_timer_irq); __raw_writew(MCFTIMER_TMR_DISABLE, TA(MCFTIMER_TMR)); mcftmr_cycles_per_jiffy = FREQ / HZ; /* * The coldfire timer runs from 0 to TRR included, then 0 * again and so on. It counts thus actually TRR + 1 steps * for 1 tick, not TRR. So if you want n cycles, * initialize TRR with n - 1. 
*/ __raw_writetrr(mcftmr_cycles_per_jiffy - 1, TA(MCFTIMER_TRR)); __raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 | MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, TA(MCFTIMER_TMR)); mcftmr_clk.mult = clocksource_hz2mult(FREQ, mcftmr_clk.shift); clocksource_register(&mcftmr_clk); mcf_settimericr(1, mcf_timerlevel); #ifdef CONFIG_HIGHPROFILE coldfire_profile_init(); #endif } /***************************************************************************/ #ifdef CONFIG_HIGHPROFILE /***************************************************************************/ /* * By default use timer2 as the profiler clock timer. */ #define PA(a) (MCF_MBAR + MCFTIMER_BASE2 + (a)) /* * Choose a reasonably fast profile timer. Make it an odd value to * try and get good coverage of kernel operations. */ #define PROFILEHZ 1013 /* * Use the other timer to provide high accuracy profiling info. */ irqreturn_t coldfire_profile_tick(int irq, void *dummy) { /* Reset ColdFire timer2 */ __raw_writeb(MCFTIMER_TER_CAP | MCFTIMER_TER_REF, PA(MCFTIMER_TER)); if (current->pid) profile_tick(CPU_PROFILING); return IRQ_HANDLED; } /***************************************************************************/ static struct irqaction coldfire_profile_irq = { .name = "profile timer", .flags = IRQF_DISABLED | IRQF_TIMER, .handler = coldfire_profile_tick, }; void coldfire_profile_init(void) { printk(KERN_INFO "PROFILE: lodging TIMER2 @ %dHz as profile timer\n", PROFILEHZ); setup_irq(mcf_profilevector, &coldfire_profile_irq); /* Set up TIMER 2 as high speed profile clock */ __raw_writew(MCFTIMER_TMR_DISABLE, PA(MCFTIMER_TMR)); __raw_writetrr(((MCF_BUSCLK / 16) / PROFILEHZ), PA(MCFTIMER_TRR)); __raw_writew(MCFTIMER_TMR_ENORI | MCFTIMER_TMR_CLK16 | MCFTIMER_TMR_RESTART | MCFTIMER_TMR_ENABLE, PA(MCFTIMER_TMR)); mcf_settimericr(2, 7); } /***************************************************************************/ #endif /* CONFIG_HIGHPROFILE */ 
/***************************************************************************/
gpl-2.0
Andorreta/android_kernel_google_msm
arch/arm/mach-msm/qdsp6v2/ultrasound/usfcdev.c
1207
6832
/* Copyright (c) 2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/sched.h> #include <linux/slab.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/init.h> #include "usfcdev.h" struct usfcdev_event { bool (*match_cb)(uint16_t, struct input_dev *dev); bool registered_event; bool filter; }; static struct usfcdev_event s_usfcdev_events[MAX_EVENT_TYPE_NUM]; static bool usfcdev_filter(struct input_handle *handle, unsigned int type, unsigned int code, int value); static bool usfcdev_match(struct input_handler *handler, struct input_dev *dev); static int usfcdev_connect(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id); static void usfcdev_disconnect(struct input_handle *handle); static const struct input_device_id usfc_tsc_ids[] = { { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_ABSBIT, .evbit = { BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY) }, .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) }, /* assumption: ABS_X & ABS_Y are in the same long */ .absbit = { [BIT_WORD(ABS_X)] = BIT_MASK(ABS_X) | BIT_MASK(ABS_Y) }, }, { .flags = INPUT_DEVICE_ID_MATCH_EVBIT | INPUT_DEVICE_ID_MATCH_KEYBIT | INPUT_DEVICE_ID_MATCH_ABSBIT, .evbit = { BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY) }, .keybit = { [BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH) }, /* assumption: MT_.._X & MT_.._Y are in the same long */ .absbit = { [BIT_WORD(ABS_MT_POSITION_X)] = BIT_MASK(ABS_MT_POSITION_X) | BIT_MASK(ABS_MT_POSITION_Y) }, }, { } /* Terminating 
entry */ }; MODULE_DEVICE_TABLE(input, usfc_tsc_ids); static struct input_handler s_usfc_handlers[MAX_EVENT_TYPE_NUM] = { { /* TSC handler */ .filter = usfcdev_filter, .match = usfcdev_match, .connect = usfcdev_connect, .disconnect = usfcdev_disconnect, /* .minor can be used as index in the container, */ /* because .fops isn't supported */ .minor = TSC_EVENT_TYPE_IND, .name = "usfc_tsc_handler", .id_table = usfc_tsc_ids, }, }; /* For each event type, one conflicting device (and handle) is supported */ static struct input_handle s_usfc_handles[MAX_EVENT_TYPE_NUM] = { { /* TSC handle */ .handler = &s_usfc_handlers[TSC_EVENT_TYPE_IND], .name = "usfc_tsc_handle", }, }; static bool usfcdev_match(struct input_handler *handler, struct input_dev *dev) { bool rc = false; int ind = handler->minor; pr_debug("%s: name=[%s]; ind=%d\n", __func__, dev->name, ind); if (s_usfcdev_events[ind].registered_event && s_usfcdev_events[ind].match_cb) { rc = (*s_usfcdev_events[ind].match_cb)((uint16_t)ind, dev); pr_debug("%s: [%s]; rc=%d\n", __func__, dev->name, rc); } return rc; } static int usfcdev_connect(struct input_handler *handler, struct input_dev *dev, const struct input_device_id *id) { int ret = 0; uint16_t ind = handler->minor; s_usfc_handles[ind].dev = dev; ret = input_register_handle(&s_usfc_handles[ind]); if (ret) { pr_err("%s: input_register_handle[%d] failed: ret=%d\n", __func__, ind, ret); } else { ret = input_open_device(&s_usfc_handles[ind]); if (ret) { pr_err("%s: input_open_device[%d] failed: ret=%d\n", __func__, ind, ret); input_unregister_handle(&s_usfc_handles[ind]); } else pr_debug("%s: device[%d] is opened\n", __func__, ind); } return ret; } static void usfcdev_disconnect(struct input_handle *handle) { input_unregister_handle(handle); pr_debug("%s: handle[%d] is disconnect\n", __func__, handle->handler->minor); } static bool usfcdev_filter(struct input_handle *handle, unsigned int type, unsigned int code, int value) { uint16_t ind = 
(uint16_t)handle->handler->minor; pr_debug("%s: event_type=%d; filter=%d; abs_xy=%ld; abs_y_mt[]=%ld\n", __func__, ind, s_usfcdev_events[ind].filter, usfc_tsc_ids[0].absbit[0], usfc_tsc_ids[1].absbit[1]); return s_usfcdev_events[ind].filter; } bool usfcdev_register( uint16_t event_type_ind, bool (*match_cb)(uint16_t, struct input_dev *dev)) { int ret = 0; bool rc = false; if ((event_type_ind >= MAX_EVENT_TYPE_NUM) || !match_cb) { pr_err("%s: wrong input: event_type_ind=%d; match_cb=0x%p\n", __func__, event_type_ind, match_cb); return false; } if (s_usfcdev_events[event_type_ind].registered_event) { pr_info("%s: handler[%d] was already registered\n", __func__, event_type_ind); return true; } s_usfcdev_events[event_type_ind].registered_event = true; s_usfcdev_events[event_type_ind].match_cb = match_cb; s_usfcdev_events[event_type_ind].filter = false; ret = input_register_handler(&s_usfc_handlers[event_type_ind]); if (!ret) { rc = true; pr_debug("%s: handler[%d] was registered\n", __func__, event_type_ind); } else { s_usfcdev_events[event_type_ind].registered_event = false; s_usfcdev_events[event_type_ind].match_cb = NULL; pr_err("%s: handler[%d] registration failed: ret=%d\n", __func__, event_type_ind, ret); } return rc; } void usfcdev_unregister(uint16_t event_type_ind) { if (event_type_ind >= MAX_EVENT_TYPE_NUM) { pr_err("%s: wrong input: event_type_ind=%d\n", __func__, event_type_ind); return; } if (s_usfcdev_events[event_type_ind].registered_event) { input_unregister_handler(&s_usfc_handlers[event_type_ind]); pr_debug("%s: handler[%d] was unregistered\n", __func__, event_type_ind); s_usfcdev_events[event_type_ind].registered_event = false; s_usfcdev_events[event_type_ind].match_cb = NULL; s_usfcdev_events[event_type_ind].filter = false; } } bool usfcdev_set_filter(uint16_t event_type_ind, bool filter) { bool rc = true; if (event_type_ind >= MAX_EVENT_TYPE_NUM) { pr_err("%s: wrong input: event_type_ind=%d\n", __func__, event_type_ind); return false; } if 
(s_usfcdev_events[event_type_ind].registered_event) { s_usfcdev_events[event_type_ind].filter = filter; pr_debug("%s: event_type[%d]; filter=%d\n", __func__, event_type_ind, filter ); } else { pr_err("%s: event_type[%d] isn't registered\n", __func__, event_type_ind); rc = false; } return rc; } static int __init usfcdev_init(void) { return 0; } device_initcall(usfcdev_init); MODULE_DESCRIPTION("Handle of events from devices, conflicting with USF");
gpl-2.0
RenderBroken/OP3-LOS-kernel
arch/xtensa/kernel/xtensa_ksyms.c
1719
3104
/* * arch/xtensa/kernel/xtensa_ksyms.c * * Export Xtensa-specific functions for loadable modules. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2001 - 2005 Tensilica Inc. * * Joe Taylor <joe@tensilica.com> */ #include <linux/module.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/interrupt.h> #include <asm/irq.h> #include <linux/in6.h> #include <asm/uaccess.h> #include <asm/cacheflush.h> #include <asm/checksum.h> #include <asm/dma.h> #include <asm/io.h> #include <asm/page.h> #include <asm/pgalloc.h> #include <asm/ftrace.h> #ifdef CONFIG_BLK_DEV_FD #include <asm/floppy.h> #endif #ifdef CONFIG_NET #include <net/checksum.h> #endif /* CONFIG_NET */ /* * String functions */ EXPORT_SYMBOL(memset); EXPORT_SYMBOL(memcpy); EXPORT_SYMBOL(memmove); EXPORT_SYMBOL(__strncpy_user); EXPORT_SYMBOL(clear_page); EXPORT_SYMBOL(copy_page); EXPORT_SYMBOL(empty_zero_page); /* * gcc internal math functions */ extern long long __ashrdi3(long long, int); extern long long __ashldi3(long long, int); extern long long __lshrdi3(long long, int); extern int __divsi3(int, int); extern int __modsi3(int, int); extern long long __muldi3(long long, long long); extern int __mulsi3(int, int); extern unsigned int __udivsi3(unsigned int, unsigned int); extern unsigned int __umodsi3(unsigned int, unsigned int); extern unsigned long long __umoddi3(unsigned long long, unsigned long long); extern unsigned long long __udivdi3(unsigned long long, unsigned long long); extern int __ucmpdi2(int, int); EXPORT_SYMBOL(__ashldi3); EXPORT_SYMBOL(__ashrdi3); EXPORT_SYMBOL(__lshrdi3); EXPORT_SYMBOL(__divsi3); EXPORT_SYMBOL(__modsi3); EXPORT_SYMBOL(__muldi3); EXPORT_SYMBOL(__mulsi3); EXPORT_SYMBOL(__udivsi3); EXPORT_SYMBOL(__umodsi3); EXPORT_SYMBOL(__udivdi3); EXPORT_SYMBOL(__umoddi3); EXPORT_SYMBOL(__ucmpdi2); void __xtensa_libgcc_window_spill(void) { BUG(); } 
EXPORT_SYMBOL(__xtensa_libgcc_window_spill); unsigned long __sync_fetch_and_and_4(unsigned long *p, unsigned long v) { BUG(); } EXPORT_SYMBOL(__sync_fetch_and_and_4); unsigned long __sync_fetch_and_or_4(unsigned long *p, unsigned long v) { BUG(); } EXPORT_SYMBOL(__sync_fetch_and_or_4); #ifdef CONFIG_NET /* * Networking support */ EXPORT_SYMBOL(csum_partial); EXPORT_SYMBOL(csum_partial_copy_generic); #endif /* CONFIG_NET */ /* * Architecture-specific symbols */ EXPORT_SYMBOL(__xtensa_copy_user); EXPORT_SYMBOL(__invalidate_icache_range); /* * Kernel hacking ... */ #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) // FIXME EXPORT_SYMBOL(screen_info); #endif EXPORT_SYMBOL(outsb); EXPORT_SYMBOL(outsw); EXPORT_SYMBOL(outsl); EXPORT_SYMBOL(insb); EXPORT_SYMBOL(insw); EXPORT_SYMBOL(insl); extern long common_exception_return; EXPORT_SYMBOL(common_exception_return); #ifdef CONFIG_FUNCTION_TRACER EXPORT_SYMBOL(_mcount); #endif EXPORT_SYMBOL(__invalidate_dcache_range); #if XCHAL_DCACHE_IS_WRITEBACK EXPORT_SYMBOL(__flush_dcache_range); #endif
gpl-2.0
SerenityS/android_kernel_samsung_a8elte
drivers/gpu/drm/exynos/exynos_drm_core.c
2231
5299
/* exynos_drm_core.c * * Copyright (c) 2011 Samsung Electronics Co., Ltd. * Author: * Inki Dae <inki.dae@samsung.com> * Joonyoung Shim <jy0922.shim@samsung.com> * Seung-Woo Kim <sw0312.kim@samsung.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <drm/drmP.h> #include "exynos_drm_drv.h" #include "exynos_drm_encoder.h" #include "exynos_drm_connector.h" #include "exynos_drm_fbdev.h" static LIST_HEAD(exynos_drm_subdrv_list); static int exynos_drm_create_enc_conn(struct drm_device *dev, struct exynos_drm_subdrv *subdrv) { struct drm_encoder *encoder; struct drm_connector *connector; int ret; DRM_DEBUG_DRIVER("%s\n", __FILE__); subdrv->manager->dev = subdrv->dev; /* create and initialize a encoder for this sub driver. */ encoder = exynos_drm_encoder_create(dev, subdrv->manager, (1 << MAX_CRTC) - 1); if (!encoder) { DRM_ERROR("failed to create encoder\n"); return -EFAULT; } /* * create and initialize a connector for this sub driver and * attach the encoder created above to the connector. 
*/ connector = exynos_drm_connector_create(dev, encoder); if (!connector) { DRM_ERROR("failed to create connector\n"); ret = -EFAULT; goto err_destroy_encoder; } subdrv->encoder = encoder; subdrv->connector = connector; return 0; err_destroy_encoder: encoder->funcs->destroy(encoder); return ret; } static void exynos_drm_destroy_enc_conn(struct exynos_drm_subdrv *subdrv) { if (subdrv->encoder) { struct drm_encoder *encoder = subdrv->encoder; encoder->funcs->destroy(encoder); subdrv->encoder = NULL; } if (subdrv->connector) { struct drm_connector *connector = subdrv->connector; connector->funcs->destroy(connector); subdrv->connector = NULL; } } static int exynos_drm_subdrv_probe(struct drm_device *dev, struct exynos_drm_subdrv *subdrv) { if (subdrv->probe) { int ret; subdrv->drm_dev = dev; /* * this probe callback would be called by sub driver * after setting of all resources to this sub driver, * such as clock, irq and register map are done or by load() * of exynos drm driver. * * P.S. note that this driver is considered for modularization. */ ret = subdrv->probe(dev, subdrv->dev); if (ret) return ret; } return 0; } static void exynos_drm_subdrv_remove(struct drm_device *dev, struct exynos_drm_subdrv *subdrv) { DRM_DEBUG_DRIVER("%s\n", __FILE__); if (subdrv->remove) subdrv->remove(dev, subdrv->dev); } int exynos_drm_device_register(struct drm_device *dev) { struct exynos_drm_subdrv *subdrv, *n; unsigned int fine_cnt = 0; int err; DRM_DEBUG_DRIVER("%s\n", __FILE__); if (!dev) return -EINVAL; list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) { err = exynos_drm_subdrv_probe(dev, subdrv); if (err) { DRM_DEBUG("exynos drm subdrv probe failed.\n"); list_del(&subdrv->list); continue; } /* * if manager is null then it means that this sub driver * doesn't need encoder and connector. 
*/ if (!subdrv->manager) { fine_cnt++; continue; } err = exynos_drm_create_enc_conn(dev, subdrv); if (err) { DRM_DEBUG("failed to create encoder and connector.\n"); exynos_drm_subdrv_remove(dev, subdrv); list_del(&subdrv->list); continue; } fine_cnt++; } if (!fine_cnt) return -EINVAL; return 0; } EXPORT_SYMBOL_GPL(exynos_drm_device_register); int exynos_drm_device_unregister(struct drm_device *dev) { struct exynos_drm_subdrv *subdrv; DRM_DEBUG_DRIVER("%s\n", __FILE__); if (!dev) { WARN(1, "Unexpected drm device unregister!\n"); return -EINVAL; } list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) { exynos_drm_subdrv_remove(dev, subdrv); exynos_drm_destroy_enc_conn(subdrv); } return 0; } EXPORT_SYMBOL_GPL(exynos_drm_device_unregister); int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv) { DRM_DEBUG_DRIVER("%s\n", __FILE__); if (!subdrv) return -EINVAL; list_add_tail(&subdrv->list, &exynos_drm_subdrv_list); return 0; } EXPORT_SYMBOL_GPL(exynos_drm_subdrv_register); int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv) { DRM_DEBUG_DRIVER("%s\n", __FILE__); if (!subdrv) return -EINVAL; list_del(&subdrv->list); return 0; } EXPORT_SYMBOL_GPL(exynos_drm_subdrv_unregister); int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file) { struct exynos_drm_subdrv *subdrv; int ret; list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) { if (subdrv->open) { ret = subdrv->open(dev, subdrv->dev, file); if (ret) goto err; } } return 0; err: list_for_each_entry_reverse(subdrv, &subdrv->list, list) { if (subdrv->close) subdrv->close(dev, subdrv->dev, file); } return ret; } EXPORT_SYMBOL_GPL(exynos_drm_subdrv_open); void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file) { struct exynos_drm_subdrv *subdrv; list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) { if (subdrv->close) subdrv->close(dev, subdrv->dev, file); } } EXPORT_SYMBOL_GPL(exynos_drm_subdrv_close);
gpl-2.0
chadouming/android_kernel_htc_msm8994
drivers/bcma/host_soc.c
2999
3683
/* * Broadcom specific AMBA * System on Chip (SoC) Host * * Licensed under the GNU/GPL. See COPYING for details. */ #include "bcma_private.h" #include "scan.h" #include <linux/bcma/bcma.h> #include <linux/bcma/bcma_soc.h> static u8 bcma_host_soc_read8(struct bcma_device *core, u16 offset) { return readb(core->io_addr + offset); } static u16 bcma_host_soc_read16(struct bcma_device *core, u16 offset) { return readw(core->io_addr + offset); } static u32 bcma_host_soc_read32(struct bcma_device *core, u16 offset) { return readl(core->io_addr + offset); } static void bcma_host_soc_write8(struct bcma_device *core, u16 offset, u8 value) { writeb(value, core->io_addr + offset); } static void bcma_host_soc_write16(struct bcma_device *core, u16 offset, u16 value) { writew(value, core->io_addr + offset); } static void bcma_host_soc_write32(struct bcma_device *core, u16 offset, u32 value) { writel(value, core->io_addr + offset); } #ifdef CONFIG_BCMA_BLOCKIO static void bcma_host_soc_block_read(struct bcma_device *core, void *buffer, size_t count, u16 offset, u8 reg_width) { void __iomem *addr = core->io_addr + offset; switch (reg_width) { case sizeof(u8): { u8 *buf = buffer; while (count) { *buf = __raw_readb(addr); buf++; count--; } break; } case sizeof(u16): { __le16 *buf = buffer; WARN_ON(count & 1); while (count) { *buf = (__force __le16)__raw_readw(addr); buf++; count -= 2; } break; } case sizeof(u32): { __le32 *buf = buffer; WARN_ON(count & 3); while (count) { *buf = (__force __le32)__raw_readl(addr); buf++; count -= 4; } break; } default: WARN_ON(1); } } static void bcma_host_soc_block_write(struct bcma_device *core, const void *buffer, size_t count, u16 offset, u8 reg_width) { void __iomem *addr = core->io_addr + offset; switch (reg_width) { case sizeof(u8): { const u8 *buf = buffer; while (count) { __raw_writeb(*buf, addr); buf++; count--; } break; } case sizeof(u16): { const __le16 *buf = buffer; WARN_ON(count & 1); while (count) { __raw_writew((__force u16)(*buf), 
addr); buf++; count -= 2; } break; } case sizeof(u32): { const __le32 *buf = buffer; WARN_ON(count & 3); while (count) { __raw_writel((__force u32)(*buf), addr); buf++; count -= 4; } break; } default: WARN_ON(1); } } #endif /* CONFIG_BCMA_BLOCKIO */ static u32 bcma_host_soc_aread32(struct bcma_device *core, u16 offset) { return readl(core->io_wrap + offset); } static void bcma_host_soc_awrite32(struct bcma_device *core, u16 offset, u32 value) { writel(value, core->io_wrap + offset); } static const struct bcma_host_ops bcma_host_soc_ops = { .read8 = bcma_host_soc_read8, .read16 = bcma_host_soc_read16, .read32 = bcma_host_soc_read32, .write8 = bcma_host_soc_write8, .write16 = bcma_host_soc_write16, .write32 = bcma_host_soc_write32, #ifdef CONFIG_BCMA_BLOCKIO .block_read = bcma_host_soc_block_read, .block_write = bcma_host_soc_block_write, #endif .aread32 = bcma_host_soc_aread32, .awrite32 = bcma_host_soc_awrite32, }; int __init bcma_host_soc_register(struct bcma_soc *soc) { struct bcma_bus *bus = &soc->bus; int err; /* iomap only first core. We have to read some register on this core * to scan the bus. */ bus->mmio = ioremap_nocache(BCMA_ADDR_BASE, BCMA_CORE_SIZE * 1); if (!bus->mmio) return -ENOMEM; /* Host specific */ bus->hosttype = BCMA_HOSTTYPE_SOC; bus->ops = &bcma_host_soc_ops; /* Register */ err = bcma_bus_early_register(bus, &soc->core_cc, &soc->core_mips); if (err) iounmap(bus->mmio); return err; }
gpl-2.0
teto/mptcp-old
arch/sparc/kernel/pmc.c
2999
2078
/* pmc - Driver implementation for power management functions * of Power Management Controller (PMC) on SPARCstation-Voyager. * * Copyright (c) 2002 Eric Brower (ebrower@usa.net) */ #include <linux/kernel.h> #include <linux/fs.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/pm.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/module.h> #include <asm/io.h> #include <asm/oplib.h> #include <asm/uaccess.h> #include <asm/auxio.h> #include <asm/processor.h> /* Debug * * #define PMC_DEBUG_LED * #define PMC_NO_IDLE */ #define PMC_OBPNAME "SUNW,pmc" #define PMC_DEVNAME "pmc" #define PMC_IDLE_REG 0x00 #define PMC_IDLE_ON 0x01 static u8 __iomem *regs; #define pmc_readb(offs) (sbus_readb(regs+offs)) #define pmc_writeb(val, offs) (sbus_writeb(val, regs+offs)) /* * CPU idle callback function * See .../arch/sparc/kernel/process.c */ static void pmc_swift_idle(void) { #ifdef PMC_DEBUG_LED set_auxio(0x00, AUXIO_LED); #endif pmc_writeb(pmc_readb(PMC_IDLE_REG) | PMC_IDLE_ON, PMC_IDLE_REG); #ifdef PMC_DEBUG_LED set_auxio(AUXIO_LED, 0x00); #endif } static int pmc_probe(struct platform_device *op) { regs = of_ioremap(&op->resource[0], 0, resource_size(&op->resource[0]), PMC_OBPNAME); if (!regs) { printk(KERN_ERR "%s: unable to map registers\n", PMC_DEVNAME); return -ENODEV; } #ifndef PMC_NO_IDLE /* Assign power management IDLE handler */ sparc_idle = pmc_swift_idle; #endif printk(KERN_INFO "%s: power management initialized\n", PMC_DEVNAME); return 0; } static struct of_device_id pmc_match[] = { { .name = PMC_OBPNAME, }, {}, }; MODULE_DEVICE_TABLE(of, pmc_match); static struct platform_driver pmc_driver = { .driver = { .name = "pmc", .owner = THIS_MODULE, .of_match_table = pmc_match, }, .probe = pmc_probe, }; static int __init pmc_init(void) { return platform_driver_register(&pmc_driver); } /* This driver is not critical to the boot process * and is easiest to ioremap when SBus is already * initialized, so we install ourselves thusly: */ 
__initcall(pmc_init);
gpl-2.0
htc-msm8660/android_kernel_htc_msm8660
drivers/staging/comedi/drivers/daqboard2000.c
3255
27245
/* comedi/drivers/daqboard2000.c hardware driver for IOtech DAQboard/2000 COMEDI - Linux Control and Measurement Device Interface Copyright (C) 1999 Anders Blomdell <anders.blomdell@control.lth.se> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ /* Driver: daqboard2000 Description: IOTech DAQBoard/2000 Author: Anders Blomdell <anders.blomdell@control.lth.se> Status: works Updated: Mon, 14 Apr 2008 15:28:52 +0100 Devices: [IOTech] DAQBoard/2000 (daqboard2000) Much of the functionality of this driver was determined from reading the source code for the Windows driver. The FPGA on the board requires initialization code, which can be loaded by comedi_config using the -i option. The initialization code is available from http://www.comedi.org in the comedi_nonfree_firmware tarball. Configuration options: [0] - PCI bus of device (optional) [1] - PCI slot of device (optional) If bus/slot is not specified, the first supported PCI device found will be used. */ /* This card was obviously never intended to leave the Windows world, since it lacked all kind of hardware documentation (except for cable pinouts, plug and pray has something to catch up with yet). With some help from our swedish distributor, we got the Windows sourcecode for the card, and here are the findings so far. 1. 
A good document that describes the PCI interface chip is 9080db-106.pdf available from http://www.plxtech.com/products/io/pci9080 2. The initialization done so far is: a. program the FPGA (windows code sans a lot of error messages) b. 3. Analog out seems to work OK with DAC's disabled, if DAC's are enabled, you have to output values to all enabled DAC's until result appears, I guess that it has something to do with pacer clocks, but the source gives me no clues. I'll keep it simple so far. 4. Analog in. Each channel in the scanlist seems to be controlled by four control words: Word0: +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ! | | | ! | | | ! | | | ! | | | ! +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ Word1: +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ! | | | ! | | | ! | | | ! | | | ! +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | | | | | | +------+------+ | | | | +-- Digital input (??) | | | | +---- 10 us settling time | | | +------ Suspend acquisition (last to scan) | | +-------- Simultaneous sample and hold | +---------- Signed data format +------------------------- Correction offset low Word2: +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ! | | | ! | | | ! | | | ! | | | ! +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | | | | | | | | | +-----+ +--+--+ +++ +++ +--+--+ | | | | +----- Expansion channel | | | +----------- Expansion gain | | +--------------- Channel (low) | +--------------------- Correction offset high +----------------------------- Correction gain low Word3: +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ ! | | | ! | | | ! | | | ! | | | ! +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ | | | | | | | | | +------+------+ | | +-+-+ | | +-- Low bank enable | | | | | +---- High bank enable | | | | +------ Hi/low select | | | +---------- Gain (1,?,2,4,8,16,32,64) | | +-------------- differential/single ended | +---------------- Unipolar +------------------------- Correction gain high 999. The card seems to have an incredible amount of capabilities, but trying to reverse engineer them from the Windows source is beyond my patience. 
*/ #include "../comedidev.h" #include <linux/delay.h> #include <linux/interrupt.h> #include "comedi_pci.h" #include "8255.h" #define DAQBOARD2000_SUBSYSTEM_IDS2 0x00021616 /* Daqboard/2000 - 2 Dacs */ #define DAQBOARD2000_SUBSYSTEM_IDS4 0x00041616 /* Daqboard/2000 - 4 Dacs */ #define DAQBOARD2000_DAQ_SIZE 0x1002 #define DAQBOARD2000_PLX_SIZE 0x100 /* Initialization bits for the Serial EEPROM Control Register */ #define DAQBOARD2000_SECRProgPinHi 0x8001767e #define DAQBOARD2000_SECRProgPinLo 0x8000767e #define DAQBOARD2000_SECRLocalBusHi 0xc000767e #define DAQBOARD2000_SECRLocalBusLo 0x8000767e #define DAQBOARD2000_SECRReloadHi 0xa000767e #define DAQBOARD2000_SECRReloadLo 0x8000767e /* SECR status bits */ #define DAQBOARD2000_EEPROM_PRESENT 0x10000000 /* CPLD status bits */ #define DAQBOARD2000_CPLD_INIT 0x0002 #define DAQBOARD2000_CPLD_DONE 0x0004 /* Available ranges */ static const struct comedi_lrange range_daqboard2000_ai = { 13, { RANGE(-10, 10), RANGE(-5, 5), RANGE(-2.5, 2.5), RANGE(-1.25, 1.25), RANGE(-0.625, 0.625), RANGE(-0.3125, 0.3125), RANGE(-0.156, 0.156), RANGE(0, 10), RANGE(0, 5), RANGE(0, 2.5), RANGE(0, 1.25), RANGE(0, 0.625), RANGE(0, 0.3125) } }; static const struct comedi_lrange range_daqboard2000_ao = { 1, { RANGE(-10, 10) } }; struct daqboard2000_hw { volatile u16 acqControl; /* 0x00 */ volatile u16 acqScanListFIFO; /* 0x02 */ volatile u32 acqPacerClockDivLow; /* 0x04 */ volatile u16 acqScanCounter; /* 0x08 */ volatile u16 acqPacerClockDivHigh; /* 0x0a */ volatile u16 acqTriggerCount; /* 0x0c */ volatile u16 fill2; /* 0x0e */ volatile u16 acqResultsFIFO; /* 0x10 */ volatile u16 fill3; /* 0x12 */ volatile u16 acqResultsShadow; /* 0x14 */ volatile u16 fill4; /* 0x16 */ volatile u16 acqAdcResult; /* 0x18 */ volatile u16 fill5; /* 0x1a */ volatile u16 dacScanCounter; /* 0x1c */ volatile u16 fill6; /* 0x1e */ volatile u16 dacControl; /* 0x20 */ volatile u16 fill7; /* 0x22 */ volatile s16 dacFIFO; /* 0x24 */ volatile u16 fill8[2]; /* 0x26 */ volatile 
u16 dacPacerClockDiv; /* 0x2a */ volatile u16 refDacs; /* 0x2c */ volatile u16 fill9; /* 0x2e */ volatile u16 dioControl; /* 0x30 */ volatile s16 dioP3hsioData; /* 0x32 */ volatile u16 dioP3Control; /* 0x34 */ volatile u16 calEepromControl; /* 0x36 */ volatile s16 dacSetting[4]; /* 0x38 */ volatile s16 dioP2ExpansionIO8Bit[32]; /* 0x40 */ volatile u16 ctrTmrControl; /* 0x80 */ volatile u16 fill10[3]; /* 0x82 */ volatile s16 ctrInput[4]; /* 0x88 */ volatile u16 fill11[8]; /* 0x90 */ volatile u16 timerDivisor[2]; /* 0xa0 */ volatile u16 fill12[6]; /* 0xa4 */ volatile u16 dmaControl; /* 0xb0 */ volatile u16 trigControl; /* 0xb2 */ volatile u16 fill13[2]; /* 0xb4 */ volatile u16 calEeprom; /* 0xb8 */ volatile u16 acqDigitalMark; /* 0xba */ volatile u16 trigDacs; /* 0xbc */ volatile u16 fill14; /* 0xbe */ volatile s16 dioP2ExpansionIO16Bit[32]; /* 0xc0 */ }; /* Scan Sequencer programming */ #define DAQBOARD2000_SeqStartScanList 0x0011 #define DAQBOARD2000_SeqStopScanList 0x0010 /* Prepare for acquisition */ #define DAQBOARD2000_AcqResetScanListFifo 0x0004 #define DAQBOARD2000_AcqResetResultsFifo 0x0002 #define DAQBOARD2000_AcqResetConfigPipe 0x0001 /* Acqusition status bits */ #define DAQBOARD2000_AcqResultsFIFOMore1Sample 0x0001 #define DAQBOARD2000_AcqResultsFIFOHasValidData 0x0002 #define DAQBOARD2000_AcqResultsFIFOOverrun 0x0004 #define DAQBOARD2000_AcqLogicScanning 0x0008 #define DAQBOARD2000_AcqConfigPipeFull 0x0010 #define DAQBOARD2000_AcqScanListFIFOEmpty 0x0020 #define DAQBOARD2000_AcqAdcNotReady 0x0040 #define DAQBOARD2000_ArbitrationFailure 0x0080 #define DAQBOARD2000_AcqPacerOverrun 0x0100 #define DAQBOARD2000_DacPacerOverrun 0x0200 #define DAQBOARD2000_AcqHardwareError 0x01c0 /* Scan Sequencer programming */ #define DAQBOARD2000_SeqStartScanList 0x0011 #define DAQBOARD2000_SeqStopScanList 0x0010 /* Pacer Clock Control */ #define DAQBOARD2000_AdcPacerInternal 0x0030 #define DAQBOARD2000_AdcPacerExternal 0x0032 #define DAQBOARD2000_AdcPacerEnable 0x0031 
#define DAQBOARD2000_AdcPacerEnableDacPacer 0x0034 #define DAQBOARD2000_AdcPacerDisable 0x0030 #define DAQBOARD2000_AdcPacerNormalMode 0x0060 #define DAQBOARD2000_AdcPacerCompatibilityMode 0x0061 #define DAQBOARD2000_AdcPacerInternalOutEnable 0x0008 #define DAQBOARD2000_AdcPacerExternalRising 0x0100 /* DAC status */ #define DAQBOARD2000_DacFull 0x0001 #define DAQBOARD2000_RefBusy 0x0002 #define DAQBOARD2000_TrgBusy 0x0004 #define DAQBOARD2000_CalBusy 0x0008 #define DAQBOARD2000_Dac0Busy 0x0010 #define DAQBOARD2000_Dac1Busy 0x0020 #define DAQBOARD2000_Dac2Busy 0x0040 #define DAQBOARD2000_Dac3Busy 0x0080 /* DAC control */ #define DAQBOARD2000_Dac0Enable 0x0021 #define DAQBOARD2000_Dac1Enable 0x0031 #define DAQBOARD2000_Dac2Enable 0x0041 #define DAQBOARD2000_Dac3Enable 0x0051 #define DAQBOARD2000_DacEnableBit 0x0001 #define DAQBOARD2000_Dac0Disable 0x0020 #define DAQBOARD2000_Dac1Disable 0x0030 #define DAQBOARD2000_Dac2Disable 0x0040 #define DAQBOARD2000_Dac3Disable 0x0050 #define DAQBOARD2000_DacResetFifo 0x0004 #define DAQBOARD2000_DacPatternDisable 0x0060 #define DAQBOARD2000_DacPatternEnable 0x0061 #define DAQBOARD2000_DacSelectSignedData 0x0002 #define DAQBOARD2000_DacSelectUnsignedData 0x0000 /* Trigger Control */ #define DAQBOARD2000_TrigAnalog 0x0000 #define DAQBOARD2000_TrigTTL 0x0010 #define DAQBOARD2000_TrigTransHiLo 0x0004 #define DAQBOARD2000_TrigTransLoHi 0x0000 #define DAQBOARD2000_TrigAbove 0x0000 #define DAQBOARD2000_TrigBelow 0x0004 #define DAQBOARD2000_TrigLevelSense 0x0002 #define DAQBOARD2000_TrigEdgeSense 0x0000 #define DAQBOARD2000_TrigEnable 0x0001 #define DAQBOARD2000_TrigDisable 0x0000 /* Reference Dac Selection */ #define DAQBOARD2000_PosRefDacSelect 0x0100 #define DAQBOARD2000_NegRefDacSelect 0x0000 static int daqboard2000_attach(struct comedi_device *dev, struct comedi_devconfig *it); static int daqboard2000_detach(struct comedi_device *dev); static struct comedi_driver driver_daqboard2000 = { .driver_name = "daqboard2000", .module = 
THIS_MODULE, .attach = daqboard2000_attach, .detach = daqboard2000_detach, }; struct daq200_boardtype { const char *name; int id; }; static const struct daq200_boardtype boardtypes[] = { {"ids2", DAQBOARD2000_SUBSYSTEM_IDS2}, {"ids4", DAQBOARD2000_SUBSYSTEM_IDS4}, }; #define n_boardtypes (sizeof(boardtypes)/sizeof(struct daq200_boardtype)) #define this_board ((const struct daq200_boardtype *)dev->board_ptr) static DEFINE_PCI_DEVICE_TABLE(daqboard2000_pci_table) = { { 0x1616, 0x0409, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { 0} }; MODULE_DEVICE_TABLE(pci, daqboard2000_pci_table); struct daqboard2000_private { enum { card_daqboard_2000 } card; struct pci_dev *pci_dev; void *daq; void *plx; int got_regions; unsigned int ao_readback[2]; }; #define devpriv ((struct daqboard2000_private *)dev->private) static void writeAcqScanListEntry(struct comedi_device *dev, u16 entry) { struct daqboard2000_hw *fpga = devpriv->daq; /* udelay(4); */ fpga->acqScanListFIFO = entry & 0x00ff; /* udelay(4); */ fpga->acqScanListFIFO = (entry >> 8) & 0x00ff; } static void setup_sampling(struct comedi_device *dev, int chan, int gain) { u16 word0, word1, word2, word3; /* Channel 0-7 diff, channel 8-23 single ended */ word0 = 0; word1 = 0x0004; /* Last scan */ word2 = (chan << 6) & 0x00c0; switch (chan / 4) { case 0: word3 = 0x0001; break; case 1: word3 = 0x0002; break; case 2: word3 = 0x0005; break; case 3: word3 = 0x0006; break; case 4: word3 = 0x0041; break; case 5: word3 = 0x0042; break; default: word3 = 0; break; } /* dev->eeprom.correctionDACSE[i][j][k].offset = 0x800; dev->eeprom.correctionDACSE[i][j][k].gain = 0xc00; */ /* These should be read from EEPROM */ word2 |= 0x0800; word3 |= 0xc000; /* printk("%d %4.4x %4.4x %4.4x %4.4x\n", chan, word0, word1, word2, word3);*/ writeAcqScanListEntry(dev, word0); writeAcqScanListEntry(dev, word1); writeAcqScanListEntry(dev, word2); writeAcqScanListEntry(dev, word3); } static int daqboard2000_ai_insn_read(struct comedi_device *dev, struct 
comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i; struct daqboard2000_hw *fpga = devpriv->daq; int gain, chan, timeout; fpga->acqControl = DAQBOARD2000_AcqResetScanListFifo | DAQBOARD2000_AcqResetResultsFifo | DAQBOARD2000_AcqResetConfigPipe; /* If pacer clock is not set to some high value (> 10 us), we risk multiple samples to be put into the result FIFO. */ fpga->acqPacerClockDivLow = 1000000; /* 1 second, should be long enough */ fpga->acqPacerClockDivHigh = 0; gain = CR_RANGE(insn->chanspec); chan = CR_CHAN(insn->chanspec); /* This doesn't look efficient. I decided to take the conservative * approach when I did the insn conversion. Perhaps it would be * better to have broken it completely, then someone would have been * forced to fix it. --ds */ for (i = 0; i < insn->n; i++) { setup_sampling(dev, chan, gain); /* Enable reading from the scanlist FIFO */ fpga->acqControl = DAQBOARD2000_SeqStartScanList; for (timeout = 0; timeout < 20; timeout++) { if (fpga->acqControl & DAQBOARD2000_AcqConfigPipeFull) { break; } /* udelay(2); */ } fpga->acqControl = DAQBOARD2000_AdcPacerEnable; for (timeout = 0; timeout < 20; timeout++) { if (fpga->acqControl & DAQBOARD2000_AcqLogicScanning) { break; } /* udelay(2); */ } for (timeout = 0; timeout < 20; timeout++) { if (fpga->acqControl & DAQBOARD2000_AcqResultsFIFOHasValidData) { break; } /* udelay(2); */ } data[i] = fpga->acqResultsFIFO; fpga->acqControl = DAQBOARD2000_AdcPacerDisable; fpga->acqControl = DAQBOARD2000_SeqStopScanList; } return i; } static int daqboard2000_ao_insn_read(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i; int chan = CR_CHAN(insn->chanspec); for (i = 0; i < insn->n; i++) { data[i] = devpriv->ao_readback[chan]; } return i; } static int daqboard2000_ao_insn_write(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { int i; int chan = CR_CHAN(insn->chanspec); struct 
daqboard2000_hw *fpga = devpriv->daq; int timeout; for (i = 0; i < insn->n; i++) { /* * OK, since it works OK without enabling the DAC's, let's keep * it as simple as possible... */ /* fpga->dacControl = (chan + 2) * 0x0010 | 0x0001; udelay(1000); */ fpga->dacSetting[chan] = data[i]; for (timeout = 0; timeout < 20; timeout++) { if ((fpga->dacControl & ((chan + 1) * 0x0010)) == 0) { break; } /* udelay(2); */ } devpriv->ao_readback[chan] = data[i]; /* * Since we never enabled the DAC's, we don't need to disable it... * fpga->dacControl = (chan + 2) * 0x0010 | 0x0000; udelay(1000); */ } return i; } static void daqboard2000_resetLocalBus(struct comedi_device *dev) { printk("daqboard2000_resetLocalBus\n"); writel(DAQBOARD2000_SECRLocalBusHi, devpriv->plx + 0x6c); udelay(10000); writel(DAQBOARD2000_SECRLocalBusLo, devpriv->plx + 0x6c); udelay(10000); } static void daqboard2000_reloadPLX(struct comedi_device *dev) { printk("daqboard2000_reloadPLX\n"); writel(DAQBOARD2000_SECRReloadLo, devpriv->plx + 0x6c); udelay(10000); writel(DAQBOARD2000_SECRReloadHi, devpriv->plx + 0x6c); udelay(10000); writel(DAQBOARD2000_SECRReloadLo, devpriv->plx + 0x6c); udelay(10000); } static void daqboard2000_pulseProgPin(struct comedi_device *dev) { printk("daqboard2000_pulseProgPin 1\n"); writel(DAQBOARD2000_SECRProgPinHi, devpriv->plx + 0x6c); udelay(10000); writel(DAQBOARD2000_SECRProgPinLo, devpriv->plx + 0x6c); udelay(10000); /* Not in the original code, but I like symmetry... 
*/ } static int daqboard2000_pollCPLD(struct comedi_device *dev, int mask) { int result = 0; int i; int cpld; /* timeout after 50 tries -> 5ms */ for (i = 0; i < 50; i++) { cpld = readw(devpriv->daq + 0x1000); if ((cpld & mask) == mask) { result = 1; break; } udelay(100); } udelay(5); return result; } static int daqboard2000_writeCPLD(struct comedi_device *dev, int data) { int result = 0; udelay(10); writew(data, devpriv->daq + 0x1000); if ((readw(devpriv->daq + 0x1000) & DAQBOARD2000_CPLD_INIT) == DAQBOARD2000_CPLD_INIT) { result = 1; } return result; } static int initialize_daqboard2000(struct comedi_device *dev, unsigned char *cpld_array, int len) { int result = -EIO; /* Read the serial EEPROM control register */ int secr; int retry; int i; /* Check to make sure the serial eeprom is present on the board */ secr = readl(devpriv->plx + 0x6c); if (!(secr & DAQBOARD2000_EEPROM_PRESENT)) { #ifdef DEBUG_EEPROM printk("no serial eeprom\n"); #endif return -EIO; } for (retry = 0; retry < 3; retry++) { #ifdef DEBUG_EEPROM printk("Programming EEPROM try %x\n", retry); #endif daqboard2000_resetLocalBus(dev); daqboard2000_reloadPLX(dev); daqboard2000_pulseProgPin(dev); if (daqboard2000_pollCPLD(dev, DAQBOARD2000_CPLD_INIT)) { for (i = 0; i < len; i++) { if (cpld_array[i] == 0xff && cpld_array[i + 1] == 0x20) { #ifdef DEBUG_EEPROM printk("Preamble found at %d\n", i); #endif break; } } for (; i < len; i += 2) { int data = (cpld_array[i] << 8) + cpld_array[i + 1]; if (!daqboard2000_writeCPLD(dev, data)) { break; } } if (i >= len) { #ifdef DEBUG_EEPROM printk("Programmed\n"); #endif daqboard2000_resetLocalBus(dev); daqboard2000_reloadPLX(dev); result = 0; break; } } } return result; } static void daqboard2000_adcStopDmaTransfer(struct comedi_device *dev) { /* printk("Implement: daqboard2000_adcStopDmaTransfer\n");*/ } static void daqboard2000_adcDisarm(struct comedi_device *dev) { struct daqboard2000_hw *fpga = devpriv->daq; /* Disable hardware triggers */ udelay(2); 
fpga->trigControl = DAQBOARD2000_TrigAnalog | DAQBOARD2000_TrigDisable; udelay(2); fpga->trigControl = DAQBOARD2000_TrigTTL | DAQBOARD2000_TrigDisable; /* Stop the scan list FIFO from loading the configuration pipe */ udelay(2); fpga->acqControl = DAQBOARD2000_SeqStopScanList; /* Stop the pacer clock */ udelay(2); fpga->acqControl = DAQBOARD2000_AdcPacerDisable; /* Stop the input dma (abort channel 1) */ daqboard2000_adcStopDmaTransfer(dev); } static void daqboard2000_activateReferenceDacs(struct comedi_device *dev) { struct daqboard2000_hw *fpga = devpriv->daq; int timeout; /* Set the + reference dac value in the FPGA */ fpga->refDacs = 0x80 | DAQBOARD2000_PosRefDacSelect; for (timeout = 0; timeout < 20; timeout++) { if ((fpga->dacControl & DAQBOARD2000_RefBusy) == 0) { break; } udelay(2); } /* printk("DAQBOARD2000_PosRefDacSelect %d\n", timeout);*/ /* Set the - reference dac value in the FPGA */ fpga->refDacs = 0x80 | DAQBOARD2000_NegRefDacSelect; for (timeout = 0; timeout < 20; timeout++) { if ((fpga->dacControl & DAQBOARD2000_RefBusy) == 0) { break; } udelay(2); } /* printk("DAQBOARD2000_NegRefDacSelect %d\n", timeout);*/ } static void daqboard2000_initializeCtrs(struct comedi_device *dev) { /* printk("Implement: daqboard2000_initializeCtrs\n");*/ } static void daqboard2000_initializeTmrs(struct comedi_device *dev) { /* printk("Implement: daqboard2000_initializeTmrs\n");*/ } static void daqboard2000_dacDisarm(struct comedi_device *dev) { /* printk("Implement: daqboard2000_dacDisarm\n");*/ } static void daqboard2000_initializeAdc(struct comedi_device *dev) { daqboard2000_adcDisarm(dev); daqboard2000_activateReferenceDacs(dev); daqboard2000_initializeCtrs(dev); daqboard2000_initializeTmrs(dev); } static void daqboard2000_initializeDac(struct comedi_device *dev) { daqboard2000_dacDisarm(dev); } /* The test command, REMOVE!!: rmmod daqboard2000 ; rmmod comedi; make install ; modprobe daqboard2000; /usr/sbin/comedi_config /dev/comedi0 daqboard/2000 ; tail -40 
/var/log/messages */ static int daqboard2000_8255_cb(int dir, int port, int data, unsigned long ioaddr) { int result = 0; if (dir) { writew(data, ((void *)ioaddr) + port * 2); result = 0; } else { result = readw(((void *)ioaddr) + port * 2); } /* printk("daqboard2000_8255_cb %x %d %d %2.2x -> %2.2x\n", arg, dir, port, data, result); */ return result; } static int daqboard2000_attach(struct comedi_device *dev, struct comedi_devconfig *it) { int result = 0; struct comedi_subdevice *s; struct pci_dev *card = NULL; void *aux_data; unsigned int aux_len; int bus, slot; printk("comedi%d: daqboard2000:", dev->minor); bus = it->options[0]; slot = it->options[1]; result = alloc_private(dev, sizeof(struct daqboard2000_private)); if (result < 0) { return -ENOMEM; } for (card = pci_get_device(0x1616, 0x0409, NULL); card != NULL; card = pci_get_device(0x1616, 0x0409, card)) { if (bus || slot) { /* requested particular bus/slot */ if (card->bus->number != bus || PCI_SLOT(card->devfn) != slot) { continue; } } break; /* found one */ } if (!card) { if (bus || slot) printk(" no daqboard2000 found at bus/slot: %d/%d\n", bus, slot); else printk(" no daqboard2000 found\n"); return -EIO; } else { u32 id; int i; devpriv->pci_dev = card; id = ((u32) card-> subsystem_device << 16) | card->subsystem_vendor; for (i = 0; i < n_boardtypes; i++) { if (boardtypes[i].id == id) { printk(" %s", boardtypes[i].name); dev->board_ptr = boardtypes + i; } } if (!dev->board_ptr) { printk (" unknown subsystem id %08x (pretend it is an ids2)", id); dev->board_ptr = boardtypes; } } result = comedi_pci_enable(card, "daqboard2000"); if (result < 0) { printk(" failed to enable PCI device and request regions\n"); return -EIO; } devpriv->got_regions = 1; devpriv->plx = ioremap(pci_resource_start(card, 0), DAQBOARD2000_PLX_SIZE); devpriv->daq = ioremap(pci_resource_start(card, 2), DAQBOARD2000_DAQ_SIZE); if (!devpriv->plx || !devpriv->daq) { return -ENOMEM; } result = alloc_subdevices(dev, 3); if (result < 0) goto 
out; readl(devpriv->plx + 0x6c); /* u8 interrupt; Windows code does restore interrupts, but since we don't use them... pci_read_config_byte(card, PCI_INTERRUPT_LINE, &interrupt); printk("Interrupt before is: %x\n", interrupt); */ aux_data = comedi_aux_data(it->options, 0); aux_len = it->options[COMEDI_DEVCONF_AUX_DATA_LENGTH]; if (aux_data && aux_len) { result = initialize_daqboard2000(dev, aux_data, aux_len); } else { printk("no FPGA initialization code, aborting\n"); result = -EIO; } if (result < 0) goto out; daqboard2000_initializeAdc(dev); daqboard2000_initializeDac(dev); /* Windows code does restore interrupts, but since we don't use them... pci_read_config_byte(card, PCI_INTERRUPT_LINE, &interrupt); printk("Interrupt after is: %x\n", interrupt); */ dev->iobase = (unsigned long)devpriv->daq; dev->board_name = this_board->name; s = dev->subdevices + 0; /* ai subdevice */ s->type = COMEDI_SUBD_AI; s->subdev_flags = SDF_READABLE | SDF_GROUND; s->n_chan = 24; s->maxdata = 0xffff; s->insn_read = daqboard2000_ai_insn_read; s->range_table = &range_daqboard2000_ai; s = dev->subdevices + 1; /* ao subdevice */ s->type = COMEDI_SUBD_AO; s->subdev_flags = SDF_WRITABLE; s->n_chan = 2; s->maxdata = 0xffff; s->insn_read = daqboard2000_ao_insn_read; s->insn_write = daqboard2000_ao_insn_write; s->range_table = &range_daqboard2000_ao; s = dev->subdevices + 2; result = subdev_8255_init(dev, s, daqboard2000_8255_cb, (unsigned long)(dev->iobase + 0x40)); printk("\n"); out: return result; } static int daqboard2000_detach(struct comedi_device *dev) { printk("comedi%d: daqboard2000: remove\n", dev->minor); if (dev->subdevices) subdev_8255_cleanup(dev, dev->subdevices + 2); if (dev->irq) { free_irq(dev->irq, dev); } if (devpriv) { if (devpriv->daq) iounmap(devpriv->daq); if (devpriv->plx) iounmap(devpriv->plx); if (devpriv->pci_dev) { if (devpriv->got_regions) { comedi_pci_disable(devpriv->pci_dev); } pci_dev_put(devpriv->pci_dev); } } return 0; } static int __devinit 
driver_daqboard2000_pci_probe(struct pci_dev *dev, const struct pci_device_id *ent) { return comedi_pci_auto_config(dev, driver_daqboard2000.driver_name); } static void __devexit driver_daqboard2000_pci_remove(struct pci_dev *dev) { comedi_pci_auto_unconfig(dev); } static struct pci_driver driver_daqboard2000_pci_driver = { .id_table = daqboard2000_pci_table, .probe = &driver_daqboard2000_pci_probe, .remove = __devexit_p(&driver_daqboard2000_pci_remove) }; static int __init driver_daqboard2000_init_module(void) { int retval; retval = comedi_driver_register(&driver_daqboard2000); if (retval < 0) return retval; driver_daqboard2000_pci_driver.name = (char *)driver_daqboard2000.driver_name; return pci_register_driver(&driver_daqboard2000_pci_driver); } static void __exit driver_daqboard2000_cleanup_module(void) { pci_unregister_driver(&driver_daqboard2000_pci_driver); comedi_driver_unregister(&driver_daqboard2000); } module_init(driver_daqboard2000_init_module); module_exit(driver_daqboard2000_cleanup_module); MODULE_AUTHOR("Comedi http://www.comedi.org"); MODULE_DESCRIPTION("Comedi low-level driver"); MODULE_LICENSE("GPL");
gpl-2.0
xboxfanj/android_kernel_oneplus_msm8974
sound/core/vmaster.c
3511
12431
/* * Virtual master and slave controls * * Copyright (c) 2008 by Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. * */ #include <linux/slab.h> #include <linux/export.h> #include <sound/core.h> #include <sound/control.h> #include <sound/tlv.h> /* * a subset of information returned via ctl info callback */ struct link_ctl_info { snd_ctl_elem_type_t type; /* value type */ int count; /* item count */ int min_val, max_val; /* min, max values */ }; /* * link master - this contains a list of slave controls that are * identical types, i.e. info returns the same value type and value * ranges, but may have different number of counts. * * The master control is so far only mono volume/switch for simplicity. * The same value will be applied to all slaves. */ struct link_master { struct list_head slaves; struct link_ctl_info info; int val; /* the master value */ unsigned int tlv[4]; void (*hook)(void *private_data, int); void *hook_private_data; }; /* * link slave - this contains a slave control element * * It fakes the control callbacsk with additional attenuation by the * master control. A slave may have either one or two channels. 
*/ struct link_slave { struct list_head list; struct link_master *master; struct link_ctl_info info; int vals[2]; /* current values */ unsigned int flags; struct snd_kcontrol *kctl; /* original kcontrol pointer */ struct snd_kcontrol slave; /* the copy of original control entry */ }; static int slave_update(struct link_slave *slave) { struct snd_ctl_elem_value *uctl; int err, ch; uctl = kmalloc(sizeof(*uctl), GFP_KERNEL); if (!uctl) return -ENOMEM; uctl->id = slave->slave.id; err = slave->slave.get(&slave->slave, uctl); for (ch = 0; ch < slave->info.count; ch++) slave->vals[ch] = uctl->value.integer.value[ch]; kfree(uctl); return 0; } /* get the slave ctl info and save the initial values */ static int slave_init(struct link_slave *slave) { struct snd_ctl_elem_info *uinfo; int err; if (slave->info.count) { /* already initialized */ if (slave->flags & SND_CTL_SLAVE_NEED_UPDATE) return slave_update(slave); return 0; } uinfo = kmalloc(sizeof(*uinfo), GFP_KERNEL); if (!uinfo) return -ENOMEM; uinfo->id = slave->slave.id; err = slave->slave.info(&slave->slave, uinfo); if (err < 0) { kfree(uinfo); return err; } slave->info.type = uinfo->type; slave->info.count = uinfo->count; if (slave->info.count > 2 || (slave->info.type != SNDRV_CTL_ELEM_TYPE_INTEGER && slave->info.type != SNDRV_CTL_ELEM_TYPE_BOOLEAN)) { snd_printk(KERN_ERR "invalid slave element\n"); kfree(uinfo); return -EINVAL; } slave->info.min_val = uinfo->value.integer.min; slave->info.max_val = uinfo->value.integer.max; kfree(uinfo); return slave_update(slave); } /* initialize master volume */ static int master_init(struct link_master *master) { struct link_slave *slave; if (master->info.count) return 0; /* already initialized */ list_for_each_entry(slave, &master->slaves, list) { int err = slave_init(slave); if (err < 0) return err; master->info = slave->info; master->info.count = 1; /* always mono */ /* set full volume as default (= no attenuation) */ master->val = master->info.max_val; if (master->hook) 
master->hook(master->hook_private_data, master->val); return 1; } return -ENOENT; } static int slave_get_val(struct link_slave *slave, struct snd_ctl_elem_value *ucontrol) { int err, ch; err = slave_init(slave); if (err < 0) return err; for (ch = 0; ch < slave->info.count; ch++) ucontrol->value.integer.value[ch] = slave->vals[ch]; return 0; } static int slave_put_val(struct link_slave *slave, struct snd_ctl_elem_value *ucontrol) { int err, ch, vol; err = master_init(slave->master); if (err < 0) return err; switch (slave->info.type) { case SNDRV_CTL_ELEM_TYPE_BOOLEAN: for (ch = 0; ch < slave->info.count; ch++) ucontrol->value.integer.value[ch] &= !!slave->master->val; break; case SNDRV_CTL_ELEM_TYPE_INTEGER: for (ch = 0; ch < slave->info.count; ch++) { /* max master volume is supposed to be 0 dB */ vol = ucontrol->value.integer.value[ch]; vol += slave->master->val - slave->master->info.max_val; if (vol < slave->info.min_val) vol = slave->info.min_val; else if (vol > slave->info.max_val) vol = slave->info.max_val; ucontrol->value.integer.value[ch] = vol; } break; } return slave->slave.put(&slave->slave, ucontrol); } /* * ctl callbacks for slaves */ static int slave_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct link_slave *slave = snd_kcontrol_chip(kcontrol); return slave->slave.info(&slave->slave, uinfo); } static int slave_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct link_slave *slave = snd_kcontrol_chip(kcontrol); return slave_get_val(slave, ucontrol); } static int slave_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct link_slave *slave = snd_kcontrol_chip(kcontrol); int err, ch, changed = 0; err = slave_init(slave); if (err < 0) return err; for (ch = 0; ch < slave->info.count; ch++) { if (slave->vals[ch] != ucontrol->value.integer.value[ch]) { changed = 1; slave->vals[ch] = ucontrol->value.integer.value[ch]; } } if (!changed) return 0; return slave_put_val(slave, 
ucontrol); } static int slave_tlv_cmd(struct snd_kcontrol *kcontrol, int op_flag, unsigned int size, unsigned int __user *tlv) { struct link_slave *slave = snd_kcontrol_chip(kcontrol); /* FIXME: this assumes that the max volume is 0 dB */ return slave->slave.tlv.c(&slave->slave, op_flag, size, tlv); } static void slave_free(struct snd_kcontrol *kcontrol) { struct link_slave *slave = snd_kcontrol_chip(kcontrol); if (slave->slave.private_free) slave->slave.private_free(&slave->slave); if (slave->master) list_del(&slave->list); kfree(slave); } /* * Add a slave control to the group with the given master control * * All slaves must be the same type (returning the same information * via info callback). The function doesn't check it, so it's your * responsibility. * * Also, some additional limitations: * - at most two channels * - logarithmic volume control (dB level), no linear volume * - master can only attenuate the volume, no gain */ int _snd_ctl_add_slave(struct snd_kcontrol *master, struct snd_kcontrol *slave, unsigned int flags) { struct link_master *master_link = snd_kcontrol_chip(master); struct link_slave *srec; srec = kzalloc(sizeof(*srec) + slave->count * sizeof(*slave->vd), GFP_KERNEL); if (!srec) return -ENOMEM; srec->kctl = slave; srec->slave = *slave; memcpy(srec->slave.vd, slave->vd, slave->count * sizeof(*slave->vd)); srec->master = master_link; srec->flags = flags; /* override callbacks */ slave->info = slave_info; slave->get = slave_get; slave->put = slave_put; if (slave->vd[0].access & SNDRV_CTL_ELEM_ACCESS_TLV_CALLBACK) slave->tlv.c = slave_tlv_cmd; slave->private_data = srec; slave->private_free = slave_free; list_add_tail(&srec->list, &master_link->slaves); return 0; } EXPORT_SYMBOL(_snd_ctl_add_slave); /* * ctl callbacks for master controls */ static int master_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo) { struct link_master *master = snd_kcontrol_chip(kcontrol); int ret; ret = master_init(master); if (ret < 0) return ret; 
uinfo->type = master->info.type; uinfo->count = master->info.count; uinfo->value.integer.min = master->info.min_val; uinfo->value.integer.max = master->info.max_val; return 0; } static int master_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct link_master *master = snd_kcontrol_chip(kcontrol); int err = master_init(master); if (err < 0) return err; ucontrol->value.integer.value[0] = master->val; return 0; } static int master_put(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol) { struct link_master *master = snd_kcontrol_chip(kcontrol); struct link_slave *slave; struct snd_ctl_elem_value *uval; int err, old_val; err = master_init(master); if (err < 0) return err; old_val = master->val; if (ucontrol->value.integer.value[0] == old_val) return 0; uval = kmalloc(sizeof(*uval), GFP_KERNEL); if (!uval) return -ENOMEM; list_for_each_entry(slave, &master->slaves, list) { master->val = old_val; uval->id = slave->slave.id; slave_get_val(slave, uval); master->val = ucontrol->value.integer.value[0]; slave_put_val(slave, uval); } kfree(uval); if (master->hook && !err) master->hook(master->hook_private_data, master->val); return 1; } static void master_free(struct snd_kcontrol *kcontrol) { struct link_master *master = snd_kcontrol_chip(kcontrol); struct link_slave *slave, *n; /* free all slave links and retore the original slave kctls */ list_for_each_entry_safe(slave, n, &master->slaves, list) { struct snd_kcontrol *sctl = slave->kctl; struct list_head olist = sctl->list; memcpy(sctl, &slave->slave, sizeof(*sctl)); memcpy(sctl->vd, slave->slave.vd, sctl->count * sizeof(*sctl->vd)); sctl->list = olist; /* keep the current linked-list */ kfree(slave); } kfree(master); } /** * snd_ctl_make_virtual_master - Create a virtual master control * @name: name string of the control element to create * @tlv: optional TLV int array for dB information * * Creates a virtual matster control with the given name string. 
* Returns the created control element, or NULL for errors (ENOMEM). * * After creating a vmaster element, you can add the slave controls * via snd_ctl_add_slave() or snd_ctl_add_slave_uncached(). * * The optional argument @tlv can be used to specify the TLV information * for dB scale of the master control. It should be a single element * with #SNDRV_CTL_TLVT_DB_SCALE, #SNDRV_CTL_TLV_DB_MINMAX or * #SNDRV_CTL_TLVT_DB_MINMAX_MUTE type, and should be the max 0dB. */ struct snd_kcontrol *snd_ctl_make_virtual_master(char *name, const unsigned int *tlv) { struct link_master *master; struct snd_kcontrol *kctl; struct snd_kcontrol_new knew; memset(&knew, 0, sizeof(knew)); knew.iface = SNDRV_CTL_ELEM_IFACE_MIXER; knew.name = name; knew.info = master_info; master = kzalloc(sizeof(*master), GFP_KERNEL); if (!master) return NULL; INIT_LIST_HEAD(&master->slaves); kctl = snd_ctl_new1(&knew, master); if (!kctl) { kfree(master); return NULL; } /* override some callbacks */ kctl->info = master_info; kctl->get = master_get; kctl->put = master_put; kctl->private_free = master_free; /* additional (constant) TLV read */ if (tlv && (tlv[0] == SNDRV_CTL_TLVT_DB_SCALE || tlv[0] == SNDRV_CTL_TLVT_DB_MINMAX || tlv[0] == SNDRV_CTL_TLVT_DB_MINMAX_MUTE)) { kctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_TLV_READ; memcpy(master->tlv, tlv, sizeof(master->tlv)); kctl->tlv.p = master->tlv; } return kctl; } EXPORT_SYMBOL(snd_ctl_make_virtual_master); /** * snd_ctl_add_vmaster_hook - Add a hook to a vmaster control * @kcontrol: vmaster kctl element * @hook: the hook function * @private_data: the private_data pointer to be saved * * Adds the given hook to the vmaster control element so that it's called * at each time when the value is changed. 
*/ int snd_ctl_add_vmaster_hook(struct snd_kcontrol *kcontrol, void (*hook)(void *private_data, int), void *private_data) { struct link_master *master = snd_kcontrol_chip(kcontrol); master->hook = hook; master->hook_private_data = private_data; return 0; } EXPORT_SYMBOL_GPL(snd_ctl_add_vmaster_hook); /** * snd_ctl_sync_vmaster_hook - Sync the vmaster hook * @kcontrol: vmaster kctl element * * Call the hook function to synchronize with the current value of the given * vmaster element. NOP when NULL is passed to @kcontrol or the hook doesn't * exist. */ void snd_ctl_sync_vmaster_hook(struct snd_kcontrol *kcontrol) { struct link_master *master; if (!kcontrol) return; master = snd_kcontrol_chip(kcontrol); if (master->hook) master->hook(master->hook_private_data, master->val); } EXPORT_SYMBOL_GPL(snd_ctl_sync_vmaster_hook);
gpl-2.0
mdeejay/android_kernel_dlxub1
drivers/video/omap2/dss/venc.c
4791
21906
/* * linux/drivers/video/omap2/dss/venc.c * * Copyright (C) 2009 Nokia Corporation * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com> * * VENC settings from TI's DSS driver * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #define DSS_SUBSYS_NAME "VENC" #include <linux/kernel.h> #include <linux/module.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <linux/mutex.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/seq_file.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #include <linux/pm_runtime.h> #include <video/omapdss.h> #include <plat/cpu.h> #include "dss.h" #include "dss_features.h" /* Venc registers */ #define VENC_REV_ID 0x00 #define VENC_STATUS 0x04 #define VENC_F_CONTROL 0x08 #define VENC_VIDOUT_CTRL 0x10 #define VENC_SYNC_CTRL 0x14 #define VENC_LLEN 0x1C #define VENC_FLENS 0x20 #define VENC_HFLTR_CTRL 0x24 #define VENC_CC_CARR_WSS_CARR 0x28 #define VENC_C_PHASE 0x2C #define VENC_GAIN_U 0x30 #define VENC_GAIN_V 0x34 #define VENC_GAIN_Y 0x38 #define VENC_BLACK_LEVEL 0x3C #define VENC_BLANK_LEVEL 0x40 #define VENC_X_COLOR 0x44 #define VENC_M_CONTROL 0x48 #define VENC_BSTAMP_WSS_DATA 0x4C #define VENC_S_CARR 0x50 #define VENC_LINE21 0x54 #define VENC_LN_SEL 0x58 #define VENC_L21__WC_CTL 0x5C #define VENC_HTRIGGER_VTRIGGER 0x60 #define VENC_SAVID__EAVID 0x64 #define VENC_FLEN__FAL 0x68 #define VENC_LAL__PHASE_RESET 0x6C #define 
VENC_HS_INT_START_STOP_X 0x70 #define VENC_HS_EXT_START_STOP_X 0x74 #define VENC_VS_INT_START_X 0x78 #define VENC_VS_INT_STOP_X__VS_INT_START_Y 0x7C #define VENC_VS_INT_STOP_Y__VS_EXT_START_X 0x80 #define VENC_VS_EXT_STOP_X__VS_EXT_START_Y 0x84 #define VENC_VS_EXT_STOP_Y 0x88 #define VENC_AVID_START_STOP_X 0x90 #define VENC_AVID_START_STOP_Y 0x94 #define VENC_FID_INT_START_X__FID_INT_START_Y 0xA0 #define VENC_FID_INT_OFFSET_Y__FID_EXT_START_X 0xA4 #define VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y 0xA8 #define VENC_TVDETGP_INT_START_STOP_X 0xB0 #define VENC_TVDETGP_INT_START_STOP_Y 0xB4 #define VENC_GEN_CTRL 0xB8 #define VENC_OUTPUT_CONTROL 0xC4 #define VENC_OUTPUT_TEST 0xC8 #define VENC_DAC_B__DAC_C 0xC8 struct venc_config { u32 f_control; u32 vidout_ctrl; u32 sync_ctrl; u32 llen; u32 flens; u32 hfltr_ctrl; u32 cc_carr_wss_carr; u32 c_phase; u32 gain_u; u32 gain_v; u32 gain_y; u32 black_level; u32 blank_level; u32 x_color; u32 m_control; u32 bstamp_wss_data; u32 s_carr; u32 line21; u32 ln_sel; u32 l21__wc_ctl; u32 htrigger_vtrigger; u32 savid__eavid; u32 flen__fal; u32 lal__phase_reset; u32 hs_int_start_stop_x; u32 hs_ext_start_stop_x; u32 vs_int_start_x; u32 vs_int_stop_x__vs_int_start_y; u32 vs_int_stop_y__vs_ext_start_x; u32 vs_ext_stop_x__vs_ext_start_y; u32 vs_ext_stop_y; u32 avid_start_stop_x; u32 avid_start_stop_y; u32 fid_int_start_x__fid_int_start_y; u32 fid_int_offset_y__fid_ext_start_x; u32 fid_ext_start_y__fid_ext_offset_y; u32 tvdetgp_int_start_stop_x; u32 tvdetgp_int_start_stop_y; u32 gen_ctrl; }; /* from TRM */ static const struct venc_config venc_config_pal_trm = { .f_control = 0, .vidout_ctrl = 1, .sync_ctrl = 0x40, .llen = 0x35F, /* 863 */ .flens = 0x270, /* 624 */ .hfltr_ctrl = 0, .cc_carr_wss_carr = 0x2F7225ED, .c_phase = 0, .gain_u = 0x111, .gain_v = 0x181, .gain_y = 0x140, .black_level = 0x3B, .blank_level = 0x3B, .x_color = 0x7, .m_control = 0x2, .bstamp_wss_data = 0x3F, .s_carr = 0x2A098ACB, .line21 = 0, .ln_sel = 0x01290015, .l21__wc_ctl = 
0x0000F603, .htrigger_vtrigger = 0, .savid__eavid = 0x06A70108, .flen__fal = 0x00180270, .lal__phase_reset = 0x00040135, .hs_int_start_stop_x = 0x00880358, .hs_ext_start_stop_x = 0x000F035F, .vs_int_start_x = 0x01A70000, .vs_int_stop_x__vs_int_start_y = 0x000001A7, .vs_int_stop_y__vs_ext_start_x = 0x01AF0000, .vs_ext_stop_x__vs_ext_start_y = 0x000101AF, .vs_ext_stop_y = 0x00000025, .avid_start_stop_x = 0x03530083, .avid_start_stop_y = 0x026C002E, .fid_int_start_x__fid_int_start_y = 0x0001008A, .fid_int_offset_y__fid_ext_start_x = 0x002E0138, .fid_ext_start_y__fid_ext_offset_y = 0x01380001, .tvdetgp_int_start_stop_x = 0x00140001, .tvdetgp_int_start_stop_y = 0x00010001, .gen_ctrl = 0x00FF0000, }; /* from TRM */ static const struct venc_config venc_config_ntsc_trm = { .f_control = 0, .vidout_ctrl = 1, .sync_ctrl = 0x8040, .llen = 0x359, .flens = 0x20C, .hfltr_ctrl = 0, .cc_carr_wss_carr = 0x043F2631, .c_phase = 0, .gain_u = 0x102, .gain_v = 0x16C, .gain_y = 0x12F, .black_level = 0x43, .blank_level = 0x38, .x_color = 0x7, .m_control = 0x1, .bstamp_wss_data = 0x38, .s_carr = 0x21F07C1F, .line21 = 0, .ln_sel = 0x01310011, .l21__wc_ctl = 0x0000F003, .htrigger_vtrigger = 0, .savid__eavid = 0x069300F4, .flen__fal = 0x0016020C, .lal__phase_reset = 0x00060107, .hs_int_start_stop_x = 0x008E0350, .hs_ext_start_stop_x = 0x000F0359, .vs_int_start_x = 0x01A00000, .vs_int_stop_x__vs_int_start_y = 0x020701A0, .vs_int_stop_y__vs_ext_start_x = 0x01AC0024, .vs_ext_stop_x__vs_ext_start_y = 0x020D01AC, .vs_ext_stop_y = 0x00000006, .avid_start_stop_x = 0x03480078, .avid_start_stop_y = 0x02060024, .fid_int_start_x__fid_int_start_y = 0x0001008A, .fid_int_offset_y__fid_ext_start_x = 0x01AC0106, .fid_ext_start_y__fid_ext_offset_y = 0x01060006, .tvdetgp_int_start_stop_x = 0x00140001, .tvdetgp_int_start_stop_y = 0x00010001, .gen_ctrl = 0x00F90000, }; static const struct venc_config venc_config_pal_bdghi = { .f_control = 0, .vidout_ctrl = 0, .sync_ctrl = 0, .hfltr_ctrl = 0, .x_color = 0, .line21 
= 0, .ln_sel = 21, .htrigger_vtrigger = 0, .tvdetgp_int_start_stop_x = 0x00140001, .tvdetgp_int_start_stop_y = 0x00010001, .gen_ctrl = 0x00FB0000, .llen = 864-1, .flens = 625-1, .cc_carr_wss_carr = 0x2F7625ED, .c_phase = 0xDF, .gain_u = 0x111, .gain_v = 0x181, .gain_y = 0x140, .black_level = 0x3e, .blank_level = 0x3e, .m_control = 0<<2 | 1<<1, .bstamp_wss_data = 0x42, .s_carr = 0x2a098acb, .l21__wc_ctl = 0<<13 | 0x16<<8 | 0<<0, .savid__eavid = 0x06A70108, .flen__fal = 23<<16 | 624<<0, .lal__phase_reset = 2<<17 | 310<<0, .hs_int_start_stop_x = 0x00920358, .hs_ext_start_stop_x = 0x000F035F, .vs_int_start_x = 0x1a7<<16, .vs_int_stop_x__vs_int_start_y = 0x000601A7, .vs_int_stop_y__vs_ext_start_x = 0x01AF0036, .vs_ext_stop_x__vs_ext_start_y = 0x27101af, .vs_ext_stop_y = 0x05, .avid_start_stop_x = 0x03530082, .avid_start_stop_y = 0x0270002E, .fid_int_start_x__fid_int_start_y = 0x0005008A, .fid_int_offset_y__fid_ext_start_x = 0x002E0138, .fid_ext_start_y__fid_ext_offset_y = 0x01380005, }; const struct omap_video_timings omap_dss_pal_timings = { .x_res = 720, .y_res = 574, .pixel_clock = 13500, .hsw = 64, .hfp = 12, .hbp = 68, .vsw = 5, .vfp = 5, .vbp = 41, }; EXPORT_SYMBOL(omap_dss_pal_timings); const struct omap_video_timings omap_dss_ntsc_timings = { .x_res = 720, .y_res = 482, .pixel_clock = 13500, .hsw = 64, .hfp = 16, .hbp = 58, .vsw = 6, .vfp = 6, .vbp = 31, }; EXPORT_SYMBOL(omap_dss_ntsc_timings); static struct { struct platform_device *pdev; void __iomem *base; struct mutex venc_lock; u32 wss_data; struct regulator *vdda_dac_reg; struct clk *tv_dac_clk; } venc; static inline void venc_write_reg(int idx, u32 val) { __raw_writel(val, venc.base + idx); } static inline u32 venc_read_reg(int idx) { u32 l = __raw_readl(venc.base + idx); return l; } static void venc_write_config(const struct venc_config *config) { DSSDBG("write venc conf\n"); venc_write_reg(VENC_LLEN, config->llen); venc_write_reg(VENC_FLENS, config->flens); venc_write_reg(VENC_CC_CARR_WSS_CARR, 
config->cc_carr_wss_carr); venc_write_reg(VENC_C_PHASE, config->c_phase); venc_write_reg(VENC_GAIN_U, config->gain_u); venc_write_reg(VENC_GAIN_V, config->gain_v); venc_write_reg(VENC_GAIN_Y, config->gain_y); venc_write_reg(VENC_BLACK_LEVEL, config->black_level); venc_write_reg(VENC_BLANK_LEVEL, config->blank_level); venc_write_reg(VENC_M_CONTROL, config->m_control); venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data | venc.wss_data); venc_write_reg(VENC_S_CARR, config->s_carr); venc_write_reg(VENC_L21__WC_CTL, config->l21__wc_ctl); venc_write_reg(VENC_SAVID__EAVID, config->savid__eavid); venc_write_reg(VENC_FLEN__FAL, config->flen__fal); venc_write_reg(VENC_LAL__PHASE_RESET, config->lal__phase_reset); venc_write_reg(VENC_HS_INT_START_STOP_X, config->hs_int_start_stop_x); venc_write_reg(VENC_HS_EXT_START_STOP_X, config->hs_ext_start_stop_x); venc_write_reg(VENC_VS_INT_START_X, config->vs_int_start_x); venc_write_reg(VENC_VS_INT_STOP_X__VS_INT_START_Y, config->vs_int_stop_x__vs_int_start_y); venc_write_reg(VENC_VS_INT_STOP_Y__VS_EXT_START_X, config->vs_int_stop_y__vs_ext_start_x); venc_write_reg(VENC_VS_EXT_STOP_X__VS_EXT_START_Y, config->vs_ext_stop_x__vs_ext_start_y); venc_write_reg(VENC_VS_EXT_STOP_Y, config->vs_ext_stop_y); venc_write_reg(VENC_AVID_START_STOP_X, config->avid_start_stop_x); venc_write_reg(VENC_AVID_START_STOP_Y, config->avid_start_stop_y); venc_write_reg(VENC_FID_INT_START_X__FID_INT_START_Y, config->fid_int_start_x__fid_int_start_y); venc_write_reg(VENC_FID_INT_OFFSET_Y__FID_EXT_START_X, config->fid_int_offset_y__fid_ext_start_x); venc_write_reg(VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y, config->fid_ext_start_y__fid_ext_offset_y); venc_write_reg(VENC_DAC_B__DAC_C, venc_read_reg(VENC_DAC_B__DAC_C)); venc_write_reg(VENC_VIDOUT_CTRL, config->vidout_ctrl); venc_write_reg(VENC_HFLTR_CTRL, config->hfltr_ctrl); venc_write_reg(VENC_X_COLOR, config->x_color); venc_write_reg(VENC_LINE21, config->line21); venc_write_reg(VENC_LN_SEL, config->ln_sel); 
venc_write_reg(VENC_HTRIGGER_VTRIGGER, config->htrigger_vtrigger); venc_write_reg(VENC_TVDETGP_INT_START_STOP_X, config->tvdetgp_int_start_stop_x); venc_write_reg(VENC_TVDETGP_INT_START_STOP_Y, config->tvdetgp_int_start_stop_y); venc_write_reg(VENC_GEN_CTRL, config->gen_ctrl); venc_write_reg(VENC_F_CONTROL, config->f_control); venc_write_reg(VENC_SYNC_CTRL, config->sync_ctrl); } static void venc_reset(void) { int t = 1000; venc_write_reg(VENC_F_CONTROL, 1<<8); while (venc_read_reg(VENC_F_CONTROL) & (1<<8)) { if (--t == 0) { DSSERR("Failed to reset venc\n"); return; } } #ifdef CONFIG_OMAP2_DSS_SLEEP_AFTER_VENC_RESET /* the magical sleep that makes things work */ /* XXX more info? What bug this circumvents? */ msleep(20); #endif } static int venc_runtime_get(void) { int r; DSSDBG("venc_runtime_get\n"); r = pm_runtime_get_sync(&venc.pdev->dev); WARN_ON(r < 0); return r < 0 ? r : 0; } static void venc_runtime_put(void) { int r; DSSDBG("venc_runtime_put\n"); r = pm_runtime_put_sync(&venc.pdev->dev); WARN_ON(r < 0); } static const struct venc_config *venc_timings_to_config( struct omap_video_timings *timings) { if (memcmp(&omap_dss_pal_timings, timings, sizeof(*timings)) == 0) return &venc_config_pal_trm; if (memcmp(&omap_dss_ntsc_timings, timings, sizeof(*timings)) == 0) return &venc_config_ntsc_trm; BUG(); } static int venc_power_on(struct omap_dss_device *dssdev) { u32 l; int r; venc_reset(); venc_write_config(venc_timings_to_config(&dssdev->panel.timings)); dss_set_venc_output(dssdev->phy.venc.type); dss_set_dac_pwrdn_bgz(1); l = 0; if (dssdev->phy.venc.type == OMAP_DSS_VENC_TYPE_COMPOSITE) l |= 1 << 1; else /* S-Video */ l |= (1 << 0) | (1 << 2); if (dssdev->phy.venc.invert_polarity == false) l |= 1 << 3; venc_write_reg(VENC_OUTPUT_CONTROL, l); dispc_set_digit_size(dssdev->panel.timings.x_res, dssdev->panel.timings.y_res/2); regulator_enable(venc.vdda_dac_reg); if (dssdev->platform_enable) dssdev->platform_enable(dssdev); r = dss_mgr_enable(dssdev->manager); if (r) 
goto err; return 0; err: venc_write_reg(VENC_OUTPUT_CONTROL, 0); dss_set_dac_pwrdn_bgz(0); if (dssdev->platform_disable) dssdev->platform_disable(dssdev); regulator_disable(venc.vdda_dac_reg); return r; } static void venc_power_off(struct omap_dss_device *dssdev) { venc_write_reg(VENC_OUTPUT_CONTROL, 0); dss_set_dac_pwrdn_bgz(0); dss_mgr_disable(dssdev->manager); if (dssdev->platform_disable) dssdev->platform_disable(dssdev); regulator_disable(venc.vdda_dac_reg); } unsigned long venc_get_pixel_clock(void) { /* VENC Pixel Clock in Mhz */ return 13500000; } /* driver */ static int venc_panel_probe(struct omap_dss_device *dssdev) { dssdev->panel.timings = omap_dss_pal_timings; return 0; } static void venc_panel_remove(struct omap_dss_device *dssdev) { } static int venc_panel_enable(struct omap_dss_device *dssdev) { int r = 0; DSSDBG("venc_enable_display\n"); mutex_lock(&venc.venc_lock); r = omap_dss_start_device(dssdev); if (r) { DSSERR("failed to start device\n"); goto err0; } if (dssdev->state != OMAP_DSS_DISPLAY_DISABLED) { r = -EINVAL; goto err1; } r = venc_runtime_get(); if (r) goto err1; r = venc_power_on(dssdev); if (r) goto err2; venc.wss_data = 0; dssdev->state = OMAP_DSS_DISPLAY_ACTIVE; mutex_unlock(&venc.venc_lock); return 0; err2: venc_runtime_put(); err1: omap_dss_stop_device(dssdev); err0: mutex_unlock(&venc.venc_lock); return r; } static void venc_panel_disable(struct omap_dss_device *dssdev) { DSSDBG("venc_disable_display\n"); mutex_lock(&venc.venc_lock); if (dssdev->state == OMAP_DSS_DISPLAY_DISABLED) goto end; if (dssdev->state == OMAP_DSS_DISPLAY_SUSPENDED) { /* suspended is the same as disabled with venc */ dssdev->state = OMAP_DSS_DISPLAY_DISABLED; goto end; } venc_power_off(dssdev); venc_runtime_put(); dssdev->state = OMAP_DSS_DISPLAY_DISABLED; omap_dss_stop_device(dssdev); end: mutex_unlock(&venc.venc_lock); } static int venc_panel_suspend(struct omap_dss_device *dssdev) { venc_panel_disable(dssdev); return 0; } static int 
venc_panel_resume(struct omap_dss_device *dssdev) { return venc_panel_enable(dssdev); } static void venc_get_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { *timings = dssdev->panel.timings; } static void venc_set_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { DSSDBG("venc_set_timings\n"); /* Reset WSS data when the TV standard changes. */ if (memcmp(&dssdev->panel.timings, timings, sizeof(*timings))) venc.wss_data = 0; dssdev->panel.timings = *timings; if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) { /* turn the venc off and on to get new timings to use */ venc_panel_disable(dssdev); venc_panel_enable(dssdev); } } static int venc_check_timings(struct omap_dss_device *dssdev, struct omap_video_timings *timings) { DSSDBG("venc_check_timings\n"); if (memcmp(&omap_dss_pal_timings, timings, sizeof(*timings)) == 0) return 0; if (memcmp(&omap_dss_ntsc_timings, timings, sizeof(*timings)) == 0) return 0; return -EINVAL; } static u32 venc_get_wss(struct omap_dss_device *dssdev) { /* Invert due to VENC_L21_WC_CTL:INV=1 */ return (venc.wss_data >> 8) ^ 0xfffff; } static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss) { const struct venc_config *config; int r; DSSDBG("venc_set_wss\n"); mutex_lock(&venc.venc_lock); config = venc_timings_to_config(&dssdev->panel.timings); /* Invert due to VENC_L21_WC_CTL:INV=1 */ venc.wss_data = (wss ^ 0xfffff) << 8; r = venc_runtime_get(); if (r) goto err; venc_write_reg(VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data | venc.wss_data); venc_runtime_put(); err: mutex_unlock(&venc.venc_lock); return r; } static struct omap_dss_driver venc_driver = { .probe = venc_panel_probe, .remove = venc_panel_remove, .enable = venc_panel_enable, .disable = venc_panel_disable, .suspend = venc_panel_suspend, .resume = venc_panel_resume, .get_resolution = omapdss_default_get_resolution, .get_recommended_bpp = omapdss_default_get_recommended_bpp, .get_timings = venc_get_timings, .set_timings = 
	venc_set_timings,	/* (initializer continued from previous chunk — head not visible here) */
	.check_timings	= venc_check_timings,

	.get_wss	= venc_get_wss,
	.set_wss	= venc_set_wss,

	.driver         = {
		.name   = "venc",
		.owner  = THIS_MODULE,
	},
};
/* driver end */

/*
 * Lazily acquire the VDDA_DAC regulator for the video DAC and cache it in
 * the driver-global 'venc' state.  The regulator is released later in
 * omap_venchw_remove().  Returns 0 or the PTR_ERR from regulator_get().
 */
int venc_init_display(struct omap_dss_device *dssdev)
{
	DSSDBG("init_display\n");

	if (venc.vdda_dac_reg == NULL) {
		struct regulator *vdda_dac;

		vdda_dac = regulator_get(&venc.pdev->dev, "vdda_dac");
		if (IS_ERR(vdda_dac)) {
			DSSERR("can't get VDDA_DAC regulator\n");
			return PTR_ERR(vdda_dac);
		}

		venc.vdda_dac_reg = vdda_dac;
	}

	return 0;
}

/* Dump all VENC registers to the given seq_file (debugfs helper). */
void venc_dump_regs(struct seq_file *s)
{
#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, venc_read_reg(r))

	if (cpu_is_omap44xx()) {
		seq_printf(s, "VENC currently disabled on OMAP44xx\n");
		return;
	}

	/* Registers can only be read while the IP is powered up. */
	if (venc_runtime_get())
		return;

	DUMPREG(VENC_F_CONTROL);
	DUMPREG(VENC_VIDOUT_CTRL);
	DUMPREG(VENC_SYNC_CTRL);
	DUMPREG(VENC_LLEN);
	DUMPREG(VENC_FLENS);
	DUMPREG(VENC_HFLTR_CTRL);
	DUMPREG(VENC_CC_CARR_WSS_CARR);
	DUMPREG(VENC_C_PHASE);
	DUMPREG(VENC_GAIN_U);
	DUMPREG(VENC_GAIN_V);
	DUMPREG(VENC_GAIN_Y);
	DUMPREG(VENC_BLACK_LEVEL);
	DUMPREG(VENC_BLANK_LEVEL);
	DUMPREG(VENC_X_COLOR);
	DUMPREG(VENC_M_CONTROL);
	DUMPREG(VENC_BSTAMP_WSS_DATA);
	DUMPREG(VENC_S_CARR);
	DUMPREG(VENC_LINE21);
	DUMPREG(VENC_LN_SEL);
	DUMPREG(VENC_L21__WC_CTL);
	DUMPREG(VENC_HTRIGGER_VTRIGGER);
	DUMPREG(VENC_SAVID__EAVID);
	DUMPREG(VENC_FLEN__FAL);
	DUMPREG(VENC_LAL__PHASE_RESET);
	DUMPREG(VENC_HS_INT_START_STOP_X);
	DUMPREG(VENC_HS_EXT_START_STOP_X);
	DUMPREG(VENC_VS_INT_START_X);
	DUMPREG(VENC_VS_INT_STOP_X__VS_INT_START_Y);
	DUMPREG(VENC_VS_INT_STOP_Y__VS_EXT_START_X);
	DUMPREG(VENC_VS_EXT_STOP_X__VS_EXT_START_Y);
	DUMPREG(VENC_VS_EXT_STOP_Y);
	DUMPREG(VENC_AVID_START_STOP_X);
	DUMPREG(VENC_AVID_START_STOP_Y);
	DUMPREG(VENC_FID_INT_START_X__FID_INT_START_Y);
	DUMPREG(VENC_FID_INT_OFFSET_Y__FID_EXT_START_X);
	DUMPREG(VENC_FID_EXT_START_Y__FID_EXT_OFFSET_Y);
	DUMPREG(VENC_TVDETGP_INT_START_STOP_X);
	DUMPREG(VENC_TVDETGP_INT_START_STOP_Y);
	DUMPREG(VENC_GEN_CTRL);
	DUMPREG(VENC_OUTPUT_CONTROL);
	DUMPREG(VENC_OUTPUT_TEST);

	venc_runtime_put();

#undef DUMPREG
}

/*
 * Optionally take a reference on "tv_dac_clk": only SoCs advertising
 * FEAT_VENC_REQUIRES_TV_DAC_CLK need it; otherwise venc.tv_dac_clk stays
 * NULL and the suspend/resume hooks skip it.
 */
static int venc_get_clocks(struct platform_device *pdev)
{
	struct clk *clk;

	if (dss_has_feature(FEAT_VENC_REQUIRES_TV_DAC_CLK)) {
		clk = clk_get(&pdev->dev, "tv_dac_clk");
		if (IS_ERR(clk)) {
			DSSERR("can't get tv_dac_clk\n");
			return PTR_ERR(clk);
		}
	} else {
		clk = NULL;
	}

	venc.tv_dac_clk = clk;

	return 0;
}

/* Drop the optional tv_dac_clk reference taken by venc_get_clocks(). */
static void venc_put_clocks(void)
{
	if (venc.tv_dac_clk)
		clk_put(venc.tv_dac_clk);
}

/* VENC HW IP initialisation */
static int omap_venchw_probe(struct platform_device *pdev)
{
	u8 rev_id;
	struct resource *venc_mem;
	int r;

	venc.pdev = pdev;

	mutex_init(&venc.venc_lock);

	venc.wss_data = 0;

	/* Map the VENC register space (devm: unmapped automatically). */
	venc_mem = platform_get_resource(venc.pdev, IORESOURCE_MEM, 0);
	if (!venc_mem) {
		DSSERR("can't get IORESOURCE_MEM VENC\n");
		return -EINVAL;
	}
	venc.base = devm_ioremap(&pdev->dev, venc_mem->start,
				 resource_size(venc_mem));
	if (!venc.base) {
		DSSERR("can't ioremap VENC\n");
		return -ENOMEM;
	}

	r = venc_get_clocks(pdev);
	if (r)
		return r;

	pm_runtime_enable(&pdev->dev);

	/* Power up briefly just to read and log the IP revision. */
	r = venc_runtime_get();
	if (r)
		goto err_runtime_get;

	rev_id = (u8)(venc_read_reg(VENC_REV_ID) & 0xff);
	dev_dbg(&pdev->dev, "OMAP VENC rev %d\n", rev_id);

	venc_runtime_put();

	r = omap_dss_register_driver(&venc_driver);
	if (r)
		goto err_reg_panel_driver;

	return 0;

err_reg_panel_driver:
err_runtime_get:
	pm_runtime_disable(&pdev->dev);
	venc_put_clocks();
	return r;
}

/* Tear down in reverse order of omap_venchw_probe()/venc_init_display(). */
static int omap_venchw_remove(struct platform_device *pdev)
{
	if (venc.vdda_dac_reg != NULL) {
		regulator_put(venc.vdda_dac_reg);
		venc.vdda_dac_reg = NULL;
	}

	omap_dss_unregister_driver(&venc_driver);

	pm_runtime_disable(&pdev->dev);
	venc_put_clocks();

	return 0;
}

/*
 * Runtime-PM suspend: gate the optional DAC clock, then drop the dispc and
 * dss references taken in venc_runtime_resume() (reverse order).
 */
static int venc_runtime_suspend(struct device *dev)
{
	if (venc.tv_dac_clk)
		clk_disable(venc.tv_dac_clk);

	dispc_runtime_put();
	dss_runtime_put();

	return 0;
}

/* Runtime-PM resume: dss first, then dispc, then the optional DAC clock. */
static int venc_runtime_resume(struct device *dev)
{
	int r;

	r = dss_runtime_get();
	if (r < 0)
		goto err_get_dss;

	r = dispc_runtime_get();
	if (r < 0)
		goto err_get_dispc;

	if (venc.tv_dac_clk)
		clk_enable(venc.tv_dac_clk);

	return 0;

err_get_dispc:
	dss_runtime_put();
err_get_dss:
	return r;
}

static const struct dev_pm_ops venc_pm_ops = {
	.runtime_suspend = venc_runtime_suspend,
	.runtime_resume = venc_runtime_resume,
};

static struct platform_driver omap_venchw_driver = {
	.probe          = omap_venchw_probe,
	.remove         = omap_venchw_remove,
	.driver         = {
		.name   = "omapdss_venc",
		.owner  = THIS_MODULE,
		.pm	= &venc_pm_ops,
	},
};

/* VENC is not usable on OMAP4 here, so registration is a no-op there. */
int venc_init_platform_driver(void)
{
	if (cpu_is_omap44xx())
		return 0;

	return platform_driver_register(&omap_venchw_driver);
}

void venc_uninit_platform_driver(void)
{
	if (cpu_is_omap44xx())
		return;

	return platform_driver_unregister(&omap_venchw_driver);
}
gpl-2.0
hiepgia/kernel_note3_lollipop
sound/soc/omap/omap-mcbsp.c
4791
22333
/*
 * omap-mcbsp.c  --  OMAP ALSA SoC DAI driver using McBSP port
 *
 * Copyright (C) 2008 Nokia Corporation
 *
 * Contact: Jarkko Nikula <jarkko.nikula@bitmer.com>
 *          Peter Ujfalusi <peter.ujfalusi@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/pm_runtime.h>
#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/initval.h>
#include <sound/soc.h>

#include <plat/dma.h>
#include <plat/mcbsp.h>
#include "mcbsp.h"
#include "omap-mcbsp.h"
#include "omap-pcm.h"

#define OMAP_MCBSP_RATES	(SNDRV_PCM_RATE_8000_96000)

/* Builds a sidetone volume mixer control with a custom info callback. */
#define OMAP_MCBSP_SOC_SINGLE_S16_EXT(xname, xmin, xmax, \
	xhandler_get, xhandler_put) \
{	.iface = SNDRV_CTL_ELEM_IFACE_MIXER, .name = xname, \
	.info = omap_mcbsp_st_info_volsw, \
	.get = xhandler_get, .put = xhandler_put, \
	.private_value = (unsigned long) &(struct soc_mixer_control) \
	{.min = xmin, .max = xmax} }

/* McBSP word-length field encodings (RWDLEN/XWDLEN register values). */
enum {
	OMAP_MCBSP_WORD_8 = 0,
	OMAP_MCBSP_WORD_12,
	OMAP_MCBSP_WORD_16,
	OMAP_MCBSP_WORD_20,
	OMAP_MCBSP_WORD_24,
	OMAP_MCBSP_WORD_32,
};

/*
 * Stream DMA parameters. DMA request line and port address are set runtime
 * since they are different between OMAP1 and later OMAPs
 */
static void omap_mcbsp_set_threshold(struct snd_pcm_substream *substream)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
	struct omap_pcm_dma_data *dma_data;
	int words;

	dma_data = snd_soc_dai_get_dma_data(rtd->cpu_dai, substream);

	/* TODO: Currently, MODE_ELEMENT == MODE_FRAME */
	/* NB: the final 'else words = 1' pairs with this OUTER if. */
	if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD)
		/*
		 * Configure McBSP threshold based on either:
		 * packet_size, when the sDMA is in packet mode, or
		 * based on the period size.
		 */
		if (dma_data->packet_size)
			words = dma_data->packet_size;
		else
			words = snd_pcm_lib_period_bytes(substream) /
							(mcbsp->wlen / 8);
	else
		words = 1;

	/* Configure McBSP internal buffer usage */
	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		omap_mcbsp_set_tx_threshold(mcbsp, words);
	else
		omap_mcbsp_set_rx_threshold(mcbsp, words);
}

/*
 * hw_rule: constrain buffer size (frames) to at least FIFO size divided by
 * the channel count, so the buffer never fits entirely inside the FIFO.
 */
static int omap_mcbsp_hwrule_min_buffersize(struct snd_pcm_hw_params *params,
				    struct snd_pcm_hw_rule *rule)
{
	struct snd_interval *buffer_size = hw_param_interval(params,
					SNDRV_PCM_HW_PARAM_BUFFER_SIZE);
	struct snd_interval *channels = hw_param_interval(params,
					SNDRV_PCM_HW_PARAM_CHANNELS);
	struct omap_mcbsp *mcbsp = rule->private;
	struct snd_interval frames;
	int size;

	snd_interval_any(&frames);
	size = mcbsp->pdata->buffer_size;

	frames.min = size / channels->min;
	frames.integer = 1;
	return snd_interval_refine(buffer_size, &frames);
}

/*
 * DAI startup: request the McBSP on first open and, when the port has a
 * FIFO, install the buffer-size rule above plus an even period size.
 */
static int omap_mcbsp_dai_startup(struct snd_pcm_substream *substream,
				  struct snd_soc_dai *cpu_dai)
{
	struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
	int err = 0;

	if (!cpu_dai->active)
		err = omap_mcbsp_request(mcbsp);

	/*
	 * OMAP3 McBSP FIFO is word structured.
	 * McBSP2 has 1024 + 256 = 1280 word long buffer,
	 * McBSP1,3,4,5 has 128 word long buffer
	 * This means that the size of the FIFO depends on the sample format.
	 * For example on McBSP3:
	 * 16bit samples: size is 128 * 2 = 256 bytes
	 * 32bit samples: size is 128 * 4 = 512 bytes
	 * It is simpler to place constraint for buffer and period based on
	 * channels.
	 * McBSP3 as example again (16 or 32 bit samples):
	 * 1 channel (mono): size is 128 frames (128 words)
	 * 2 channels (stereo): size is 128 / 2 = 64 frames (2 * 64 words)
	 * 4 channels: size is 128 / 4 = 32 frames (4 * 32 words)
	 */
	if (mcbsp->pdata->buffer_size) {
		/*
		 * Rule for the buffer size. We should not allow
		 * smaller buffer than the FIFO size to avoid underruns
		 */
		snd_pcm_hw_rule_add(substream->runtime, 0,
				    SNDRV_PCM_HW_PARAM_BUFFER_SIZE,
				    omap_mcbsp_hwrule_min_buffersize,
				    mcbsp,
				    SNDRV_PCM_HW_PARAM_CHANNELS, -1);

		/* Make sure, that the period size is always even */
		snd_pcm_hw_constraint_step(substream->runtime, 0,
					   SNDRV_PCM_HW_PARAM_PERIOD_SIZE, 2);
	}

	return err;
}

/* DAI shutdown: release the port once the last stream closes. */
static void omap_mcbsp_dai_shutdown(struct snd_pcm_substream *substream,
				    struct snd_soc_dai *cpu_dai)
{
	struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);

	if (!cpu_dai->active) {
		omap_mcbsp_free(mcbsp);
		mcbsp->configured = 0;
	}
}

/* Start/stop the TX or RX side of the port on PCM trigger events. */
static int omap_mcbsp_dai_trigger(struct snd_pcm_substream *substream, int cmd,
				  struct snd_soc_dai *cpu_dai)
{
	struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
	int err = 0, play = (substream->stream == SNDRV_PCM_STREAM_PLAYBACK);

	switch (cmd) {
	case SNDRV_PCM_TRIGGER_START:
	case SNDRV_PCM_TRIGGER_RESUME:
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE:
		mcbsp->active++;
		omap_mcbsp_start(mcbsp, play, !play);
		break;

	case SNDRV_PCM_TRIGGER_STOP:
	case SNDRV_PCM_TRIGGER_SUSPEND:
	case SNDRV_PCM_TRIGGER_PAUSE_PUSH:
		omap_mcbsp_stop(mcbsp, play, !play);
		mcbsp->active--;
		break;
	default:
		err = -EINVAL;
	}

	return err;
}

/* Report how many frames currently sit in the McBSP FIFO (DAI delay). */
static snd_pcm_sframes_t omap_mcbsp_dai_delay(
			struct snd_pcm_substream *substream,
			struct snd_soc_dai *dai)
{
	struct snd_soc_pcm_runtime *rtd = substream->private_data;
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
	u16 fifo_use;
	snd_pcm_sframes_t delay;

	if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
		fifo_use = omap_mcbsp_get_tx_delay(mcbsp);
	else
		fifo_use = omap_mcbsp_get_rx_delay(mcbsp);

	/*
	 * Divide the used locations with the channel count to get the
	 * FIFO usage in samples (don't care about partial samples in the
	 * buffer).
	 */
	delay = fifo_use / substream->runtime->channels;

	return delay;
}

/*
 * hw_params: pick sDMA sync mode / packet size from the period size, then
 * (first stream only) program the cached McBSP registers for word length,
 * frame length and, in master mode, the FS period/width.
 */
static int omap_mcbsp_dai_hw_params(struct snd_pcm_substream *substream,
				    struct snd_pcm_hw_params *params,
				    struct snd_soc_dai *cpu_dai)
{
	struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
	struct omap_mcbsp_reg_cfg *regs = &mcbsp->cfg_regs;
	struct omap_pcm_dma_data *dma_data;
	int wlen, channels, wpf, sync_mode = OMAP_DMA_SYNC_ELEMENT;
	int pkt_size = 0;
	unsigned int format, div, framesize, master;

	dma_data = &mcbsp->dma_data[substream->stream];

	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		dma_data->data_type = OMAP_DMA_DATA_TYPE_S16;
		wlen = 16;
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		dma_data->data_type = OMAP_DMA_DATA_TYPE_S32;
		wlen = 32;
		break;
	default:
		return -EINVAL;
	}
	if (mcbsp->pdata->buffer_size) {
		dma_data->set_threshold = omap_mcbsp_set_threshold;
		/* TODO: Currently, MODE_ELEMENT == MODE_FRAME */
		if (mcbsp->dma_op_mode == MCBSP_DMA_MODE_THRESHOLD) {
			int period_words, max_thrsh;

			period_words = params_period_bytes(params) / (wlen / 8);
			if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK)
				max_thrsh = mcbsp->max_tx_thres;
			else
				max_thrsh = mcbsp->max_rx_thres;
			/*
			 * If the period contains less or equal number of words,
			 * we are using the original threshold mode setup:
			 * McBSP threshold = sDMA frame size = period_size
			 * Otherwise we switch to sDMA packet mode:
			 * McBSP threshold = sDMA packet size
			 * sDMA frame size = period size
			 */
			if (period_words > max_thrsh) {
				int divider = 0;

				/*
				 * Look for the biggest threshold value, which
				 * divides the period size evenly.
				 */
				divider = period_words / max_thrsh;
				if (period_words % max_thrsh)
					divider++;
				while (period_words % divider &&
					divider < period_words)
					divider++;
				if (divider == period_words)
					return -EINVAL;

				pkt_size = period_words / divider;
				sync_mode = OMAP_DMA_SYNC_PACKET;
			} else {
				sync_mode = OMAP_DMA_SYNC_FRAME;
			}
		}
	}

	dma_data->sync_mode = sync_mode;
	dma_data->packet_size = pkt_size;

	snd_soc_dai_set_dma_data(cpu_dai, substream, dma_data);

	if (mcbsp->configured) {
		/* McBSP already configured by another stream */
		return 0;
	}

	/* Clear the frame-length and word-length fields before re-setting. */
	regs->rcr2	&= ~(RPHASE | RFRLEN2(0x7f) | RWDLEN2(7));
	regs->xcr2	&= ~(RPHASE | XFRLEN2(0x7f) | XWDLEN2(7));
	regs->rcr1	&= ~(RFRLEN1(0x7f) | RWDLEN1(7));
	regs->xcr1	&= ~(XFRLEN1(0x7f) | XWDLEN1(7));
	format = mcbsp->fmt & SND_SOC_DAIFMT_FORMAT_MASK;
	wpf = channels = params_channels(params);
	if (channels == 2 && (format == SND_SOC_DAIFMT_I2S ||
			      format == SND_SOC_DAIFMT_LEFT_J)) {
		/* Use dual-phase frames */
		regs->rcr2	|= RPHASE;
		regs->xcr2	|= XPHASE;
		/* Set 1 word per (McBSP) frame for phase1 and phase2 */
		wpf--;
		regs->rcr2	|= RFRLEN2(wpf - 1);
		regs->xcr2	|= XFRLEN2(wpf - 1);
	}

	regs->rcr1	|= RFRLEN1(wpf - 1);
	regs->xcr1	|= XFRLEN1(wpf - 1);

	switch (params_format(params)) {
	case SNDRV_PCM_FORMAT_S16_LE:
		/* Set word lengths */
		regs->rcr2	|= RWDLEN2(OMAP_MCBSP_WORD_16);
		regs->rcr1	|= RWDLEN1(OMAP_MCBSP_WORD_16);
		regs->xcr2	|= XWDLEN2(OMAP_MCBSP_WORD_16);
		regs->xcr1	|= XWDLEN1(OMAP_MCBSP_WORD_16);
		break;
	case SNDRV_PCM_FORMAT_S32_LE:
		/* Set word lengths */
		regs->rcr2	|= RWDLEN2(OMAP_MCBSP_WORD_32);
		regs->rcr1	|= RWDLEN1(OMAP_MCBSP_WORD_32);
		regs->xcr2	|= XWDLEN2(OMAP_MCBSP_WORD_32);
		regs->xcr1	|= XWDLEN1(OMAP_MCBSP_WORD_32);
		break;
	default:
		/* Unsupported PCM format */
		return -EINVAL;
	}

	/* In McBSP master modes, FRAME (i.e. sample rate) is generated
	 * by _counting_ BCLKs. Calculate frame size in BCLKs */
	master = mcbsp->fmt & SND_SOC_DAIFMT_MASTER_MASK;
	if (master == SND_SOC_DAIFMT_CBS_CFS) {
		div = mcbsp->clk_div ? mcbsp->clk_div : 1;
		framesize = (mcbsp->in_freq / div) / params_rate(params);

		if (framesize < wlen * channels) {
			printk(KERN_ERR "%s: not enough bandwidth for desired rate and "
					"channels\n", __func__);
			return -EINVAL;
		}
	} else
		framesize = wlen * channels;

	/* Set FS period and length in terms of bit clock periods */
	regs->srgr2	&= ~FPER(0xfff);
	regs->srgr1	&= ~FWID(0xff);
	switch (format) {
	case SND_SOC_DAIFMT_I2S:
	case SND_SOC_DAIFMT_LEFT_J:
		regs->srgr2	|= FPER(framesize - 1);
		regs->srgr1	|= FWID((framesize >> 1) - 1);
		break;
	case SND_SOC_DAIFMT_DSP_A:
	case SND_SOC_DAIFMT_DSP_B:
		regs->srgr2	|= FPER(framesize - 1);
		regs->srgr1	|= FWID(0);
		break;
	}

	omap_mcbsp_config(mcbsp, &mcbsp->cfg_regs);
	mcbsp->wlen = wlen;
	mcbsp->configured = 1;

	return 0;
}

/*
 * This must be called before _set_clkdiv and _set_sysclk since McBSP register
 * cache is initialized here
 */
static int omap_mcbsp_dai_set_dai_fmt(struct snd_soc_dai *cpu_dai,
				      unsigned int fmt)
{
	struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
	struct omap_mcbsp_reg_cfg *regs = &mcbsp->cfg_regs;
	bool inv_fs = false;

	if (mcbsp->configured)
		return 0;

	mcbsp->fmt = fmt;
	memset(regs, 0, sizeof(*regs));
	/* Generic McBSP register settings */
	regs->spcr2	|= XINTM(3) | FREE;
	regs->spcr1	|= RINTM(3);
	/* RFIG and XFIG are not defined in 34xx */
	if (!cpu_is_omap34xx() && !cpu_is_omap44xx()) {
		regs->rcr2	|= RFIG;
		regs->xcr2	|= XFIG;
	}
	if (cpu_is_omap2430() || cpu_is_omap34xx() || cpu_is_omap44xx()) {
		regs->xccr = DXENDLY(1) | XDMAEN | XDISABLE;
		regs->rccr = RFULL_CYCLE | RDMAEN | RDISABLE;
	}

	switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) {
	case SND_SOC_DAIFMT_I2S:
		/* 1-bit data delay */
		regs->rcr2	|= RDATDLY(1);
		regs->xcr2	|= XDATDLY(1);
		break;
	case SND_SOC_DAIFMT_LEFT_J:
		/* 0-bit data delay */
		regs->rcr2	|= RDATDLY(0);
		regs->xcr2	|= XDATDLY(0);
		regs->spcr1	|= RJUST(2);
		/* Invert FS polarity configuration */
		inv_fs = true;
		break;
	case SND_SOC_DAIFMT_DSP_A:
		/* 1-bit data delay */
		regs->rcr2	|= RDATDLY(1);
		regs->xcr2	|= XDATDLY(1);
		/* Invert FS polarity configuration */
		inv_fs = true;
		break;
	case SND_SOC_DAIFMT_DSP_B:
		/* 0-bit data delay */
		regs->rcr2	|= RDATDLY(0);
		regs->xcr2	|= XDATDLY(0);
		/* Invert FS polarity configuration */
		inv_fs = true;
		break;
	default:
		/* Unsupported data format */
		return -EINVAL;
	}

	switch (fmt & SND_SOC_DAIFMT_MASTER_MASK) {
	case SND_SOC_DAIFMT_CBS_CFS:
		/* McBSP master. Set FS and bit clocks as outputs */
		regs->pcr0	|= FSXM | FSRM | CLKXM | CLKRM;
		/* Sample rate generator drives the FS */
		regs->srgr2	|= FSGM;
		break;
	case SND_SOC_DAIFMT_CBM_CFM:
		/* McBSP slave */
		break;
	default:
		/* Unsupported master/slave configuration */
		return -EINVAL;
	}

	/* Set bit clock (CLKX/CLKR) and FS polarities */
	switch (fmt & SND_SOC_DAIFMT_INV_MASK) {
	case SND_SOC_DAIFMT_NB_NF:
		/*
		 * Normal BCLK + FS.
		 * FS active low. TX data driven on falling edge of bit clock
		 * and RX data sampled on rising edge of bit clock.
		 */
		regs->pcr0	|= FSXP | FSRP | CLKXP | CLKRP;
		break;
	case SNDRV_PCM_TRIGGER_PAUSE_RELEASE == 0 ? SND_SOC_DAIFMT_NB_IF : SND_SOC_DAIFMT_NB_IF:
		regs->pcr0	|= CLKXP | CLKRP;
		break;
	case SND_SOC_DAIFMT_IB_NF:
		regs->pcr0	|= FSXP | FSRP;
		break;
	case SND_SOC_DAIFMT_IB_IF:
		break;
	default:
		return -EINVAL;
	}
	if (inv_fs == true)
		regs->pcr0 ^= FSXP | FSRP;

	return 0;
}

/* Set the sample-rate-generator clock divider (CLKGDV field). */
static int omap_mcbsp_dai_set_clkdiv(struct snd_soc_dai *cpu_dai,
				     int div_id, int div)
{
	struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
	struct omap_mcbsp_reg_cfg *regs = &mcbsp->cfg_regs;

	if (div_id != OMAP_MCBSP_CLKGDV)
		return -ENODEV;

	mcbsp->clk_div = div;
	regs->srgr1	&= ~CLKGDV(0xff);
	regs->srgr1	|= CLKGDV(div - 1);

	return 0;
}

/*
 * Select the McBSP reference clock / 6-pin signal muxing.  Refuses to
 * change the rate while the port is active (returns -EBUSY unless the
 * requested frequency is already set).
 */
static int omap_mcbsp_dai_set_dai_sysclk(struct snd_soc_dai *cpu_dai,
					 int clk_id, unsigned int freq,
					 int dir)
{
	struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
	struct omap_mcbsp_reg_cfg *regs = &mcbsp->cfg_regs;
	int err = 0;

	if (mcbsp->active) {
		if (freq == mcbsp->in_freq)
			return 0;
		else
			return -EBUSY;
	}

	if (clk_id == OMAP_MCBSP_SYSCLK_CLK ||
	    clk_id == OMAP_MCBSP_SYSCLK_CLKS_FCLK ||
	    clk_id == OMAP_MCBSP_SYSCLK_CLKS_EXT ||
	    clk_id == OMAP_MCBSP_SYSCLK_CLKX_EXT ||
	    clk_id == OMAP_MCBSP_SYSCLK_CLKR_EXT) {
		mcbsp->in_freq = freq;
		regs->srgr2	&= ~CLKSM;
		regs->pcr0	&= ~SCLKME;
	} else if (cpu_class_is_omap1()) {
		/*
		 * McBSP CLKR/FSR signal muxing functions are only available on
		 * OMAP2 or newer versions
		 */
		return -EINVAL;
	}

	switch (clk_id) {
	case OMAP_MCBSP_SYSCLK_CLK:
		regs->srgr2	|= CLKSM;
		break;
	case OMAP_MCBSP_SYSCLK_CLKS_FCLK:
		if (cpu_class_is_omap1()) {
			err = -EINVAL;
			break;
		}
		err = omap2_mcbsp_set_clks_src(mcbsp,
					       MCBSP_CLKS_PRCM_SRC);
		break;
	case OMAP_MCBSP_SYSCLK_CLKS_EXT:
		if (cpu_class_is_omap1()) {
			err = 0;
			break;
		}
		err = omap2_mcbsp_set_clks_src(mcbsp,
					       MCBSP_CLKS_PAD_SRC);
		break;

	case OMAP_MCBSP_SYSCLK_CLKX_EXT:
		regs->srgr2	|= CLKSM;
		/* fallthrough: CLKX_EXT also needs SCLKME, set below */
	case OMAP_MCBSP_SYSCLK_CLKR_EXT:
		regs->pcr0	|= SCLKME;
		break;

	case OMAP_MCBSP_CLKR_SRC_CLKR:
		err = omap_mcbsp_6pin_src_mux(mcbsp, CLKR_SRC_CLKR);
		break;
	case OMAP_MCBSP_CLKR_SRC_CLKX:
		err = omap_mcbsp_6pin_src_mux(mcbsp, CLKR_SRC_CLKX);
		break;
	case OMAP_MCBSP_FSR_SRC_FSR:
		err = omap_mcbsp_6pin_src_mux(mcbsp, FSR_SRC_FSR);
		break;
	case OMAP_MCBSP_FSR_SRC_FSX:
		err = omap_mcbsp_6pin_src_mux(mcbsp, FSR_SRC_FSX);
		break;
	default:
		err = -ENODEV;
	}

	return err;
}

static const struct snd_soc_dai_ops mcbsp_dai_ops = {
	.startup	= omap_mcbsp_dai_startup,
	.shutdown	= omap_mcbsp_dai_shutdown,
	.trigger	= omap_mcbsp_dai_trigger,
	.delay		= omap_mcbsp_dai_delay,
	.hw_params	= omap_mcbsp_dai_hw_params,
	.set_fmt	= omap_mcbsp_dai_set_dai_fmt,
	.set_clkdiv	= omap_mcbsp_dai_set_clkdiv,
	.set_sysclk	= omap_mcbsp_dai_set_dai_sysclk,
};

/* DAI probe: enable runtime PM for the port device. */
static int omap_mcbsp_probe(struct snd_soc_dai *dai)
{
	struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(dai);

	pm_runtime_enable(mcbsp->dev);

	return 0;
}

static int omap_mcbsp_remove(struct snd_soc_dai *dai)
{
	struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(dai);

	pm_runtime_disable(mcbsp->dev);

	return 0;
}

static struct snd_soc_dai_driver omap_mcbsp_dai = {
	.probe = omap_mcbsp_probe,
	.remove = omap_mcbsp_remove,
	.playback = {
		.channels_min = 1,
		.channels_max = 16,
		.rates = OMAP_MCBSP_RATES,
		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE,
	},
	.capture = {
		.channels_min = 1,
		.channels_max = 16,
		.rates = OMAP_MCBSP_RATES,
		.formats = SNDRV_PCM_FMTBIT_S16_LE | SNDRV_PCM_FMTBIT_S32_LE,
	},
	.ops = &mcbsp_dai_ops,
};

/* info callback for the sidetone volume controls (range from mixer ctl). */
static int omap_mcbsp_st_info_volsw(struct snd_kcontrol *kcontrol,
			struct snd_ctl_elem_info *uinfo)
{
	struct soc_mixer_control *mc =
		(struct soc_mixer_control *)kcontrol->private_value;
	int max = mc->max;
	int min = mc->min;

	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->count = 1;
	uinfo->value.integer.min = min;
	uinfo->value.integer.max = max;
	return 0;
}

/* Generates the per-channel sidetone gain 'put' callbacks. */
#define OMAP_MCBSP_ST_SET_CHANNEL_VOLUME(channel)			\
static int								\
omap_mcbsp_set_st_ch##channel##_volume(struct snd_kcontrol *kc,		\
					struct snd_ctl_elem_value *uc)	\
{									\
	struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kc);		\
	struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);	\
	struct soc_mixer_control *mc =					\
		(struct soc_mixer_control *)kc->private_value;		\
	int max = mc->max;						\
	int min = mc->min;						\
	int val = uc->value.integer.value[0];				\
									\
	if (val < min || val > max)					\
		return -EINVAL;						\
									\
	/* OMAP McBSP implementation uses index values 0..4 */		\
	return omap_st_set_chgain(mcbsp, channel, val);			\
}

/* Generates the per-channel sidetone gain 'get' callbacks. */
#define OMAP_MCBSP_ST_GET_CHANNEL_VOLUME(channel)			\
static int								\
omap_mcbsp_get_st_ch##channel##_volume(struct snd_kcontrol *kc,		\
					struct snd_ctl_elem_value *uc)	\
{									\
	struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kc);		\
	struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);	\
	s16 chgain;							\
									\
	if (omap_st_get_chgain(mcbsp, channel, &chgain))		\
		return -EAGAIN;						\
									\
	uc->value.integer.value[0] = chgain;				\
	return 0;							\
}

OMAP_MCBSP_ST_SET_CHANNEL_VOLUME(0)
OMAP_MCBSP_ST_SET_CHANNEL_VOLUME(1)
OMAP_MCBSP_ST_GET_CHANNEL_VOLUME(0)
OMAP_MCBSP_ST_GET_CHANNEL_VOLUME(1)

/* Sidetone on/off switch; returns 1 when the value actually changed. */
static int omap_mcbsp_st_put_mode(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
	struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);
	u8 value = ucontrol->value.integer.value[0];

	if (value == omap_st_is_enabled(mcbsp))
		return 0;

	if (value)
		omap_st_enable(mcbsp);
	else
		omap_st_disable(mcbsp);

	return 1;
}

static int omap_mcbsp_st_get_mode(struct snd_kcontrol *kcontrol,
				struct snd_ctl_elem_value *ucontrol)
{
	struct snd_soc_dai *cpu_dai = snd_kcontrol_chip(kcontrol);
	struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);

	ucontrol->value.integer.value[0] = omap_st_is_enabled(mcbsp);
	return 0;
}

static const struct snd_kcontrol_new omap_mcbsp2_st_controls[] = {
	SOC_SINGLE_EXT("McBSP2 Sidetone Switch", 1, 0, 1, 0,
			omap_mcbsp_st_get_mode, omap_mcbsp_st_put_mode),
	OMAP_MCBSP_SOC_SINGLE_S16_EXT("McBSP2 Sidetone Channel 0 Volume",
				      -32768, 32767,
				      omap_mcbsp_get_st_ch0_volume,
				      omap_mcbsp_set_st_ch0_volume),
	OMAP_MCBSP_SOC_SINGLE_S16_EXT("McBSP2 Sidetone Channel 1 Volume",
				      -32768, 32767,
				      omap_mcbsp_get_st_ch1_volume,
				      omap_mcbsp_set_st_ch1_volume),
};

static const struct snd_kcontrol_new omap_mcbsp3_st_controls[] = {
	SOC_SINGLE_EXT("McBSP3 Sidetone Switch", 2, 0, 1, 0,
			omap_mcbsp_st_get_mode, omap_mcbsp_st_put_mode),
	OMAP_MCBSP_SOC_SINGLE_S16_EXT("McBSP3 Sidetone Channel 0 Volume",
				      -32768, 32767,
				      omap_mcbsp_get_st_ch0_volume,
				      omap_mcbsp_set_st_ch0_volume),
	OMAP_MCBSP_SOC_SINGLE_S16_EXT("McBSP3 Sidetone Channel 1 Volume",
				      -32768, 32767,
				      omap_mcbsp_get_st_ch1_volume,
				      omap_mcbsp_set_st_ch1_volume),
};

/*
 * Register the sidetone ALSA controls for a card.  Only McBSP2 and McBSP3
 * carry sidetone hardware; other ids return -EINVAL, and ports without
 * st_data return -ENODEV.
 */
int omap_mcbsp_st_add_controls(struct snd_soc_pcm_runtime *rtd)
{
	struct snd_soc_dai *cpu_dai = rtd->cpu_dai;
	struct omap_mcbsp *mcbsp = snd_soc_dai_get_drvdata(cpu_dai);

	if (!mcbsp->st_data)
		return -ENODEV;

	switch (cpu_dai->id) {
	case 2: /* McBSP 2 */
		return snd_soc_add_dai_controls(cpu_dai,
					omap_mcbsp2_st_controls,
					ARRAY_SIZE(omap_mcbsp2_st_controls));
	case 3: /* McBSP 3 */
		return snd_soc_add_dai_controls(cpu_dai,
					omap_mcbsp3_st_controls,
					ARRAY_SIZE(omap_mcbsp3_st_controls));
	default:
		break;
	}

	return -EINVAL;
}
EXPORT_SYMBOL_GPL(omap_mcbsp_st_add_controls);

/* Platform probe: allocate per-port state, init the port, register the DAI. */
static __devinit int asoc_mcbsp_probe(struct platform_device *pdev)
{
	struct omap_mcbsp_platform_data *pdata = dev_get_platdata(&pdev->dev);
	struct omap_mcbsp *mcbsp;
	int ret;

	if (!pdata) {
		dev_err(&pdev->dev, "missing platform data.\n");
		return -EINVAL;
	}
	mcbsp = devm_kzalloc(&pdev->dev, sizeof(struct omap_mcbsp), GFP_KERNEL);
	if (!mcbsp)
		return -ENOMEM;

	mcbsp->id = pdev->id;
	mcbsp->pdata = pdata;
	mcbsp->dev = &pdev->dev;
	platform_set_drvdata(pdev, mcbsp);

	ret = omap_mcbsp_init(pdev);
	if (!ret)
		return snd_soc_register_dai(&pdev->dev, &omap_mcbsp_dai);

	return ret;
}

static int __devexit asoc_mcbsp_remove(struct platform_device *pdev)
{
	struct omap_mcbsp *mcbsp = platform_get_drvdata(pdev);

	snd_soc_unregister_dai(&pdev->dev);

	/* Let the board hook release any resources it claimed. */
	if (mcbsp->pdata->ops && mcbsp->pdata->ops->free)
		mcbsp->pdata->ops->free(mcbsp->id);

	omap_mcbsp_sysfs_remove(mcbsp);

	clk_put(mcbsp->fclk);

	platform_set_drvdata(pdev, NULL);

	return 0;
}

static struct platform_driver asoc_mcbsp_driver = {
	.driver = {
			.name = "omap-mcbsp",
			.owner = THIS_MODULE,
	},

	.probe = asoc_mcbsp_probe,
	.remove = __devexit_p(asoc_mcbsp_remove),
};

module_platform_driver(asoc_mcbsp_driver);

MODULE_AUTHOR("Jarkko Nikula <jarkko.nikula@bitmer.com>");
MODULE_DESCRIPTION("OMAP I2S SoC Interface");
MODULE_LICENSE("GPL");
gpl-2.0
TheEdge-/Leaping_kernel
tools/perf/util/ui/browsers/hists.c
4791
34426
#include <stdio.h> #include "../libslang.h" #include <stdlib.h> #include <string.h> #include <newt.h> #include <linux/rbtree.h> #include "../../evsel.h" #include "../../evlist.h" #include "../../hist.h" #include "../../pstack.h" #include "../../sort.h" #include "../../util.h" #include "../browser.h" #include "../helpline.h" #include "../util.h" #include "../ui.h" #include "map.h" struct hist_browser { struct ui_browser b; struct hists *hists; struct hist_entry *he_selection; struct map_symbol *selection; bool has_symbols; }; static int hists__browser_title(struct hists *self, char *bf, size_t size, const char *ev_name); static void hist_browser__refresh_dimensions(struct hist_browser *self) { /* 3 == +/- toggle symbol before actual hist_entry rendering */ self->b.width = 3 + (hists__sort_list_width(self->hists) + sizeof("[k]")); } static void hist_browser__reset(struct hist_browser *self) { self->b.nr_entries = self->hists->nr_entries; hist_browser__refresh_dimensions(self); ui_browser__reset_index(&self->b); } static char tree__folded_sign(bool unfolded) { return unfolded ? '-' : '+'; } static char map_symbol__folded(const struct map_symbol *self) { return self->has_children ? tree__folded_sign(self->unfolded) : ' '; } static char hist_entry__folded(const struct hist_entry *self) { return map_symbol__folded(&self->ms); } static char callchain_list__folded(const struct callchain_list *self) { return map_symbol__folded(&self->ms); } static void map_symbol__set_folding(struct map_symbol *self, bool unfold) { self->unfolded = unfold ? 
		self->has_children : false;
}

/*
 * Count how many screen rows the children of @self occupy, recursively.
 * A child whose chain is folded ('+') contributes only its own visible
 * entries; an unfolded one ('-') also contributes its subtree.
 */
static int callchain_node__count_rows_rb_tree(struct callchain_node *self)
{
	int n = 0;
	struct rb_node *nd;

	for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
		struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
		struct callchain_list *chain;
		char folded_sign = ' '; /* No children */

		list_for_each_entry(chain, &child->val, list) {
			++n;
			/* We need this because we may not have children */
			folded_sign = callchain_list__folded(chain);
			if (folded_sign == '+')
				break; /* folded: rest of this branch is hidden */
		}

		if (folded_sign == '-') /* Have children and they're unfolded */
			n += callchain_node__count_rows_rb_tree(child);
	}

	return n;
}

/*
 * Rows needed for one top-level callchain node: its own entries plus,
 * when the last entry is unfolded, the rows of its rb-tree children.
 */
static int callchain_node__count_rows(struct callchain_node *node)
{
	struct callchain_list *chain;
	bool unfolded = false;
	int n = 0;

	list_for_each_entry(chain, &node->val, list) {
		++n;
		/* remembers the fold state of the last entry in the chain */
		unfolded = chain->ms.unfolded;
	}

	if (unfolded)
		n += callchain_node__count_rows_rb_tree(node);

	return n;
}

/* Total visible rows for a whole callchain rb-tree (sum over roots). */
static int callchain__count_rows(struct rb_root *chain)
{
	struct rb_node *nd;
	int n = 0;

	for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
		struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
		n += callchain_node__count_rows(node);
	}

	return n;
}

/*
 * Flip the folded/unfolded state of @self.
 * Returns true when the state actually changed, false when @self is NULL
 * or has no children (nothing to fold).
 */
static bool map_symbol__toggle_fold(struct map_symbol *self)
{
	if (!self)
		return false;

	if (!self->has_children)
		return false;

	self->unfolded = !self->unfolded;
	return true;
}

/*
 * Precompute ms.has_children for every callchain_list below @self so
 * the folding sign can be rendered without re-walking the tree.
 * Only the first entry of a child owns the child's rb subtree; later
 * entries only have children when they are the chain's last entry AND
 * the rb subtree is non-empty.
 */
static void callchain_node__init_have_children_rb_tree(struct callchain_node *self)
{
	struct rb_node *nd = rb_first(&self->rb_root);

	for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) {
		struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node);
		struct callchain_list *chain;
		bool first = true;

		list_for_each_entry(chain, &child->val, list) {
			if (first) {
				first = false;
				chain->ms.has_children = chain->list.next != &child->val ||
							 !RB_EMPTY_ROOT(&child->rb_root);
			} else
				chain->ms.has_children = chain->list.next == &child->val &&
							 !RB_EMPTY_ROOT(&child->rb_root);
		}
callchain_node__init_have_children_rb_tree(child); } } static void callchain_node__init_have_children(struct callchain_node *self) { struct callchain_list *chain; list_for_each_entry(chain, &self->val, list) chain->ms.has_children = !RB_EMPTY_ROOT(&self->rb_root); callchain_node__init_have_children_rb_tree(self); } static void callchain__init_have_children(struct rb_root *self) { struct rb_node *nd; for (nd = rb_first(self); nd; nd = rb_next(nd)) { struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); callchain_node__init_have_children(node); } } static void hist_entry__init_have_children(struct hist_entry *self) { if (!self->init_have_children) { self->ms.has_children = !RB_EMPTY_ROOT(&self->sorted_chain); callchain__init_have_children(&self->sorted_chain); self->init_have_children = true; } } static bool hist_browser__toggle_fold(struct hist_browser *self) { if (map_symbol__toggle_fold(self->selection)) { struct hist_entry *he = self->he_selection; hist_entry__init_have_children(he); self->hists->nr_entries -= he->nr_rows; if (he->ms.unfolded) he->nr_rows = callchain__count_rows(&he->sorted_chain); else he->nr_rows = 0; self->hists->nr_entries += he->nr_rows; self->b.nr_entries = self->hists->nr_entries; return true; } /* If it doesn't have children, no toggling performed */ return false; } static int callchain_node__set_folding_rb_tree(struct callchain_node *self, bool unfold) { int n = 0; struct rb_node *nd; for (nd = rb_first(&self->rb_root); nd; nd = rb_next(nd)) { struct callchain_node *child = rb_entry(nd, struct callchain_node, rb_node); struct callchain_list *chain; bool has_children = false; list_for_each_entry(chain, &child->val, list) { ++n; map_symbol__set_folding(&chain->ms, unfold); has_children = chain->ms.has_children; } if (has_children) n += callchain_node__set_folding_rb_tree(child, unfold); } return n; } static int callchain_node__set_folding(struct callchain_node *node, bool unfold) { struct callchain_list *chain; bool 
	has_children = false;
	int n = 0;

	list_for_each_entry(chain, &node->val, list) {
		++n;
		map_symbol__set_folding(&chain->ms, unfold);
		/* tracks whether the last entry can recurse into the rb subtree */
		has_children = chain->ms.has_children;
	}

	if (has_children)
		n += callchain_node__set_folding_rb_tree(node, unfold);

	return n;
}

/*
 * Apply @unfold to every node of a callchain rb-tree.
 * Returns the number of rows the callchain now occupies, so the caller
 * can fix up its nr_rows bookkeeping.
 */
static int callchain__set_folding(struct rb_root *chain, bool unfold)
{
	struct rb_node *nd;
	int n = 0;

	for (nd = rb_first(chain); nd; nd = rb_next(nd)) {
		struct callchain_node *node = rb_entry(nd, struct callchain_node, rb_node);
		n += callchain_node__set_folding(node, unfold);
	}

	return n;
}

/*
 * Fold/unfold one hist_entry and its whole callchain, updating
 * self->nr_rows to the number of extra rows now shown below it.
 */
static void hist_entry__set_folding(struct hist_entry *self, bool unfold)
{
	hist_entry__init_have_children(self);
	map_symbol__set_folding(&self->ms, unfold);

	if (self->ms.has_children) {
		int n = callchain__set_folding(&self->sorted_chain, unfold);
		self->nr_rows = unfold ? n : 0;
	} else
		self->nr_rows = 0;
}

/*
 * Fold/unfold every entry in the hists tree ('C'ollapse / 'E'xpand all)
 * and recompute the total entry count from scratch.
 */
static void hists__set_folding(struct hists *self, bool unfold)
{
	struct rb_node *nd;

	self->nr_entries = 0;

	for (nd = rb_first(&self->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *he = rb_entry(nd, struct hist_entry, rb_node);
		hist_entry__set_folding(he, unfold);
		self->nr_entries += 1 + he->nr_rows;
	}
}

/* Browser-level wrapper: refold everything and rewind the view. */
static void hist_browser__set_folding(struct hist_browser *self, bool unfold)
{
	hists__set_folding(self->hists, unfold);
	self->b.nr_entries = self->hists->nr_entries;
	/* Go to the start, we may be way after valid entries after a collapse */
	ui_browser__reset_index(&self->b);
}

/* Warn the user (4-line box) that the kernel is dropping events. */
static void ui_browser__warn_lost_events(struct ui_browser *browser)
{
	ui_browser__warning(browser, 4,
		"Events are being lost, check IO/CPU overload!\n\n"
		"You may want to run 'perf' using a RT scheduler policy:\n\n"
		" perf top -r 80\n\n"
		"Or reduce the sampling frequency.");
}

/*
 * Main interactive loop of the hists browser; @timer (with @arg) is
 * invoked every @delay_secs to let the caller refresh live data.
 * Returns the key that caused the loop to exit.
 */
static int hist_browser__run(struct hist_browser *self, const char *ev_name,
			     void(*timer)(void *arg), void *arg, int delay_secs)
{
	int key;
	char title[160];

	self->b.entries = &self->hists->entries;
	self->b.nr_entries = self->hists->nr_entries;
hist_browser__refresh_dimensions(self); hists__browser_title(self->hists, title, sizeof(title), ev_name); if (ui_browser__show(&self->b, title, "Press '?' for help on key bindings") < 0) return -1; while (1) { key = ui_browser__run(&self->b, delay_secs); switch (key) { case K_TIMER: timer(arg); ui_browser__update_nr_entries(&self->b, self->hists->nr_entries); if (self->hists->stats.nr_lost_warned != self->hists->stats.nr_events[PERF_RECORD_LOST]) { self->hists->stats.nr_lost_warned = self->hists->stats.nr_events[PERF_RECORD_LOST]; ui_browser__warn_lost_events(&self->b); } hists__browser_title(self->hists, title, sizeof(title), ev_name); ui_browser__show_title(&self->b, title); continue; case 'D': { /* Debug */ static int seq; struct hist_entry *h = rb_entry(self->b.top, struct hist_entry, rb_node); ui_helpline__pop(); ui_helpline__fpush("%d: nr_ent=(%d,%d), height=%d, idx=%d, fve: idx=%d, row_off=%d, nrows=%d", seq++, self->b.nr_entries, self->hists->nr_entries, self->b.height, self->b.index, self->b.top_idx, h->row_offset, h->nr_rows); } break; case 'C': /* Collapse the whole world. */ hist_browser__set_folding(self, false); break; case 'E': /* Expand the whole world. 
*/ hist_browser__set_folding(self, true); break; case K_ENTER: if (hist_browser__toggle_fold(self)) break; /* fall thru */ default: goto out; } } out: ui_browser__hide(&self->b); return key; } static char *callchain_list__sym_name(struct callchain_list *self, char *bf, size_t bfsize) { if (self->ms.sym) return self->ms.sym->name; snprintf(bf, bfsize, "%#" PRIx64, self->ip); return bf; } #define LEVEL_OFFSET_STEP 3 static int hist_browser__show_callchain_node_rb_tree(struct hist_browser *self, struct callchain_node *chain_node, u64 total, int level, unsigned short row, off_t *row_offset, bool *is_current_entry) { struct rb_node *node; int first_row = row, width, offset = level * LEVEL_OFFSET_STEP; u64 new_total, remaining; if (callchain_param.mode == CHAIN_GRAPH_REL) new_total = chain_node->children_hit; else new_total = total; remaining = new_total; node = rb_first(&chain_node->rb_root); while (node) { struct callchain_node *child = rb_entry(node, struct callchain_node, rb_node); struct rb_node *next = rb_next(node); u64 cumul = callchain_cumul_hits(child); struct callchain_list *chain; char folded_sign = ' '; int first = true; int extra_offset = 0; remaining -= cumul; list_for_each_entry(chain, &child->val, list) { char ipstr[BITS_PER_LONG / 4 + 1], *alloc_str; const char *str; int color; bool was_first = first; if (first) first = false; else extra_offset = LEVEL_OFFSET_STEP; folded_sign = callchain_list__folded(chain); if (*row_offset != 0) { --*row_offset; goto do_next; } alloc_str = NULL; str = callchain_list__sym_name(chain, ipstr, sizeof(ipstr)); if (was_first) { double percent = cumul * 100.0 / new_total; if (asprintf(&alloc_str, "%2.2f%% %s", percent, str) < 0) str = "Not enough memory!"; else str = alloc_str; } color = HE_COLORSET_NORMAL; width = self->b.width - (offset + extra_offset + 2); if (ui_browser__is_current_entry(&self->b, row)) { self->selection = &chain->ms; color = HE_COLORSET_SELECTED; *is_current_entry = true; } 
ui_browser__set_color(&self->b, color); ui_browser__gotorc(&self->b, row, 0); slsmg_write_nstring(" ", offset + extra_offset); slsmg_printf("%c ", folded_sign); slsmg_write_nstring(str, width); free(alloc_str); if (++row == self->b.height) goto out; do_next: if (folded_sign == '+') break; } if (folded_sign == '-') { const int new_level = level + (extra_offset ? 2 : 1); row += hist_browser__show_callchain_node_rb_tree(self, child, new_total, new_level, row, row_offset, is_current_entry); } if (row == self->b.height) goto out; node = next; } out: return row - first_row; } static int hist_browser__show_callchain_node(struct hist_browser *self, struct callchain_node *node, int level, unsigned short row, off_t *row_offset, bool *is_current_entry) { struct callchain_list *chain; int first_row = row, offset = level * LEVEL_OFFSET_STEP, width = self->b.width - offset; char folded_sign = ' '; list_for_each_entry(chain, &node->val, list) { char ipstr[BITS_PER_LONG / 4 + 1], *s; int color; folded_sign = callchain_list__folded(chain); if (*row_offset != 0) { --*row_offset; continue; } color = HE_COLORSET_NORMAL; if (ui_browser__is_current_entry(&self->b, row)) { self->selection = &chain->ms; color = HE_COLORSET_SELECTED; *is_current_entry = true; } s = callchain_list__sym_name(chain, ipstr, sizeof(ipstr)); ui_browser__gotorc(&self->b, row, 0); ui_browser__set_color(&self->b, color); slsmg_write_nstring(" ", offset); slsmg_printf("%c ", folded_sign); slsmg_write_nstring(s, width - 2); if (++row == self->b.height) goto out; } if (folded_sign == '-') row += hist_browser__show_callchain_node_rb_tree(self, node, self->hists->stats.total_period, level + 1, row, row_offset, is_current_entry); out: return row - first_row; } static int hist_browser__show_callchain(struct hist_browser *self, struct rb_root *chain, int level, unsigned short row, off_t *row_offset, bool *is_current_entry) { struct rb_node *nd; int first_row = row; for (nd = rb_first(chain); nd; nd = rb_next(nd)) { struct 
callchain_node *node = rb_entry(nd, struct callchain_node, rb_node); row += hist_browser__show_callchain_node(self, node, level, row, row_offset, is_current_entry); if (row == self->b.height) break; } return row - first_row; } static int hist_browser__show_entry(struct hist_browser *self, struct hist_entry *entry, unsigned short row) { char s[256]; double percent; int printed = 0; int width = self->b.width - 6; /* The percentage */ char folded_sign = ' '; bool current_entry = ui_browser__is_current_entry(&self->b, row); off_t row_offset = entry->row_offset; if (current_entry) { self->he_selection = entry; self->selection = &entry->ms; } if (symbol_conf.use_callchain) { hist_entry__init_have_children(entry); folded_sign = hist_entry__folded(entry); } if (row_offset == 0) { hist_entry__snprintf(entry, s, sizeof(s), self->hists); percent = (entry->period * 100.0) / self->hists->stats.total_period; ui_browser__set_percent_color(&self->b, percent, current_entry); ui_browser__gotorc(&self->b, row, 0); if (symbol_conf.use_callchain) { slsmg_printf("%c ", folded_sign); width -= 2; } slsmg_printf(" %5.2f%%", percent); /* The scroll bar isn't being used */ if (!self->b.navkeypressed) width += 1; if (!current_entry || !self->b.navkeypressed) ui_browser__set_color(&self->b, HE_COLORSET_NORMAL); if (symbol_conf.show_nr_samples) { slsmg_printf(" %11u", entry->nr_events); width -= 12; } if (symbol_conf.show_total_period) { slsmg_printf(" %12" PRIu64, entry->period); width -= 13; } slsmg_write_nstring(s, width); ++row; ++printed; } else --row_offset; if (folded_sign == '-' && row != self->b.height) { printed += hist_browser__show_callchain(self, &entry->sorted_chain, 1, row, &row_offset, &current_entry); if (current_entry) self->he_selection = entry; } return printed; } static void ui_browser__hists_init_top(struct ui_browser *browser) { if (browser->top == NULL) { struct hist_browser *hb; hb = container_of(browser, struct hist_browser, b); browser->top = 
rb_first(&hb->hists->entries); } } static unsigned int hist_browser__refresh(struct ui_browser *self) { unsigned row = 0; struct rb_node *nd; struct hist_browser *hb = container_of(self, struct hist_browser, b); ui_browser__hists_init_top(self); for (nd = self->top; nd; nd = rb_next(nd)) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); if (h->filtered) continue; row += hist_browser__show_entry(hb, h, row); if (row == self->height) break; } return row; } static struct rb_node *hists__filter_entries(struct rb_node *nd) { while (nd != NULL) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); if (!h->filtered) return nd; nd = rb_next(nd); } return NULL; } static struct rb_node *hists__filter_prev_entries(struct rb_node *nd) { while (nd != NULL) { struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); if (!h->filtered) return nd; nd = rb_prev(nd); } return NULL; } static void ui_browser__hists_seek(struct ui_browser *self, off_t offset, int whence) { struct hist_entry *h; struct rb_node *nd; bool first = true; if (self->nr_entries == 0) return; ui_browser__hists_init_top(self); switch (whence) { case SEEK_SET: nd = hists__filter_entries(rb_first(self->entries)); break; case SEEK_CUR: nd = self->top; goto do_offset; case SEEK_END: nd = hists__filter_prev_entries(rb_last(self->entries)); first = false; break; default: return; } /* * Moves not relative to the first visible entry invalidates its * row_offset: */ h = rb_entry(self->top, struct hist_entry, rb_node); h->row_offset = 0; /* * Here we have to check if nd is expanded (+), if it is we can't go * the next top level hist_entry, instead we must compute an offset of * what _not_ to show and not change the first visible entry. * * This offset increments when we are going from top to bottom and * decreases when we're going from bottom to top. 
* * As we don't have backpointers to the top level in the callchains * structure, we need to always print the whole hist_entry callchain, * skipping the first ones that are before the first visible entry * and stop when we printed enough lines to fill the screen. */ do_offset: if (offset > 0) { do { h = rb_entry(nd, struct hist_entry, rb_node); if (h->ms.unfolded) { u16 remaining = h->nr_rows - h->row_offset; if (offset > remaining) { offset -= remaining; h->row_offset = 0; } else { h->row_offset += offset; offset = 0; self->top = nd; break; } } nd = hists__filter_entries(rb_next(nd)); if (nd == NULL) break; --offset; self->top = nd; } while (offset != 0); } else if (offset < 0) { while (1) { h = rb_entry(nd, struct hist_entry, rb_node); if (h->ms.unfolded) { if (first) { if (-offset > h->row_offset) { offset += h->row_offset; h->row_offset = 0; } else { h->row_offset += offset; offset = 0; self->top = nd; break; } } else { if (-offset > h->nr_rows) { offset += h->nr_rows; h->row_offset = 0; } else { h->row_offset = h->nr_rows + offset; offset = 0; self->top = nd; break; } } } nd = hists__filter_prev_entries(rb_prev(nd)); if (nd == NULL) break; ++offset; self->top = nd; if (offset == 0) { /* * Last unfiltered hist_entry, check if it is * unfolded, if it is then we should have * row_offset at its last entry. 
*/ h = rb_entry(nd, struct hist_entry, rb_node); if (h->ms.unfolded) h->row_offset = h->nr_rows; break; } first = false; } } else { self->top = nd; h = rb_entry(nd, struct hist_entry, rb_node); h->row_offset = 0; } } static struct hist_browser *hist_browser__new(struct hists *hists) { struct hist_browser *self = zalloc(sizeof(*self)); if (self) { self->hists = hists; self->b.refresh = hist_browser__refresh; self->b.seek = ui_browser__hists_seek; self->b.use_navkeypressed = true; if (sort__branch_mode == 1) self->has_symbols = sort_sym_from.list.next != NULL; else self->has_symbols = sort_sym.list.next != NULL; } return self; } static void hist_browser__delete(struct hist_browser *self) { free(self); } static struct hist_entry *hist_browser__selected_entry(struct hist_browser *self) { return self->he_selection; } static struct thread *hist_browser__selected_thread(struct hist_browser *self) { return self->he_selection->thread; } static int hists__browser_title(struct hists *self, char *bf, size_t size, const char *ev_name) { char unit; int printed; const struct dso *dso = self->dso_filter; const struct thread *thread = self->thread_filter; unsigned long nr_events = self->stats.nr_events[PERF_RECORD_SAMPLE]; nr_events = convert_unit(nr_events, &unit); printed = scnprintf(bf, size, "Events: %lu%c %s", nr_events, unit, ev_name); if (self->uid_filter_str) printed += snprintf(bf + printed, size - printed, ", UID: %s", self->uid_filter_str); if (thread) printed += scnprintf(bf + printed, size - printed, ", Thread: %s(%d)", (thread->comm_set ? 
thread->comm : ""), thread->pid); if (dso) printed += scnprintf(bf + printed, size - printed, ", DSO: %s", dso->short_name); return printed; } static inline void free_popup_options(char **options, int n) { int i; for (i = 0; i < n; ++i) { free(options[i]); options[i] = NULL; } } static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events, const char *helpline, const char *ev_name, bool left_exits, void(*timer)(void *arg), void *arg, int delay_secs) { struct hists *self = &evsel->hists; struct hist_browser *browser = hist_browser__new(self); struct branch_info *bi; struct pstack *fstack; char *options[16]; int nr_options = 0; int key = -1; char buf[64]; if (browser == NULL) return -1; fstack = pstack__new(2); if (fstack == NULL) goto out; ui_helpline__push(helpline); memset(options, 0, sizeof(options)); while (1) { const struct thread *thread = NULL; const struct dso *dso = NULL; int choice = 0, annotate = -2, zoom_dso = -2, zoom_thread = -2, annotate_f = -2, annotate_t = -2, browse_map = -2; nr_options = 0; key = hist_browser__run(browser, ev_name, timer, arg, delay_secs); if (browser->he_selection != NULL) { thread = hist_browser__selected_thread(browser); dso = browser->selection->map ? 
browser->selection->map->dso : NULL; } switch (key) { case K_TAB: case K_UNTAB: if (nr_events == 1) continue; /* * Exit the browser, let hists__browser_tree * go to the next or previous */ goto out_free_stack; case 'a': if (!browser->has_symbols) { ui_browser__warning(&browser->b, delay_secs * 2, "Annotation is only available for symbolic views, " "include \"sym*\" in --sort to use it."); continue; } if (browser->selection == NULL || browser->selection->sym == NULL || browser->selection->map->dso->annotate_warned) continue; goto do_annotate; case 'd': goto zoom_dso; case 't': goto zoom_thread; case 's': if (ui_browser__input_window("Symbol to show", "Please enter the name of symbol you want to see", buf, "ENTER: OK, ESC: Cancel", delay_secs * 2) == K_ENTER) { self->symbol_filter_str = *buf ? buf : NULL; hists__filter_by_symbol(self); hist_browser__reset(browser); } continue; case K_F1: case 'h': case '?': ui_browser__help_window(&browser->b, "h/?/F1 Show this window\n" "UP/DOWN/PGUP\n" "PGDN/SPACE Navigate\n" "q/ESC/CTRL+C Exit browser\n\n" "For multiple event sessions:\n\n" "TAB/UNTAB Switch events\n\n" "For symbolic views (--sort has sym):\n\n" "-> Zoom into DSO/Threads & Annotate current symbol\n" "<- Zoom out\n" "a Annotate current symbol\n" "C Collapse all callchains\n" "E Expand all callchains\n" "d Zoom into current DSO\n" "t Zoom into current Thread\n" "s Filter symbol by name"); continue; case K_ENTER: case K_RIGHT: /* menu */ break; case K_LEFT: { const void *top; if (pstack__empty(fstack)) { /* * Go back to the perf_evsel_menu__run or other user */ if (left_exits) goto out_free_stack; continue; } top = pstack__pop(fstack); if (top == &browser->hists->dso_filter) goto zoom_out_dso; if (top == &browser->hists->thread_filter) goto zoom_out_thread; continue; } case K_ESC: if (!left_exits && !ui_browser__dialog_yesno(&browser->b, "Do you really want to exit?")) continue; /* Fall thru */ case 'q': case CTRL('c'): goto out_free_stack; default: continue; } if 
(!browser->has_symbols) goto add_exit_option; if (sort__branch_mode == 1) { bi = browser->he_selection->branch_info; if (browser->selection != NULL && bi && bi->from.sym != NULL && !bi->from.map->dso->annotate_warned && asprintf(&options[nr_options], "Annotate %s", bi->from.sym->name) > 0) annotate_f = nr_options++; if (browser->selection != NULL && bi && bi->to.sym != NULL && !bi->to.map->dso->annotate_warned && (bi->to.sym != bi->from.sym || bi->to.map->dso != bi->from.map->dso) && asprintf(&options[nr_options], "Annotate %s", bi->to.sym->name) > 0) annotate_t = nr_options++; } else { if (browser->selection != NULL && browser->selection->sym != NULL && !browser->selection->map->dso->annotate_warned && asprintf(&options[nr_options], "Annotate %s", browser->selection->sym->name) > 0) annotate = nr_options++; } if (thread != NULL && asprintf(&options[nr_options], "Zoom %s %s(%d) thread", (browser->hists->thread_filter ? "out of" : "into"), (thread->comm_set ? thread->comm : ""), thread->pid) > 0) zoom_thread = nr_options++; if (dso != NULL && asprintf(&options[nr_options], "Zoom %s %s DSO", (browser->hists->dso_filter ? "out of" : "into"), (dso->kernel ? "the Kernel" : dso->short_name)) > 0) zoom_dso = nr_options++; if (browser->selection != NULL && browser->selection->map != NULL && asprintf(&options[nr_options], "Browse map details") > 0) browse_map = nr_options++; add_exit_option: options[nr_options++] = (char *)"Exit"; retry_popup_menu: choice = ui__popup_menu(nr_options, options); if (choice == nr_options - 1) break; if (choice == -1) { free_popup_options(options, nr_options - 1); continue; } if (choice == annotate || choice == annotate_t || choice == annotate_f) { struct hist_entry *he; int err; do_annotate: he = hist_browser__selected_entry(browser); if (he == NULL) continue; /* * we stash the branch_info symbol + map into the * the ms so we don't have to rewrite all the annotation * code to use branch_info. 
* in branch mode, the ms struct is not used */ if (choice == annotate_f) { he->ms.sym = he->branch_info->from.sym; he->ms.map = he->branch_info->from.map; } else if (choice == annotate_t) { he->ms.sym = he->branch_info->to.sym; he->ms.map = he->branch_info->to.map; } /* * Don't let this be freed, say, by hists__decay_entry. */ he->used = true; err = hist_entry__tui_annotate(he, evsel->idx, timer, arg, delay_secs); he->used = false; /* * offer option to annotate the other branch source or target * (if they exists) when returning from annotate */ if ((err == 'q' || err == CTRL('c')) && annotate_t != -2 && annotate_f != -2) goto retry_popup_menu; ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries); if (err) ui_browser__handle_resize(&browser->b); } else if (choice == browse_map) map__browse(browser->selection->map); else if (choice == zoom_dso) { zoom_dso: if (browser->hists->dso_filter) { pstack__remove(fstack, &browser->hists->dso_filter); zoom_out_dso: ui_helpline__pop(); browser->hists->dso_filter = NULL; sort_dso.elide = false; } else { if (dso == NULL) continue; ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s DSO\"", dso->kernel ? "the Kernel" : dso->short_name); browser->hists->dso_filter = dso; sort_dso.elide = true; pstack__push(fstack, &browser->hists->dso_filter); } hists__filter_by_dso(self); hist_browser__reset(browser); } else if (choice == zoom_thread) { zoom_thread: if (browser->hists->thread_filter) { pstack__remove(fstack, &browser->hists->thread_filter); zoom_out_thread: ui_helpline__pop(); browser->hists->thread_filter = NULL; sort_thread.elide = false; } else { ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s(%d) thread\"", thread->comm_set ? 
thread->comm : "", thread->pid); browser->hists->thread_filter = thread; sort_thread.elide = true; pstack__push(fstack, &browser->hists->thread_filter); } hists__filter_by_thread(self); hist_browser__reset(browser); } } out_free_stack: pstack__delete(fstack); out: hist_browser__delete(browser); free_popup_options(options, nr_options - 1); return key; } struct perf_evsel_menu { struct ui_browser b; struct perf_evsel *selection; bool lost_events, lost_events_warned; }; static void perf_evsel_menu__write(struct ui_browser *browser, void *entry, int row) { struct perf_evsel_menu *menu = container_of(browser, struct perf_evsel_menu, b); struct perf_evsel *evsel = list_entry(entry, struct perf_evsel, node); bool current_entry = ui_browser__is_current_entry(browser, row); unsigned long nr_events = evsel->hists.stats.nr_events[PERF_RECORD_SAMPLE]; const char *ev_name = event_name(evsel); char bf[256], unit; const char *warn = " "; size_t printed; ui_browser__set_color(browser, current_entry ? HE_COLORSET_SELECTED : HE_COLORSET_NORMAL); nr_events = convert_unit(nr_events, &unit); printed = scnprintf(bf, sizeof(bf), "%lu%c%s%s", nr_events, unit, unit == ' ' ? "" : " ", ev_name); slsmg_printf("%s", bf); nr_events = evsel->hists.stats.nr_events[PERF_RECORD_LOST]; if (nr_events != 0) { menu->lost_events = true; if (!current_entry) ui_browser__set_color(browser, HE_COLORSET_TOP); nr_events = convert_unit(nr_events, &unit); printed += scnprintf(bf, sizeof(bf), ": %ld%c%schunks LOST!", nr_events, unit, unit == ' ' ? 
"" : " "); warn = bf; } slsmg_write_nstring(warn, browser->width - printed); if (current_entry) menu->selection = evsel; } static int perf_evsel_menu__run(struct perf_evsel_menu *menu, int nr_events, const char *help, void(*timer)(void *arg), void *arg, int delay_secs) { struct perf_evlist *evlist = menu->b.priv; struct perf_evsel *pos; const char *ev_name, *title = "Available samples"; int key; if (ui_browser__show(&menu->b, title, "ESC: exit, ENTER|->: Browse histograms") < 0) return -1; while (1) { key = ui_browser__run(&menu->b, delay_secs); switch (key) { case K_TIMER: timer(arg); if (!menu->lost_events_warned && menu->lost_events) { ui_browser__warn_lost_events(&menu->b); menu->lost_events_warned = true; } continue; case K_RIGHT: case K_ENTER: if (!menu->selection) continue; pos = menu->selection; browse_hists: perf_evlist__set_selected(evlist, pos); /* * Give the calling tool a chance to populate the non * default evsel resorted hists tree. */ if (timer) timer(arg); ev_name = event_name(pos); key = perf_evsel__hists_browse(pos, nr_events, help, ev_name, true, timer, arg, delay_secs); ui_browser__show_title(&menu->b, title); switch (key) { case K_TAB: if (pos->node.next == &evlist->entries) pos = list_entry(evlist->entries.next, struct perf_evsel, node); else pos = list_entry(pos->node.next, struct perf_evsel, node); goto browse_hists; case K_UNTAB: if (pos->node.prev == &evlist->entries) pos = list_entry(evlist->entries.prev, struct perf_evsel, node); else pos = list_entry(pos->node.prev, struct perf_evsel, node); goto browse_hists; case K_ESC: if (!ui_browser__dialog_yesno(&menu->b, "Do you really want to exit?")) continue; /* Fall thru */ case 'q': case CTRL('c'): goto out; default: continue; } case K_LEFT: continue; case K_ESC: if (!ui_browser__dialog_yesno(&menu->b, "Do you really want to exit?")) continue; /* Fall thru */ case 'q': case CTRL('c'): goto out; default: continue; } } out: ui_browser__hide(&menu->b); return key; } static int 
__perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help, void(*timer)(void *arg), void *arg, int delay_secs) { struct perf_evsel *pos; struct perf_evsel_menu menu = { .b = { .entries = &evlist->entries, .refresh = ui_browser__list_head_refresh, .seek = ui_browser__list_head_seek, .write = perf_evsel_menu__write, .nr_entries = evlist->nr_entries, .priv = evlist, }, }; ui_helpline__push("Press ESC to exit"); list_for_each_entry(pos, &evlist->entries, node) { const char *ev_name = event_name(pos); size_t line_len = strlen(ev_name) + 7; if (menu.b.width < line_len) menu.b.width = line_len; /* * Cache the evsel name, tracepoints have a _high_ cost per * event_name() call. */ if (pos->name == NULL) pos->name = strdup(ev_name); } return perf_evsel_menu__run(&menu, evlist->nr_entries, help, timer, arg, delay_secs); } int perf_evlist__tui_browse_hists(struct perf_evlist *evlist, const char *help, void(*timer)(void *arg), void *arg, int delay_secs) { if (evlist->nr_entries == 1) { struct perf_evsel *first = list_entry(evlist->entries.next, struct perf_evsel, node); const char *ev_name = event_name(first); return perf_evsel__hists_browse(first, evlist->nr_entries, help, ev_name, false, timer, arg, delay_secs); } return __perf_evlist__tui_browse_hists(evlist, help, timer, arg, delay_secs); }
gpl-2.0
pio-masaki/android_kernel_samsung_jf
sound/soc/omap/omap-mcpdm.c
4791
13951
/*
 * omap-mcpdm.c  --  OMAP ALSA SoC DAI driver using McPDM port
 *
 * Copyright (C) 2009 - 2011 Texas Instruments
 *
 * Author: Misael Lopez Cruz <misael.lopez@ti.com>
 * Contact: Jorge Eduardo Candelaria <x0107209@ti.com>
 *          Margarita Olaya <magi.olaya@ti.com>
 *          Peter Ujfalusi <peter.ujfalusi@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
 * 02110-1301 USA
 *
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/interrupt.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <sound/core.h>
#include <sound/pcm.h>
#include <sound/pcm_params.h>
#include <sound/soc.h>

#include <plat/dma.h>
#include <plat/omap_hwmod.h>
#include "omap-mcpdm.h"
#include "omap-pcm.h"

/* Per-device state, stored as DAI drvdata. */
struct omap_mcpdm {
	struct device *dev;
	unsigned long phys_base;
	void __iomem *io_base;
	int irq;

	struct mutex mutex;	/* serializes startup/shutdown vs. dai->active */

	/* channel data: enable bits written into MCPDM_REG_CTRL */
	u32 dn_channels;
	u32 up_channels;

	/* McPDM FIFO thresholds */
	u32 dn_threshold;
	u32 up_threshold;

	/* McPDM dn offsets for rx1, and 2 channels */
	u32 dn_rx_offset;
};

/*
 * Stream DMA parameters
 * Indexed by SNDRV_PCM_STREAM_PLAYBACK / SNDRV_PCM_STREAM_CAPTURE.
 */
static struct omap_pcm_dma_data omap_mcpdm_dai_dma_params[] = {
	{
		.name = "Audio playback",
		.dma_req = OMAP44XX_DMA_MCPDM_DL,
		.data_type = OMAP_DMA_DATA_TYPE_S32,
		.sync_mode = OMAP_DMA_SYNC_PACKET,
		.port_addr = OMAP44XX_MCPDM_L3_BASE + MCPDM_REG_DN_DATA,
	},
	{
		.name = "Audio capture",
		.dma_req = OMAP44XX_DMA_MCPDM_UP,
		.data_type = OMAP_DMA_DATA_TYPE_S32,
		.sync_mode = OMAP_DMA_SYNC_PACKET,
		.port_addr = OMAP44XX_MCPDM_L3_BASE + MCPDM_REG_UP_DATA,
	},
};

static inline void omap_mcpdm_write(struct omap_mcpdm *mcpdm, u16 reg,
				    u32 val)
{
	__raw_writel(val, mcpdm->io_base + reg);
}

static inline int omap_mcpdm_read(struct omap_mcpdm *mcpdm, u16 reg)
{
	return __raw_readl(mcpdm->io_base + reg);
}

#ifdef DEBUG
/* Dump all McPDM registers for debugging. */
static void omap_mcpdm_reg_dump(struct omap_mcpdm *mcpdm)
{
	dev_dbg(mcpdm->dev, "***********************\n");
	dev_dbg(mcpdm->dev, "IRQSTATUS_RAW:  0x%04x\n",
			omap_mcpdm_read(mcpdm, MCPDM_REG_IRQSTATUS_RAW));
	dev_dbg(mcpdm->dev, "IRQSTATUS:      0x%04x\n",
			omap_mcpdm_read(mcpdm, MCPDM_REG_IRQSTATUS));
	dev_dbg(mcpdm->dev, "IRQENABLE_SET:  0x%04x\n",
			omap_mcpdm_read(mcpdm, MCPDM_REG_IRQENABLE_SET));
	dev_dbg(mcpdm->dev, "IRQENABLE_CLR:  0x%04x\n",
			omap_mcpdm_read(mcpdm, MCPDM_REG_IRQENABLE_CLR));
	dev_dbg(mcpdm->dev, "IRQWAKE_EN:     0x%04x\n",
			omap_mcpdm_read(mcpdm, MCPDM_REG_IRQWAKE_EN));
	dev_dbg(mcpdm->dev, "DMAENABLE_SET:  0x%04x\n",
			omap_mcpdm_read(mcpdm, MCPDM_REG_DMAENABLE_SET));
	dev_dbg(mcpdm->dev, "DMAENABLE_CLR:  0x%04x\n",
			omap_mcpdm_read(mcpdm, MCPDM_REG_DMAENABLE_CLR));
	dev_dbg(mcpdm->dev, "DMAWAKEEN:      0x%04x\n",
			omap_mcpdm_read(mcpdm, MCPDM_REG_DMAWAKEEN));
	dev_dbg(mcpdm->dev, "CTRL:           0x%04x\n",
			omap_mcpdm_read(mcpdm, MCPDM_REG_CTRL));
	dev_dbg(mcpdm->dev, "DN_DATA:        0x%04x\n",
			omap_mcpdm_read(mcpdm, MCPDM_REG_DN_DATA));
	dev_dbg(mcpdm->dev, "UP_DATA:        0x%04x\n",
			omap_mcpdm_read(mcpdm, MCPDM_REG_UP_DATA));
	dev_dbg(mcpdm->dev, "FIFO_CTRL_DN:   0x%04x\n",
			omap_mcpdm_read(mcpdm, MCPDM_REG_FIFO_CTRL_DN));
	dev_dbg(mcpdm->dev, "FIFO_CTRL_UP:   0x%04x\n",
			omap_mcpdm_read(mcpdm, MCPDM_REG_FIFO_CTRL_UP));
	dev_dbg(mcpdm->dev, "***********************\n");
}
#else
static void omap_mcpdm_reg_dump(struct omap_mcpdm *mcpdm) {}
#endif

/*
 * Enables the transfer through the PDM interface to/from the Phoenix
 * codec by enabling the corresponding UP or DN channels.
 * The channel enables are changed while the SW reset bits are held,
 * then the reset is released (sequence per TRM).
 */
static void omap_mcpdm_start(struct omap_mcpdm *mcpdm)
{
	u32 ctrl = omap_mcpdm_read(mcpdm, MCPDM_REG_CTRL);

	ctrl |= (MCPDM_SW_DN_RST | MCPDM_SW_UP_RST);
	omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, ctrl);

	ctrl |= mcpdm->dn_channels | mcpdm->up_channels;
	omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, ctrl);

	ctrl &= ~(MCPDM_SW_DN_RST | MCPDM_SW_UP_RST);
	omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, ctrl);
}

/*
 * Disables the transfer through the PDM interface to/from the Phoenix
 * codec by disabling the corresponding UP or DN channels.
 */
static void omap_mcpdm_stop(struct omap_mcpdm *mcpdm)
{
	u32 ctrl = omap_mcpdm_read(mcpdm, MCPDM_REG_CTRL);

	ctrl |= (MCPDM_SW_DN_RST | MCPDM_SW_UP_RST);
	omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, ctrl);

	ctrl &= ~(mcpdm->dn_channels | mcpdm->up_channels);
	omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, ctrl);

	ctrl &= ~(MCPDM_SW_DN_RST | MCPDM_SW_UP_RST);
	omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, ctrl);
}

/*
 * Is the physical McPDM interface active.
 * Returns non-zero if any UP or DN channel enable bit is set.
 */
static inline int omap_mcpdm_active(struct omap_mcpdm *mcpdm)
{
	return omap_mcpdm_read(mcpdm, MCPDM_REG_CTRL) &
					(MCPDM_PDM_DN_MASK | MCPDM_PDM_UP_MASK);
}

/*
 * Configures McPDM uplink, and downlink for audio.
 * This function should be called before omap_mcpdm_start.
 */
static void omap_mcpdm_open_streams(struct omap_mcpdm *mcpdm)
{
	omap_mcpdm_write(mcpdm, MCPDM_REG_IRQENABLE_SET,
			MCPDM_DN_IRQ_EMPTY | MCPDM_DN_IRQ_FULL |
			MCPDM_UP_IRQ_EMPTY | MCPDM_UP_IRQ_FULL);

	/* Enable DN RX1/2 offset cancellation feature, if configured */
	if (mcpdm->dn_rx_offset) {
		u32 dn_offset = mcpdm->dn_rx_offset;

		/* Program the offsets first, then set the enable bits */
		omap_mcpdm_write(mcpdm, MCPDM_REG_DN_OFFSET, dn_offset);
		dn_offset |= (MCPDM_DN_OFST_RX1_EN | MCPDM_DN_OFST_RX2_EN);
		omap_mcpdm_write(mcpdm, MCPDM_REG_DN_OFFSET, dn_offset);
	}

	omap_mcpdm_write(mcpdm, MCPDM_REG_FIFO_CTRL_DN, mcpdm->dn_threshold);
	omap_mcpdm_write(mcpdm, MCPDM_REG_FIFO_CTRL_UP, mcpdm->up_threshold);

	omap_mcpdm_write(mcpdm, MCPDM_REG_DMAENABLE_SET,
			MCPDM_DMA_DN_ENABLE | MCPDM_DMA_UP_ENABLE);
}

/*
 * Cleans McPDM uplink, and downlink configuration.
 * This function should be called when the stream is closed.
 */
static void omap_mcpdm_close_streams(struct omap_mcpdm *mcpdm)
{
	/* Disable irq request generation for downlink */
	omap_mcpdm_write(mcpdm, MCPDM_REG_IRQENABLE_CLR,
			MCPDM_DN_IRQ_EMPTY | MCPDM_DN_IRQ_FULL);

	/* Disable DMA request generation for downlink */
	omap_mcpdm_write(mcpdm, MCPDM_REG_DMAENABLE_CLR, MCPDM_DMA_DN_ENABLE);

	/* Disable irq request generation for uplink */
	omap_mcpdm_write(mcpdm, MCPDM_REG_IRQENABLE_CLR,
			MCPDM_UP_IRQ_EMPTY | MCPDM_UP_IRQ_FULL);

	/* Disable DMA request generation for uplink */
	omap_mcpdm_write(mcpdm, MCPDM_REG_DMAENABLE_CLR, MCPDM_DMA_UP_ENABLE);

	/* Disable RX1/2 offset cancellation */
	if (mcpdm->dn_rx_offset)
		omap_mcpdm_write(mcpdm, MCPDM_REG_DN_OFFSET, 0);
}

/*
 * IRQ handler: acknowledges and logs FIFO events.  FIFO full/empty
 * conditions are only reported; data transfer itself is done by sDMA.
 */
static irqreturn_t omap_mcpdm_irq_handler(int irq, void *dev_id)
{
	struct omap_mcpdm *mcpdm = dev_id;
	int irq_status;

	irq_status = omap_mcpdm_read(mcpdm, MCPDM_REG_IRQSTATUS);

	/* Acknowledge irq event */
	omap_mcpdm_write(mcpdm, MCPDM_REG_IRQSTATUS, irq_status);
	if (irq_status & MCPDM_DN_IRQ_FULL)
		dev_dbg(mcpdm->dev, "DN (playback) FIFO Full\n");

	if (irq_status & MCPDM_DN_IRQ_EMPTY)
		dev_dbg(mcpdm->dev, "DN (playback) FIFO Empty\n");

	if (irq_status & MCPDM_DN_IRQ)
		dev_dbg(mcpdm->dev, "DN (playback) write request\n");

	if (irq_status & MCPDM_UP_IRQ_FULL)
		dev_dbg(mcpdm->dev, "UP (capture) FIFO Full\n");

	if (irq_status & MCPDM_UP_IRQ_EMPTY)
		dev_dbg(mcpdm->dev, "UP (capture) FIFO Empty\n");

	if (irq_status & MCPDM_UP_IRQ)
		dev_dbg(mcpdm->dev, "UP (capture) write request\n");

	return IRQ_HANDLED;
}

/* DAI startup: first stream opening configures the port once. */
static int omap_mcpdm_dai_startup(struct snd_pcm_substream *substream,
				  struct snd_soc_dai *dai)
{
	struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);

	mutex_lock(&mcpdm->mutex);

	if (!dai->active) {
		/* Enable watch dog for ES above ES 1.0 to avoid saturation */
		if (omap_rev() != OMAP4430_REV_ES1_0) {
			u32 ctrl = omap_mcpdm_read(mcpdm, MCPDM_REG_CTRL);

			omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL,
					 ctrl | MCPDM_WD_EN);
		}
		omap_mcpdm_open_streams(mcpdm);
	}
	mutex_unlock(&mcpdm->mutex);

	return 0;
}

/* DAI shutdown: last stream closing stops and tears down the port. */
static void omap_mcpdm_dai_shutdown(struct snd_pcm_substream *substream,
				    struct snd_soc_dai *dai)
{
	struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);

	mutex_lock(&mcpdm->mutex);

	if (!dai->active) {
		if (omap_mcpdm_active(mcpdm)) {
			omap_mcpdm_stop(mcpdm);
			omap_mcpdm_close_streams(mcpdm);
		}
	}

	mutex_unlock(&mcpdm->mutex);
}

/*
 * hw_params: translate channel count into per-link enable mask and
 * DMA packet size.  Playback supports up to 5 links, capture up to 3.
 */
static int omap_mcpdm_dai_hw_params(struct snd_pcm_substream *substream,
				    struct snd_pcm_hw_params *params,
				    struct snd_soc_dai *dai)
{
	struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
	int stream = substream->stream;
	struct omap_pcm_dma_data *dma_data;
	int channels;
	int link_mask = 0;

	channels = params_channels(params);
	switch (channels) {
	case 5:
		if (stream == SNDRV_PCM_STREAM_CAPTURE)
			/* up to 3 channels for capture */
			return -EINVAL;
		link_mask |= 1 << 4;
		/* fallthrough */
	case 4:
		if (stream == SNDRV_PCM_STREAM_CAPTURE)
			/* up to 3 channels for capture */
			return -EINVAL;
		link_mask |= 1 << 3;
		/* fallthrough */
	case 3:
		link_mask |= 1 << 2;
		/* fallthrough */
	case 2:
		link_mask |= 1 << 1;
		/* fallthrough */
	case 1:
		link_mask |= 1 << 0;
		break;
	default:
		/* unsupported number of channels */
		return -EINVAL;
	}

	dma_data = &omap_mcpdm_dai_dma_params[stream];

	/* Configure McPDM channels, and DMA packet size */
	if (stream == SNDRV_PCM_STREAM_PLAYBACK) {
		mcpdm->dn_channels = link_mask << 3;
		dma_data->packet_size =
			(MCPDM_DN_THRES_MAX - mcpdm->dn_threshold) * channels;
	} else {
		mcpdm->up_channels = link_mask << 0;
		dma_data->packet_size = mcpdm->up_threshold * channels;
	}

	snd_soc_dai_set_dma_data(dai, substream, dma_data);

	return 0;
}

/* prepare: kick the interface if it is not already running. */
static int omap_mcpdm_prepare(struct snd_pcm_substream *substream,
			      struct snd_soc_dai *dai)
{
	struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);

	if (!omap_mcpdm_active(mcpdm)) {
		omap_mcpdm_start(mcpdm);
		omap_mcpdm_reg_dump(mcpdm);
	}

	return 0;
}

static const struct snd_soc_dai_ops omap_mcpdm_dai_ops = {
	.startup	= omap_mcpdm_dai_startup,
	.shutdown	= omap_mcpdm_dai_shutdown,
	.hw_params	= omap_mcpdm_dai_hw_params,
	.prepare	= omap_mcpdm_prepare,
};

/* DAI probe: claim the IRQ and set default FIFO thresholds. */
static int omap_mcpdm_probe(struct snd_soc_dai *dai)
{
	struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);
	int ret;

	pm_runtime_enable(mcpdm->dev);

	/* Disable lines while request is ongoing */
	pm_runtime_get_sync(mcpdm->dev);
	omap_mcpdm_write(mcpdm, MCPDM_REG_CTRL, 0x00);

	ret = request_irq(mcpdm->irq, omap_mcpdm_irq_handler,
				0, "McPDM", (void *)mcpdm);

	pm_runtime_put_sync(mcpdm->dev);

	if (ret) {
		dev_err(mcpdm->dev, "Request for IRQ failed\n");
		pm_runtime_disable(mcpdm->dev);
	}

	/* Configure McPDM threshold values */
	mcpdm->dn_threshold = 2;
	mcpdm->up_threshold = MCPDM_UP_THRES_MAX - 3;
	return ret;
}

static int omap_mcpdm_remove(struct snd_soc_dai *dai)
{
	struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(dai);

	free_irq(mcpdm->irq, (void *)mcpdm);
	pm_runtime_disable(mcpdm->dev);

	return 0;
}

#define OMAP_MCPDM_RATES	(SNDRV_PCM_RATE_88200 | SNDRV_PCM_RATE_96000)
#define OMAP_MCPDM_FORMATS	SNDRV_PCM_FMTBIT_S32_LE

static struct snd_soc_dai_driver omap_mcpdm_dai = {
	.probe = omap_mcpdm_probe,
	.remove = omap_mcpdm_remove,
	.probe_order = SND_SOC_COMP_ORDER_LATE,
	.remove_order = SND_SOC_COMP_ORDER_EARLY,
	.playback = {
		.channels_min = 1,
		.channels_max = 5,
		.rates = OMAP_MCPDM_RATES,
		.formats = OMAP_MCPDM_FORMATS,
		.sig_bits = 24,
	},
	.capture = {
		.channels_min = 1,
		.channels_max = 3,
		.rates = OMAP_MCPDM_RATES,
		.formats = OMAP_MCPDM_FORMATS,
		.sig_bits = 24,
	},
	.ops = &omap_mcpdm_dai_ops,
};

/*
 * Allow machine drivers to set the DN RX1/RX2 offset cancellation
 * values before the streams are opened.
 */
void omap_mcpdm_configure_dn_offsets(struct snd_soc_pcm_runtime *rtd,
				    u8 rx1, u8 rx2)
{
	struct omap_mcpdm *mcpdm = snd_soc_dai_get_drvdata(rtd->cpu_dai);

	mcpdm->dn_rx_offset = MCPDM_DNOFST_RX1(rx1) | MCPDM_DNOFST_RX2(rx2);
}
EXPORT_SYMBOL_GPL(omap_mcpdm_configure_dn_offsets);

static __devinit int asoc_mcpdm_probe(struct platform_device *pdev)
{
	struct omap_mcpdm *mcpdm;
	struct resource *res;
	int ret = 0;

	mcpdm = kzalloc(sizeof(struct omap_mcpdm), GFP_KERNEL);
	if (!mcpdm)
		return -ENOMEM;

	platform_set_drvdata(pdev, mcpdm);

	mutex_init(&mcpdm->mutex);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (res == NULL) {
		dev_err(&pdev->dev, "no resource\n");
		/*
		 * Fix: ret used to stay 0 here, so probe reported success
		 * after freeing mcpdm, leaving stale drvdata behind.
		 */
		ret = -ENODEV;
		goto err_res;
	}

	if (!request_mem_region(res->start, resource_size(res), "McPDM")) {
		ret = -EBUSY;
		goto err_res;
	}

	mcpdm->io_base = ioremap(res->start, resource_size(res));
	if (!mcpdm->io_base) {
		ret = -ENOMEM;
		goto err_iomap;
	}

	mcpdm->irq = platform_get_irq(pdev, 0);
	if (mcpdm->irq < 0) {
		ret = mcpdm->irq;
		goto err_irq;
	}

	mcpdm->dev = &pdev->dev;

	ret = snd_soc_register_dai(&pdev->dev, &omap_mcpdm_dai);
	if (!ret)
		return 0;

err_irq:
	iounmap(mcpdm->io_base);
err_iomap:
	release_mem_region(res->start, resource_size(res));
err_res:
	kfree(mcpdm);
	return ret;
}

static int __devexit asoc_mcpdm_remove(struct platform_device *pdev)
{
	struct omap_mcpdm *mcpdm = platform_get_drvdata(pdev);
	struct resource *res;

	snd_soc_unregister_dai(&pdev->dev);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	iounmap(mcpdm->io_base);
	release_mem_region(res->start, resource_size(res));

	kfree(mcpdm);
	return 0;
}

static struct platform_driver asoc_mcpdm_driver = {
	.driver = {
		.name	= "omap-mcpdm",
		.owner	= THIS_MODULE,
	},

	.probe	= asoc_mcpdm_probe,
	.remove	= __devexit_p(asoc_mcpdm_remove),
};

module_platform_driver(asoc_mcpdm_driver);

MODULE_AUTHOR("Misael Lopez Cruz <misael.lopez@ti.com>");
MODULE_DESCRIPTION("OMAP PDM SoC Interface");
MODULE_LICENSE("GPL");
gpl-2.0
cbolumar/android_kernel_samsung_a3ultexx
drivers/ide/opti621.c
4791
4568
/*
 * Copyright (C) 1996-1998 Linus Torvalds & authors (see below)
 */

/*
 * Authors:
 * Jaromir Koutek <miri@punknet.cz>,
 * Jan Harkes <jaharkes@cwi.nl>,
 * Mark Lord <mlord@pobox.com>
 * Some parts of code are from ali14xx.c and from rz1000.c.
 */

#include <linux/types.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/ide.h>

#include <asm/io.h>

#define DRV_NAME "opti621"

#define READ_REG 0	/* index of Read cycle timing register */
#define WRITE_REG 1	/* index of Write cycle timing register */
#define CNTRL_REG 3	/* index of Control register */
#define STRAP_REG 5	/* index of Strap register */
#define MISC_REG 6	/* index of Miscellaneous register */

/*
 * I/O base of the chip's configuration registers for the port being
 * tuned; updated under opti621_lock in opti621_set_pio_mode().
 */
static int reg_base;

static DEFINE_SPINLOCK(opti621_lock);

/* Write value to register reg, base of register
 * is at reg_base (0x1f0 primary, 0x170 secondary,
 * if not changed by PCI configuration).
 * This is from setupvic.exe program.
 */
/*
 * NOTE(review): the inw/outb sequence below is the chip's unlock
 * protocol; the exact order of accesses must not be changed.
 */
static void write_reg(u8 value, int reg)
{
	inw(reg_base + 1);
	inw(reg_base + 1);
	outb(3, reg_base + 2);
	outb(value, reg_base + reg);
	outb(0x83, reg_base + 2);
}

/* Read value from register reg, base of register
 * is at reg_base (0x1f0 primary, 0x170 secondary,
 * if not changed by PCI configuration).
 * This is from setupvic.exe program.
 */
static u8 read_reg(int reg)
{
	u8 ret = 0;

	inw(reg_base + 1);
	inw(reg_base + 1);
	outb(3, reg_base + 2);
	ret = inb(reg_base + reg);
	outb(0x83, reg_base + 2);

	return ret;
}

/*
 * Program PIO timings for @drive.  If a paired drive exists and uses a
 * slower PIO mode, the shared address-setup timing is derived from the
 * slower of the two, since the MISC register is common to both drives.
 */
static void opti621_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
{
	ide_drive_t *pair = ide_get_pair_dev(drive);
	unsigned long flags;
	unsigned long mode = drive->pio_mode, pair_mode;
	const u8 pio = mode - XFER_PIO_0;
	u8 tim, misc, addr_pio = pio, clk;

	/* DRDY is default 2 (by OPTi Databook) */
	static const u8 addr_timings[2][5] = {
		{ 0x20, 0x10, 0x00, 0x00, 0x00 },	/* 33 MHz */
		{ 0x10, 0x10, 0x00, 0x00, 0x00 },	/* 25 MHz */
	};
	static const u8 data_rec_timings[2][5] = {
		{ 0x5b, 0x45, 0x32, 0x21, 0x20 },	/* 33 MHz */
		{ 0x48, 0x34, 0x21, 0x10, 0x10 }	/* 25 MHz */
	};

	/* remember this drive's mode so the pair calculation works later */
	ide_set_drivedata(drive, (void *)mode);

	if (pair) {
		pair_mode = (unsigned long)ide_get_drivedata(pair);
		if (pair_mode && pair_mode < mode)
			addr_pio = pair_mode - XFER_PIO_0;
	}

	spin_lock_irqsave(&opti621_lock, flags);

	reg_base = hwif->io_ports.data_addr;

	/* allow Register-B */
	outb(0xc0, reg_base + CNTRL_REG);
	/* hmm, setupvic.exe does this ;-) */
	outb(0xff, reg_base + 5);
	/* if reads 0xff, adapter not exist? */
	(void)inb(reg_base + CNTRL_REG);
	/* if reads 0xc0, no interface exist? */
	read_reg(CNTRL_REG);

	/* check CLK speed */
	clk = read_reg(STRAP_REG) & 1;

	printk(KERN_INFO "%s: CLK = %d MHz\n", hwif->name, clk ? 25 : 33);

	tim = data_rec_timings[clk][pio];
	misc = addr_timings[clk][addr_pio];

	/* select Index-0/1 for Register-A/B */
	write_reg(drive->dn & 1, MISC_REG);
	/* set read cycle timings */
	write_reg(tim, READ_REG);
	/* set write cycle timings */
	write_reg(tim, WRITE_REG);

	/* use Register-A for drive 0 */
	/* use Register-B for drive 1 */
	write_reg(0x85, CNTRL_REG);

	/* set address setup, DRDY timings, */
	/* and read prefetch for both drives */
	write_reg(misc, MISC_REG);

	spin_unlock_irqrestore(&opti621_lock, flags);
}

static const struct ide_port_ops opti621_port_ops = {
	.set_pio_mode		= opti621_set_pio_mode,
};

static const struct ide_port_info opti621_chipset = {
	.name		= DRV_NAME,
	.enablebits	= { {0x45, 0x80, 0x00}, {0x40, 0x08, 0x00} },
	.port_ops	= &opti621_port_ops,
	.host_flags	= IDE_HFLAG_NO_DMA,
	.pio_mask	= ATA_PIO4,
};

/* PCI probe: hand the device to the generic IDE PCI layer. */
static int opti621_init_one(struct pci_dev *dev,
			    const struct pci_device_id *id)
{
	return ide_pci_init_one(dev, &opti621_chipset, NULL);
}

static const struct pci_device_id opti621_pci_tbl[] = {
	{ PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C621), 0 },
	{ PCI_VDEVICE(OPTI, PCI_DEVICE_ID_OPTI_82C825), 0 },
	{ 0, },
};
MODULE_DEVICE_TABLE(pci, opti621_pci_tbl);

static struct pci_driver opti621_pci_driver = {
	.name		= "Opti621_IDE",
	.id_table	= opti621_pci_tbl,
	.probe		= opti621_init_one,
	.remove		= ide_pci_remove,
	.suspend	= ide_pci_suspend,
	.resume		= ide_pci_resume,
};

static int __init opti621_ide_init(void)
{
	return ide_pci_register_driver(&opti621_pci_driver);
}

static void __exit opti621_ide_exit(void)
{
	pci_unregister_driver(&opti621_pci_driver);
}

module_init(opti621_ide_init);
module_exit(opti621_ide_exit);

MODULE_AUTHOR("Jaromir Koutek, Jan Harkes, Mark Lord");
MODULE_DESCRIPTION("PCI driver module for Opti621 IDE");
MODULE_LICENSE("GPL");
gpl-2.0
agaphetos/cody_kernel_cancro
drivers/media/video/saa7134/saa7134-empress.c
5559
15155
/*
 *
 * (c) 2004 Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/init.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/delay.h>

#include "saa7134-reg.h"
#include "saa7134.h"

#include <media/saa6752hs.h>
#include <media/v4l2-common.h>
#include <media/v4l2-chip-ident.h>

/* ------------------------------------------------------------------ */

MODULE_AUTHOR("Gerd Knorr <kraxel@bytesex.org> [SuSE Labs]");
MODULE_LICENSE("GPL");

static unsigned int empress_nr[] = {[0 ... (SAA7134_MAXBOARDS - 1)] = UNSET };

module_param_array(empress_nr, int, NULL, 0444);
MODULE_PARM_DESC(empress_nr,"ts device number");

static unsigned int debug;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug,"enable debug messages");

/* NOTE: expects a local variable named 'dev' in the calling scope */
#define dprintk(fmt, arg...)	if (debug)			\
	printk(KERN_DEBUG "%s/empress: " fmt, dev->name , ## arg)

/* ------------------------------------------------------------------ */

/* Pulse the encoder reset line; no-op if the encoder never started. */
static void ts_reset_encoder(struct saa7134_dev* dev)
{
	if (!dev->empress_started)
		return;

	saa_writeb(SAA7134_SPECIAL_MODE, 0x00);
	msleep(10);
	saa_writeb(SAA7134_SPECIAL_MODE, 0x01);
	msleep(100);
	dev->empress_started = 0;
}

/* Reset and (re)initialize the MPEG encoder via the subdev chain. */
static int ts_init_encoder(struct saa7134_dev* dev)
{
	u32 leading_null_bytes = 0;

	/* If more cards start to need this, then this
	   should probably be added to the card definitions. */
	switch (dev->board) {
	case SAA7134_BOARD_BEHOLD_M6:
	case SAA7134_BOARD_BEHOLD_M63:
	case SAA7134_BOARD_BEHOLD_M6_EXTRA:
		leading_null_bytes = 1;
		break;
	}
	ts_reset_encoder(dev);
	saa_call_all(dev, core, init, leading_null_bytes);
	dev->empress_started = 1;
	return 0;
}

/* ------------------------------------------------------------------ */

/*
 * Open: single-user device.  vb_lock doubles as the open-count lock;
 * trylock so an in-progress streaming operation makes open fail fast.
 */
static int ts_open(struct file *file)
{
	struct video_device *vdev = video_devdata(file);
	struct saa7134_dev *dev = video_drvdata(file);
	int err;

	dprintk("open dev=%s\n", video_device_node_name(vdev));
	err = -EBUSY;
	if (!mutex_trylock(&dev->empress_tsq.vb_lock))
		return err;
	if (atomic_read(&dev->empress_users))
		goto done;

	/* Unmute audio */
	saa_writeb(SAA7134_AUDIO_MUTE_CTRL,
		saa_readb(SAA7134_AUDIO_MUTE_CTRL) & ~(1 << 6));

	atomic_inc(&dev->empress_users);
	file->private_data = dev;
	err = 0;

done:
	mutex_unlock(&dev->empress_tsq.vb_lock);
	return err;
}

/* Release: stop streaming, reset the encoder, mute audio again. */
static int ts_release(struct file *file)
{
	struct saa7134_dev *dev = file->private_data;

	videobuf_stop(&dev->empress_tsq);
	videobuf_mmap_free(&dev->empress_tsq);

	/* stop the encoder */
	ts_reset_encoder(dev);

	/* Mute audio */
	saa_writeb(SAA7134_AUDIO_MUTE_CTRL,
		saa_readb(SAA7134_AUDIO_MUTE_CTRL) | (1 << 6));

	atomic_dec(&dev->empress_users);

	return 0;
}

/* Lazy-start the encoder on the first read, then stream TS data. */
static ssize_t
ts_read(struct file *file, char __user *data, size_t count, loff_t *ppos)
{
	struct saa7134_dev *dev = file->private_data;

	if (!dev->empress_started)
		ts_init_encoder(dev);

	return videobuf_read_stream(&dev->empress_tsq,
				    data, count, ppos, 0,
				    file->f_flags & O_NONBLOCK);
}

static unsigned int
ts_poll(struct file *file, struct poll_table_struct *wait)
{
	struct saa7134_dev *dev = file->private_data;

	return videobuf_poll_stream(file, &dev->empress_tsq, wait);
}


static int
ts_mmap(struct file *file, struct vm_area_struct * vma)
{
	struct saa7134_dev *dev = file->private_data;

	return videobuf_mmap_mapper(&dev->empress_tsq, vma);
}

/*
 * This function is _not_ called directly, but from
 * video_generic_ioctl (and maybe others).  userspace
 * copying is done already, arg is a kernel pointer.
 */
static int empress_querycap(struct file *file, void  *priv,
					struct v4l2_capability *cap)
{
	struct saa7134_dev *dev = file->private_data;

	strcpy(cap->driver, "saa7134");
	strlcpy(cap->card, saa7134_boards[dev->board].name,
		sizeof(cap->card));
	sprintf(cap->bus_info, "PCI:%s", pci_name(dev->pci));
	cap->capabilities =
		V4L2_CAP_VIDEO_CAPTURE |
		V4L2_CAP_READWRITE |
		V4L2_CAP_STREAMING;
	return 0;
}

/* Only one fixed input exists: the CCIR656 bus from the decoder. */
static int empress_enum_input(struct file *file, void *priv,
					struct v4l2_input *i)
{
	if (i->index != 0)
		return -EINVAL;

	i->type = V4L2_INPUT_TYPE_CAMERA;
	strcpy(i->name, "CCIR656");

	return 0;
}

static int empress_g_input(struct file *file, void *priv, unsigned int *i)
{
	*i = 0;
	return 0;
}

static int empress_s_input(struct file *file, void *priv, unsigned int i)
{
	if (i != 0)
		return -EINVAL;

	return 0;
}

/* The only produced format is an MPEG transport stream. */
static int empress_enum_fmt_vid_cap(struct file *file, void  *priv,
					struct v4l2_fmtdesc *f)
{
	if (f->index != 0)
		return -EINVAL;

	strlcpy(f->description, "MPEG TS", sizeof(f->description));
	f->pixelformat = V4L2_PIX_FMT_MPEG;

	return 0;
}

static int empress_g_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct saa7134_dev *dev = file->private_data;
	struct v4l2_mbus_framefmt mbus_fmt;

	saa_call_all(dev, video, g_mbus_fmt, &mbus_fmt);
	v4l2_fill_pix_format(&f->fmt.pix, &mbus_fmt);
	f->fmt.pix.pixelformat  = V4L2_PIX_FMT_MPEG;
	f->fmt.pix.sizeimage    = TS_PACKET_SIZE * dev->ts.nr_packets;

	return 0;
}

static int empress_s_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct saa7134_dev *dev = file->private_data;
	struct v4l2_mbus_framefmt mbus_fmt;

	v4l2_fill_mbus_format(&mbus_fmt, &f->fmt.pix, V4L2_MBUS_FMT_FIXED);
	saa_call_all(dev, video, s_mbus_fmt, &mbus_fmt);
	v4l2_fill_pix_format(&f->fmt.pix, &mbus_fmt);

	f->fmt.pix.pixelformat  = V4L2_PIX_FMT_MPEG;
	f->fmt.pix.sizeimage    = TS_PACKET_SIZE * dev->ts.nr_packets;

	return 0;
}

static int empress_try_fmt_vid_cap(struct file *file, void *priv,
				struct v4l2_format *f)
{
	struct saa7134_dev *dev = file->private_data;

	f->fmt.pix.pixelformat  = V4L2_PIX_FMT_MPEG;
	f->fmt.pix.sizeimage    = TS_PACKET_SIZE * dev->ts.nr_packets;

	return 0;
}

/* Buffer-queue ioctls: thin wrappers around the videobuf queue. */
static int empress_reqbufs(struct file *file, void *priv,
					struct v4l2_requestbuffers *p)
{
	struct saa7134_dev *dev = file->private_data;

	return videobuf_reqbufs(&dev->empress_tsq, p);
}

static int empress_querybuf(struct file *file, void *priv,
					struct v4l2_buffer *b)
{
	struct saa7134_dev *dev = file->private_data;

	return videobuf_querybuf(&dev->empress_tsq, b);
}

static int empress_qbuf(struct file *file, void *priv,
					struct v4l2_buffer *b)
{
	struct saa7134_dev *dev = file->private_data;

	return videobuf_qbuf(&dev->empress_tsq, b);
}

static int empress_dqbuf(struct file *file, void *priv,
					struct v4l2_buffer *b)
{
	struct saa7134_dev *dev = file->private_data;

	return videobuf_dqbuf(&dev->empress_tsq, b,
				file->f_flags & O_NONBLOCK);
}

static int empress_streamon(struct file *file, void *priv,
					enum v4l2_buf_type type)
{
	struct saa7134_dev *dev = file->private_data;

	return videobuf_streamon(&dev->empress_tsq);
}

static int empress_streamoff(struct file *file, void *priv,
					enum v4l2_buf_type type)
{
	struct saa7134_dev *dev = file->private_data;

	return videobuf_streamoff(&dev->empress_tsq);
}

/*
 * Forward MPEG controls to the saa6752hs encoder, then re-init it so
 * new control values take effect.
 */
static int empress_s_ext_ctrls(struct file *file, void *priv,
			       struct v4l2_ext_controls *ctrls)
{
	struct saa7134_dev *dev = file->private_data;
	int err;

	/* count == 0 is abused in saa6752hs.c, so that special
	   case is handled here explicitly. */
	if (ctrls->count == 0)
		return 0;

	if (ctrls->ctrl_class != V4L2_CTRL_CLASS_MPEG)
		return -EINVAL;

	err = saa_call_empress(dev, core, s_ext_ctrls, ctrls);
	ts_init_encoder(dev);

	return err;
}

static int empress_g_ext_ctrls(struct file *file, void *priv,
			       struct v4l2_ext_controls *ctrls)
{
	struct saa7134_dev *dev = file->private_data;

	if (ctrls->ctrl_class != V4L2_CTRL_CLASS_MPEG)
		return -EINVAL;
	return saa_call_empress(dev, core, g_ext_ctrls, ctrls);
}

static int empress_g_ctrl(struct file *file, void *priv,
					struct v4l2_control *c)
{
	struct saa7134_dev *dev = file->private_data;

	return saa7134_g_ctrl_internal(dev, NULL, c);
}

static int empress_s_ctrl(struct file *file, void *priv,
					struct v4l2_control *c)
{
	struct saa7134_dev *dev = file->private_data;

	return saa7134_s_ctrl_internal(dev, NULL, c);
}

/*
 * Merge the bridge's user controls with the encoder's MPEG controls
 * into one enumerable control space.
 */
static int empress_queryctrl(struct file *file, void *priv,
					struct v4l2_queryctrl *c)
{
	/* Must be sorted from low to high control ID! */
	static const u32 user_ctrls[] = {
		V4L2_CID_USER_CLASS,
		V4L2_CID_BRIGHTNESS,
		V4L2_CID_CONTRAST,
		V4L2_CID_SATURATION,
		V4L2_CID_HUE,
		V4L2_CID_AUDIO_VOLUME,
		V4L2_CID_AUDIO_MUTE,
		V4L2_CID_HFLIP,
		0
	};

	/* Must be sorted from low to high control ID! */
	static const u32 mpeg_ctrls[] = {
		V4L2_CID_MPEG_CLASS,
		V4L2_CID_MPEG_STREAM_TYPE,
		V4L2_CID_MPEG_STREAM_PID_PMT,
		V4L2_CID_MPEG_STREAM_PID_AUDIO,
		V4L2_CID_MPEG_STREAM_PID_VIDEO,
		V4L2_CID_MPEG_STREAM_PID_PCR,
		V4L2_CID_MPEG_AUDIO_SAMPLING_FREQ,
		V4L2_CID_MPEG_AUDIO_ENCODING,
		V4L2_CID_MPEG_AUDIO_L2_BITRATE,
		V4L2_CID_MPEG_VIDEO_ENCODING,
		V4L2_CID_MPEG_VIDEO_ASPECT,
		V4L2_CID_MPEG_VIDEO_BITRATE_MODE,
		V4L2_CID_MPEG_VIDEO_BITRATE,
		V4L2_CID_MPEG_VIDEO_BITRATE_PEAK,
		0
	};
	static const u32 *ctrl_classes[] = {
		user_ctrls,
		mpeg_ctrls,
		NULL
	};
	struct saa7134_dev *dev = file->private_data;

	c->id = v4l2_ctrl_next(ctrl_classes, c->id);
	if (c->id == 0)
		return -EINVAL;
	if (c->id == V4L2_CID_USER_CLASS || c->id == V4L2_CID_MPEG_CLASS)
		return v4l2_ctrl_query_fill(c, 0, 0, 0, 0);
	if (V4L2_CTRL_ID2CLASS(c->id) != V4L2_CTRL_CLASS_MPEG)
		return saa7134_queryctrl(file, priv, c);
	return saa_call_empress(dev, core, queryctrl, c);
}

static int empress_querymenu(struct file *file, void *priv,
					struct v4l2_querymenu *c)
{
	struct saa7134_dev *dev = file->private_data;

	if (V4L2_CTRL_ID2CLASS(c->id) != V4L2_CTRL_CLASS_MPEG)
		return -EINVAL;
	return saa_call_empress(dev, core, querymenu, c);
}

static int empress_g_chip_ident(struct file *file, void *fh,
	       struct v4l2_dbg_chip_ident *chip)
{
	struct saa7134_dev *dev = file->private_data;

	chip->ident = V4L2_IDENT_NONE;
	chip->revision = 0;
	if (chip->match.type == V4L2_CHIP_MATCH_I2C_DRIVER &&
	    !strcmp(chip->match.name, "saa6752hs"))
		return saa_call_empress(dev, core, g_chip_ident, chip);
	if (chip->match.type == V4L2_CHIP_MATCH_I2C_ADDR)
		return saa_call_empress(dev, core, g_chip_ident, chip);
	return -EINVAL;
}

static int empress_s_std(struct file *file, void *priv, v4l2_std_id *id)
{
	struct saa7134_dev *dev = file->private_data;

	return saa7134_s_std_internal(dev, NULL, id);
}

static int empress_g_std(struct file *file, void *priv, v4l2_std_id *id)
{
	struct saa7134_dev *dev = file->private_data;

	*id = dev->tvnorm->id;
	return 0;
}

static const struct v4l2_file_operations ts_fops =
{
	.owner	  = THIS_MODULE,
	.open	  = ts_open,
	.release  = ts_release,
	.read	  = ts_read,
	.poll	  = ts_poll,
	.mmap	  = ts_mmap,
	.ioctl	  = video_ioctl2,
};

static const struct v4l2_ioctl_ops ts_ioctl_ops = {
	.vidioc_querycap		= empress_querycap,
	.vidioc_enum_fmt_vid_cap	= empress_enum_fmt_vid_cap,
	.vidioc_try_fmt_vid_cap		= empress_try_fmt_vid_cap,
	.vidioc_s_fmt_vid_cap		= empress_s_fmt_vid_cap,
	.vidioc_g_fmt_vid_cap		= empress_g_fmt_vid_cap,
	.vidioc_reqbufs			= empress_reqbufs,
	.vidioc_querybuf		= empress_querybuf,
	.vidioc_qbuf			= empress_qbuf,
	.vidioc_dqbuf			= empress_dqbuf,
	.vidioc_streamon		= empress_streamon,
	.vidioc_streamoff		= empress_streamoff,
	.vidioc_s_ext_ctrls		= empress_s_ext_ctrls,
	.vidioc_g_ext_ctrls		= empress_g_ext_ctrls,
	.vidioc_enum_input		= empress_enum_input,
	.vidioc_g_input			= empress_g_input,
	.vidioc_s_input			= empress_s_input,
	.vidioc_queryctrl		= empress_queryctrl,
	.vidioc_querymenu		= empress_querymenu,
	.vidioc_g_ctrl			= empress_g_ctrl,
	.vidioc_s_ctrl			= empress_s_ctrl,
	.vidioc_g_chip_ident		= empress_g_chip_ident,
	.vidioc_s_std			= empress_s_std,
	.vidioc_g_std			= empress_g_std,
};

/* ----------------------------------------------------------- */

static struct video_device saa7134_empress_template = {
	.name          = "saa7134-empress",
	.fops          = &ts_fops,
	.ioctl_ops     = &ts_ioctl_ops,

	.tvnorms			= SAA7134_NORMS,
	.current_norm			= V4L2_STD_PAL,
};

/* Workqueue callback: just log the current signal state. */
static void empress_signal_update(struct work_struct *work)
{
	struct saa7134_dev* dev =
		container_of(work, struct saa7134_dev, empress_workqueue);

	if (dev->nosignal) {
		dprintk("no video signal\n");
	} else {
		dprintk("video signal acquired\n");
	}
}

static void empress_signal_change(struct saa7134_dev *dev)
{
	schedule_work(&dev->empress_workqueue);
}

/* Allocate and register the empress video device and its TS queue. */
static int empress_init(struct saa7134_dev *dev)
{
	int err;

	dprintk("%s: %s\n",dev->name,__func__);
	dev->empress_dev = video_device_alloc();
	if (NULL == dev->empress_dev)
		return -ENOMEM;
	*(dev->empress_dev) = saa7134_empress_template;
	dev->empress_dev->parent  = &dev->pci->dev;
	dev->empress_dev->release = video_device_release;
	snprintf(dev->empress_dev->name, sizeof(dev->empress_dev->name),
		 "%s empress (%s)", dev->name,
		 saa7134_boards[dev->board].name);

	INIT_WORK(&dev->empress_workqueue, empress_signal_update);

	video_set_drvdata(dev->empress_dev, dev);
	err = video_register_device(dev->empress_dev,VFL_TYPE_GRABBER,
				    empress_nr[dev->nr]);
	if (err < 0) {
		printk(KERN_INFO "%s: can't register video device\n",
		       dev->name);
		video_device_release(dev->empress_dev);
		dev->empress_dev = NULL;
		return err;
	}
	printk(KERN_INFO "%s: registered device %s [mpeg]\n",
	       dev->name, video_device_node_name(dev->empress_dev));

	videobuf_queue_sg_init(&dev->empress_tsq, &saa7134_ts_qops,
			    &dev->pci->dev, &dev->slock,
			    V4L2_BUF_TYPE_VIDEO_CAPTURE,
			    V4L2_FIELD_ALTERNATE,
			    sizeof(struct saa7134_buf),
			    dev, NULL);

	empress_signal_update(&dev->empress_workqueue);
	return 0;
}

/* Unregister the video device; flush pending signal-update work first. */
static int empress_fini(struct saa7134_dev *dev)
{
	dprintk("%s: %s\n",dev->name,__func__);

	if (NULL == dev->empress_dev)
		return 0;
	flush_work_sync(&dev->empress_workqueue);
	video_unregister_device(dev->empress_dev);
	dev->empress_dev = NULL;
	return 0;
}

static struct saa7134_mpeg_ops empress_ops = {
	.type          = SAA7134_MPEG_EMPRESS,
	.init          = empress_init,
	.fini          = empress_fini,
	.signal_change = empress_signal_change,
};

static int __init empress_register(void)
{
	return saa7134_ts_register(&empress_ops);
}

static void __exit empress_unregister(void)
{
	saa7134_ts_unregister(&empress_ops);
}

module_init(empress_register);
module_exit(empress_unregister);

/* ----------------------------------------------------------- */
/*
 * Local variables:
 * c-basic-offset: 8
 * End:
 */
gpl-2.0
bigzz/s3c-kernel
arch/mips/cavium-octeon/executive/cvmx-bootmem.c
8631
20487
/***********************license start*************** * Author: Cavium Networks * * Contact: support@caviumnetworks.com * This file is part of the OCTEON SDK * * Copyright (c) 2003-2008 Cavium Networks * * This file is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License, Version 2, as * published by the Free Software Foundation. * * This file is distributed in the hope that it will be useful, but * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or * NONINFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this file; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * or visit http://www.gnu.org/licenses/. * * This file may also be available under a different license from Cavium. * Contact Cavium Networks for more information ***********************license end**************************************/ /* * Simple allocate only memory allocator. Used to allocate memory at * application start time. */ #include <linux/kernel.h> #include <linux/module.h> #include <asm/octeon/cvmx.h> #include <asm/octeon/cvmx-spinlock.h> #include <asm/octeon/cvmx-bootmem.h> /*#define DEBUG */ static struct cvmx_bootmem_desc *cvmx_bootmem_desc; /* See header file for descriptions of functions */ /* * Wrapper functions are provided for reading/writing the size and * next block values as these may not be directly addressible (in 32 * bit applications, for instance.) Offsets of data elements in * bootmem list, must match cvmx_bootmem_block_header_t. 
*/ #define NEXT_OFFSET 0 #define SIZE_OFFSET 8 static void cvmx_bootmem_phy_set_size(uint64_t addr, uint64_t size) { cvmx_write64_uint64((addr + SIZE_OFFSET) | (1ull << 63), size); } static void cvmx_bootmem_phy_set_next(uint64_t addr, uint64_t next) { cvmx_write64_uint64((addr + NEXT_OFFSET) | (1ull << 63), next); } static uint64_t cvmx_bootmem_phy_get_size(uint64_t addr) { return cvmx_read64_uint64((addr + SIZE_OFFSET) | (1ull << 63)); } static uint64_t cvmx_bootmem_phy_get_next(uint64_t addr) { return cvmx_read64_uint64((addr + NEXT_OFFSET) | (1ull << 63)); } void *cvmx_bootmem_alloc_range(uint64_t size, uint64_t alignment, uint64_t min_addr, uint64_t max_addr) { int64_t address; address = cvmx_bootmem_phy_alloc(size, min_addr, max_addr, alignment, 0); if (address > 0) return cvmx_phys_to_ptr(address); else return NULL; } void *cvmx_bootmem_alloc_address(uint64_t size, uint64_t address, uint64_t alignment) { return cvmx_bootmem_alloc_range(size, alignment, address, address + size); } void *cvmx_bootmem_alloc(uint64_t size, uint64_t alignment) { return cvmx_bootmem_alloc_range(size, alignment, 0, 0); } void *cvmx_bootmem_alloc_named_range(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t align, char *name) { int64_t addr; addr = cvmx_bootmem_phy_named_block_alloc(size, min_addr, max_addr, align, name, 0); if (addr >= 0) return cvmx_phys_to_ptr(addr); else return NULL; } void *cvmx_bootmem_alloc_named_address(uint64_t size, uint64_t address, char *name) { return cvmx_bootmem_alloc_named_range(size, address, address + size, 0, name); } void *cvmx_bootmem_alloc_named(uint64_t size, uint64_t alignment, char *name) { return cvmx_bootmem_alloc_named_range(size, 0, 0, alignment, name); } EXPORT_SYMBOL(cvmx_bootmem_alloc_named); int cvmx_bootmem_free_named(char *name) { return cvmx_bootmem_phy_named_block_free(name, 0); } struct cvmx_bootmem_named_block_desc *cvmx_bootmem_find_named_block(char *name) { return cvmx_bootmem_phy_named_block_find(name, 0); } 
EXPORT_SYMBOL(cvmx_bootmem_find_named_block); void cvmx_bootmem_lock(void) { cvmx_spinlock_lock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock)); } void cvmx_bootmem_unlock(void) { cvmx_spinlock_unlock((cvmx_spinlock_t *) &(cvmx_bootmem_desc->lock)); } int cvmx_bootmem_init(void *mem_desc_ptr) { /* Here we set the global pointer to the bootmem descriptor * block. This pointer will be used directly, so we will set * it up to be directly usable by the application. It is set * up as follows for the various runtime/ABI combinations: * * Linux 64 bit: Set XKPHYS bit * Linux 32 bit: use mmap to create mapping, use virtual address * CVMX 64 bit: use physical address directly * CVMX 32 bit: use physical address directly * * Note that the CVMX environment assumes the use of 1-1 TLB * mappings so that the physical addresses can be used * directly */ if (!cvmx_bootmem_desc) { #if defined(CVMX_ABI_64) /* Set XKPHYS bit */ cvmx_bootmem_desc = cvmx_phys_to_ptr(CAST64(mem_desc_ptr)); #else cvmx_bootmem_desc = (struct cvmx_bootmem_desc *) mem_desc_ptr; #endif } return 0; } /* * The cvmx_bootmem_phy* functions below return 64 bit physical * addresses, and expose more features that the cvmx_bootmem_functions * above. These are required for full memory space access in 32 bit * applications, as well as for using some advance features. Most * applications should not need to use these. 
*/ int64_t cvmx_bootmem_phy_alloc(uint64_t req_size, uint64_t address_min, uint64_t address_max, uint64_t alignment, uint32_t flags) { uint64_t head_addr; uint64_t ent_addr; /* points to previous list entry, NULL current entry is head of list */ uint64_t prev_addr = 0; uint64_t new_ent_addr = 0; uint64_t desired_min_addr; #ifdef DEBUG cvmx_dprintf("cvmx_bootmem_phy_alloc: req_size: 0x%llx, " "min_addr: 0x%llx, max_addr: 0x%llx, align: 0x%llx\n", (unsigned long long)req_size, (unsigned long long)address_min, (unsigned long long)address_max, (unsigned long long)alignment); #endif if (cvmx_bootmem_desc->major_version > 3) { cvmx_dprintf("ERROR: Incompatible bootmem descriptor " "version: %d.%d at addr: %p\n", (int)cvmx_bootmem_desc->major_version, (int)cvmx_bootmem_desc->minor_version, cvmx_bootmem_desc); goto error_out; } /* * Do a variety of checks to validate the arguments. The * allocator code will later assume that these checks have * been made. We validate that the requested constraints are * not self-contradictory before we look through the list of * available memory. */ /* 0 is not a valid req_size for this allocator */ if (!req_size) goto error_out; /* Round req_size up to mult of minimum alignment bytes */ req_size = (req_size + (CVMX_BOOTMEM_ALIGNMENT_SIZE - 1)) & ~(CVMX_BOOTMEM_ALIGNMENT_SIZE - 1); /* * Convert !0 address_min and 0 address_max to special case of * range that specifies an exact memory block to allocate. Do * this before other checks and adjustments so that this * tranformation will be validated. */ if (address_min && !address_max) address_max = address_min + req_size; else if (!address_min && !address_max) address_max = ~0ull; /* If no limits given, use max limits */ /* * Enforce minimum alignment (this also keeps the minimum free block * req_size the same as the alignment req_size. 
*/ if (alignment < CVMX_BOOTMEM_ALIGNMENT_SIZE) alignment = CVMX_BOOTMEM_ALIGNMENT_SIZE; /* * Adjust address minimum based on requested alignment (round * up to meet alignment). Do this here so we can reject * impossible requests up front. (NOP for address_min == 0) */ if (alignment) address_min = ALIGN(address_min, alignment); /* * Reject inconsistent args. We have adjusted these, so this * may fail due to our internal changes even if this check * would pass for the values the user supplied. */ if (req_size > address_max - address_min) goto error_out; /* Walk through the list entries - first fit found is returned */ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING)) cvmx_bootmem_lock(); head_addr = cvmx_bootmem_desc->head_addr; ent_addr = head_addr; for (; ent_addr; prev_addr = ent_addr, ent_addr = cvmx_bootmem_phy_get_next(ent_addr)) { uint64_t usable_base, usable_max; uint64_t ent_size = cvmx_bootmem_phy_get_size(ent_addr); if (cvmx_bootmem_phy_get_next(ent_addr) && ent_addr > cvmx_bootmem_phy_get_next(ent_addr)) { cvmx_dprintf("Internal bootmem_alloc() error: ent: " "0x%llx, next: 0x%llx\n", (unsigned long long)ent_addr, (unsigned long long) cvmx_bootmem_phy_get_next(ent_addr)); goto error_out; } /* * Determine if this is an entry that can satisify the * request Check to make sure entry is large enough to * satisfy request. */ usable_base = ALIGN(max(address_min, ent_addr), alignment); usable_max = min(address_max, ent_addr + ent_size); /* * We should be able to allocate block at address * usable_base. */ desired_min_addr = usable_base; /* * Determine if request can be satisfied from the * current entry. */ if (!((ent_addr + ent_size) > usable_base && ent_addr < address_max && req_size <= usable_max - usable_base)) continue; /* * We have found an entry that has room to satisfy the * request, so allocate it from this entry. If end * CVMX_BOOTMEM_FLAG_END_ALLOC set, then allocate from * the end of this block rather than the beginning. 
*/ if (flags & CVMX_BOOTMEM_FLAG_END_ALLOC) { desired_min_addr = usable_max - req_size; /* * Align desired address down to required * alignment. */ desired_min_addr &= ~(alignment - 1); } /* Match at start of entry */ if (desired_min_addr == ent_addr) { if (req_size < ent_size) { /* * big enough to create a new block * from top portion of block. */ new_ent_addr = ent_addr + req_size; cvmx_bootmem_phy_set_next(new_ent_addr, cvmx_bootmem_phy_get_next(ent_addr)); cvmx_bootmem_phy_set_size(new_ent_addr, ent_size - req_size); /* * Adjust next pointer as following * code uses this. */ cvmx_bootmem_phy_set_next(ent_addr, new_ent_addr); } /* * adjust prev ptr or head to remove this * entry from list. */ if (prev_addr) cvmx_bootmem_phy_set_next(prev_addr, cvmx_bootmem_phy_get_next(ent_addr)); else /* * head of list being returned, so * update head ptr. */ cvmx_bootmem_desc->head_addr = cvmx_bootmem_phy_get_next(ent_addr); if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING)) cvmx_bootmem_unlock(); return desired_min_addr; } /* * block returned doesn't start at beginning of entry, * so we know that we will be splitting a block off * the front of this one. Create a new block from the * beginning, add to list, and go to top of loop * again. * * create new block from high portion of * block, so that top block starts at desired * addr. 
*/ new_ent_addr = desired_min_addr; cvmx_bootmem_phy_set_next(new_ent_addr, cvmx_bootmem_phy_get_next (ent_addr)); cvmx_bootmem_phy_set_size(new_ent_addr, cvmx_bootmem_phy_get_size (ent_addr) - (desired_min_addr - ent_addr)); cvmx_bootmem_phy_set_size(ent_addr, desired_min_addr - ent_addr); cvmx_bootmem_phy_set_next(ent_addr, new_ent_addr); /* Loop again to handle actual alloc from new block */ } error_out: /* We didn't find anything, so return error */ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING)) cvmx_bootmem_unlock(); return -1; } int __cvmx_bootmem_phy_free(uint64_t phy_addr, uint64_t size, uint32_t flags) { uint64_t cur_addr; uint64_t prev_addr = 0; /* zero is invalid */ int retval = 0; #ifdef DEBUG cvmx_dprintf("__cvmx_bootmem_phy_free addr: 0x%llx, size: 0x%llx\n", (unsigned long long)phy_addr, (unsigned long long)size); #endif if (cvmx_bootmem_desc->major_version > 3) { cvmx_dprintf("ERROR: Incompatible bootmem descriptor " "version: %d.%d at addr: %p\n", (int)cvmx_bootmem_desc->major_version, (int)cvmx_bootmem_desc->minor_version, cvmx_bootmem_desc); return 0; } /* 0 is not a valid size for this allocator */ if (!size) return 0; if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING)) cvmx_bootmem_lock(); cur_addr = cvmx_bootmem_desc->head_addr; if (cur_addr == 0 || phy_addr < cur_addr) { /* add at front of list - special case with changing head ptr */ if (cur_addr && phy_addr + size > cur_addr) goto bootmem_free_done; /* error, overlapping section */ else if (phy_addr + size == cur_addr) { /* Add to front of existing first block */ cvmx_bootmem_phy_set_next(phy_addr, cvmx_bootmem_phy_get_next (cur_addr)); cvmx_bootmem_phy_set_size(phy_addr, cvmx_bootmem_phy_get_size (cur_addr) + size); cvmx_bootmem_desc->head_addr = phy_addr; } else { /* New block before first block. 
OK if cur_addr is 0 */ cvmx_bootmem_phy_set_next(phy_addr, cur_addr); cvmx_bootmem_phy_set_size(phy_addr, size); cvmx_bootmem_desc->head_addr = phy_addr; } retval = 1; goto bootmem_free_done; } /* Find place in list to add block */ while (cur_addr && phy_addr > cur_addr) { prev_addr = cur_addr; cur_addr = cvmx_bootmem_phy_get_next(cur_addr); } if (!cur_addr) { /* * We have reached the end of the list, add on to end, * checking to see if we need to combine with last * block */ if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) == phy_addr) { cvmx_bootmem_phy_set_size(prev_addr, cvmx_bootmem_phy_get_size (prev_addr) + size); } else { cvmx_bootmem_phy_set_next(prev_addr, phy_addr); cvmx_bootmem_phy_set_size(phy_addr, size); cvmx_bootmem_phy_set_next(phy_addr, 0); } retval = 1; goto bootmem_free_done; } else { /* * insert between prev and cur nodes, checking for * merge with either/both. */ if (prev_addr + cvmx_bootmem_phy_get_size(prev_addr) == phy_addr) { /* Merge with previous */ cvmx_bootmem_phy_set_size(prev_addr, cvmx_bootmem_phy_get_size (prev_addr) + size); if (phy_addr + size == cur_addr) { /* Also merge with current */ cvmx_bootmem_phy_set_size(prev_addr, cvmx_bootmem_phy_get_size(cur_addr) + cvmx_bootmem_phy_get_size(prev_addr)); cvmx_bootmem_phy_set_next(prev_addr, cvmx_bootmem_phy_get_next(cur_addr)); } retval = 1; goto bootmem_free_done; } else if (phy_addr + size == cur_addr) { /* Merge with current */ cvmx_bootmem_phy_set_size(phy_addr, cvmx_bootmem_phy_get_size (cur_addr) + size); cvmx_bootmem_phy_set_next(phy_addr, cvmx_bootmem_phy_get_next (cur_addr)); cvmx_bootmem_phy_set_next(prev_addr, phy_addr); retval = 1; goto bootmem_free_done; } /* It is a standalone block, add in between prev and cur */ cvmx_bootmem_phy_set_size(phy_addr, size); cvmx_bootmem_phy_set_next(phy_addr, cur_addr); cvmx_bootmem_phy_set_next(prev_addr, phy_addr); } retval = 1; bootmem_free_done: if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING)) cvmx_bootmem_unlock(); return retval; } 
struct cvmx_bootmem_named_block_desc * cvmx_bootmem_phy_named_block_find(char *name, uint32_t flags) { unsigned int i; struct cvmx_bootmem_named_block_desc *named_block_array_ptr; #ifdef DEBUG cvmx_dprintf("cvmx_bootmem_phy_named_block_find: %s\n", name); #endif /* * Lock the structure to make sure that it is not being * changed while we are examining it. */ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING)) cvmx_bootmem_lock(); /* Use XKPHYS for 64 bit linux */ named_block_array_ptr = (struct cvmx_bootmem_named_block_desc *) cvmx_phys_to_ptr(cvmx_bootmem_desc->named_block_array_addr); #ifdef DEBUG cvmx_dprintf ("cvmx_bootmem_phy_named_block_find: named_block_array_ptr: %p\n", named_block_array_ptr); #endif if (cvmx_bootmem_desc->major_version == 3) { for (i = 0; i < cvmx_bootmem_desc->named_block_num_blocks; i++) { if ((name && named_block_array_ptr[i].size && !strncmp(name, named_block_array_ptr[i].name, cvmx_bootmem_desc->named_block_name_len - 1)) || (!name && !named_block_array_ptr[i].size)) { if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING)) cvmx_bootmem_unlock(); return &(named_block_array_ptr[i]); } } } else { cvmx_dprintf("ERROR: Incompatible bootmem descriptor " "version: %d.%d at addr: %p\n", (int)cvmx_bootmem_desc->major_version, (int)cvmx_bootmem_desc->minor_version, cvmx_bootmem_desc); } if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING)) cvmx_bootmem_unlock(); return NULL; } int cvmx_bootmem_phy_named_block_free(char *name, uint32_t flags) { struct cvmx_bootmem_named_block_desc *named_block_ptr; if (cvmx_bootmem_desc->major_version != 3) { cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: " "%d.%d at addr: %p\n", (int)cvmx_bootmem_desc->major_version, (int)cvmx_bootmem_desc->minor_version, cvmx_bootmem_desc); return 0; } #ifdef DEBUG cvmx_dprintf("cvmx_bootmem_phy_named_block_free: %s\n", name); #endif /* * Take lock here, as name lookup/block free/name free need to * be atomic. 
*/ cvmx_bootmem_lock(); named_block_ptr = cvmx_bootmem_phy_named_block_find(name, CVMX_BOOTMEM_FLAG_NO_LOCKING); if (named_block_ptr) { #ifdef DEBUG cvmx_dprintf("cvmx_bootmem_phy_named_block_free: " "%s, base: 0x%llx, size: 0x%llx\n", name, (unsigned long long)named_block_ptr->base_addr, (unsigned long long)named_block_ptr->size); #endif __cvmx_bootmem_phy_free(named_block_ptr->base_addr, named_block_ptr->size, CVMX_BOOTMEM_FLAG_NO_LOCKING); named_block_ptr->size = 0; /* Set size to zero to indicate block not used. */ } cvmx_bootmem_unlock(); return named_block_ptr != NULL; /* 0 on failure, 1 on success */ } int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr, uint64_t max_addr, uint64_t alignment, char *name, uint32_t flags) { int64_t addr_allocated; struct cvmx_bootmem_named_block_desc *named_block_desc_ptr; #ifdef DEBUG cvmx_dprintf("cvmx_bootmem_phy_named_block_alloc: size: 0x%llx, min: " "0x%llx, max: 0x%llx, align: 0x%llx, name: %s\n", (unsigned long long)size, (unsigned long long)min_addr, (unsigned long long)max_addr, (unsigned long long)alignment, name); #endif if (cvmx_bootmem_desc->major_version != 3) { cvmx_dprintf("ERROR: Incompatible bootmem descriptor version: " "%d.%d at addr: %p\n", (int)cvmx_bootmem_desc->major_version, (int)cvmx_bootmem_desc->minor_version, cvmx_bootmem_desc); return -1; } /* * Take lock here, as name lookup/block alloc/name add need to * be atomic. */ if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING)) cvmx_spinlock_lock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock)); /* Get pointer to first available named block descriptor */ named_block_desc_ptr = cvmx_bootmem_phy_named_block_find(NULL, flags | CVMX_BOOTMEM_FLAG_NO_LOCKING); /* * Check to see if name already in use, return error if name * not available or no more room for blocks. 
*/ if (cvmx_bootmem_phy_named_block_find(name, flags | CVMX_BOOTMEM_FLAG_NO_LOCKING) || !named_block_desc_ptr) { if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING)) cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock)); return -1; } /* * Round size up to mult of minimum alignment bytes We need * the actual size allocated to allow for blocks to be * coallesced when they are freed. The alloc routine does the * same rounding up on all allocations. */ size = ALIGN(size, CVMX_BOOTMEM_ALIGNMENT_SIZE); addr_allocated = cvmx_bootmem_phy_alloc(size, min_addr, max_addr, alignment, flags | CVMX_BOOTMEM_FLAG_NO_LOCKING); if (addr_allocated >= 0) { named_block_desc_ptr->base_addr = addr_allocated; named_block_desc_ptr->size = size; strncpy(named_block_desc_ptr->name, name, cvmx_bootmem_desc->named_block_name_len); named_block_desc_ptr->name[cvmx_bootmem_desc->named_block_name_len - 1] = 0; } if (!(flags & CVMX_BOOTMEM_FLAG_NO_LOCKING)) cvmx_spinlock_unlock((cvmx_spinlock_t *)&(cvmx_bootmem_desc->lock)); return addr_allocated; }
gpl-2.0
RaYmunDooo/RaYmunDooo
sound/aoa/core/gpio-feature.c
10679
10916
/* * Apple Onboard Audio feature call GPIO control * * Copyright 2006 Johannes Berg <johannes@sipsolutions.net> * * GPL v2, can be found in COPYING. * * This file contains the GPIO control routines for * direct (through feature calls) access to the GPIO * registers. */ #include <asm/pmac_feature.h> #include <linux/interrupt.h> #include "../aoa.h" /* TODO: these are lots of global variables * that aren't used on most machines... * Move them into a dynamically allocated * structure and use that. */ /* these are the GPIO numbers (register addresses as offsets into * the GPIO space) */ static int headphone_mute_gpio; static int master_mute_gpio; static int amp_mute_gpio; static int lineout_mute_gpio; static int hw_reset_gpio; static int lineout_detect_gpio; static int headphone_detect_gpio; static int linein_detect_gpio; /* see the SWITCH_GPIO macro */ static int headphone_mute_gpio_activestate; static int master_mute_gpio_activestate; static int amp_mute_gpio_activestate; static int lineout_mute_gpio_activestate; static int hw_reset_gpio_activestate; static int lineout_detect_gpio_activestate; static int headphone_detect_gpio_activestate; static int linein_detect_gpio_activestate; /* node pointers that we save when getting the GPIO number * to get the interrupt later */ static struct device_node *lineout_detect_node; static struct device_node *linein_detect_node; static struct device_node *headphone_detect_node; static int lineout_detect_irq; static int linein_detect_irq; static int headphone_detect_irq; static struct device_node *get_gpio(char *name, char *altname, int *gpioptr, int *gpioactiveptr) { struct device_node *np, *gpio; const u32 *reg; const char *audio_gpio; *gpioptr = -1; /* check if we can get it the easy way ... */ np = of_find_node_by_name(NULL, name); if (!np) { /* some machines have only gpioX/extint-gpioX nodes, * and an audio-gpio property saying what it is ... 
* So what we have to do is enumerate all children * of the gpio node and check them all. */ gpio = of_find_node_by_name(NULL, "gpio"); if (!gpio) return NULL; while ((np = of_get_next_child(gpio, np))) { audio_gpio = of_get_property(np, "audio-gpio", NULL); if (!audio_gpio) continue; if (strcmp(audio_gpio, name) == 0) break; if (altname && (strcmp(audio_gpio, altname) == 0)) break; } /* still not found, assume not there */ if (!np) return NULL; } reg = of_get_property(np, "reg", NULL); if (!reg) return NULL; *gpioptr = *reg; /* this is a hack, usually the GPIOs 'reg' property * should have the offset based from the GPIO space * which is at 0x50, but apparently not always... */ if (*gpioptr < 0x50) *gpioptr += 0x50; reg = of_get_property(np, "audio-gpio-active-state", NULL); if (!reg) /* Apple seems to default to 1, but * that doesn't seem right at least on most * machines. So until proven that the opposite * is necessary, we default to 0 * (which, incidentally, snd-powermac also does...) */ *gpioactiveptr = 0; else *gpioactiveptr = *reg; return np; } static void get_irq(struct device_node * np, int *irqptr) { if (np) *irqptr = irq_of_parse_and_map(np, 0); else *irqptr = NO_IRQ; } /* 0x4 is outenable, 0x1 is out, thus 4 or 5 */ #define SWITCH_GPIO(name, v, on) \ (((v)&~1) | ((on)? \ (name##_gpio_activestate==0?4:5): \ (name##_gpio_activestate==0?5:4))) #define FTR_GPIO(name, bit) \ static void ftr_gpio_set_##name(struct gpio_runtime *rt, int on)\ { \ int v; \ \ if (unlikely(!rt)) return; \ \ if (name##_mute_gpio < 0) \ return; \ \ v = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, \ name##_mute_gpio, \ 0); \ \ /* muted = !on... 
*/ \ v = SWITCH_GPIO(name##_mute, v, !on); \ \ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, \ name##_mute_gpio, v); \ \ rt->implementation_private &= ~(1<<bit); \ rt->implementation_private |= (!!on << bit); \ } \ static int ftr_gpio_get_##name(struct gpio_runtime *rt) \ { \ if (unlikely(!rt)) return 0; \ return (rt->implementation_private>>bit)&1; \ } FTR_GPIO(headphone, 0); FTR_GPIO(amp, 1); FTR_GPIO(lineout, 2); FTR_GPIO(master, 3); static void ftr_gpio_set_hw_reset(struct gpio_runtime *rt, int on) { int v; if (unlikely(!rt)) return; if (hw_reset_gpio < 0) return; v = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, hw_reset_gpio, 0); v = SWITCH_GPIO(hw_reset, v, on); pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, hw_reset_gpio, v); } static struct gpio_methods methods; static void ftr_gpio_all_amps_off(struct gpio_runtime *rt) { int saved; if (unlikely(!rt)) return; saved = rt->implementation_private; ftr_gpio_set_headphone(rt, 0); ftr_gpio_set_amp(rt, 0); ftr_gpio_set_lineout(rt, 0); if (methods.set_master) ftr_gpio_set_master(rt, 0); rt->implementation_private = saved; } static void ftr_gpio_all_amps_restore(struct gpio_runtime *rt) { int s; if (unlikely(!rt)) return; s = rt->implementation_private; ftr_gpio_set_headphone(rt, (s>>0)&1); ftr_gpio_set_amp(rt, (s>>1)&1); ftr_gpio_set_lineout(rt, (s>>2)&1); if (methods.set_master) ftr_gpio_set_master(rt, (s>>3)&1); } static void ftr_handle_notify(struct work_struct *work) { struct gpio_notification *notif = container_of(work, struct gpio_notification, work.work); mutex_lock(&notif->mutex); if (notif->notify) notif->notify(notif->data); mutex_unlock(&notif->mutex); } static void gpio_enable_dual_edge(int gpio) { int v; if (gpio == -1) return; v = pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, gpio, 0); v |= 0x80; /* enable dual edge */ pmac_call_feature(PMAC_FTR_WRITE_GPIO, NULL, gpio, v); } static void ftr_gpio_init(struct gpio_runtime *rt) { get_gpio("headphone-mute", NULL, &headphone_mute_gpio, 
&headphone_mute_gpio_activestate); get_gpio("amp-mute", NULL, &amp_mute_gpio, &amp_mute_gpio_activestate); get_gpio("lineout-mute", NULL, &lineout_mute_gpio, &lineout_mute_gpio_activestate); get_gpio("hw-reset", "audio-hw-reset", &hw_reset_gpio, &hw_reset_gpio_activestate); if (get_gpio("master-mute", NULL, &master_mute_gpio, &master_mute_gpio_activestate)) { methods.set_master = ftr_gpio_set_master; methods.get_master = ftr_gpio_get_master; } headphone_detect_node = get_gpio("headphone-detect", NULL, &headphone_detect_gpio, &headphone_detect_gpio_activestate); /* go Apple, and thanks for giving these different names * across the board... */ lineout_detect_node = get_gpio("lineout-detect", "line-output-detect", &lineout_detect_gpio, &lineout_detect_gpio_activestate); linein_detect_node = get_gpio("linein-detect", "line-input-detect", &linein_detect_gpio, &linein_detect_gpio_activestate); gpio_enable_dual_edge(headphone_detect_gpio); gpio_enable_dual_edge(lineout_detect_gpio); gpio_enable_dual_edge(linein_detect_gpio); get_irq(headphone_detect_node, &headphone_detect_irq); get_irq(lineout_detect_node, &lineout_detect_irq); get_irq(linein_detect_node, &linein_detect_irq); ftr_gpio_all_amps_off(rt); rt->implementation_private = 0; INIT_DELAYED_WORK(&rt->headphone_notify.work, ftr_handle_notify); INIT_DELAYED_WORK(&rt->line_in_notify.work, ftr_handle_notify); INIT_DELAYED_WORK(&rt->line_out_notify.work, ftr_handle_notify); mutex_init(&rt->headphone_notify.mutex); mutex_init(&rt->line_in_notify.mutex); mutex_init(&rt->line_out_notify.mutex); } static void ftr_gpio_exit(struct gpio_runtime *rt) { ftr_gpio_all_amps_off(rt); rt->implementation_private = 0; if (rt->headphone_notify.notify) free_irq(headphone_detect_irq, &rt->headphone_notify); if (rt->line_in_notify.gpio_private) free_irq(linein_detect_irq, &rt->line_in_notify); if (rt->line_out_notify.gpio_private) free_irq(lineout_detect_irq, &rt->line_out_notify); cancel_delayed_work_sync(&rt->headphone_notify.work); 
cancel_delayed_work_sync(&rt->line_in_notify.work); cancel_delayed_work_sync(&rt->line_out_notify.work); mutex_destroy(&rt->headphone_notify.mutex); mutex_destroy(&rt->line_in_notify.mutex); mutex_destroy(&rt->line_out_notify.mutex); } static irqreturn_t ftr_handle_notify_irq(int xx, void *data) { struct gpio_notification *notif = data; schedule_delayed_work(&notif->work, 0); return IRQ_HANDLED; } static int ftr_set_notify(struct gpio_runtime *rt, enum notify_type type, notify_func_t notify, void *data) { struct gpio_notification *notif; notify_func_t old; int irq; char *name; int err = -EBUSY; switch (type) { case AOA_NOTIFY_HEADPHONE: notif = &rt->headphone_notify; name = "headphone-detect"; irq = headphone_detect_irq; break; case AOA_NOTIFY_LINE_IN: notif = &rt->line_in_notify; name = "linein-detect"; irq = linein_detect_irq; break; case AOA_NOTIFY_LINE_OUT: notif = &rt->line_out_notify; name = "lineout-detect"; irq = lineout_detect_irq; break; default: return -EINVAL; } if (irq == NO_IRQ) return -ENODEV; mutex_lock(&notif->mutex); old = notif->notify; if (!old && !notify) { err = 0; goto out_unlock; } if (old && notify) { if (old == notify && notif->data == data) err = 0; goto out_unlock; } if (old && !notify) free_irq(irq, notif); if (!old && notify) { err = request_irq(irq, ftr_handle_notify_irq, 0, name, notif); if (err) goto out_unlock; } notif->notify = notify; notif->data = data; err = 0; out_unlock: mutex_unlock(&notif->mutex); return err; } static int ftr_get_detect(struct gpio_runtime *rt, enum notify_type type) { int gpio, ret, active; switch (type) { case AOA_NOTIFY_HEADPHONE: gpio = headphone_detect_gpio; active = headphone_detect_gpio_activestate; break; case AOA_NOTIFY_LINE_IN: gpio = linein_detect_gpio; active = linein_detect_gpio_activestate; break; case AOA_NOTIFY_LINE_OUT: gpio = lineout_detect_gpio; active = lineout_detect_gpio_activestate; break; default: return -EINVAL; } if (gpio == -1) return -ENODEV; ret = 
pmac_call_feature(PMAC_FTR_READ_GPIO, NULL, gpio, 0); if (ret < 0) return ret; return ((ret >> 1) & 1) == active; } static struct gpio_methods methods = { .init = ftr_gpio_init, .exit = ftr_gpio_exit, .all_amps_off = ftr_gpio_all_amps_off, .all_amps_restore = ftr_gpio_all_amps_restore, .set_headphone = ftr_gpio_set_headphone, .set_speakers = ftr_gpio_set_amp, .set_lineout = ftr_gpio_set_lineout, .set_hw_reset = ftr_gpio_set_hw_reset, .get_headphone = ftr_gpio_get_headphone, .get_speakers = ftr_gpio_get_amp, .get_lineout = ftr_gpio_get_lineout, .set_notify = ftr_set_notify, .get_detect = ftr_get_detect, }; struct gpio_methods *ftr_gpio_methods = &methods; EXPORT_SYMBOL_GPL(ftr_gpio_methods);
gpl-2.0
Schischu/android_kernel_samsung_chagallwifi
fs/lockd/xdr4.c
12727
7513
/* * linux/fs/lockd/xdr4.c * * XDR support for lockd and the lock client. * * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> * Copyright (C) 1999, Trond Myklebust <trond.myklebust@fys.uio.no> */ #include <linux/types.h> #include <linux/sched.h> #include <linux/nfs.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/svc.h> #include <linux/sunrpc/stats.h> #include <linux/lockd/lockd.h> #define NLMDBG_FACILITY NLMDBG_XDR static inline loff_t s64_to_loff_t(__s64 offset) { return (loff_t)offset; } static inline s64 loff_t_to_s64(loff_t offset) { s64 res; if (offset > NLM4_OFFSET_MAX) res = NLM4_OFFSET_MAX; else if (offset < -NLM4_OFFSET_MAX) res = -NLM4_OFFSET_MAX; else res = offset; return res; } /* * XDR functions for basic NLM types */ static __be32 * nlm4_decode_cookie(__be32 *p, struct nlm_cookie *c) { unsigned int len; len = ntohl(*p++); if(len==0) { c->len=4; memset(c->data, 0, 4); /* hockeypux brain damage */ } else if(len<=NLM_MAXCOOKIELEN) { c->len=len; memcpy(c->data, p, len); p+=XDR_QUADLEN(len); } else { dprintk("lockd: bad cookie size %d (only cookies under " "%d bytes are supported.)\n", len, NLM_MAXCOOKIELEN); return NULL; } return p; } static __be32 * nlm4_encode_cookie(__be32 *p, struct nlm_cookie *c) { *p++ = htonl(c->len); memcpy(p, c->data, c->len); p+=XDR_QUADLEN(c->len); return p; } static __be32 * nlm4_decode_fh(__be32 *p, struct nfs_fh *f) { memset(f->data, 0, sizeof(f->data)); f->size = ntohl(*p++); if (f->size > NFS_MAXFHSIZE) { dprintk("lockd: bad fhandle size %d (should be <=%d)\n", f->size, NFS_MAXFHSIZE); return NULL; } memcpy(f->data, p, f->size); return p + XDR_QUADLEN(f->size); } /* * Encode and decode owner handle */ static __be32 * nlm4_decode_oh(__be32 *p, struct xdr_netobj *oh) { return xdr_decode_netobj(p, oh); } static __be32 * nlm4_decode_lock(__be32 *p, struct nlm_lock *lock) { struct file_lock *fl = &lock->fl; __u64 len, start; __s64 end; if (!(p = xdr_decode_string_inplace(p, 
&lock->caller, &lock->len, NLM_MAXSTRLEN)) || !(p = nlm4_decode_fh(p, &lock->fh)) || !(p = nlm4_decode_oh(p, &lock->oh))) return NULL; lock->svid = ntohl(*p++); locks_init_lock(fl); fl->fl_owner = current->files; fl->fl_pid = (pid_t)lock->svid; fl->fl_flags = FL_POSIX; fl->fl_type = F_RDLCK; /* as good as anything else */ p = xdr_decode_hyper(p, &start); p = xdr_decode_hyper(p, &len); end = start + len - 1; fl->fl_start = s64_to_loff_t(start); if (len == 0 || end < 0) fl->fl_end = OFFSET_MAX; else fl->fl_end = s64_to_loff_t(end); return p; } /* * Encode result of a TEST/TEST_MSG call */ static __be32 * nlm4_encode_testres(__be32 *p, struct nlm_res *resp) { s64 start, len; dprintk("xdr: before encode_testres (p %p resp %p)\n", p, resp); if (!(p = nlm4_encode_cookie(p, &resp->cookie))) return NULL; *p++ = resp->status; if (resp->status == nlm_lck_denied) { struct file_lock *fl = &resp->lock.fl; *p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one; *p++ = htonl(resp->lock.svid); /* Encode owner handle. 
*/ if (!(p = xdr_encode_netobj(p, &resp->lock.oh))) return NULL; start = loff_t_to_s64(fl->fl_start); if (fl->fl_end == OFFSET_MAX) len = 0; else len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1); p = xdr_encode_hyper(p, start); p = xdr_encode_hyper(p, len); dprintk("xdr: encode_testres (status %u pid %d type %d start %Ld end %Ld)\n", resp->status, (int)resp->lock.svid, fl->fl_type, (long long)fl->fl_start, (long long)fl->fl_end); } dprintk("xdr: after encode_testres (p %p resp %p)\n", p, resp); return p; } /* * First, the server side XDR functions */ int nlm4svc_decode_testargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp) { u32 exclusive; if (!(p = nlm4_decode_cookie(p, &argp->cookie))) return 0; exclusive = ntohl(*p++); if (!(p = nlm4_decode_lock(p, &argp->lock))) return 0; if (exclusive) argp->lock.fl.fl_type = F_WRLCK; return xdr_argsize_check(rqstp, p); } int nlm4svc_encode_testres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp) { if (!(p = nlm4_encode_testres(p, resp))) return 0; return xdr_ressize_check(rqstp, p); } int nlm4svc_decode_lockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp) { u32 exclusive; if (!(p = nlm4_decode_cookie(p, &argp->cookie))) return 0; argp->block = ntohl(*p++); exclusive = ntohl(*p++); if (!(p = nlm4_decode_lock(p, &argp->lock))) return 0; if (exclusive) argp->lock.fl.fl_type = F_WRLCK; argp->reclaim = ntohl(*p++); argp->state = ntohl(*p++); argp->monitor = 1; /* monitor client by default */ return xdr_argsize_check(rqstp, p); } int nlm4svc_decode_cancargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp) { u32 exclusive; if (!(p = nlm4_decode_cookie(p, &argp->cookie))) return 0; argp->block = ntohl(*p++); exclusive = ntohl(*p++); if (!(p = nlm4_decode_lock(p, &argp->lock))) return 0; if (exclusive) argp->lock.fl.fl_type = F_WRLCK; return xdr_argsize_check(rqstp, p); } int nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp) { if (!(p = nlm4_decode_cookie(p, &argp->cookie)) || !(p = 
nlm4_decode_lock(p, &argp->lock))) return 0; argp->lock.fl.fl_type = F_UNLCK; return xdr_argsize_check(rqstp, p); } int nlm4svc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp) { struct nlm_lock *lock = &argp->lock; memset(lock, 0, sizeof(*lock)); locks_init_lock(&lock->fl); lock->svid = ~(u32) 0; lock->fl.fl_pid = (pid_t)lock->svid; if (!(p = nlm4_decode_cookie(p, &argp->cookie)) || !(p = xdr_decode_string_inplace(p, &lock->caller, &lock->len, NLM_MAXSTRLEN)) || !(p = nlm4_decode_fh(p, &lock->fh)) || !(p = nlm4_decode_oh(p, &lock->oh))) return 0; argp->fsm_mode = ntohl(*p++); argp->fsm_access = ntohl(*p++); return xdr_argsize_check(rqstp, p); } int nlm4svc_encode_shareres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp) { if (!(p = nlm4_encode_cookie(p, &resp->cookie))) return 0; *p++ = resp->status; *p++ = xdr_zero; /* sequence argument */ return xdr_ressize_check(rqstp, p); } int nlm4svc_encode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp) { if (!(p = nlm4_encode_cookie(p, &resp->cookie))) return 0; *p++ = resp->status; return xdr_ressize_check(rqstp, p); } int nlm4svc_decode_notify(struct svc_rqst *rqstp, __be32 *p, struct nlm_args *argp) { struct nlm_lock *lock = &argp->lock; if (!(p = xdr_decode_string_inplace(p, &lock->caller, &lock->len, NLM_MAXSTRLEN))) return 0; argp->state = ntohl(*p++); return xdr_argsize_check(rqstp, p); } int nlm4svc_decode_reboot(struct svc_rqst *rqstp, __be32 *p, struct nlm_reboot *argp) { if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN))) return 0; argp->state = ntohl(*p++); memcpy(&argp->priv.data, p, sizeof(argp->priv.data)); p += XDR_QUADLEN(SM_PRIV_SIZE); return xdr_argsize_check(rqstp, p); } int nlm4svc_decode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp) { if (!(p = nlm4_decode_cookie(p, &resp->cookie))) return 0; resp->status = *p++; return xdr_argsize_check(rqstp, p); } int nlm4svc_decode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy) 
{ return xdr_argsize_check(rqstp, p); } int nlm4svc_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy) { return xdr_ressize_check(rqstp, p); }
gpl-2.0
escalator2015/linux
fs/lockd/xdr4.c
12727
7513
/* * linux/fs/lockd/xdr4.c * * XDR support for lockd and the lock client. * * Copyright (C) 1995, 1996 Olaf Kirch <okir@monad.swb.de> * Copyright (C) 1999, Trond Myklebust <trond.myklebust@fys.uio.no> */ #include <linux/types.h> #include <linux/sched.h> #include <linux/nfs.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/svc.h> #include <linux/sunrpc/stats.h> #include <linux/lockd/lockd.h> #define NLMDBG_FACILITY NLMDBG_XDR static inline loff_t s64_to_loff_t(__s64 offset) { return (loff_t)offset; } static inline s64 loff_t_to_s64(loff_t offset) { s64 res; if (offset > NLM4_OFFSET_MAX) res = NLM4_OFFSET_MAX; else if (offset < -NLM4_OFFSET_MAX) res = -NLM4_OFFSET_MAX; else res = offset; return res; } /* * XDR functions for basic NLM types */ static __be32 * nlm4_decode_cookie(__be32 *p, struct nlm_cookie *c) { unsigned int len; len = ntohl(*p++); if(len==0) { c->len=4; memset(c->data, 0, 4); /* hockeypux brain damage */ } else if(len<=NLM_MAXCOOKIELEN) { c->len=len; memcpy(c->data, p, len); p+=XDR_QUADLEN(len); } else { dprintk("lockd: bad cookie size %d (only cookies under " "%d bytes are supported.)\n", len, NLM_MAXCOOKIELEN); return NULL; } return p; } static __be32 * nlm4_encode_cookie(__be32 *p, struct nlm_cookie *c) { *p++ = htonl(c->len); memcpy(p, c->data, c->len); p+=XDR_QUADLEN(c->len); return p; } static __be32 * nlm4_decode_fh(__be32 *p, struct nfs_fh *f) { memset(f->data, 0, sizeof(f->data)); f->size = ntohl(*p++); if (f->size > NFS_MAXFHSIZE) { dprintk("lockd: bad fhandle size %d (should be <=%d)\n", f->size, NFS_MAXFHSIZE); return NULL; } memcpy(f->data, p, f->size); return p + XDR_QUADLEN(f->size); } /* * Encode and decode owner handle */ static __be32 * nlm4_decode_oh(__be32 *p, struct xdr_netobj *oh) { return xdr_decode_netobj(p, oh); } static __be32 * nlm4_decode_lock(__be32 *p, struct nlm_lock *lock) { struct file_lock *fl = &lock->fl; __u64 len, start; __s64 end; if (!(p = xdr_decode_string_inplace(p, 
&lock->caller, &lock->len, NLM_MAXSTRLEN)) || !(p = nlm4_decode_fh(p, &lock->fh)) || !(p = nlm4_decode_oh(p, &lock->oh))) return NULL; lock->svid = ntohl(*p++); locks_init_lock(fl); fl->fl_owner = current->files; fl->fl_pid = (pid_t)lock->svid; fl->fl_flags = FL_POSIX; fl->fl_type = F_RDLCK; /* as good as anything else */ p = xdr_decode_hyper(p, &start); p = xdr_decode_hyper(p, &len); end = start + len - 1; fl->fl_start = s64_to_loff_t(start); if (len == 0 || end < 0) fl->fl_end = OFFSET_MAX; else fl->fl_end = s64_to_loff_t(end); return p; } /* * Encode result of a TEST/TEST_MSG call */ static __be32 * nlm4_encode_testres(__be32 *p, struct nlm_res *resp) { s64 start, len; dprintk("xdr: before encode_testres (p %p resp %p)\n", p, resp); if (!(p = nlm4_encode_cookie(p, &resp->cookie))) return NULL; *p++ = resp->status; if (resp->status == nlm_lck_denied) { struct file_lock *fl = &resp->lock.fl; *p++ = (fl->fl_type == F_RDLCK)? xdr_zero : xdr_one; *p++ = htonl(resp->lock.svid); /* Encode owner handle. 
*/ if (!(p = xdr_encode_netobj(p, &resp->lock.oh))) return NULL; start = loff_t_to_s64(fl->fl_start); if (fl->fl_end == OFFSET_MAX) len = 0; else len = loff_t_to_s64(fl->fl_end - fl->fl_start + 1); p = xdr_encode_hyper(p, start); p = xdr_encode_hyper(p, len); dprintk("xdr: encode_testres (status %u pid %d type %d start %Ld end %Ld)\n", resp->status, (int)resp->lock.svid, fl->fl_type, (long long)fl->fl_start, (long long)fl->fl_end); } dprintk("xdr: after encode_testres (p %p resp %p)\n", p, resp); return p; } /* * First, the server side XDR functions */ int nlm4svc_decode_testargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp) { u32 exclusive; if (!(p = nlm4_decode_cookie(p, &argp->cookie))) return 0; exclusive = ntohl(*p++); if (!(p = nlm4_decode_lock(p, &argp->lock))) return 0; if (exclusive) argp->lock.fl.fl_type = F_WRLCK; return xdr_argsize_check(rqstp, p); } int nlm4svc_encode_testres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp) { if (!(p = nlm4_encode_testres(p, resp))) return 0; return xdr_ressize_check(rqstp, p); } int nlm4svc_decode_lockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp) { u32 exclusive; if (!(p = nlm4_decode_cookie(p, &argp->cookie))) return 0; argp->block = ntohl(*p++); exclusive = ntohl(*p++); if (!(p = nlm4_decode_lock(p, &argp->lock))) return 0; if (exclusive) argp->lock.fl.fl_type = F_WRLCK; argp->reclaim = ntohl(*p++); argp->state = ntohl(*p++); argp->monitor = 1; /* monitor client by default */ return xdr_argsize_check(rqstp, p); } int nlm4svc_decode_cancargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp) { u32 exclusive; if (!(p = nlm4_decode_cookie(p, &argp->cookie))) return 0; argp->block = ntohl(*p++); exclusive = ntohl(*p++); if (!(p = nlm4_decode_lock(p, &argp->lock))) return 0; if (exclusive) argp->lock.fl.fl_type = F_WRLCK; return xdr_argsize_check(rqstp, p); } int nlm4svc_decode_unlockargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp) { if (!(p = nlm4_decode_cookie(p, &argp->cookie)) || !(p = 
nlm4_decode_lock(p, &argp->lock))) return 0; argp->lock.fl.fl_type = F_UNLCK; return xdr_argsize_check(rqstp, p); } int nlm4svc_decode_shareargs(struct svc_rqst *rqstp, __be32 *p, nlm_args *argp) { struct nlm_lock *lock = &argp->lock; memset(lock, 0, sizeof(*lock)); locks_init_lock(&lock->fl); lock->svid = ~(u32) 0; lock->fl.fl_pid = (pid_t)lock->svid; if (!(p = nlm4_decode_cookie(p, &argp->cookie)) || !(p = xdr_decode_string_inplace(p, &lock->caller, &lock->len, NLM_MAXSTRLEN)) || !(p = nlm4_decode_fh(p, &lock->fh)) || !(p = nlm4_decode_oh(p, &lock->oh))) return 0; argp->fsm_mode = ntohl(*p++); argp->fsm_access = ntohl(*p++); return xdr_argsize_check(rqstp, p); } int nlm4svc_encode_shareres(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp) { if (!(p = nlm4_encode_cookie(p, &resp->cookie))) return 0; *p++ = resp->status; *p++ = xdr_zero; /* sequence argument */ return xdr_ressize_check(rqstp, p); } int nlm4svc_encode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp) { if (!(p = nlm4_encode_cookie(p, &resp->cookie))) return 0; *p++ = resp->status; return xdr_ressize_check(rqstp, p); } int nlm4svc_decode_notify(struct svc_rqst *rqstp, __be32 *p, struct nlm_args *argp) { struct nlm_lock *lock = &argp->lock; if (!(p = xdr_decode_string_inplace(p, &lock->caller, &lock->len, NLM_MAXSTRLEN))) return 0; argp->state = ntohl(*p++); return xdr_argsize_check(rqstp, p); } int nlm4svc_decode_reboot(struct svc_rqst *rqstp, __be32 *p, struct nlm_reboot *argp) { if (!(p = xdr_decode_string_inplace(p, &argp->mon, &argp->len, SM_MAXSTRLEN))) return 0; argp->state = ntohl(*p++); memcpy(&argp->priv.data, p, sizeof(argp->priv.data)); p += XDR_QUADLEN(SM_PRIV_SIZE); return xdr_argsize_check(rqstp, p); } int nlm4svc_decode_res(struct svc_rqst *rqstp, __be32 *p, struct nlm_res *resp) { if (!(p = nlm4_decode_cookie(p, &resp->cookie))) return 0; resp->status = *p++; return xdr_argsize_check(rqstp, p); } int nlm4svc_decode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy) 
{ return xdr_argsize_check(rqstp, p); } int nlm4svc_encode_void(struct svc_rqst *rqstp, __be32 *p, void *dummy) { return xdr_ressize_check(rqstp, p); }
gpl-2.0
karltsou/fsl-imx6-linux
arch/sh/boards/mach-sdk7780/irq.c
13239
1186
/* * linux/arch/sh/boards/renesas/sdk7780/irq.c * * Renesas Technology Europe SDK7780 Support. * * Copyright (C) 2008 Nicholas Beck <nbeck@mpc-data.co.uk> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/irq.h> #include <linux/io.h> #include <mach/sdk7780.h> enum { UNUSED = 0, /* board specific interrupt sources */ SMC91C111, /* Ethernet controller */ }; static struct intc_vect fpga_vectors[] __initdata = { INTC_IRQ(SMC91C111, IRQ_ETHERNET), }; static struct intc_mask_reg fpga_mask_registers[] __initdata = { { 0, FPGA_IRQ0MR, 16, { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, SMC91C111, 0, 0, 0, 0 } }, }; static DECLARE_INTC_DESC(fpga_intc_desc, "sdk7780-irq", fpga_vectors, NULL, fpga_mask_registers, NULL, NULL); void __init init_sdk7780_IRQ(void) { printk(KERN_INFO "Using SDK7780 interrupt controller.\n"); __raw_writew(0xFFFF, FPGA_IRQ0MR); /* Setup IRL 0-3 */ __raw_writew(0x0003, FPGA_IMSR); plat_irq_setup_pins(IRQ_MODE_IRL3210); register_intc_controller(&fpga_intc_desc); }
gpl-2.0
juanfont/linux-sunxi
drivers/net/macvlan.c
440
22788
/* * Copyright (c) 2007 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * The code this is based on carried the following copyright notice: * --- * (C) Copyright 2001-2006 * Alex Zeffertt, Cambridge Broadband Ltd, ajz@cambridgebroadband.com * Re-worked by Ben Greear <greearb@candelatech.com> * --- */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/module.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/rculist.h> #include <linux/notifier.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/ethtool.h> #include <linux/if_arp.h> #include <linux/if_vlan.h> #include <linux/if_link.h> #include <linux/if_macvlan.h> #include <net/rtnetlink.h> #include <net/xfrm.h> #define MACVLAN_HASH_SIZE (1 << BITS_PER_BYTE) struct macvlan_port { struct net_device *dev; struct hlist_head vlan_hash[MACVLAN_HASH_SIZE]; struct list_head vlans; struct rcu_head rcu; bool passthru; int count; }; static void macvlan_port_destroy(struct net_device *dev); #define macvlan_port_get_rcu(dev) \ ((struct macvlan_port *) rcu_dereference(dev->rx_handler_data)) #define macvlan_port_get(dev) ((struct macvlan_port *) dev->rx_handler_data) #define macvlan_port_exists(dev) (dev->priv_flags & IFF_MACVLAN_PORT) static struct macvlan_dev *macvlan_hash_lookup(const struct macvlan_port *port, const unsigned char *addr) { struct macvlan_dev *vlan; struct hlist_node *n; hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[addr[5]], hlist) { if (!compare_ether_addr_64bits(vlan->dev->dev_addr, addr)) return vlan; } return NULL; } static void macvlan_hash_add(struct macvlan_dev *vlan) { struct macvlan_port *port = vlan->port; const unsigned char *addr = vlan->dev->dev_addr; 
hlist_add_head_rcu(&vlan->hlist, &port->vlan_hash[addr[5]]); } static void macvlan_hash_del(struct macvlan_dev *vlan, bool sync) { hlist_del_rcu(&vlan->hlist); if (sync) synchronize_rcu(); } static void macvlan_hash_change_addr(struct macvlan_dev *vlan, const unsigned char *addr) { macvlan_hash_del(vlan, true); /* Now that we are unhashed it is safe to change the device * address without confusing packet delivery. */ memcpy(vlan->dev->dev_addr, addr, ETH_ALEN); macvlan_hash_add(vlan); } static int macvlan_addr_busy(const struct macvlan_port *port, const unsigned char *addr) { /* Test to see if the specified multicast address is * currently in use by the underlying device or * another macvlan. */ if (!compare_ether_addr_64bits(port->dev->dev_addr, addr)) return 1; if (macvlan_hash_lookup(port, addr)) return 1; return 0; } static int macvlan_broadcast_one(struct sk_buff *skb, const struct macvlan_dev *vlan, const struct ethhdr *eth, bool local) { struct net_device *dev = vlan->dev; if (!skb) return NET_RX_DROP; if (local) return vlan->forward(dev, skb); skb->dev = dev; if (!compare_ether_addr_64bits(eth->h_dest, dev->broadcast)) skb->pkt_type = PACKET_BROADCAST; else skb->pkt_type = PACKET_MULTICAST; return vlan->receive(skb); } static void macvlan_broadcast(struct sk_buff *skb, const struct macvlan_port *port, struct net_device *src, enum macvlan_mode mode) { const struct ethhdr *eth = eth_hdr(skb); const struct macvlan_dev *vlan; struct hlist_node *n; struct sk_buff *nskb; unsigned int i; int err; if (skb->protocol == htons(ETH_P_PAUSE)) return; for (i = 0; i < MACVLAN_HASH_SIZE; i++) { hlist_for_each_entry_rcu(vlan, n, &port->vlan_hash[i], hlist) { if (vlan->dev == src || !(vlan->mode & mode)) continue; nskb = skb_clone(skb, GFP_ATOMIC); err = macvlan_broadcast_one(nskb, vlan, eth, mode == MACVLAN_MODE_BRIDGE); macvlan_count_rx(vlan, skb->len + ETH_HLEN, err == NET_RX_SUCCESS, 1); } } } /* called under rcu_read_lock() from netif_receive_skb */ static 
rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) { struct macvlan_port *port; struct sk_buff *skb = *pskb; const struct ethhdr *eth = eth_hdr(skb); const struct macvlan_dev *vlan; const struct macvlan_dev *src; struct net_device *dev; unsigned int len = 0; int ret = NET_RX_DROP; port = macvlan_port_get_rcu(skb->dev); if (is_multicast_ether_addr(eth->h_dest)) { skb = ip_check_defrag(skb, IP_DEFRAG_MACVLAN); if (!skb) return RX_HANDLER_CONSUMED; eth = eth_hdr(skb); src = macvlan_hash_lookup(port, eth->h_source); if (!src) /* frame comes from an external address */ macvlan_broadcast(skb, port, NULL, MACVLAN_MODE_PRIVATE | MACVLAN_MODE_VEPA | MACVLAN_MODE_PASSTHRU| MACVLAN_MODE_BRIDGE); else if (src->mode == MACVLAN_MODE_VEPA) /* flood to everyone except source */ macvlan_broadcast(skb, port, src->dev, MACVLAN_MODE_VEPA | MACVLAN_MODE_BRIDGE); else if (src->mode == MACVLAN_MODE_BRIDGE) /* * flood only to VEPA ports, bridge ports * already saw the frame on the way out. */ macvlan_broadcast(skb, port, src->dev, MACVLAN_MODE_VEPA); else { /* forward to original port. 
*/ vlan = src; ret = macvlan_broadcast_one(skb, vlan, eth, 0); goto out; } return RX_HANDLER_PASS; } if (port->passthru) vlan = list_first_or_null_rcu(&port->vlans, struct macvlan_dev, list); else vlan = macvlan_hash_lookup(port, eth->h_dest); if (vlan == NULL) return RX_HANDLER_PASS; dev = vlan->dev; if (unlikely(!(dev->flags & IFF_UP))) { kfree_skb(skb); return RX_HANDLER_CONSUMED; } len = skb->len + ETH_HLEN; skb = skb_share_check(skb, GFP_ATOMIC); if (!skb) goto out; skb->dev = dev; skb->pkt_type = PACKET_HOST; ret = vlan->receive(skb); out: macvlan_count_rx(vlan, len, ret == NET_RX_SUCCESS, 0); return RX_HANDLER_CONSUMED; } static int macvlan_queue_xmit(struct sk_buff *skb, struct net_device *dev) { const struct macvlan_dev *vlan = netdev_priv(dev); const struct macvlan_port *port = vlan->port; const struct macvlan_dev *dest; __u8 ip_summed = skb->ip_summed; if (vlan->mode == MACVLAN_MODE_BRIDGE) { const struct ethhdr *eth = (void *)skb->data; skb->ip_summed = CHECKSUM_UNNECESSARY; /* send to other bridge ports directly */ if (is_multicast_ether_addr(eth->h_dest)) { macvlan_broadcast(skb, port, dev, MACVLAN_MODE_BRIDGE); goto xmit_world; } dest = macvlan_hash_lookup(port, eth->h_dest); if (dest && dest->mode == MACVLAN_MODE_BRIDGE) { /* send to lowerdev first for its network taps */ dev_forward_skb(vlan->lowerdev, skb); return NET_XMIT_SUCCESS; } } xmit_world: skb->ip_summed = ip_summed; skb->dev = vlan->lowerdev; return dev_queue_xmit(skb); } netdev_tx_t macvlan_start_xmit(struct sk_buff *skb, struct net_device *dev) { unsigned int len = skb->len; int ret; const struct macvlan_dev *vlan = netdev_priv(dev); ret = macvlan_queue_xmit(skb, dev); if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { struct macvlan_pcpu_stats *pcpu_stats; pcpu_stats = this_cpu_ptr(vlan->pcpu_stats); u64_stats_update_begin(&pcpu_stats->syncp); pcpu_stats->tx_packets++; pcpu_stats->tx_bytes += len; u64_stats_update_end(&pcpu_stats->syncp); } else { 
this_cpu_inc(vlan->pcpu_stats->tx_dropped); } return ret; } EXPORT_SYMBOL_GPL(macvlan_start_xmit); static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev, unsigned short type, const void *daddr, const void *saddr, unsigned len) { const struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; return dev_hard_header(skb, lowerdev, type, daddr, saddr ? : dev->dev_addr, len); } static const struct header_ops macvlan_hard_header_ops = { .create = macvlan_hard_header, .rebuild = eth_rebuild_header, .parse = eth_header_parse, .cache = eth_header_cache, .cache_update = eth_header_cache_update, }; static int macvlan_open(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; int err; if (vlan->port->passthru) { dev_set_promiscuity(lowerdev, 1); goto hash_add; } err = -EBUSY; if (macvlan_addr_busy(vlan->port, dev->dev_addr)) goto out; err = dev_uc_add(lowerdev, dev->dev_addr); if (err < 0) goto out; if (dev->flags & IFF_ALLMULTI) { err = dev_set_allmulti(lowerdev, 1); if (err < 0) goto del_unicast; } hash_add: macvlan_hash_add(vlan); return 0; del_unicast: dev_uc_del(lowerdev, dev->dev_addr); out: return err; } static int macvlan_stop(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; if (vlan->port->passthru) { dev_set_promiscuity(lowerdev, -1); goto hash_del; } dev_mc_unsync(lowerdev, dev); if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(lowerdev, -1); dev_uc_del(lowerdev, dev->dev_addr); hash_del: macvlan_hash_del(vlan, !dev->dismantle); return 0; } static int macvlan_set_mac_address(struct net_device *dev, void *p) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; struct sockaddr *addr = p; int err; if (!is_valid_ether_addr(addr->sa_data)) return -EADDRNOTAVAIL; if (!(dev->flags & IFF_UP)) { /* Just copy in the new address */ dev->addr_assign_type &= 
~NET_ADDR_RANDOM; memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN); } else { /* Rehash and update the device filters */ if (macvlan_addr_busy(vlan->port, addr->sa_data)) return -EBUSY; err = dev_uc_add(lowerdev, addr->sa_data); if (err) return err; dev_uc_del(lowerdev, dev->dev_addr); macvlan_hash_change_addr(vlan, addr->sa_data); } return 0; } static void macvlan_change_rx_flags(struct net_device *dev, int change) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; if (change & IFF_ALLMULTI) dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1); } static void macvlan_set_multicast_list(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); dev_mc_sync(vlan->lowerdev, dev); } static int macvlan_change_mtu(struct net_device *dev, int new_mtu) { struct macvlan_dev *vlan = netdev_priv(dev); if (new_mtu < 68 || vlan->lowerdev->mtu < new_mtu) return -EINVAL; dev->mtu = new_mtu; return 0; } /* * macvlan network devices have devices nesting below it and are a special * "super class" of normal network devices; split their locks off into a * separate class since they always nest. 
*/ static struct lock_class_key macvlan_netdev_xmit_lock_key; static struct lock_class_key macvlan_netdev_addr_lock_key; #define MACVLAN_FEATURES \ (NETIF_F_SG | NETIF_F_ALL_CSUM | NETIF_F_HIGHDMA | NETIF_F_FRAGLIST | \ NETIF_F_GSO | NETIF_F_TSO | NETIF_F_UFO | NETIF_F_GSO_ROBUST | \ NETIF_F_TSO_ECN | NETIF_F_TSO6 | NETIF_F_GRO | NETIF_F_RXCSUM | \ NETIF_F_HW_VLAN_FILTER) #define MACVLAN_STATE_MASK \ ((1<<__LINK_STATE_NOCARRIER) | (1<<__LINK_STATE_DORMANT)) static void macvlan_set_lockdep_class_one(struct net_device *dev, struct netdev_queue *txq, void *_unused) { lockdep_set_class(&txq->_xmit_lock, &macvlan_netdev_xmit_lock_key); } static void macvlan_set_lockdep_class(struct net_device *dev) { lockdep_set_class(&dev->addr_list_lock, &macvlan_netdev_addr_lock_key); netdev_for_each_tx_queue(dev, macvlan_set_lockdep_class_one, NULL); } static int macvlan_init(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); const struct net_device *lowerdev = vlan->lowerdev; dev->state = (dev->state & ~MACVLAN_STATE_MASK) | (lowerdev->state & MACVLAN_STATE_MASK); dev->features = lowerdev->features & MACVLAN_FEATURES; dev->features |= NETIF_F_LLTX; dev->gso_max_size = lowerdev->gso_max_size; dev->iflink = lowerdev->ifindex; dev->hard_header_len = lowerdev->hard_header_len; macvlan_set_lockdep_class(dev); vlan->pcpu_stats = alloc_percpu(struct macvlan_pcpu_stats); if (!vlan->pcpu_stats) return -ENOMEM; return 0; } static void macvlan_uninit(struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); struct macvlan_port *port = vlan->port; free_percpu(vlan->pcpu_stats); port->count -= 1; if (!port->count) macvlan_port_destroy(port->dev); } static struct rtnl_link_stats64 *macvlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct macvlan_dev *vlan = netdev_priv(dev); if (vlan->pcpu_stats) { struct macvlan_pcpu_stats *p; u64 rx_packets, rx_bytes, rx_multicast, tx_packets, tx_bytes; u32 rx_errors = 0, tx_dropped = 0; 
unsigned int start; int i; for_each_possible_cpu(i) { p = per_cpu_ptr(vlan->pcpu_stats, i); do { start = u64_stats_fetch_begin_bh(&p->syncp); rx_packets = p->rx_packets; rx_bytes = p->rx_bytes; rx_multicast = p->rx_multicast; tx_packets = p->tx_packets; tx_bytes = p->tx_bytes; } while (u64_stats_fetch_retry_bh(&p->syncp, start)); stats->rx_packets += rx_packets; stats->rx_bytes += rx_bytes; stats->multicast += rx_multicast; stats->tx_packets += tx_packets; stats->tx_bytes += tx_bytes; /* rx_errors & tx_dropped are u32, updated * without syncp protection. */ rx_errors += p->rx_errors; tx_dropped += p->tx_dropped; } stats->rx_errors = rx_errors; stats->rx_dropped = rx_errors; stats->tx_dropped = tx_dropped; } return stats; } static int macvlan_vlan_rx_add_vid(struct net_device *dev, unsigned short vid) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; return vlan_vid_add(lowerdev, vid); } static int macvlan_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid) { struct macvlan_dev *vlan = netdev_priv(dev); struct net_device *lowerdev = vlan->lowerdev; vlan_vid_del(lowerdev, vid); return 0; } static void macvlan_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo) { snprintf(drvinfo->driver, 32, "macvlan"); snprintf(drvinfo->version, 32, "0.1"); } static int macvlan_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { const struct macvlan_dev *vlan = netdev_priv(dev); return __ethtool_get_settings(vlan->lowerdev, cmd); } static const struct ethtool_ops macvlan_ethtool_ops = { .get_link = ethtool_op_get_link, .get_settings = macvlan_ethtool_get_settings, .get_drvinfo = macvlan_ethtool_get_drvinfo, }; static const struct net_device_ops macvlan_netdev_ops = { .ndo_init = macvlan_init, .ndo_uninit = macvlan_uninit, .ndo_open = macvlan_open, .ndo_stop = macvlan_stop, .ndo_start_xmit = macvlan_start_xmit, .ndo_change_mtu = macvlan_change_mtu, .ndo_change_rx_flags = 
macvlan_change_rx_flags, .ndo_set_mac_address = macvlan_set_mac_address, .ndo_set_rx_mode = macvlan_set_multicast_list, .ndo_get_stats64 = macvlan_dev_get_stats64, .ndo_validate_addr = eth_validate_addr, .ndo_vlan_rx_add_vid = macvlan_vlan_rx_add_vid, .ndo_vlan_rx_kill_vid = macvlan_vlan_rx_kill_vid, }; void macvlan_common_setup(struct net_device *dev) { ether_setup(dev); dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->priv_flags |= IFF_UNICAST_FLT; dev->netdev_ops = &macvlan_netdev_ops; dev->destructor = free_netdev; dev->header_ops = &macvlan_hard_header_ops, dev->ethtool_ops = &macvlan_ethtool_ops; } EXPORT_SYMBOL_GPL(macvlan_common_setup); static void macvlan_setup(struct net_device *dev) { macvlan_common_setup(dev); dev->tx_queue_len = 0; } static int macvlan_port_create(struct net_device *dev) { struct macvlan_port *port; unsigned int i; int err; if (dev->type != ARPHRD_ETHER || dev->flags & IFF_LOOPBACK) return -EINVAL; port = kzalloc(sizeof(*port), GFP_KERNEL); if (port == NULL) return -ENOMEM; port->passthru = false; port->dev = dev; INIT_LIST_HEAD(&port->vlans); for (i = 0; i < MACVLAN_HASH_SIZE; i++) INIT_HLIST_HEAD(&port->vlan_hash[i]); err = netdev_rx_handler_register(dev, macvlan_handle_frame, port); if (err) kfree(port); else dev->priv_flags |= IFF_MACVLAN_PORT; return err; } static void macvlan_port_destroy(struct net_device *dev) { struct macvlan_port *port = macvlan_port_get(dev); dev->priv_flags &= ~IFF_MACVLAN_PORT; netdev_rx_handler_unregister(dev); kfree_rcu(port, rcu); } static int macvlan_validate(struct nlattr *tb[], struct nlattr *data[]) { if (tb[IFLA_ADDRESS]) { if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN) return -EINVAL; if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS]))) return -EADDRNOTAVAIL; } if (data && data[IFLA_MACVLAN_MODE]) { switch (nla_get_u32(data[IFLA_MACVLAN_MODE])) { case MACVLAN_MODE_PRIVATE: case MACVLAN_MODE_VEPA: case MACVLAN_MODE_BRIDGE: case MACVLAN_MODE_PASSTHRU: break; default: return 
-EINVAL; } } return 0; } int macvlan_common_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[], int (*receive)(struct sk_buff *skb), int (*forward)(struct net_device *dev, struct sk_buff *skb)) { struct macvlan_dev *vlan = netdev_priv(dev); struct macvlan_port *port; struct net_device *lowerdev; int err; if (!tb[IFLA_LINK]) return -EINVAL; lowerdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK])); if (lowerdev == NULL) return -ENODEV; /* When creating macvlans on top of other macvlans - use * the real device as the lowerdev. */ if (lowerdev->rtnl_link_ops == dev->rtnl_link_ops) { struct macvlan_dev *lowervlan = netdev_priv(lowerdev); lowerdev = lowervlan->lowerdev; } if (!tb[IFLA_MTU]) dev->mtu = lowerdev->mtu; else if (dev->mtu > lowerdev->mtu) return -EINVAL; if (!tb[IFLA_ADDRESS]) eth_hw_addr_random(dev); if (!macvlan_port_exists(lowerdev)) { err = macvlan_port_create(lowerdev); if (err < 0) return err; } port = macvlan_port_get(lowerdev); /* Only 1 macvlan device can be created in passthru mode */ if (port->passthru) return -EINVAL; vlan->lowerdev = lowerdev; vlan->dev = dev; vlan->port = port; vlan->receive = receive; vlan->forward = forward; vlan->mode = MACVLAN_MODE_VEPA; if (data && data[IFLA_MACVLAN_MODE]) vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); if (vlan->mode == MACVLAN_MODE_PASSTHRU) { if (port->count) return -EINVAL; port->passthru = true; memcpy(dev->dev_addr, lowerdev->dev_addr, ETH_ALEN); } port->count += 1; err = register_netdevice(dev); if (err < 0) goto destroy_port; list_add_tail_rcu(&vlan->list, &port->vlans); netif_stacked_transfer_operstate(lowerdev, dev); return 0; destroy_port: port->count -= 1; if (!port->count) macvlan_port_destroy(lowerdev); return err; } EXPORT_SYMBOL_GPL(macvlan_common_newlink); static int macvlan_newlink(struct net *src_net, struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { return macvlan_common_newlink(src_net, dev, tb, data, 
netif_rx, dev_forward_skb); } void macvlan_dellink(struct net_device *dev, struct list_head *head) { struct macvlan_dev *vlan = netdev_priv(dev); list_del_rcu(&vlan->list); unregister_netdevice_queue(dev, head); } EXPORT_SYMBOL_GPL(macvlan_dellink); static int macvlan_changelink(struct net_device *dev, struct nlattr *tb[], struct nlattr *data[]) { struct macvlan_dev *vlan = netdev_priv(dev); if (data && data[IFLA_MACVLAN_MODE]) vlan->mode = nla_get_u32(data[IFLA_MACVLAN_MODE]); return 0; } static size_t macvlan_get_size(const struct net_device *dev) { return nla_total_size(4); } static int macvlan_fill_info(struct sk_buff *skb, const struct net_device *dev) { struct macvlan_dev *vlan = netdev_priv(dev); NLA_PUT_U32(skb, IFLA_MACVLAN_MODE, vlan->mode); return 0; nla_put_failure: return -EMSGSIZE; } static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = { [IFLA_MACVLAN_MODE] = { .type = NLA_U32 }, }; int macvlan_link_register(struct rtnl_link_ops *ops) { /* common fields */ ops->priv_size = sizeof(struct macvlan_dev); ops->validate = macvlan_validate; ops->maxtype = IFLA_MACVLAN_MAX; ops->policy = macvlan_policy; ops->changelink = macvlan_changelink; ops->get_size = macvlan_get_size; ops->fill_info = macvlan_fill_info; return rtnl_link_register(ops); }; EXPORT_SYMBOL_GPL(macvlan_link_register); static struct rtnl_link_ops macvlan_link_ops = { .kind = "macvlan", .setup = macvlan_setup, .newlink = macvlan_newlink, .dellink = macvlan_dellink, }; static int macvlan_device_event(struct notifier_block *unused, unsigned long event, void *ptr) { struct net_device *dev = ptr; struct macvlan_dev *vlan, *next; struct macvlan_port *port; LIST_HEAD(list_kill); if (!macvlan_port_exists(dev)) return NOTIFY_DONE; port = macvlan_port_get(dev); switch (event) { case NETDEV_CHANGE: list_for_each_entry(vlan, &port->vlans, list) netif_stacked_transfer_operstate(vlan->lowerdev, vlan->dev); break; case NETDEV_FEAT_CHANGE: list_for_each_entry(vlan, &port->vlans, list) { 
vlan->dev->features = dev->features & MACVLAN_FEATURES; vlan->dev->gso_max_size = dev->gso_max_size; netdev_features_change(vlan->dev); } break; case NETDEV_UNREGISTER: /* twiddle thumbs on netns device moves */ if (dev->reg_state != NETREG_UNREGISTERING) break; list_for_each_entry_safe(vlan, next, &port->vlans, list) vlan->dev->rtnl_link_ops->dellink(vlan->dev, &list_kill); unregister_netdevice_many(&list_kill); list_del(&list_kill); break; case NETDEV_PRE_TYPE_CHANGE: /* Forbid underlaying device to change its type. */ return NOTIFY_BAD; } return NOTIFY_DONE; } static struct notifier_block macvlan_notifier_block __read_mostly = { .notifier_call = macvlan_device_event, }; static int __init macvlan_init_module(void) { int err; register_netdevice_notifier(&macvlan_notifier_block); err = macvlan_link_register(&macvlan_link_ops); if (err < 0) goto err1; return 0; err1: unregister_netdevice_notifier(&macvlan_notifier_block); return err; } static void __exit macvlan_cleanup_module(void) { rtnl_link_unregister(&macvlan_link_ops); unregister_netdevice_notifier(&macvlan_notifier_block); } module_init(macvlan_init_module); module_exit(macvlan_cleanup_module); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>"); MODULE_DESCRIPTION("Driver for MAC address based VLANs"); MODULE_ALIAS_RTNL_LINK("macvlan");
gpl-2.0
SM-G920P/SM-N920
net/netfilter/xt_NFQUEUE.c
2232
5370
/* iptables module for using new netfilter netlink queue * * (C) 2005 by Harald Welte <laforge@netfilter.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/ip.h> #include <linux/ipv6.h> #include <linux/jhash.h> #include <linux/netfilter.h> #include <linux/netfilter_arp.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_NFQUEUE.h> MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>"); MODULE_DESCRIPTION("Xtables: packet forwarding to netlink"); MODULE_LICENSE("GPL"); MODULE_ALIAS("ipt_NFQUEUE"); MODULE_ALIAS("ip6t_NFQUEUE"); MODULE_ALIAS("arpt_NFQUEUE"); static u32 jhash_initval __read_mostly; static bool rnd_inited __read_mostly; static unsigned int nfqueue_tg(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_NFQ_info *tinfo = par->targinfo; return NF_QUEUE_NR(tinfo->queuenum); } static u32 hash_v4(const struct sk_buff *skb) { const struct iphdr *iph = ip_hdr(skb); /* packets in either direction go into same queue */ if ((__force u32)iph->saddr < (__force u32)iph->daddr) return jhash_3words((__force u32)iph->saddr, (__force u32)iph->daddr, iph->protocol, jhash_initval); return jhash_3words((__force u32)iph->daddr, (__force u32)iph->saddr, iph->protocol, jhash_initval); } #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) static u32 hash_v6(const struct sk_buff *skb) { const struct ipv6hdr *ip6h = ipv6_hdr(skb); u32 a, b, c; if ((__force u32)ip6h->saddr.s6_addr32[3] < (__force u32)ip6h->daddr.s6_addr32[3]) { a = (__force u32) ip6h->saddr.s6_addr32[3]; b = (__force u32) ip6h->daddr.s6_addr32[3]; } else { b = (__force u32) ip6h->saddr.s6_addr32[3]; a = (__force u32) ip6h->daddr.s6_addr32[3]; } if ((__force u32)ip6h->saddr.s6_addr32[1] < (__force u32)ip6h->daddr.s6_addr32[1]) c = (__force u32) ip6h->saddr.s6_addr32[1]; else c = 
(__force u32) ip6h->daddr.s6_addr32[1]; return jhash_3words(a, b, c, jhash_initval); } #endif static u32 nfqueue_hash(const struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_NFQ_info_v1 *info = par->targinfo; u32 queue = info->queuenum; if (par->family == NFPROTO_IPV4) queue += ((u64) hash_v4(skb) * info->queues_total) >> 32; #if IS_ENABLED(CONFIG_IP6_NF_IPTABLES) else if (par->family == NFPROTO_IPV6) queue += ((u64) hash_v6(skb) * info->queues_total) >> 32; #endif return queue; } static unsigned int nfqueue_tg_v1(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_NFQ_info_v1 *info = par->targinfo; u32 queue = info->queuenum; if (info->queues_total > 1) queue = nfqueue_hash(skb, par); return NF_QUEUE_NR(queue); } static unsigned int nfqueue_tg_v2(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_NFQ_info_v2 *info = par->targinfo; unsigned int ret = nfqueue_tg_v1(skb, par); if (info->bypass) ret |= NF_VERDICT_FLAG_QUEUE_BYPASS; return ret; } static int nfqueue_tg_check(const struct xt_tgchk_param *par) { const struct xt_NFQ_info_v3 *info = par->targinfo; u32 maxid; if (unlikely(!rnd_inited)) { get_random_bytes(&jhash_initval, sizeof(jhash_initval)); rnd_inited = true; } if (info->queues_total == 0) { pr_err("NFQUEUE: number of total queues is 0\n"); return -EINVAL; } maxid = info->queues_total - 1 + info->queuenum; if (maxid > 0xffff) { pr_err("NFQUEUE: number of queues (%u) out of range (got %u)\n", info->queues_total, maxid); return -ERANGE; } if (par->target->revision == 2 && info->flags > 1) return -EINVAL; if (par->target->revision == 3 && info->flags & ~NFQ_FLAG_MASK) return -EINVAL; return 0; } static unsigned int nfqueue_tg_v3(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_NFQ_info_v3 *info = par->targinfo; u32 queue = info->queuenum; if (info->queues_total > 1) { if (info->flags & NFQ_FLAG_CPU_FANOUT) { int cpu = smp_processor_id(); queue = info->queuenum + cpu 
% info->queues_total; } else queue = nfqueue_hash(skb, par); } return NF_QUEUE_NR(queue); } static struct xt_target nfqueue_tg_reg[] __read_mostly = { { .name = "NFQUEUE", .family = NFPROTO_UNSPEC, .target = nfqueue_tg, .targetsize = sizeof(struct xt_NFQ_info), .me = THIS_MODULE, }, { .name = "NFQUEUE", .revision = 1, .family = NFPROTO_UNSPEC, .checkentry = nfqueue_tg_check, .target = nfqueue_tg_v1, .targetsize = sizeof(struct xt_NFQ_info_v1), .me = THIS_MODULE, }, { .name = "NFQUEUE", .revision = 2, .family = NFPROTO_UNSPEC, .checkentry = nfqueue_tg_check, .target = nfqueue_tg_v2, .targetsize = sizeof(struct xt_NFQ_info_v2), .me = THIS_MODULE, }, { .name = "NFQUEUE", .revision = 3, .family = NFPROTO_UNSPEC, .checkentry = nfqueue_tg_check, .target = nfqueue_tg_v3, .targetsize = sizeof(struct xt_NFQ_info_v3), .me = THIS_MODULE, }, }; static int __init nfqueue_tg_init(void) { return xt_register_targets(nfqueue_tg_reg, ARRAY_SIZE(nfqueue_tg_reg)); } static void __exit nfqueue_tg_exit(void) { xt_unregister_targets(nfqueue_tg_reg, ARRAY_SIZE(nfqueue_tg_reg)); } module_init(nfqueue_tg_init); module_exit(nfqueue_tg_exit);
gpl-2.0
Quallenauge/kernel-archosg9
net/ax25/ax25_in.c
4280
10813
/* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * Copyright (C) Alan Cox GW4PTS (alan@lxorguk.ukuu.org.uk) * Copyright (C) Jonathan Naylor G4KLX (g4klx@g4klx.demon.co.uk) * Copyright (C) Joerg Reuter DL1BKE (jreuter@yaina.de) * Copyright (C) Hans-Joachim Hetscher DD8NE (dd8ne@bnv-bamberg.de) */ #include <linux/errno.h> #include <linux/types.h> #include <linux/socket.h> #include <linux/in.h> #include <linux/kernel.h> #include <linux/timer.h> #include <linux/string.h> #include <linux/sockios.h> #include <linux/net.h> #include <linux/slab.h> #include <net/ax25.h> #include <linux/inet.h> #include <linux/netdevice.h> #include <linux/skbuff.h> #include <linux/netfilter.h> #include <net/sock.h> #include <net/tcp_states.h> #include <asm/uaccess.h> #include <asm/system.h> #include <linux/fcntl.h> #include <linux/mm.h> #include <linux/interrupt.h> /* * Given a fragment, queue it on the fragment queue and if the fragment * is complete, send it back to ax25_rx_iframe. */ static int ax25_rx_fragment(ax25_cb *ax25, struct sk_buff *skb) { struct sk_buff *skbn, *skbo; if (ax25->fragno != 0) { if (!(*skb->data & AX25_SEG_FIRST)) { if ((ax25->fragno - 1) == (*skb->data & AX25_SEG_REM)) { /* Enqueue fragment */ ax25->fragno = *skb->data & AX25_SEG_REM; skb_pull(skb, 1); /* skip fragno */ ax25->fraglen += skb->len; skb_queue_tail(&ax25->frag_queue, skb); /* Last fragment received ? 
*/ if (ax25->fragno == 0) { skbn = alloc_skb(AX25_MAX_HEADER_LEN + ax25->fraglen, GFP_ATOMIC); if (!skbn) { skb_queue_purge(&ax25->frag_queue); return 1; } skb_reserve(skbn, AX25_MAX_HEADER_LEN); skbn->dev = ax25->ax25_dev->dev; skb_reset_network_header(skbn); skb_reset_transport_header(skbn); /* Copy data from the fragments */ while ((skbo = skb_dequeue(&ax25->frag_queue)) != NULL) { skb_copy_from_linear_data(skbo, skb_put(skbn, skbo->len), skbo->len); kfree_skb(skbo); } ax25->fraglen = 0; if (ax25_rx_iframe(ax25, skbn) == 0) kfree_skb(skbn); } return 1; } } } else { /* First fragment received */ if (*skb->data & AX25_SEG_FIRST) { skb_queue_purge(&ax25->frag_queue); ax25->fragno = *skb->data & AX25_SEG_REM; skb_pull(skb, 1); /* skip fragno */ ax25->fraglen = skb->len; skb_queue_tail(&ax25->frag_queue, skb); return 1; } } return 0; } /* * This is where all valid I frames are sent to, to be dispatched to * whichever protocol requires them. */ int ax25_rx_iframe(ax25_cb *ax25, struct sk_buff *skb) { int (*func)(struct sk_buff *, ax25_cb *); unsigned char pid; int queued = 0; if (skb == NULL) return 0; ax25_start_idletimer(ax25); pid = *skb->data; if (pid == AX25_P_IP) { /* working around a TCP bug to keep additional listeners * happy. TCP re-uses the buffer and destroys the original * content. 
*/ struct sk_buff *skbn = skb_copy(skb, GFP_ATOMIC); if (skbn != NULL) { kfree_skb(skb); skb = skbn; } skb_pull(skb, 1); /* Remove PID */ skb->mac_header = skb->network_header; skb_reset_network_header(skb); skb->dev = ax25->ax25_dev->dev; skb->pkt_type = PACKET_HOST; skb->protocol = htons(ETH_P_IP); netif_rx(skb); return 1; } if (pid == AX25_P_SEGMENT) { skb_pull(skb, 1); /* Remove PID */ return ax25_rx_fragment(ax25, skb); } if ((func = ax25_protocol_function(pid)) != NULL) { skb_pull(skb, 1); /* Remove PID */ return (*func)(skb, ax25); } if (ax25->sk != NULL && ax25->ax25_dev->values[AX25_VALUES_CONMODE] == 2) { if ((!ax25->pidincl && ax25->sk->sk_protocol == pid) || ax25->pidincl) { if (sock_queue_rcv_skb(ax25->sk, skb) == 0) queued = 1; else ax25->condition |= AX25_COND_OWN_RX_BUSY; } } return queued; } /* * Higher level upcall for a LAPB frame */ static int ax25_process_rx_frame(ax25_cb *ax25, struct sk_buff *skb, int type, int dama) { int queued = 0; if (ax25->state == AX25_STATE_0) return 0; switch (ax25->ax25_dev->values[AX25_VALUES_PROTOCOL]) { case AX25_PROTO_STD_SIMPLEX: case AX25_PROTO_STD_DUPLEX: queued = ax25_std_frame_in(ax25, skb, type); break; #ifdef CONFIG_AX25_DAMA_SLAVE case AX25_PROTO_DAMA_SLAVE: if (dama || ax25->ax25_dev->dama.slave) queued = ax25_ds_frame_in(ax25, skb, type); else queued = ax25_std_frame_in(ax25, skb, type); break; #endif } return queued; } static int ax25_rcv(struct sk_buff *skb, struct net_device *dev, ax25_address *dev_addr, struct packet_type *ptype) { ax25_address src, dest, *next_digi = NULL; int type = 0, mine = 0, dama; struct sock *make, *sk; ax25_digi dp, reverse_dp; ax25_cb *ax25; ax25_dev *ax25_dev; /* * Process the AX.25/LAPB frame. */ skb_reset_transport_header(skb); if ((ax25_dev = ax25_dev_ax25dev(dev)) == NULL) goto free; /* * Parse the address header. */ if (ax25_addr_parse(skb->data, skb->len, &src, &dest, &dp, &type, &dama) == NULL) goto free; /* * Ours perhaps ? 
*/ if (dp.lastrepeat + 1 < dp.ndigi) /* Not yet digipeated completely */ next_digi = &dp.calls[dp.lastrepeat + 1]; /* * Pull of the AX.25 headers leaving the CTRL/PID bytes */ skb_pull(skb, ax25_addr_size(&dp)); /* For our port addresses ? */ if (ax25cmp(&dest, dev_addr) == 0 && dp.lastrepeat + 1 == dp.ndigi) mine = 1; /* Also match on any registered callsign from L3/4 */ if (!mine && ax25_listen_mine(&dest, dev) && dp.lastrepeat + 1 == dp.ndigi) mine = 1; /* UI frame - bypass LAPB processing */ if ((*skb->data & ~0x10) == AX25_UI && dp.lastrepeat + 1 == dp.ndigi) { skb_set_transport_header(skb, 2); /* skip control and pid */ ax25_send_to_raw(&dest, skb, skb->data[1]); if (!mine && ax25cmp(&dest, (ax25_address *)dev->broadcast) != 0) goto free; /* Now we are pointing at the pid byte */ switch (skb->data[1]) { case AX25_P_IP: skb_pull(skb,2); /* drop PID/CTRL */ skb_reset_transport_header(skb); skb_reset_network_header(skb); skb->dev = dev; skb->pkt_type = PACKET_HOST; skb->protocol = htons(ETH_P_IP); netif_rx(skb); break; case AX25_P_ARP: skb_pull(skb,2); skb_reset_transport_header(skb); skb_reset_network_header(skb); skb->dev = dev; skb->pkt_type = PACKET_HOST; skb->protocol = htons(ETH_P_ARP); netif_rx(skb); break; case AX25_P_TEXT: /* Now find a suitable dgram socket */ sk = ax25_get_socket(&dest, &src, SOCK_DGRAM); if (sk != NULL) { bh_lock_sock(sk); if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) { kfree_skb(skb); } else { /* * Remove the control and PID. */ skb_pull(skb, 2); if (sock_queue_rcv_skb(sk, skb) != 0) kfree_skb(skb); } bh_unlock_sock(sk); sock_put(sk); } else { kfree_skb(skb); } break; default: kfree_skb(skb); /* Will scan SOCK_AX25 RAW sockets */ break; } return 0; } /* * Is connected mode supported on this device ? * If not, should we DM the incoming frame (except DMs) or * silently ignore them. For now we stay quiet. 
*/ if (ax25_dev->values[AX25_VALUES_CONMODE] == 0) goto free; /* LAPB */ /* AX.25 state 1-4 */ ax25_digi_invert(&dp, &reverse_dp); if ((ax25 = ax25_find_cb(&dest, &src, &reverse_dp, dev)) != NULL) { /* * Process the frame. If it is queued up internally it * returns one otherwise we free it immediately. This * routine itself wakes the user context layers so we do * no further work */ if (ax25_process_rx_frame(ax25, skb, type, dama) == 0) kfree_skb(skb); ax25_cb_put(ax25); return 0; } /* AX.25 state 0 (disconnected) */ /* a) received not a SABM(E) */ if ((*skb->data & ~AX25_PF) != AX25_SABM && (*skb->data & ~AX25_PF) != AX25_SABME) { /* * Never reply to a DM. Also ignore any connects for * addresses that are not our interfaces and not a socket. */ if ((*skb->data & ~AX25_PF) != AX25_DM && mine) ax25_return_dm(dev, &src, &dest, &dp); goto free; } /* b) received SABM(E) */ if (dp.lastrepeat + 1 == dp.ndigi) sk = ax25_find_listener(&dest, 0, dev, SOCK_SEQPACKET); else sk = ax25_find_listener(next_digi, 1, dev, SOCK_SEQPACKET); if (sk != NULL) { bh_lock_sock(sk); if (sk_acceptq_is_full(sk) || (make = ax25_make_new(sk, ax25_dev)) == NULL) { if (mine) ax25_return_dm(dev, &src, &dest, &dp); kfree_skb(skb); bh_unlock_sock(sk); sock_put(sk); return 0; } ax25 = ax25_sk(make); skb_set_owner_r(skb, make); skb_queue_head(&sk->sk_receive_queue, skb); make->sk_state = TCP_ESTABLISHED; sk->sk_ack_backlog++; bh_unlock_sock(sk); } else { if (!mine) goto free; if ((ax25 = ax25_create_cb()) == NULL) { ax25_return_dm(dev, &src, &dest, &dp); goto free; } ax25_fillin_cb(ax25, ax25_dev); } ax25->source_addr = dest; ax25->dest_addr = src; /* * Sort out any digipeated paths. 
*/ if (dp.ndigi && !ax25->digipeat && (ax25->digipeat = kmalloc(sizeof(ax25_digi), GFP_ATOMIC)) == NULL) { kfree_skb(skb); ax25_destroy_socket(ax25); if (sk) sock_put(sk); return 0; } if (dp.ndigi == 0) { kfree(ax25->digipeat); ax25->digipeat = NULL; } else { /* Reverse the source SABM's path */ memcpy(ax25->digipeat, &reverse_dp, sizeof(ax25_digi)); } if ((*skb->data & ~AX25_PF) == AX25_SABME) { ax25->modulus = AX25_EMODULUS; ax25->window = ax25_dev->values[AX25_VALUES_EWINDOW]; } else { ax25->modulus = AX25_MODULUS; ax25->window = ax25_dev->values[AX25_VALUES_WINDOW]; } ax25_send_control(ax25, AX25_UA, AX25_POLLON, AX25_RESPONSE); #ifdef CONFIG_AX25_DAMA_SLAVE if (dama && ax25->ax25_dev->values[AX25_VALUES_PROTOCOL] == AX25_PROTO_DAMA_SLAVE) ax25_dama_on(ax25); #endif ax25->state = AX25_STATE_3; ax25_cb_add(ax25); ax25_start_heartbeat(ax25); ax25_start_t3timer(ax25); ax25_start_idletimer(ax25); if (sk) { if (!sock_flag(sk, SOCK_DEAD)) sk->sk_data_ready(sk, skb->len); sock_put(sk); } else { free: kfree_skb(skb); } return 0; } /* * Receive an AX.25 frame via a SLIP interface. */ int ax25_kiss_rcv(struct sk_buff *skb, struct net_device *dev, struct packet_type *ptype, struct net_device *orig_dev) { skb_orphan(skb); if (!net_eq(dev_net(dev), &init_net)) { kfree_skb(skb); return 0; } if ((*skb->data & 0x0F) != 0) { kfree_skb(skb); /* Not a KISS data frame */ return 0; } skb_pull(skb, AX25_KISS_HEADER_LEN); /* Remove the KISS byte */ return ax25_rcv(skb, dev, (ax25_address *)dev->dev_addr, ptype); }
gpl-2.0
zarboz/android_kernel_htc_dlx
virt/arch/powerpc/mm/init_64.c
4536
8668
/* * PowerPC version * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org) * * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au) * and Cort Dougan (PReP) (cort@cs.nmt.edu) * Copyright (C) 1996 Paul Mackerras * * Derived from "arch/i386/mm/init.c" * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds * * Dave Engebretsen <engebret@us.ibm.com> * Rework for PPC64 port. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * */ #undef DEBUG #include <linux/signal.h> #include <linux/sched.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/types.h> #include <linux/mman.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/stddef.h> #include <linux/vmalloc.h> #include <linux/init.h> #include <linux/delay.h> #include <linux/bootmem.h> #include <linux/highmem.h> #include <linux/idr.h> #include <linux/nodemask.h> #include <linux/module.h> #include <linux/poison.h> #include <linux/memblock.h> #include <linux/hugetlb.h> #include <linux/slab.h> #include <asm/pgalloc.h> #include <asm/page.h> #include <asm/prom.h> #include <asm/rtas.h> #include <asm/io.h> #include <asm/mmu_context.h> #include <asm/pgtable.h> #include <asm/mmu.h> #include <asm/uaccess.h> #include <asm/smp.h> #include <asm/machdep.h> #include <asm/tlb.h> #include <asm/eeh.h> #include <asm/processor.h> #include <asm/mmzone.h> #include <asm/cputable.h> #include <asm/sections.h> #include <asm/iommu.h> #include <asm/abs_addr.h> #include <asm/vdso.h> #include "mmu_decl.h" #ifdef CONFIG_PPC_STD_MMU_64 #if PGTABLE_RANGE > USER_VSID_RANGE #warning Limited user VSID range means pagetable space is wasted #endif #if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE) #warning TASK_SIZE is smaller than it needs to be. 
#endif #endif /* CONFIG_PPC_STD_MMU_64 */ phys_addr_t memstart_addr = ~0; EXPORT_SYMBOL_GPL(memstart_addr); phys_addr_t kernstart_addr; EXPORT_SYMBOL_GPL(kernstart_addr); static void pgd_ctor(void *addr) { memset(addr, 0, PGD_TABLE_SIZE); } static void pmd_ctor(void *addr) { memset(addr, 0, PMD_TABLE_SIZE); } struct kmem_cache *pgtable_cache[MAX_PGTABLE_INDEX_SIZE]; /* * Create a kmem_cache() for pagetables. This is not used for PTE * pages - they're linked to struct page, come from the normal free * pages pool and have a different entry size (see real_pte_t) to * everything else. Caches created by this function are used for all * the higher level pagetables, and for hugepage pagetables. */ void pgtable_cache_add(unsigned shift, void (*ctor)(void *)) { char *name; unsigned long table_size = sizeof(void *) << shift; unsigned long align = table_size; /* When batching pgtable pointers for RCU freeing, we store * the index size in the low bits. Table alignment must be * big enough to fit it. * * Likewise, hugeapge pagetable pointers contain a (different) * shift value in the low bits. All tables must be aligned so * as to leave enough 0 bits in the address to contain it. */ unsigned long minalign = max(MAX_PGTABLE_INDEX_SIZE + 1, HUGEPD_SHIFT_MASK + 1); struct kmem_cache *new; /* It would be nice if this was a BUILD_BUG_ON(), but at the * moment, gcc doesn't seem to recognize is_power_of_2 as a * constant expression, so so much for that. 
*/ BUG_ON(!is_power_of_2(minalign)); BUG_ON((shift < 1) || (shift > MAX_PGTABLE_INDEX_SIZE)); if (PGT_CACHE(shift)) return; /* Already have a cache of this size */ align = max_t(unsigned long, align, minalign); name = kasprintf(GFP_KERNEL, "pgtable-2^%d", shift); new = kmem_cache_create(name, table_size, align, 0, ctor); PGT_CACHE(shift) = new; pr_debug("Allocated pgtable cache for order %d\n", shift); } void pgtable_cache_init(void) { pgtable_cache_add(PGD_INDEX_SIZE, pgd_ctor); pgtable_cache_add(PMD_INDEX_SIZE, pmd_ctor); if (!PGT_CACHE(PGD_INDEX_SIZE) || !PGT_CACHE(PMD_INDEX_SIZE)) panic("Couldn't allocate pgtable caches"); /* In all current configs, when the PUD index exists it's the * same size as either the pgd or pmd index. Verify that the * initialization above has also created a PUD cache. This * will need re-examiniation if we add new possibilities for * the pagetable layout. */ BUG_ON(PUD_INDEX_SIZE && !PGT_CACHE(PUD_INDEX_SIZE)); } #ifdef CONFIG_SPARSEMEM_VMEMMAP /* * Given an address within the vmemmap, determine the pfn of the page that * represents the start of the section it is within. Note that we have to * do this by hand as the proffered address may not be correctly aligned. * Subtraction of non-aligned pointers produces undefined results. */ static unsigned long __meminit vmemmap_section_start(unsigned long page) { unsigned long offset = page - ((unsigned long)(vmemmap)); /* Return the pfn of the start of the section. */ return (offset / sizeof(struct page)) & PAGE_SECTION_MASK; } /* * Check if this vmemmap page is already initialised. If any section * which overlaps this vmemmap page is initialised then this page is * initialised already. 
*/ static int __meminit vmemmap_populated(unsigned long start, int page_size) { unsigned long end = start + page_size; for (; start < end; start += (PAGES_PER_SECTION * sizeof(struct page))) if (pfn_valid(vmemmap_section_start(start))) return 1; return 0; } /* On hash-based CPUs, the vmemmap is bolted in the hash table. * * On Book3E CPUs, the vmemmap is currently mapped in the top half of * the vmalloc space using normal page tables, though the size of * pages encoded in the PTEs can be different */ #ifdef CONFIG_PPC_BOOK3E static void __meminit vmemmap_create_mapping(unsigned long start, unsigned long page_size, unsigned long phys) { /* Create a PTE encoding without page size */ unsigned long i, flags = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_KERNEL_RW; /* PTEs only contain page size encodings up to 32M */ BUG_ON(mmu_psize_defs[mmu_vmemmap_psize].enc > 0xf); /* Encode the size in the PTE */ flags |= mmu_psize_defs[mmu_vmemmap_psize].enc << 8; /* For each PTE for that area, map things. Note that we don't * increment phys because all PTEs are of the large size and * thus must have the low bits clear */ for (i = 0; i < page_size; i += PAGE_SIZE) BUG_ON(map_kernel_page(start + i, phys, flags)); } #else /* CONFIG_PPC_BOOK3E */ static void __meminit vmemmap_create_mapping(unsigned long start, unsigned long page_size, unsigned long phys) { int mapped = htab_bolt_mapping(start, start + page_size, phys, PAGE_KERNEL, mmu_vmemmap_psize, mmu_kernel_ssize); BUG_ON(mapped < 0); } #endif /* CONFIG_PPC_BOOK3E */ struct vmemmap_backing *vmemmap_list; static __meminit struct vmemmap_backing * vmemmap_list_alloc(int node) { static struct vmemmap_backing *next; static int num_left; /* allocate a page when required and hand out chunks */ if (!next || !num_left) { next = vmemmap_alloc_block(PAGE_SIZE, node); if (unlikely(!next)) { WARN_ON(1); return NULL; } num_left = PAGE_SIZE / sizeof(struct vmemmap_backing); } num_left--; return next++; } static __meminit void 
vmemmap_list_populate(unsigned long phys, unsigned long start, int node) { struct vmemmap_backing *vmem_back; vmem_back = vmemmap_list_alloc(node); if (unlikely(!vmem_back)) { WARN_ON(1); return; } vmem_back->phys = phys; vmem_back->virt_addr = start; vmem_back->list = vmemmap_list; vmemmap_list = vmem_back; } int __meminit vmemmap_populate(struct page *start_page, unsigned long nr_pages, int node) { unsigned long start = (unsigned long)start_page; unsigned long end = (unsigned long)(start_page + nr_pages); unsigned long page_size = 1 << mmu_psize_defs[mmu_vmemmap_psize].shift; /* Align to the page size of the linear mapping. */ start = _ALIGN_DOWN(start, page_size); pr_debug("vmemmap_populate page %p, %ld pages, node %d\n", start_page, nr_pages, node); pr_debug(" -> map %lx..%lx\n", start, end); for (; start < end; start += page_size) { void *p; if (vmemmap_populated(start, page_size)) continue; p = vmemmap_alloc_block(page_size, node); if (!p) return -ENOMEM; vmemmap_list_populate(__pa(p), start, node); pr_debug(" * %016lx..%016lx allocated at %p\n", start, start + page_size, p); vmemmap_create_mapping(start, page_size, __pa(p)); } return 0; } #endif /* CONFIG_SPARSEMEM_VMEMMAP */
gpl-2.0
flexdroid/kernel
arch/arm/mach-msm/htc_battery.c
4792
19312
/* arch/arm/mach-msm/htc_battery.c * * Copyright (C) 2008 HTC Corporation. * Copyright (C) 2008 Google, Inc. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/err.h> #include <linux/power_supply.h> #include <linux/platform_device.h> #include <linux/debugfs.h> #include <linux/wakelock.h> #include <asm/gpio.h> #include <mach/msm_rpcrouter.h> #include <mach/board.h> static struct wake_lock vbus_wake_lock; #define TRACE_BATT 0 #if TRACE_BATT #define BATT(x...) printk(KERN_INFO "[BATT] " x) #else #define BATT(x...) 
do {} while (0) #endif /* rpc related */ #define APP_BATT_PDEV_NAME "rs30100001" #define APP_BATT_PROG 0x30100001 #define APP_BATT_VER 0 #define HTC_PROCEDURE_BATTERY_NULL 0 #define HTC_PROCEDURE_GET_BATT_LEVEL 1 #define HTC_PROCEDURE_GET_BATT_INFO 2 #define HTC_PROCEDURE_GET_CABLE_STATUS 3 #define HTC_PROCEDURE_SET_BATT_DELTA 4 /* module debugger */ #define HTC_BATTERY_DEBUG 1 #define BATTERY_PREVENTION 1 /* Enable this will shut down if no battery */ #define ENABLE_BATTERY_DETECTION 0 #define GPIO_BATTERY_DETECTION 21 #define GPIO_BATTERY_CHARGER_EN 128 /* Charge current selection */ #define GPIO_BATTERY_CHARGER_CURRENT 129 typedef enum { DISABLE = 0, ENABLE_SLOW_CHG, ENABLE_FAST_CHG } batt_ctl_t; /* This order is the same as htc_power_supplies[] * And it's also the same as htc_cable_status_update() */ typedef enum { CHARGER_BATTERY = 0, CHARGER_USB, CHARGER_AC } charger_type_t; struct battery_info_reply { u32 batt_id; /* Battery ID from ADC */ u32 batt_vol; /* Battery voltage from ADC */ u32 batt_temp; /* Battery Temperature (C) from formula and ADC */ u32 batt_current; /* Battery current from ADC */ u32 level; /* formula */ u32 charging_source; /* 0: no cable, 1:usb, 2:AC */ u32 charging_enabled; /* 0: Disable, 1: Enable */ u32 full_bat; /* Full capacity of battery (mAh) */ }; struct htc_battery_info { int present; unsigned long update_time; /* lock to protect the battery info */ struct mutex lock; /* lock held while calling the arm9 to query the battery info */ struct mutex rpc_lock; struct battery_info_reply rep; }; static struct msm_rpc_endpoint *endpoint; static struct htc_battery_info htc_batt_info; static unsigned int cache_time = 1000; static int htc_battery_initial = 0; static enum power_supply_property htc_battery_properties[] = { POWER_SUPPLY_PROP_STATUS, POWER_SUPPLY_PROP_HEALTH, POWER_SUPPLY_PROP_PRESENT, POWER_SUPPLY_PROP_TECHNOLOGY, POWER_SUPPLY_PROP_CAPACITY, }; static enum power_supply_property htc_power_properties[] = { 
POWER_SUPPLY_PROP_ONLINE, }; static char *supply_list[] = { "battery", }; /* HTC dedicated attributes */ static ssize_t htc_battery_show_property(struct device *dev, struct device_attribute *attr, char *buf); static int htc_power_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val); static int htc_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val); static struct power_supply htc_power_supplies[] = { { .name = "battery", .type = POWER_SUPPLY_TYPE_BATTERY, .properties = htc_battery_properties, .num_properties = ARRAY_SIZE(htc_battery_properties), .get_property = htc_battery_get_property, }, { .name = "usb", .type = POWER_SUPPLY_TYPE_USB, .supplied_to = supply_list, .num_supplicants = ARRAY_SIZE(supply_list), .properties = htc_power_properties, .num_properties = ARRAY_SIZE(htc_power_properties), .get_property = htc_power_get_property, }, { .name = "ac", .type = POWER_SUPPLY_TYPE_MAINS, .supplied_to = supply_list, .num_supplicants = ARRAY_SIZE(supply_list), .properties = htc_power_properties, .num_properties = ARRAY_SIZE(htc_power_properties), .get_property = htc_power_get_property, }, }; /* -------------------------------------------------------------------------- */ #if defined(CONFIG_DEBUG_FS) int htc_battery_set_charging(batt_ctl_t ctl); static int batt_debug_set(void *data, u64 val) { return htc_battery_set_charging((batt_ctl_t) val); } static int batt_debug_get(void *data, u64 *val) { return -ENOSYS; } DEFINE_SIMPLE_ATTRIBUTE(batt_debug_fops, batt_debug_get, batt_debug_set, "%llu\n"); static int __init batt_debug_init(void) { struct dentry *dent; dent = debugfs_create_dir("htc_battery", 0); if (IS_ERR(dent)) return PTR_ERR(dent); debugfs_create_file("charger_state", 0644, dent, NULL, &batt_debug_fops); return 0; } device_initcall(batt_debug_init); #endif static int init_batt_gpio(void) { if (gpio_request(GPIO_BATTERY_DETECTION, "batt_detect") < 0) goto 
gpio_failed; if (gpio_request(GPIO_BATTERY_CHARGER_EN, "charger_en") < 0) goto gpio_failed; if (gpio_request(GPIO_BATTERY_CHARGER_CURRENT, "charge_current") < 0) goto gpio_failed; return 0; gpio_failed: return -EINVAL; } /* * battery_charging_ctrl - battery charing control. * @ctl: battery control command * */ static int battery_charging_ctrl(batt_ctl_t ctl) { int result = 0; switch (ctl) { case DISABLE: BATT("charger OFF\n"); /* 0 for enable; 1 disable */ result = gpio_direction_output(GPIO_BATTERY_CHARGER_EN, 1); break; case ENABLE_SLOW_CHG: BATT("charger ON (SLOW)\n"); result = gpio_direction_output(GPIO_BATTERY_CHARGER_CURRENT, 0); result = gpio_direction_output(GPIO_BATTERY_CHARGER_EN, 0); break; case ENABLE_FAST_CHG: BATT("charger ON (FAST)\n"); result = gpio_direction_output(GPIO_BATTERY_CHARGER_CURRENT, 1); result = gpio_direction_output(GPIO_BATTERY_CHARGER_EN, 0); break; default: printk(KERN_ERR "Not supported battery ctr called.!\n"); result = -EINVAL; break; } return result; } int htc_battery_set_charging(batt_ctl_t ctl) { int rc; if ((rc = battery_charging_ctrl(ctl)) < 0) goto result; if (!htc_battery_initial) { htc_batt_info.rep.charging_enabled = ctl & 0x3; } else { mutex_lock(&htc_batt_info.lock); htc_batt_info.rep.charging_enabled = ctl & 0x3; mutex_unlock(&htc_batt_info.lock); } result: return rc; } int htc_battery_status_update(u32 curr_level) { int notify; if (!htc_battery_initial) return 0; mutex_lock(&htc_batt_info.lock); notify = (htc_batt_info.rep.level != curr_level); htc_batt_info.rep.level = curr_level; mutex_unlock(&htc_batt_info.lock); if (notify) power_supply_changed(&htc_power_supplies[CHARGER_BATTERY]); return 0; } int htc_cable_status_update(int status) { int rc = 0; unsigned source; if (!htc_battery_initial) return 0; mutex_lock(&htc_batt_info.lock); switch(status) { case CHARGER_BATTERY: BATT("cable NOT PRESENT\n"); htc_batt_info.rep.charging_source = CHARGER_BATTERY; break; case CHARGER_USB: BATT("cable USB\n"); 
htc_batt_info.rep.charging_source = CHARGER_USB; break; case CHARGER_AC: BATT("cable AC\n"); htc_batt_info.rep.charging_source = CHARGER_AC; break; default: printk(KERN_ERR "%s: Not supported cable status received!\n", __FUNCTION__); rc = -EINVAL; } source = htc_batt_info.rep.charging_source; mutex_unlock(&htc_batt_info.lock); msm_hsusb_set_vbus_state(source == CHARGER_USB); if (source == CHARGER_USB) { wake_lock(&vbus_wake_lock); } else { /* give userspace some time to see the uevent and update * LED state or whatnot... */ wake_lock_timeout(&vbus_wake_lock, HZ / 2); } /* if the power source changes, all power supplies may change state */ power_supply_changed(&htc_power_supplies[CHARGER_BATTERY]); power_supply_changed(&htc_power_supplies[CHARGER_USB]); power_supply_changed(&htc_power_supplies[CHARGER_AC]); return rc; } static int htc_get_batt_info(struct battery_info_reply *buffer) { struct rpc_request_hdr req; struct htc_get_batt_info_rep { struct rpc_reply_hdr hdr; struct battery_info_reply info; } rep; int rc; if (buffer == NULL) return -EINVAL; rc = msm_rpc_call_reply(endpoint, HTC_PROCEDURE_GET_BATT_INFO, &req, sizeof(req), &rep, sizeof(rep), 5 * HZ); if ( rc < 0 ) return rc; mutex_lock(&htc_batt_info.lock); buffer->batt_id = be32_to_cpu(rep.info.batt_id); buffer->batt_vol = be32_to_cpu(rep.info.batt_vol); buffer->batt_temp = be32_to_cpu(rep.info.batt_temp); buffer->batt_current = be32_to_cpu(rep.info.batt_current); buffer->level = be32_to_cpu(rep.info.level); buffer->charging_source = be32_to_cpu(rep.info.charging_source); buffer->charging_enabled = be32_to_cpu(rep.info.charging_enabled); buffer->full_bat = be32_to_cpu(rep.info.full_bat); mutex_unlock(&htc_batt_info.lock); return 0; } #if 0 static int htc_get_cable_status(void) { struct rpc_request_hdr req; struct htc_get_cable_status_rep { struct rpc_reply_hdr hdr; int status; } rep; int rc; rc = msm_rpc_call_reply(endpoint, HTC_PROCEDURE_GET_CABLE_STATUS, &req, sizeof(req), &rep, sizeof(rep), 5 * HZ); if 
(rc < 0) return rc; return be32_to_cpu(rep.status); } #endif /* -------------------------------------------------------------------------- */ static int htc_power_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { charger_type_t charger; mutex_lock(&htc_batt_info.lock); charger = htc_batt_info.rep.charging_source; mutex_unlock(&htc_batt_info.lock); switch (psp) { case POWER_SUPPLY_PROP_ONLINE: if (psy->type == POWER_SUPPLY_TYPE_MAINS) val->intval = (charger == CHARGER_AC ? 1 : 0); else if (psy->type == POWER_SUPPLY_TYPE_USB) val->intval = (charger == CHARGER_USB ? 1 : 0); else val->intval = 0; break; default: return -EINVAL; } return 0; } static int htc_battery_get_charging_status(void) { u32 level; charger_type_t charger; int ret; mutex_lock(&htc_batt_info.lock); charger = htc_batt_info.rep.charging_source; switch (charger) { case CHARGER_BATTERY: ret = POWER_SUPPLY_STATUS_NOT_CHARGING; break; case CHARGER_USB: case CHARGER_AC: level = htc_batt_info.rep.level; if (level == 100) ret = POWER_SUPPLY_STATUS_FULL; else ret = POWER_SUPPLY_STATUS_CHARGING; break; default: ret = POWER_SUPPLY_STATUS_UNKNOWN; } mutex_unlock(&htc_batt_info.lock); return ret; } static int htc_battery_get_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) { switch (psp) { case POWER_SUPPLY_PROP_STATUS: val->intval = htc_battery_get_charging_status(); break; case POWER_SUPPLY_PROP_HEALTH: val->intval = POWER_SUPPLY_HEALTH_GOOD; break; case POWER_SUPPLY_PROP_PRESENT: val->intval = htc_batt_info.present; break; case POWER_SUPPLY_PROP_TECHNOLOGY: val->intval = POWER_SUPPLY_TECHNOLOGY_LION; break; case POWER_SUPPLY_PROP_CAPACITY: mutex_lock(&htc_batt_info.lock); val->intval = htc_batt_info.rep.level; mutex_unlock(&htc_batt_info.lock); break; default: return -EINVAL; } return 0; } #define HTC_BATTERY_ATTR(_name) \ { \ .attr = { .name = #_name, .mode = S_IRUGO, .owner = THIS_MODULE }, \ .show = 
htc_battery_show_property, \ .store = NULL, \ } static struct device_attribute htc_battery_attrs[] = { HTC_BATTERY_ATTR(batt_id), HTC_BATTERY_ATTR(batt_vol), HTC_BATTERY_ATTR(batt_temp), HTC_BATTERY_ATTR(batt_current), HTC_BATTERY_ATTR(charging_source), HTC_BATTERY_ATTR(charging_enabled), HTC_BATTERY_ATTR(full_bat), }; enum { BATT_ID = 0, BATT_VOL, BATT_TEMP, BATT_CURRENT, CHARGING_SOURCE, CHARGING_ENABLED, FULL_BAT, }; static int htc_rpc_set_delta(unsigned delta) { struct set_batt_delta_req { struct rpc_request_hdr hdr; uint32_t data; } req; req.data = cpu_to_be32(delta); return msm_rpc_call(endpoint, HTC_PROCEDURE_SET_BATT_DELTA, &req, sizeof(req), 5 * HZ); } static ssize_t htc_battery_set_delta(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int rc; unsigned long delta = 0; delta = simple_strtoul(buf, NULL, 10); if (delta > 100) return -EINVAL; mutex_lock(&htc_batt_info.rpc_lock); rc = htc_rpc_set_delta(delta); mutex_unlock(&htc_batt_info.rpc_lock); if (rc < 0) return rc; return count; } static struct device_attribute htc_set_delta_attrs[] = { __ATTR(delta, S_IWUSR | S_IWGRP, NULL, htc_battery_set_delta), }; static int htc_battery_create_attrs(struct device * dev) { int i, j, rc; for (i = 0; i < ARRAY_SIZE(htc_battery_attrs); i++) { rc = device_create_file(dev, &htc_battery_attrs[i]); if (rc) goto htc_attrs_failed; } for (j = 0; j < ARRAY_SIZE(htc_set_delta_attrs); j++) { rc = device_create_file(dev, &htc_set_delta_attrs[j]); if (rc) goto htc_delta_attrs_failed; } goto succeed; htc_attrs_failed: while (i--) device_remove_file(dev, &htc_battery_attrs[i]); htc_delta_attrs_failed: while (j--) device_remove_file(dev, &htc_set_delta_attrs[i]); succeed: return rc; } static ssize_t htc_battery_show_property(struct device *dev, struct device_attribute *attr, char *buf) { int i = 0; const ptrdiff_t off = attr - htc_battery_attrs; /* rpc lock is used to prevent two threads from calling * into the get info rpc at the same time */ 
mutex_lock(&htc_batt_info.rpc_lock); /* check cache time to decide if we need to update */ if (htc_batt_info.update_time && time_before(jiffies, htc_batt_info.update_time + msecs_to_jiffies(cache_time))) goto dont_need_update; if (htc_get_batt_info(&htc_batt_info.rep) < 0) printk(KERN_ERR "%s: rpc failed!!!\n", __FUNCTION__); else htc_batt_info.update_time = jiffies; dont_need_update: mutex_unlock(&htc_batt_info.rpc_lock); mutex_lock(&htc_batt_info.lock); switch (off) { case BATT_ID: i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", htc_batt_info.rep.batt_id); break; case BATT_VOL: i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", htc_batt_info.rep.batt_vol); break; case BATT_TEMP: i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", htc_batt_info.rep.batt_temp); break; case BATT_CURRENT: i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", htc_batt_info.rep.batt_current); break; case CHARGING_SOURCE: i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", htc_batt_info.rep.charging_source); break; case CHARGING_ENABLED: i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", htc_batt_info.rep.charging_enabled); break; case FULL_BAT: i += scnprintf(buf + i, PAGE_SIZE - i, "%d\n", htc_batt_info.rep.full_bat); break; default: i = -EINVAL; } mutex_unlock(&htc_batt_info.lock); return i; } static int htc_battery_probe(struct platform_device *pdev) { int i, rc; if (pdev->id != (APP_BATT_VER & RPC_VERSION_MAJOR_MASK)) return -EINVAL; /* init battery gpio */ if ((rc = init_batt_gpio()) < 0) { printk(KERN_ERR "%s: init battery gpio failed!\n", __FUNCTION__); return rc; } /* init structure data member */ htc_batt_info.update_time = jiffies; htc_batt_info.present = gpio_get_value(GPIO_BATTERY_DETECTION); /* init rpc */ endpoint = msm_rpc_connect(APP_BATT_PROG, APP_BATT_VER, 0); if (IS_ERR(endpoint)) { printk(KERN_ERR "%s: init rpc failed! 
rc = %ld\n", __FUNCTION__, PTR_ERR(endpoint)); return rc; } /* init power supplier framework */ for (i = 0; i < ARRAY_SIZE(htc_power_supplies); i++) { rc = power_supply_register(&pdev->dev, &htc_power_supplies[i]); if (rc) printk(KERN_ERR "Failed to register power supply (%d)\n", rc); } /* create htc detail attributes */ htc_battery_create_attrs(htc_power_supplies[CHARGER_BATTERY].dev); /* After battery driver gets initialized, send rpc request to inquiry * the battery status in case of we lost some info */ htc_battery_initial = 1; mutex_lock(&htc_batt_info.rpc_lock); if (htc_get_batt_info(&htc_batt_info.rep) < 0) printk(KERN_ERR "%s: get info failed\n", __FUNCTION__); htc_cable_status_update(htc_batt_info.rep.charging_source); battery_charging_ctrl(htc_batt_info.rep.charging_enabled ? ENABLE_SLOW_CHG : DISABLE); if (htc_rpc_set_delta(1) < 0) printk(KERN_ERR "%s: set delta failed\n", __FUNCTION__); htc_batt_info.update_time = jiffies; mutex_unlock(&htc_batt_info.rpc_lock); if (htc_batt_info.rep.charging_enabled == 0) battery_charging_ctrl(DISABLE); return 0; } static struct platform_driver htc_battery_driver = { .probe = htc_battery_probe, .driver = { .name = APP_BATT_PDEV_NAME, .owner = THIS_MODULE, }, }; /* batt_mtoa server definitions */ #define BATT_MTOA_PROG 0x30100000 #define BATT_MTOA_VERS 0 #define RPC_BATT_MTOA_NULL 0 #define RPC_BATT_MTOA_SET_CHARGING_PROC 1 #define RPC_BATT_MTOA_CABLE_STATUS_UPDATE_PROC 2 #define RPC_BATT_MTOA_LEVEL_UPDATE_PROC 3 struct rpc_batt_mtoa_set_charging_args { int enable; }; struct rpc_batt_mtoa_cable_status_update_args { int status; }; struct rpc_dem_battery_update_args { uint32_t level; }; static int handle_battery_call(struct msm_rpc_server *server, struct rpc_request_hdr *req, unsigned len) { switch (req->procedure) { case RPC_BATT_MTOA_NULL: return 0; case RPC_BATT_MTOA_SET_CHARGING_PROC: { struct rpc_batt_mtoa_set_charging_args *args; args = (struct rpc_batt_mtoa_set_charging_args *)(req + 1); args->enable = 
be32_to_cpu(args->enable); BATT("set_charging: enable=%d\n",args->enable); htc_battery_set_charging(args->enable); return 0; } case RPC_BATT_MTOA_CABLE_STATUS_UPDATE_PROC: { struct rpc_batt_mtoa_cable_status_update_args *args; args = (struct rpc_batt_mtoa_cable_status_update_args *)(req + 1); args->status = be32_to_cpu(args->status); BATT("cable_status_update: status=%d\n",args->status); htc_cable_status_update(args->status); return 0; } case RPC_BATT_MTOA_LEVEL_UPDATE_PROC: { struct rpc_dem_battery_update_args *args; args = (struct rpc_dem_battery_update_args *)(req + 1); args->level = be32_to_cpu(args->level); BATT("dem_battery_update: level=%d\n",args->level); htc_battery_status_update(args->level); return 0; } default: printk(KERN_ERR "%s: program 0x%08x:%d: unknown procedure %d\n", __FUNCTION__, req->prog, req->vers, req->procedure); return -ENODEV; } } static struct msm_rpc_server battery_server = { .prog = BATT_MTOA_PROG, .vers = BATT_MTOA_VERS, .rpc_call = handle_battery_call, }; static int __init htc_battery_init(void) { wake_lock_init(&vbus_wake_lock, WAKE_LOCK_SUSPEND, "vbus_present"); mutex_init(&htc_batt_info.lock); mutex_init(&htc_batt_info.rpc_lock); msm_rpc_create_server(&battery_server); platform_driver_register(&htc_battery_driver); return 0; } module_init(htc_battery_init); MODULE_DESCRIPTION("HTC Battery Driver"); MODULE_LICENSE("GPL");
gpl-2.0
showp1984/bricked-hammerhead
arch/powerpc/platforms/86xx/pic.c
6840
1678
/* * Copyright 2008 Freescale Semiconductor, Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/interrupt.h> #include <linux/of_platform.h> #include <asm/mpic.h> #include <asm/i8259.h> #ifdef CONFIG_PPC_I8259 static void mpc86xx_8259_cascade(unsigned int irq, struct irq_desc *desc) { struct irq_chip *chip = irq_desc_get_chip(desc); unsigned int cascade_irq = i8259_irq(); if (cascade_irq != NO_IRQ) generic_handle_irq(cascade_irq); chip->irq_eoi(&desc->irq_data); } #endif /* CONFIG_PPC_I8259 */ void __init mpc86xx_init_irq(void) { #ifdef CONFIG_PPC_I8259 struct device_node *np; struct device_node *cascade_node = NULL; int cascade_irq; #endif struct mpic *mpic = mpic_alloc(NULL, 0, MPIC_BIG_ENDIAN | MPIC_SINGLE_DEST_CPU, 0, 256, " MPIC "); BUG_ON(mpic == NULL); mpic_init(mpic); #ifdef CONFIG_PPC_I8259 /* Initialize i8259 controller */ for_each_node_by_type(np, "interrupt-controller") if (of_device_is_compatible(np, "chrp,iic")) { cascade_node = np; break; } if (cascade_node == NULL) { printk(KERN_DEBUG "Could not find i8259 PIC\n"); return; } cascade_irq = irq_of_parse_and_map(cascade_node, 0); if (cascade_irq == NO_IRQ) { printk(KERN_ERR "Failed to map cascade interrupt\n"); return; } i8259_init(cascade_node, 0); of_node_put(cascade_node); irq_set_chained_handler(cascade_irq, mpc86xx_8259_cascade); #endif }
gpl-2.0
acuicultor/android_kernel_oneplus_msm8974-1
net/rxrpc/ar-peer.c
7864
7363
/* RxRPC remote transport endpoint management
 *
 * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/module.h>
#include <linux/net.h>
#include <linux/skbuff.h>
#include <linux/udp.h>
#include <linux/in.h>
#include <linux/in6.h>
#include <linux/icmp.h>
#include <linux/slab.h>
#include <net/sock.h>
#include <net/af_rxrpc.h>
#include <net/ip.h>
#include <net/route.h>
#include "ar-internal.h"

/* global peer cache: flat list protected by an rwlock; waiters on the
 * wait queue are woken when the list drains (see rxrpc_destroy_all_peers) */
static LIST_HEAD(rxrpc_peers);
static DEFINE_RWLOCK(rxrpc_peer_lock);
static DECLARE_WAIT_QUEUE_HEAD(rxrpc_peer_wq);

static void rxrpc_destroy_peer(struct work_struct *work);

/*
 * assess the MTU size for the network interface through which this peer is
 * reached
 */
static void rxrpc_assess_MTU_size(struct rxrpc_peer *peer)
{
	struct rtable *rt;
	struct flowi4 fl4;

	/* fall back to the classic Ethernet MTU if no route is found */
	peer->if_mtu = 1500;

	/* the port numbers here are arbitrary - only the destination
	 * address matters for route/MTU lookup */
	rt = ip_route_output_ports(&init_net, &fl4, NULL,
				   peer->srx.transport.sin.sin_addr.s_addr, 0,
				   htons(7000), htons(7001),
				   IPPROTO_UDP, 0, 0);
	if (IS_ERR(rt)) {
		_leave(" [route err %ld]", PTR_ERR(rt));
		return;
	}

	peer->if_mtu = dst_mtu(&rt->dst);
	dst_release(&rt->dst);

	_leave(" [if_mtu %u]", peer->if_mtu);
}

/*
 * allocate a new peer
 *
 * Returns a peer with usage count 1, or NULL on allocation failure.
 * Only AF_INET/SOCK_DGRAM transports are supported; anything else BUGs.
 */
static struct rxrpc_peer *rxrpc_alloc_peer(struct sockaddr_rxrpc *srx,
					   gfp_t gfp)
{
	struct rxrpc_peer *peer;

	_enter("");

	peer = kzalloc(sizeof(struct rxrpc_peer), gfp);
	if (peer) {
		INIT_WORK(&peer->destroyer, &rxrpc_destroy_peer);
		INIT_LIST_HEAD(&peer->link);
		INIT_LIST_HEAD(&peer->error_targets);
		spin_lock_init(&peer->lock);
		atomic_set(&peer->usage, 1);
		peer->debug_id = atomic_inc_return(&rxrpc_debug_id);
		memcpy(&peer->srx, srx, sizeof(*srx));

		rxrpc_assess_MTU_size(peer);
		peer->mtu = peer->if_mtu;

		if (srx->transport.family == AF_INET) {
			peer->hdrsize = sizeof(struct iphdr);
			switch (srx->transport_type) {
			case SOCK_DGRAM:
				peer->hdrsize += sizeof(struct udphdr);
				break;
			default:
				BUG();
				break;
			}
		} else {
			BUG();
		}

		/* maxdata = payload space left in one MTU-sized datagram
		 * after the IP/UDP/rxrpc headers */
		peer->hdrsize += sizeof(struct rxrpc_header);
		peer->maxdata = peer->mtu - peer->hdrsize;
	}

	_leave(" = %p", peer);
	return peer;
}

/*
 * obtain a remote transport endpoint for the specified address
 *
 * Looks the address up under a read lock; on a miss, allocates a candidate
 * outside any lock, then re-searches under the write lock before inserting
 * (another thread may have raced in the same peer meanwhile).  Returns the
 * peer with its usage count incremented, or an ERR_PTR.
 */
struct rxrpc_peer *rxrpc_get_peer(struct sockaddr_rxrpc *srx, gfp_t gfp)
{
	struct rxrpc_peer *peer, *candidate;
	const char *new = "old";
	int usage;

	_enter("{%d,%d,%pI4+%hu}",
	       srx->transport_type,
	       srx->transport_len,
	       &srx->transport.sin.sin_addr,
	       ntohs(srx->transport.sin.sin_port));

	/* search the peer list first */
	read_lock_bh(&rxrpc_peer_lock);
	list_for_each_entry(peer, &rxrpc_peers, link) {
		_debug("check PEER %d { u=%d t=%d l=%d }",
		       peer->debug_id,
		       atomic_read(&peer->usage),
		       peer->srx.transport_type,
		       peer->srx.transport_len);

		/* usage > 0 excludes peers already queued for destruction */
		if (atomic_read(&peer->usage) > 0 &&
		    peer->srx.transport_type == srx->transport_type &&
		    peer->srx.transport_len == srx->transport_len &&
		    memcmp(&peer->srx.transport,
			   &srx->transport,
			   srx->transport_len) == 0)
			goto found_extant_peer;
	}
	read_unlock_bh(&rxrpc_peer_lock);

	/* not yet present - create a candidate for a new record and then
	 * redo the search */
	candidate = rxrpc_alloc_peer(srx, gfp);
	if (!candidate) {
		_leave(" = -ENOMEM");
		return ERR_PTR(-ENOMEM);
	}

	write_lock_bh(&rxrpc_peer_lock);

	list_for_each_entry(peer, &rxrpc_peers, link) {
		if (atomic_read(&peer->usage) > 0 &&
		    peer->srx.transport_type == srx->transport_type &&
		    peer->srx.transport_len == srx->transport_len &&
		    memcmp(&peer->srx.transport,
			   &srx->transport,
			   srx->transport_len) == 0)
			goto found_extant_second;
	}

	/* we can now add the new candidate to the list */
	peer = candidate;
	candidate = NULL;
	usage = atomic_read(&peer->usage);

	list_add_tail(&peer->link, &rxrpc_peers);
	write_unlock_bh(&rxrpc_peer_lock);
	new = "new";

success:
	_net("PEER %s %d {%d,%u,%pI4+%hu}",
	     new,
	     peer->debug_id,
	     peer->srx.transport_type,
	     peer->srx.transport.family,
	     &peer->srx.transport.sin.sin_addr,
	     ntohs(peer->srx.transport.sin.sin_port));

	_leave(" = %p {u=%d}", peer, usage);
	return peer;

	/* we found the peer in the list immediately */
found_extant_peer:
	usage = atomic_inc_return(&peer->usage);
	read_unlock_bh(&rxrpc_peer_lock);
	goto success;

	/* we found the peer on the second time through the list */
found_extant_second:
	usage = atomic_inc_return(&peer->usage);
	write_unlock_bh(&rxrpc_peer_lock);
	kfree(candidate);
	goto success;
}

/*
 * find the peer associated with a packet
 *
 * Lookup only - never allocates.  Returns the peer with its usage count
 * bumped, -EAFNOSUPPORT for a non-IPv4/UDP local endpoint, or -EBUSY if
 * the peer is not yet in the cache.
 */
struct rxrpc_peer *rxrpc_find_peer(struct rxrpc_local *local,
				   __be32 addr, __be16 port)
{
	struct rxrpc_peer *peer;

	_enter("");

	/* search the peer list */
	read_lock_bh(&rxrpc_peer_lock);

	if (local->srx.transport.family == AF_INET &&
	    local->srx.transport_type == SOCK_DGRAM
	    ) {
		list_for_each_entry(peer, &rxrpc_peers, link) {
			if (atomic_read(&peer->usage) > 0 &&
			    peer->srx.transport_type == SOCK_DGRAM &&
			    peer->srx.transport.family == AF_INET &&
			    peer->srx.transport.sin.sin_port == port &&
			    peer->srx.transport.sin.sin_addr.s_addr == addr)
				goto found_UDP_peer;
		}

		goto new_UDP_peer;
	}

	read_unlock_bh(&rxrpc_peer_lock);
	_leave(" = -EAFNOSUPPORT");
	return ERR_PTR(-EAFNOSUPPORT);

found_UDP_peer:
	_net("Rx UDP DGRAM from peer %d", peer->debug_id);
	atomic_inc(&peer->usage);
	read_unlock_bh(&rxrpc_peer_lock);
	_leave(" = %p", peer);
	return peer;

new_UDP_peer:
	/* NOTE(review): 'peer' here is the loop cursor after a completed
	 * list_for_each_entry, so peer->debug_id reads from the list-head
	 * container rather than a real peer - debug output only, but
	 * looks like it should not be dereferenced; confirm upstream. */
	_net("Rx UDP DGRAM from NEW peer %d", peer->debug_id);
	read_unlock_bh(&rxrpc_peer_lock);
	_leave(" = -EBUSY [new]");
	return ERR_PTR(-EBUSY);
}

/*
 * release a remote transport endpoint
 *
 * Drops one usage reference; the final put defers actual destruction to
 * the work queue (we may be called from contexts that can't take the
 * write lock / free directly).
 */
void rxrpc_put_peer(struct rxrpc_peer *peer)
{
	_enter("%p{u=%d}", peer, atomic_read(&peer->usage));

	ASSERTCMP(atomic_read(&peer->usage), >, 0);

	if (likely(!atomic_dec_and_test(&peer->usage))) {
		_leave(" [in use]");
		return;
	}

	rxrpc_queue_work(&peer->destroyer);
	_leave("");
}

/*
 * destroy a remote transport endpoint
 *
 * Work-queue callback scheduled by the final rxrpc_put_peer(); unlinks
 * the peer, frees it and wakes anyone waiting for the list to empty.
 */
static void rxrpc_destroy_peer(struct work_struct *work)
{
	struct rxrpc_peer *peer =
		container_of(work, struct rxrpc_peer, destroyer);

	_enter("%p{%d}", peer, atomic_read(&peer->usage));

	write_lock_bh(&rxrpc_peer_lock);
	list_del(&peer->link);
	write_unlock_bh(&rxrpc_peer_lock);

	_net("DESTROY PEER %d", peer->debug_id);
	kfree(peer);

	if (list_empty(&rxrpc_peers))
		wake_up_all(&rxrpc_peer_wq);

	_leave("");
}

/*
 * preemptively destroy all the peer records from a transport endpoint rather
 * than waiting for them to time out
 */
void __exit rxrpc_destroy_all_peers(void)
{
	DECLARE_WAITQUEUE(myself,current);

	_enter("");

	/* we simply have to wait for them to go away */
	if (!list_empty(&rxrpc_peers)) {
		set_current_state(TASK_UNINTERRUPTIBLE);
		add_wait_queue(&rxrpc_peer_wq, &myself);

		while (!list_empty(&rxrpc_peers)) {
			schedule();
			set_current_state(TASK_UNINTERRUPTIBLE);
		}

		remove_wait_queue(&rxrpc_peer_wq, &myself);
		set_current_state(TASK_RUNNING);
	}

	_leave("");
}
gpl-2.0
Alexpux/GCC
gcc/testsuite/gfortran.dg/fmt_error_2.f90
185
1256
! { dg-do compile } ! { dg-options "-std=legacy" } ! ! PR 33269: we used to not simplify format strings before checking if ! they were valid, leading to a missed error. IMPLICIT CHARACTER*5 (h-z) CHARACTER*5 f CHARACTER*5 bad, good parameter(bad="a", good="(a)") PRINT ('a'), "hello" ! { dg-error "Missing leading left parenthesis in format string" } WRITE (*, ("a")) "error" ! { dg-error "Missing leading left parenthesis in format string" } PRINT 'a', "hello" ! { dg-error "Missing leading left parenthesis in format string" } WRITE (*, "a") "error" ! { dg-error "Missing leading left parenthesis in format string" } WRITE (*, bad) "error" ! { dg-error "Missing leading left parenthesis in format string" } PRINT 'a' // ', a', "err", "or" ! { dg-error "Missing leading left parenthesis in format string" } PRINT '(' // 'a' ! { dg-error "Unexpected end of format string in format string" } ! the following are ok PRINT "(2f5.3)", bar, foo PRINT ' (a)', "hello" WRITE (*, " ((a))") "hello" print "(a" // ")", "all is fine" print good, "great" ! verify that we haven't broken non-constant expressions f = "(f5.3)" print f, 3.14159 print (f), 2.71813 print implicitly_typed, "something" write (*, implicitly_typed_as_well) "something else" END
gpl-2.0
mfrw/linux
drivers/net/ethernet/cisco/enic/enic_clsf.c
185
7097
#include <linux/if.h> #include <linux/if_ether.h> #include <linux/if_link.h> #include <linux/netdevice.h> #include <linux/in.h> #include <linux/types.h> #include <linux/skbuff.h> #include <net/flow_dissector.h> #include "enic_res.h" #include "enic_clsf.h" /* enic_addfltr_5t - Add ipv4 5tuple filter * @enic: enic struct of vnic * @keys: flow_keys of ipv4 5tuple * @rq: rq number to steer to * * This function returns filter_id(hardware_id) of the filter * added. In case of error it returns a negative number. */ int enic_addfltr_5t(struct enic *enic, struct flow_keys *keys, u16 rq) { int res; struct filter data; switch (keys->basic.ip_proto) { case IPPROTO_TCP: data.u.ipv4.protocol = PROTO_TCP; break; case IPPROTO_UDP: data.u.ipv4.protocol = PROTO_UDP; break; default: return -EPROTONOSUPPORT; }; data.type = FILTER_IPV4_5TUPLE; data.u.ipv4.src_addr = ntohl(keys->addrs.v4addrs.src); data.u.ipv4.dst_addr = ntohl(keys->addrs.v4addrs.dst); data.u.ipv4.src_port = ntohs(keys->ports.src); data.u.ipv4.dst_port = ntohs(keys->ports.dst); data.u.ipv4.flags = FILTER_FIELDS_IPV4_5TUPLE; spin_lock_bh(&enic->devcmd_lock); res = vnic_dev_classifier(enic->vdev, CLSF_ADD, &rq, &data); spin_unlock_bh(&enic->devcmd_lock); res = (res == 0) ? rq : res; return res; } /* enic_delfltr - Delete clsf filter * @enic: enic struct of vnic * @filter_id: filter_is(hardware_id) of filter to be deleted * * This function returns zero in case of success, negative number incase of * error. 
*/ int enic_delfltr(struct enic *enic, u16 filter_id) { int ret; spin_lock_bh(&enic->devcmd_lock); ret = vnic_dev_classifier(enic->vdev, CLSF_DEL, &filter_id, NULL); spin_unlock_bh(&enic->devcmd_lock); return ret; } /* enic_rfs_flw_tbl_init - initialize enic->rfs_h members * @enic: enic data */ void enic_rfs_flw_tbl_init(struct enic *enic) { int i; spin_lock_init(&enic->rfs_h.lock); for (i = 0; i <= ENIC_RFS_FLW_MASK; i++) INIT_HLIST_HEAD(&enic->rfs_h.ht_head[i]); enic->rfs_h.max = enic->config.num_arfs; enic->rfs_h.free = enic->rfs_h.max; enic->rfs_h.toclean = 0; enic_rfs_timer_start(enic); } void enic_rfs_flw_tbl_free(struct enic *enic) { int i; enic_rfs_timer_stop(enic); spin_lock_bh(&enic->rfs_h.lock); enic->rfs_h.free = 0; for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) { struct hlist_head *hhead; struct hlist_node *tmp; struct enic_rfs_fltr_node *n; hhead = &enic->rfs_h.ht_head[i]; hlist_for_each_entry_safe(n, tmp, hhead, node) { enic_delfltr(enic, n->fltr_id); hlist_del(&n->node); kfree(n); } } spin_unlock_bh(&enic->rfs_h.lock); } struct enic_rfs_fltr_node *htbl_fltr_search(struct enic *enic, u16 fltr_id) { int i; for (i = 0; i < (1 << ENIC_RFS_FLW_BITSHIFT); i++) { struct hlist_head *hhead; struct hlist_node *tmp; struct enic_rfs_fltr_node *n; hhead = &enic->rfs_h.ht_head[i]; hlist_for_each_entry_safe(n, tmp, hhead, node) if (n->fltr_id == fltr_id) return n; } return NULL; } #ifdef CONFIG_RFS_ACCEL void enic_flow_may_expire(unsigned long data) { struct enic *enic = (struct enic *)data; bool res; int j; spin_lock_bh(&enic->rfs_h.lock); for (j = 0; j < ENIC_CLSF_EXPIRE_COUNT; j++) { struct hlist_head *hhead; struct hlist_node *tmp; struct enic_rfs_fltr_node *n; hhead = &enic->rfs_h.ht_head[enic->rfs_h.toclean++]; hlist_for_each_entry_safe(n, tmp, hhead, node) { res = rps_may_expire_flow(enic->netdev, n->rq_id, n->flow_id, n->fltr_id); if (res) { res = enic_delfltr(enic, n->fltr_id); if (unlikely(res)) continue; hlist_del(&n->node); kfree(n); 
enic->rfs_h.free++; } } } spin_unlock_bh(&enic->rfs_h.lock); mod_timer(&enic->rfs_h.rfs_may_expire, jiffies + HZ/4); } static struct enic_rfs_fltr_node *htbl_key_search(struct hlist_head *h, struct flow_keys *k) { struct enic_rfs_fltr_node *tpos; hlist_for_each_entry(tpos, h, node) if (tpos->keys.addrs.v4addrs.src == k->addrs.v4addrs.src && tpos->keys.addrs.v4addrs.dst == k->addrs.v4addrs.dst && tpos->keys.ports.ports == k->ports.ports && tpos->keys.basic.ip_proto == k->basic.ip_proto && tpos->keys.basic.n_proto == k->basic.n_proto) return tpos; return NULL; } int enic_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, u16 rxq_index, u32 flow_id) { struct flow_keys keys; struct enic_rfs_fltr_node *n; struct enic *enic; u16 tbl_idx; int res, i; enic = netdev_priv(dev); res = skb_flow_dissect_flow_keys(skb, &keys); if (!res || keys.basic.n_proto != htons(ETH_P_IP) || (keys.basic.ip_proto != IPPROTO_TCP && keys.basic.ip_proto != IPPROTO_UDP)) return -EPROTONOSUPPORT; tbl_idx = skb_get_hash_raw(skb) & ENIC_RFS_FLW_MASK; spin_lock_bh(&enic->rfs_h.lock); n = htbl_key_search(&enic->rfs_h.ht_head[tbl_idx], &keys); if (n) { /* entry already present */ if (rxq_index == n->rq_id) { res = -EEXIST; goto ret_unlock; } /* desired rq changed for the flow, we need to delete * old fltr and add new one * * The moment we delete the fltr, the upcoming pkts * are put it default rq based on rss. When we add * new filter, upcoming pkts are put in desired queue. * This could cause ooo pkts. * * Lets 1st try adding new fltr and then del old one. 
*/ i = --enic->rfs_h.free; /* clsf tbl is full, we have to del old fltr first*/ if (unlikely(i < 0)) { enic->rfs_h.free++; res = enic_delfltr(enic, n->fltr_id); if (unlikely(res < 0)) goto ret_unlock; res = enic_addfltr_5t(enic, &keys, rxq_index); if (res < 0) { hlist_del(&n->node); enic->rfs_h.free++; goto ret_unlock; } /* add new fltr 1st then del old fltr */ } else { int ret; res = enic_addfltr_5t(enic, &keys, rxq_index); if (res < 0) { enic->rfs_h.free++; goto ret_unlock; } ret = enic_delfltr(enic, n->fltr_id); /* deleting old fltr failed. Add old fltr to list. * enic_flow_may_expire() will try to delete it later. */ if (unlikely(ret < 0)) { struct enic_rfs_fltr_node *d; struct hlist_head *head; head = &enic->rfs_h.ht_head[tbl_idx]; d = kmalloc(sizeof(*d), GFP_ATOMIC); if (d) { d->fltr_id = n->fltr_id; INIT_HLIST_NODE(&d->node); hlist_add_head(&d->node, head); } } else { enic->rfs_h.free++; } } n->rq_id = rxq_index; n->fltr_id = res; n->flow_id = flow_id; /* entry not present */ } else { i = --enic->rfs_h.free; if (i <= 0) { enic->rfs_h.free++; res = -EBUSY; goto ret_unlock; } n = kmalloc(sizeof(*n), GFP_ATOMIC); if (!n) { res = -ENOMEM; enic->rfs_h.free++; goto ret_unlock; } res = enic_addfltr_5t(enic, &keys, rxq_index); if (res < 0) { kfree(n); enic->rfs_h.free++; goto ret_unlock; } n->rq_id = rxq_index; n->fltr_id = res; n->flow_id = flow_id; n->keys = keys; INIT_HLIST_NODE(&n->node); hlist_add_head(&n->node, &enic->rfs_h.ht_head[tbl_idx]); } ret_unlock: spin_unlock_bh(&enic->rfs_h.lock); return res; } #endif /* CONFIG_RFS_ACCEL */
gpl-2.0
openwrt/bcm63xx-next
drivers/media/usb/hdpvr/hdpvr-core.c
441
12574
/* * Hauppauge HD PVR USB driver * * Copyright (C) 2001-2004 Greg Kroah-Hartman (greg@kroah.com) * Copyright (C) 2008 Janne Grunau (j@jannau.net) * Copyright (C) 2008 John Poet * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/module.h> #include <linux/uaccess.h> #include <linux/atomic.h> #include <linux/usb.h> #include <linux/mutex.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <media/v4l2-dev.h> #include <media/v4l2-common.h> #include "hdpvr.h" static int video_nr[HDPVR_MAX] = {[0 ... (HDPVR_MAX - 1)] = UNSET}; module_param_array(video_nr, int, NULL, 0); MODULE_PARM_DESC(video_nr, "video device number (-1=Auto)"); /* holds the number of currently registered devices */ static atomic_t dev_nr = ATOMIC_INIT(-1); int hdpvr_debug; module_param(hdpvr_debug, int, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(hdpvr_debug, "enable debugging output"); static uint default_video_input = HDPVR_VIDEO_INPUTS; module_param(default_video_input, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(default_video_input, "default video input: 0=Component / " "1=S-Video / 2=Composite"); static uint default_audio_input = HDPVR_AUDIO_INPUTS; module_param(default_audio_input, uint, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(default_audio_input, "default audio input: 0=RCA back / " "1=RCA front / 2=S/PDIF"); static bool boost_audio; module_param(boost_audio, bool, S_IRUGO|S_IWUSR); MODULE_PARM_DESC(boost_audio, "boost the audio signal"); /* table of devices that work with this driver */ static struct usb_device_id hdpvr_table[] = { { USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID) }, { USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID1) }, { USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID2) }, { USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID3) }, { 
USB_DEVICE(HD_PVR_VENDOR_ID, HD_PVR_PRODUCT_ID4) }, { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, hdpvr_table); void hdpvr_delete(struct hdpvr_device *dev) { hdpvr_free_buffers(dev); if (dev->video_dev) video_device_release(dev->video_dev); usb_put_dev(dev->udev); } static void challenge(u8 *bytes) { __le64 *i64P; u64 tmp64; uint i, idx; for (idx = 0; idx < 32; ++idx) { if (idx & 0x3) bytes[(idx >> 3) + 3] = bytes[(idx >> 2) & 0x3]; switch (idx & 0x3) { case 0x3: bytes[2] += bytes[3] * 4 + bytes[4] + bytes[5]; bytes[4] += bytes[(idx & 0x1) * 2] * 9 + 9; break; case 0x1: bytes[0] *= 8; bytes[0] += 7*idx + 4; bytes[6] += bytes[3] * 3; break; case 0x0: bytes[3 - (idx >> 3)] = bytes[idx >> 2]; bytes[5] += bytes[6] * 3; for (i = 0; i < 3; i++) bytes[3] *= bytes[3] + 1; break; case 0x2: for (i = 0; i < 3; i++) bytes[1] *= bytes[6] + 1; for (i = 0; i < 3; i++) { i64P = (__le64 *)bytes; tmp64 = le64_to_cpup(i64P); tmp64 = tmp64 + (tmp64 << (bytes[7] & 0x0f)); *i64P = cpu_to_le64(tmp64); } break; } } } /* try to init the device like the windows driver */ static int device_authorization(struct hdpvr_device *dev) { int ret, retval = -ENOMEM; char request_type = 0x38, rcv_request = 0x81; char *response; #ifdef HDPVR_DEBUG size_t buf_size = 46; char *print_buf = kzalloc(5*buf_size+1, GFP_KERNEL); if (!print_buf) { v4l2_err(&dev->v4l2_dev, "Out of memory\n"); return retval; } #endif mutex_lock(&dev->usbc_mutex); ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), rcv_request, 0x80 | request_type, 0x0400, 0x0003, dev->usbc_buf, 46, 10000); if (ret != 46) { v4l2_err(&dev->v4l2_dev, "unexpected answer of status request, len %d\n", ret); goto unlock; } #ifdef HDPVR_DEBUG else { hex_dump_to_buffer(dev->usbc_buf, 46, 16, 1, print_buf, 5*buf_size+1, 0); v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "Status request returned, len %d: %s\n", ret, print_buf); } #endif dev->fw_ver = dev->usbc_buf[1]; v4l2_info(&dev->v4l2_dev, "firmware version 0x%x dated %s\n", 
dev->fw_ver, &dev->usbc_buf[2]); if (dev->fw_ver > 0x15) { dev->options.brightness = 0x80; dev->options.contrast = 0x40; dev->options.hue = 0xf; dev->options.saturation = 0x40; dev->options.sharpness = 0x80; } switch (dev->fw_ver) { case HDPVR_FIRMWARE_VERSION: dev->flags &= ~HDPVR_FLAG_AC3_CAP; break; case HDPVR_FIRMWARE_VERSION_AC3: case HDPVR_FIRMWARE_VERSION_0X12: case HDPVR_FIRMWARE_VERSION_0X15: case HDPVR_FIRMWARE_VERSION_0X1E: dev->flags |= HDPVR_FLAG_AC3_CAP; break; default: v4l2_info(&dev->v4l2_dev, "untested firmware, the driver might" " not work.\n"); if (dev->fw_ver >= HDPVR_FIRMWARE_VERSION_AC3) dev->flags |= HDPVR_FLAG_AC3_CAP; else dev->flags &= ~HDPVR_FLAG_AC3_CAP; } response = dev->usbc_buf+38; #ifdef HDPVR_DEBUG hex_dump_to_buffer(response, 8, 16, 1, print_buf, 5*buf_size+1, 0); v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "challenge: %s\n", print_buf); #endif challenge(response); #ifdef HDPVR_DEBUG hex_dump_to_buffer(response, 8, 16, 1, print_buf, 5*buf_size+1, 0); v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, " response: %s\n", print_buf); #endif msleep(100); ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 0xd1, 0x00 | request_type, 0x0000, 0x0000, response, 8, 10000); v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "magic request returned %d\n", ret); retval = ret != 8; unlock: mutex_unlock(&dev->usbc_mutex); #ifdef HDPVR_DEBUG kfree(print_buf); #endif return retval; } static int hdpvr_device_init(struct hdpvr_device *dev) { int ret; u8 *buf; if (device_authorization(dev)) return -EACCES; /* default options for init */ hdpvr_set_options(dev); /* set filter options */ mutex_lock(&dev->usbc_mutex); buf = dev->usbc_buf; buf[0] = 0x03; buf[1] = 0x03; buf[2] = 0x00; buf[3] = 0x00; ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 0x01, 0x38, CTRL_LOW_PASS_FILTER_VALUE, CTRL_DEFAULT_INDEX, buf, 4, 1000); v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "control request returned %d\n", ret); 
mutex_unlock(&dev->usbc_mutex); /* enable fan and bling leds */ mutex_lock(&dev->usbc_mutex); buf[0] = 0x1; ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 0xd4, 0x38, 0, 0, buf, 1, 1000); v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "control request returned %d\n", ret); /* boost analog audio */ buf[0] = boost_audio; ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), 0xd5, 0x38, 0, 0, buf, 1, 1000); v4l2_dbg(MSG_INFO, hdpvr_debug, &dev->v4l2_dev, "control request returned %d\n", ret); mutex_unlock(&dev->usbc_mutex); dev->status = STATUS_IDLE; return 0; } static const struct hdpvr_options hdpvr_default_options = { .video_std = HDPVR_60HZ, .video_input = HDPVR_COMPONENT, .audio_input = HDPVR_RCA_BACK, .bitrate = 65, /* 6 mbps */ .peak_bitrate = 90, /* 9 mbps */ .bitrate_mode = HDPVR_CONSTANT, .gop_mode = HDPVR_SIMPLE_IDR_GOP, .audio_codec = V4L2_MPEG_AUDIO_ENCODING_AAC, /* original picture controls for firmware version <= 0x15 */ /* updated in device_authorization() for newer firmware */ .brightness = 0x86, .contrast = 0x80, .hue = 0x80, .saturation = 0x80, .sharpness = 0x80, }; static int hdpvr_probe(struct usb_interface *interface, const struct usb_device_id *id) { struct hdpvr_device *dev; struct usb_host_interface *iface_desc; struct usb_endpoint_descriptor *endpoint; struct i2c_client *client; size_t buffer_size; int i; int retval = -ENOMEM; /* allocate memory for our device state and initialize it */ dev = kzalloc(sizeof(*dev), GFP_KERNEL); if (!dev) { dev_err(&interface->dev, "Out of memory\n"); goto error; } /* init video transfer queues first of all */ /* to prevent oops in hdpvr_delete() on error paths */ INIT_LIST_HEAD(&dev->free_buff_list); INIT_LIST_HEAD(&dev->rec_buff_list); /* register v4l2_device early so it can be used for printks */ if (v4l2_device_register(&interface->dev, &dev->v4l2_dev)) { dev_err(&interface->dev, "v4l2_device_register failed\n"); goto error; } mutex_init(&dev->io_mutex); 
mutex_init(&dev->i2c_mutex); mutex_init(&dev->usbc_mutex); dev->usbc_buf = kmalloc(64, GFP_KERNEL); if (!dev->usbc_buf) { v4l2_err(&dev->v4l2_dev, "Out of memory\n"); goto error; } init_waitqueue_head(&dev->wait_buffer); init_waitqueue_head(&dev->wait_data); dev->workqueue = create_singlethread_workqueue("hdpvr_buffer"); if (!dev->workqueue) goto error; dev->options = hdpvr_default_options; if (default_video_input < HDPVR_VIDEO_INPUTS) dev->options.video_input = default_video_input; if (default_audio_input < HDPVR_AUDIO_INPUTS) { dev->options.audio_input = default_audio_input; if (default_audio_input == HDPVR_SPDIF) dev->options.audio_codec = V4L2_MPEG_AUDIO_ENCODING_AC3; } dev->udev = usb_get_dev(interface_to_usbdev(interface)); /* set up the endpoint information */ /* use only the first bulk-in and bulk-out endpoints */ iface_desc = interface->cur_altsetting; for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) { endpoint = &iface_desc->endpoint[i].desc; if (!dev->bulk_in_endpointAddr && usb_endpoint_is_bulk_in(endpoint)) { /* USB interface description is buggy, reported max * packet size is 512 bytes, windows driver uses 8192 */ buffer_size = 8192; dev->bulk_in_size = buffer_size; dev->bulk_in_endpointAddr = endpoint->bEndpointAddress; } } if (!dev->bulk_in_endpointAddr) { v4l2_err(&dev->v4l2_dev, "Could not find bulk-in endpoint\n"); goto error; } /* init the device */ if (hdpvr_device_init(dev)) { v4l2_err(&dev->v4l2_dev, "device init failed\n"); goto error; } mutex_lock(&dev->io_mutex); if (hdpvr_alloc_buffers(dev, NUM_BUFFERS)) { mutex_unlock(&dev->io_mutex); v4l2_err(&dev->v4l2_dev, "allocating transfer buffers failed\n"); goto error; } mutex_unlock(&dev->io_mutex); #if IS_ENABLED(CONFIG_I2C) retval = hdpvr_register_i2c_adapter(dev); if (retval < 0) { v4l2_err(&dev->v4l2_dev, "i2c adapter register failed\n"); goto error; } client = hdpvr_register_ir_rx_i2c(dev); if (!client) { v4l2_err(&dev->v4l2_dev, "i2c IR RX device register failed\n"); retval = -ENODEV; 
goto reg_fail; } client = hdpvr_register_ir_tx_i2c(dev); if (!client) { v4l2_err(&dev->v4l2_dev, "i2c IR TX device register failed\n"); retval = -ENODEV; goto reg_fail; } #endif retval = hdpvr_register_videodev(dev, &interface->dev, video_nr[atomic_inc_return(&dev_nr)]); if (retval < 0) { v4l2_err(&dev->v4l2_dev, "registering videodev failed\n"); goto reg_fail; } /* let the user know what node this device is now attached to */ v4l2_info(&dev->v4l2_dev, "device now attached to %s\n", video_device_node_name(dev->video_dev)); return 0; reg_fail: #if IS_ENABLED(CONFIG_I2C) i2c_del_adapter(&dev->i2c_adapter); #endif error: if (dev) { /* Destroy single thread */ if (dev->workqueue) destroy_workqueue(dev->workqueue); /* this frees allocated memory */ hdpvr_delete(dev); } return retval; } static void hdpvr_disconnect(struct usb_interface *interface) { struct hdpvr_device *dev = to_hdpvr_dev(usb_get_intfdata(interface)); v4l2_info(&dev->v4l2_dev, "device %s disconnected\n", video_device_node_name(dev->video_dev)); /* prevent more I/O from starting and stop any ongoing */ mutex_lock(&dev->io_mutex); dev->status = STATUS_DISCONNECTED; wake_up_interruptible(&dev->wait_data); wake_up_interruptible(&dev->wait_buffer); mutex_unlock(&dev->io_mutex); v4l2_device_disconnect(&dev->v4l2_dev); msleep(100); flush_workqueue(dev->workqueue); mutex_lock(&dev->io_mutex); hdpvr_cancel_queue(dev); mutex_unlock(&dev->io_mutex); #if IS_ENABLED(CONFIG_I2C) i2c_del_adapter(&dev->i2c_adapter); #endif video_unregister_device(dev->video_dev); atomic_dec(&dev_nr); } static struct usb_driver hdpvr_usb_driver = { .name = "hdpvr", .probe = hdpvr_probe, .disconnect = hdpvr_disconnect, .id_table = hdpvr_table, }; module_usb_driver(hdpvr_usb_driver); MODULE_LICENSE("GPL"); MODULE_VERSION("0.2.1"); MODULE_AUTHOR("Janne Grunau"); MODULE_DESCRIPTION("Hauppauge HD PVR driver");
gpl-2.0
ionux/linux
drivers/media/v4l2-core/videobuf-dma-sg.c
953
16751
/* * helper functions for SG DMA video4linux capture buffers * * The functions expect the hardware being able to scatter gather * (i.e. the buffers are not linear in physical memory, but fragmented * into PAGE_SIZE chunks). They also assume the driver does not need * to touch the video data. * * (c) 2007 Mauro Carvalho Chehab, <mchehab@infradead.org> * * Highly based on video-buf written originally by: * (c) 2001,02 Gerd Knorr <kraxel@bytesex.org> * (c) 2006 Mauro Carvalho Chehab, <mchehab@infradead.org> * (c) 2006 Ted Walther and John Sokol * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 */ #include <linux/init.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <linux/vmalloc.h> #include <linux/pagemap.h> #include <linux/scatterlist.h> #include <asm/page.h> #include <asm/pgtable.h> #include <media/videobuf-dma-sg.h> #define MAGIC_DMABUF 0x19721112 #define MAGIC_SG_MEM 0x17890714 #define MAGIC_CHECK(is, should) \ if (unlikely((is) != (should))) { \ printk(KERN_ERR "magic mismatch: %x (expected %x)\n", \ is, should); \ BUG(); \ } static int debug; module_param(debug, int, 0644); MODULE_DESCRIPTION("helper module to manage video4linux dma sg buffers"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@infradead.org>"); MODULE_LICENSE("GPL"); #define dprintk(level, fmt, arg...) \ if (debug >= level) \ printk(KERN_DEBUG "vbuf-sg: " fmt , ## arg) /* --------------------------------------------------------------------- */ /* * Return a scatterlist for some page-aligned vmalloc()'ed memory * block (NULL on errors). Memory for the scatterlist is allocated * using kmalloc. The caller must free the memory. 
*/ static struct scatterlist *videobuf_vmalloc_to_sg(unsigned char *virt, int nr_pages) { struct scatterlist *sglist; struct page *pg; int i; sglist = vzalloc(nr_pages * sizeof(*sglist)); if (NULL == sglist) return NULL; sg_init_table(sglist, nr_pages); for (i = 0; i < nr_pages; i++, virt += PAGE_SIZE) { pg = vmalloc_to_page(virt); if (NULL == pg) goto err; BUG_ON(PageHighMem(pg)); sg_set_page(&sglist[i], pg, PAGE_SIZE, 0); } return sglist; err: vfree(sglist); return NULL; } /* * Return a scatterlist for a an array of userpages (NULL on errors). * Memory for the scatterlist is allocated using kmalloc. The caller * must free the memory. */ static struct scatterlist *videobuf_pages_to_sg(struct page **pages, int nr_pages, int offset, size_t size) { struct scatterlist *sglist; int i; if (NULL == pages[0]) return NULL; sglist = vmalloc(nr_pages * sizeof(*sglist)); if (NULL == sglist) return NULL; sg_init_table(sglist, nr_pages); if (PageHighMem(pages[0])) /* DMA to highmem pages might not work */ goto highmem; sg_set_page(&sglist[0], pages[0], min_t(size_t, PAGE_SIZE - offset, size), offset); size -= min_t(size_t, PAGE_SIZE - offset, size); for (i = 1; i < nr_pages; i++) { if (NULL == pages[i]) goto nopage; if (PageHighMem(pages[i])) goto highmem; sg_set_page(&sglist[i], pages[i], min_t(size_t, PAGE_SIZE, size), 0); size -= min_t(size_t, PAGE_SIZE, size); } return sglist; nopage: dprintk(2, "sgl: oops - no page\n"); vfree(sglist); return NULL; highmem: dprintk(2, "sgl: oops - highmem page\n"); vfree(sglist); return NULL; } /* --------------------------------------------------------------------- */ struct videobuf_dmabuf *videobuf_to_dma(struct videobuf_buffer *buf) { struct videobuf_dma_sg_memory *mem = buf->priv; BUG_ON(!mem); MAGIC_CHECK(mem->magic, MAGIC_SG_MEM); return &mem->dma; } EXPORT_SYMBOL_GPL(videobuf_to_dma); static void videobuf_dma_init(struct videobuf_dmabuf *dma) { memset(dma, 0, sizeof(*dma)); dma->magic = MAGIC_DMABUF; } static int 
videobuf_dma_init_user_locked(struct videobuf_dmabuf *dma, int direction, unsigned long data, unsigned long size) { unsigned long first, last; int err, rw = 0; dma->direction = direction; switch (dma->direction) { case DMA_FROM_DEVICE: rw = READ; break; case DMA_TO_DEVICE: rw = WRITE; break; default: BUG(); } first = (data & PAGE_MASK) >> PAGE_SHIFT; last = ((data+size-1) & PAGE_MASK) >> PAGE_SHIFT; dma->offset = data & ~PAGE_MASK; dma->size = size; dma->nr_pages = last-first+1; dma->pages = kmalloc(dma->nr_pages * sizeof(struct page *), GFP_KERNEL); if (NULL == dma->pages) return -ENOMEM; dprintk(1, "init user [0x%lx+0x%lx => %d pages]\n", data, size, dma->nr_pages); err = get_user_pages(current, current->mm, data & PAGE_MASK, dma->nr_pages, rw == READ, 1, /* force */ dma->pages, NULL); if (err != dma->nr_pages) { dma->nr_pages = (err >= 0) ? err : 0; dprintk(1, "get_user_pages: err=%d [%d]\n", err, dma->nr_pages); return err < 0 ? err : -EINVAL; } return 0; } static int videobuf_dma_init_user(struct videobuf_dmabuf *dma, int direction, unsigned long data, unsigned long size) { int ret; down_read(&current->mm->mmap_sem); ret = videobuf_dma_init_user_locked(dma, direction, data, size); up_read(&current->mm->mmap_sem); return ret; } static int videobuf_dma_init_kernel(struct videobuf_dmabuf *dma, int direction, int nr_pages) { int i; dprintk(1, "init kernel [%d pages]\n", nr_pages); dma->direction = direction; dma->vaddr_pages = kcalloc(nr_pages, sizeof(*dma->vaddr_pages), GFP_KERNEL); if (!dma->vaddr_pages) return -ENOMEM; dma->dma_addr = kcalloc(nr_pages, sizeof(*dma->dma_addr), GFP_KERNEL); if (!dma->dma_addr) { kfree(dma->vaddr_pages); return -ENOMEM; } for (i = 0; i < nr_pages; i++) { void *addr; addr = dma_alloc_coherent(dma->dev, PAGE_SIZE, &(dma->dma_addr[i]), GFP_KERNEL); if (addr == NULL) goto out_free_pages; dma->vaddr_pages[i] = virt_to_page(addr); } dma->vaddr = vmap(dma->vaddr_pages, nr_pages, VM_MAP | VM_IOREMAP, PAGE_KERNEL); if (NULL == dma->vaddr) 
{ dprintk(1, "vmalloc_32(%d pages) failed\n", nr_pages); goto out_free_pages; } dprintk(1, "vmalloc is at addr 0x%08lx, size=%d\n", (unsigned long)dma->vaddr, nr_pages << PAGE_SHIFT); memset(dma->vaddr, 0, nr_pages << PAGE_SHIFT); dma->nr_pages = nr_pages; return 0; out_free_pages: while (i > 0) { void *addr; i--; addr = page_address(dma->vaddr_pages[i]); dma_free_coherent(dma->dev, PAGE_SIZE, addr, dma->dma_addr[i]); } kfree(dma->dma_addr); dma->dma_addr = NULL; kfree(dma->vaddr_pages); dma->vaddr_pages = NULL; return -ENOMEM; } static int videobuf_dma_init_overlay(struct videobuf_dmabuf *dma, int direction, dma_addr_t addr, int nr_pages) { dprintk(1, "init overlay [%d pages @ bus 0x%lx]\n", nr_pages, (unsigned long)addr); dma->direction = direction; if (0 == addr) return -EINVAL; dma->bus_addr = addr; dma->nr_pages = nr_pages; return 0; } static int videobuf_dma_map(struct device *dev, struct videobuf_dmabuf *dma) { MAGIC_CHECK(dma->magic, MAGIC_DMABUF); BUG_ON(0 == dma->nr_pages); if (dma->pages) { dma->sglist = videobuf_pages_to_sg(dma->pages, dma->nr_pages, dma->offset, dma->size); } if (dma->vaddr) { dma->sglist = videobuf_vmalloc_to_sg(dma->vaddr, dma->nr_pages); } if (dma->bus_addr) { dma->sglist = vmalloc(sizeof(*dma->sglist)); if (NULL != dma->sglist) { dma->sglen = 1; sg_dma_address(&dma->sglist[0]) = dma->bus_addr & PAGE_MASK; dma->sglist[0].offset = dma->bus_addr & ~PAGE_MASK; sg_dma_len(&dma->sglist[0]) = dma->nr_pages * PAGE_SIZE; } } if (NULL == dma->sglist) { dprintk(1, "scatterlist is NULL\n"); return -ENOMEM; } if (!dma->bus_addr) { dma->sglen = dma_map_sg(dev, dma->sglist, dma->nr_pages, dma->direction); if (0 == dma->sglen) { printk(KERN_WARNING "%s: videobuf_map_sg failed\n", __func__); vfree(dma->sglist); dma->sglist = NULL; dma->sglen = 0; return -ENOMEM; } } return 0; } int videobuf_dma_unmap(struct device *dev, struct videobuf_dmabuf *dma) { MAGIC_CHECK(dma->magic, MAGIC_DMABUF); if (!dma->sglen) return 0; dma_unmap_sg(dev, dma->sglist, 
dma->sglen, dma->direction); vfree(dma->sglist); dma->sglist = NULL; dma->sglen = 0; return 0; } EXPORT_SYMBOL_GPL(videobuf_dma_unmap); int videobuf_dma_free(struct videobuf_dmabuf *dma) { int i; MAGIC_CHECK(dma->magic, MAGIC_DMABUF); BUG_ON(dma->sglen); if (dma->pages) { for (i = 0; i < dma->nr_pages; i++) page_cache_release(dma->pages[i]); kfree(dma->pages); dma->pages = NULL; } if (dma->dma_addr) { for (i = 0; i < dma->nr_pages; i++) { void *addr; addr = page_address(dma->vaddr_pages[i]); dma_free_coherent(dma->dev, PAGE_SIZE, addr, dma->dma_addr[i]); } kfree(dma->dma_addr); dma->dma_addr = NULL; kfree(dma->vaddr_pages); dma->vaddr_pages = NULL; vunmap(dma->vaddr); dma->vaddr = NULL; } if (dma->bus_addr) dma->bus_addr = 0; dma->direction = DMA_NONE; return 0; } EXPORT_SYMBOL_GPL(videobuf_dma_free); /* --------------------------------------------------------------------- */ static void videobuf_vm_open(struct vm_area_struct *vma) { struct videobuf_mapping *map = vma->vm_private_data; dprintk(2, "vm_open %p [count=%d,vma=%08lx-%08lx]\n", map, map->count, vma->vm_start, vma->vm_end); map->count++; } static void videobuf_vm_close(struct vm_area_struct *vma) { struct videobuf_mapping *map = vma->vm_private_data; struct videobuf_queue *q = map->q; struct videobuf_dma_sg_memory *mem; int i; dprintk(2, "vm_close %p [count=%d,vma=%08lx-%08lx]\n", map, map->count, vma->vm_start, vma->vm_end); map->count--; if (0 == map->count) { dprintk(1, "munmap %p q=%p\n", map, q); videobuf_queue_lock(q); for (i = 0; i < VIDEO_MAX_FRAME; i++) { if (NULL == q->bufs[i]) continue; mem = q->bufs[i]->priv; if (!mem) continue; MAGIC_CHECK(mem->magic, MAGIC_SG_MEM); if (q->bufs[i]->map != map) continue; q->bufs[i]->map = NULL; q->bufs[i]->baddr = 0; q->ops->buf_release(q, q->bufs[i]); } videobuf_queue_unlock(q); kfree(map); } return; } /* * Get a anonymous page for the mapping. Make sure we can DMA to that * memory location with 32bit PCI devices (i.e. don't use highmem for * now ...). 
Bounce buffers don't work very well for the data rates * video capture has. */ static int videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) { struct page *page; dprintk(3, "fault: fault @ %08lx [vma %08lx-%08lx]\n", (unsigned long)vmf->virtual_address, vma->vm_start, vma->vm_end); page = alloc_page(GFP_USER | __GFP_DMA32); if (!page) return VM_FAULT_OOM; clear_user_highpage(page, (unsigned long)vmf->virtual_address); vmf->page = page; return 0; } static const struct vm_operations_struct videobuf_vm_ops = { .open = videobuf_vm_open, .close = videobuf_vm_close, .fault = videobuf_vm_fault, }; /* --------------------------------------------------------------------- * SG handlers for the generic methods */ /* Allocated area consists on 3 parts: struct video_buffer struct <driver>_buffer (cx88_buffer, saa7134_buf, ...) struct videobuf_dma_sg_memory */ static struct videobuf_buffer *__videobuf_alloc_vb(size_t size) { struct videobuf_dma_sg_memory *mem; struct videobuf_buffer *vb; vb = kzalloc(size + sizeof(*mem), GFP_KERNEL); if (!vb) return vb; mem = vb->priv = ((char *)vb) + size; mem->magic = MAGIC_SG_MEM; videobuf_dma_init(&mem->dma); dprintk(1, "%s: allocated at %p(%ld+%ld) & %p(%ld)\n", __func__, vb, (long)sizeof(*vb), (long)size - sizeof(*vb), mem, (long)sizeof(*mem)); return vb; } static void *__videobuf_to_vaddr(struct videobuf_buffer *buf) { struct videobuf_dma_sg_memory *mem = buf->priv; BUG_ON(!mem); MAGIC_CHECK(mem->magic, MAGIC_SG_MEM); return mem->dma.vaddr; } static int __videobuf_iolock(struct videobuf_queue *q, struct videobuf_buffer *vb, struct v4l2_framebuffer *fbuf) { int err, pages; dma_addr_t bus; struct videobuf_dma_sg_memory *mem = vb->priv; BUG_ON(!mem); MAGIC_CHECK(mem->magic, MAGIC_SG_MEM); if (!mem->dma.dev) mem->dma.dev = q->dev; else WARN_ON(mem->dma.dev != q->dev); switch (vb->memory) { case V4L2_MEMORY_MMAP: case V4L2_MEMORY_USERPTR: if (0 == vb->baddr) { /* no userspace addr -- kernel bounce buffer */ pages = 
PAGE_ALIGN(vb->size) >> PAGE_SHIFT; err = videobuf_dma_init_kernel(&mem->dma, DMA_FROM_DEVICE, pages); if (0 != err) return err; } else if (vb->memory == V4L2_MEMORY_USERPTR) { /* dma directly to userspace */ err = videobuf_dma_init_user(&mem->dma, DMA_FROM_DEVICE, vb->baddr, vb->bsize); if (0 != err) return err; } else { /* NOTE: HACK: videobuf_iolock on V4L2_MEMORY_MMAP buffers can only be called from videobuf_qbuf we take current->mm->mmap_sem there, to prevent locking inversion, so don't take it here */ err = videobuf_dma_init_user_locked(&mem->dma, DMA_FROM_DEVICE, vb->baddr, vb->bsize); if (0 != err) return err; } break; case V4L2_MEMORY_OVERLAY: if (NULL == fbuf) return -EINVAL; /* FIXME: need sanity checks for vb->boff */ /* * Using a double cast to avoid compiler warnings when * building for PAE. Compiler doesn't like direct casting * of a 32 bit ptr to 64 bit integer. */ bus = (dma_addr_t)(unsigned long)fbuf->base + vb->boff; pages = PAGE_ALIGN(vb->size) >> PAGE_SHIFT; err = videobuf_dma_init_overlay(&mem->dma, DMA_FROM_DEVICE, bus, pages); if (0 != err) return err; break; default: BUG(); } err = videobuf_dma_map(q->dev, &mem->dma); if (0 != err) return err; return 0; } static int __videobuf_sync(struct videobuf_queue *q, struct videobuf_buffer *buf) { struct videobuf_dma_sg_memory *mem = buf->priv; BUG_ON(!mem || !mem->dma.sglen); MAGIC_CHECK(mem->magic, MAGIC_SG_MEM); MAGIC_CHECK(mem->dma.magic, MAGIC_DMABUF); dma_sync_sg_for_cpu(q->dev, mem->dma.sglist, mem->dma.sglen, mem->dma.direction); return 0; } static int __videobuf_mmap_mapper(struct videobuf_queue *q, struct videobuf_buffer *buf, struct vm_area_struct *vma) { struct videobuf_dma_sg_memory *mem = buf->priv; struct videobuf_mapping *map; unsigned int first, last, size = 0, i; int retval; retval = -EINVAL; BUG_ON(!mem); MAGIC_CHECK(mem->magic, MAGIC_SG_MEM); /* look for first buffer to map */ for (first = 0; first < VIDEO_MAX_FRAME; first++) { if (buf == q->bufs[first]) { size = 
PAGE_ALIGN(q->bufs[first]->bsize); break; } } /* paranoia, should never happen since buf is always valid. */ if (!size) { dprintk(1, "mmap app bug: offset invalid [offset=0x%lx]\n", (vma->vm_pgoff << PAGE_SHIFT)); goto done; } last = first; /* create mapping + update buffer list */ retval = -ENOMEM; map = kmalloc(sizeof(struct videobuf_mapping), GFP_KERNEL); if (NULL == map) goto done; size = 0; for (i = first; i <= last; i++) { if (NULL == q->bufs[i]) continue; q->bufs[i]->map = map; q->bufs[i]->baddr = vma->vm_start + size; size += PAGE_ALIGN(q->bufs[i]->bsize); } map->count = 1; map->q = q; vma->vm_ops = &videobuf_vm_ops; vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP; vma->vm_flags &= ~VM_IO; /* using shared anonymous pages */ vma->vm_private_data = map; dprintk(1, "mmap %p: q=%p %08lx-%08lx pgoff %08lx bufs %d-%d\n", map, q, vma->vm_start, vma->vm_end, vma->vm_pgoff, first, last); retval = 0; done: return retval; } static struct videobuf_qtype_ops sg_ops = { .magic = MAGIC_QTYPE_OPS, .alloc_vb = __videobuf_alloc_vb, .iolock = __videobuf_iolock, .sync = __videobuf_sync, .mmap_mapper = __videobuf_mmap_mapper, .vaddr = __videobuf_to_vaddr, }; void *videobuf_sg_alloc(size_t size) { struct videobuf_queue q; /* Required to make generic handler to call __videobuf_alloc */ q.int_ops = &sg_ops; q.msize = size; return videobuf_alloc_vb(&q); } EXPORT_SYMBOL_GPL(videobuf_sg_alloc); void videobuf_queue_sg_init(struct videobuf_queue *q, const struct videobuf_queue_ops *ops, struct device *dev, spinlock_t *irqlock, enum v4l2_buf_type type, enum v4l2_field field, unsigned int msize, void *priv, struct mutex *ext_lock) { videobuf_queue_core_init(q, ops, dev, irqlock, type, field, msize, priv, &sg_ops, ext_lock); } EXPORT_SYMBOL_GPL(videobuf_queue_sg_init);
gpl-2.0
glitschi/kernel_P7320
drivers/media/IR/keymaps/rc-winfast.c
953
2834
/* winfast.h - Keytable for winfast Remote Controller * * keymap imported from ir-keymaps.c * * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <media/rc-map.h> /* Table for Leadtek Winfast Remote Controls - used by both bttv and cx88 */ static struct ir_scancode winfast[] = { /* Keys 0 to 9 */ { 0x12, KEY_0 }, { 0x05, KEY_1 }, { 0x06, KEY_2 }, { 0x07, KEY_3 }, { 0x09, KEY_4 }, { 0x0a, KEY_5 }, { 0x0b, KEY_6 }, { 0x0d, KEY_7 }, { 0x0e, KEY_8 }, { 0x0f, KEY_9 }, { 0x00, KEY_POWER }, { 0x1b, KEY_AUDIO }, /* Audio Source */ { 0x02, KEY_TUNER }, /* TV/FM, not on Y0400052 */ { 0x1e, KEY_VIDEO }, /* Video Source */ { 0x16, KEY_INFO }, /* Display information */ { 0x04, KEY_VOLUMEUP }, { 0x08, KEY_VOLUMEDOWN }, { 0x0c, KEY_CHANNELUP }, { 0x10, KEY_CHANNELDOWN }, { 0x03, KEY_ZOOM }, /* fullscreen */ { 0x1f, KEY_TEXT }, /* closed caption/teletext */ { 0x20, KEY_SLEEP }, { 0x29, KEY_CLEAR }, /* boss key */ { 0x14, KEY_MUTE }, { 0x2b, KEY_RED }, { 0x2c, KEY_GREEN }, { 0x2d, KEY_YELLOW }, { 0x2e, KEY_BLUE }, { 0x18, KEY_KPPLUS }, /* fine tune + , not on Y040052 */ { 0x19, KEY_KPMINUS }, /* fine tune - , not on Y040052 */ { 0x2a, KEY_MEDIA }, /* PIP (Picture in picture */ { 0x21, KEY_DOT }, { 0x13, KEY_ENTER }, { 0x11, KEY_LAST }, /* Recall (last channel */ { 0x22, KEY_PREVIOUS }, { 0x23, KEY_PLAYPAUSE }, { 0x24, KEY_NEXT }, { 0x25, KEY_TIME }, /* Time Shifting */ { 0x26, KEY_STOP }, { 0x27, KEY_RECORD }, { 0x28, KEY_SAVE }, /* Screenshot */ { 0x2f, KEY_MENU }, { 0x30, KEY_CANCEL }, { 0x31, KEY_CHANNEL }, /* Channel Surf */ { 0x32, KEY_SUBTITLE }, { 0x33, KEY_LANGUAGE }, { 0x34, KEY_REWIND }, { 0x35, KEY_FASTFORWARD }, { 0x36, KEY_TV }, { 0x37, KEY_RADIO }, /* FM */ { 0x38, KEY_DVD }, { 0x1a, KEY_MODE}, 
/* change to MCE mode on Y04G0051 */ { 0x3e, KEY_F21 }, /* MCE +VOL, on Y04G0033 */ { 0x3a, KEY_F22 }, /* MCE -VOL, on Y04G0033 */ { 0x3b, KEY_F23 }, /* MCE +CH, on Y04G0033 */ { 0x3f, KEY_F24 } /* MCE -CH, on Y04G0033 */ }; static struct rc_keymap winfast_map = { .map = { .scan = winfast, .size = ARRAY_SIZE(winfast), .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */ .name = RC_MAP_WINFAST, } }; static int __init init_rc_map_winfast(void) { return ir_register_map(&winfast_map); } static void __exit exit_rc_map_winfast(void) { ir_unregister_map(&winfast_map); } module_init(init_rc_map_winfast) module_exit(exit_rc_map_winfast) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
hroark13/Warp_Kernel-Jellybean
drivers/media/IR/keymaps/rc-ati-tv-wonder-hd-600.c
953
1861
/* ati-tv-wonder-hd-600.h - Keytable for ati_tv_wonder_hd_600 Remote Controller * * keymap imported from ir-keymaps.c * * Copyright (c) 2010 by Mauro Carvalho Chehab <mchehab@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <media/rc-map.h> /* ATI TV Wonder HD 600 USB Devin Heitmueller <devin.heitmueller@gmail.com> */ static struct ir_scancode ati_tv_wonder_hd_600[] = { { 0x00, KEY_RECORD}, /* Row 1 */ { 0x01, KEY_PLAYPAUSE}, { 0x02, KEY_STOP}, { 0x03, KEY_POWER}, { 0x04, KEY_PREVIOUS}, /* Row 2 */ { 0x05, KEY_REWIND}, { 0x06, KEY_FORWARD}, { 0x07, KEY_NEXT}, { 0x08, KEY_EPG}, /* Row 3 */ { 0x09, KEY_HOME}, { 0x0a, KEY_MENU}, { 0x0b, KEY_CHANNELUP}, { 0x0c, KEY_BACK}, /* Row 4 */ { 0x0d, KEY_UP}, { 0x0e, KEY_INFO}, { 0x0f, KEY_CHANNELDOWN}, { 0x10, KEY_LEFT}, /* Row 5 */ { 0x11, KEY_SELECT}, { 0x12, KEY_RIGHT}, { 0x13, KEY_VOLUMEUP}, { 0x14, KEY_LAST}, /* Row 6 */ { 0x15, KEY_DOWN}, { 0x16, KEY_MUTE}, { 0x17, KEY_VOLUMEDOWN}, }; static struct rc_keymap ati_tv_wonder_hd_600_map = { .map = { .scan = ati_tv_wonder_hd_600, .size = ARRAY_SIZE(ati_tv_wonder_hd_600), .ir_type = IR_TYPE_UNKNOWN, /* Legacy IR type */ .name = RC_MAP_ATI_TV_WONDER_HD_600, } }; static int __init init_rc_map_ati_tv_wonder_hd_600(void) { return ir_register_map(&ati_tv_wonder_hd_600_map); } static void __exit exit_rc_map_ati_tv_wonder_hd_600(void) { ir_unregister_map(&ati_tv_wonder_hd_600_map); } module_init(init_rc_map_ati_tv_wonder_hd_600) module_exit(exit_rc_map_ati_tv_wonder_hd_600) MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mauro Carvalho Chehab <mchehab@redhat.com>");
gpl-2.0
Hacker432-Y550/android_kernel_huawei_msm8916
drivers/media/usb/cx231xx/cx231xx-avcore.c
1977
92053
/* cx231xx_avcore.c - driver for Conexant Cx23100/101/102 USB video capture devices Copyright (C) 2008 <srinivasa.deevi at conexant dot com> This program contains the specific code to control the avdecoder chip and other related usb control functions for cx231xx based chipset. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/list.h> #include <linux/module.h> #include <linux/kernel.h> #include <linux/bitmap.h> #include <linux/usb.h> #include <linux/i2c.h> #include <linux/mm.h> #include <linux/mutex.h> #include <media/tuner.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/v4l2-chip-ident.h> #include "cx231xx.h" #include "cx231xx-dif.h" #define TUNER_MODE_FM_RADIO 0 /****************************************************************************** -: BLOCK ARRANGEMENT :- I2S block ----------------------| [I2S audio] | | Analog Front End --> Direct IF -|-> Cx25840 --> Audio [video & audio] | [Audio] | |-> Cx25840 --> Video [Video] *******************************************************************************/ /****************************************************************************** * VERVE REGISTER * * * ******************************************************************************/ static int verve_write_byte(struct cx231xx *dev, u8 saddr, u8 data) { return cx231xx_write_i2c_data(dev, 
VERVE_I2C_ADDRESS, saddr, 1, data, 1); } static int verve_read_byte(struct cx231xx *dev, u8 saddr, u8 *data) { int status; u32 temp = 0; status = cx231xx_read_i2c_data(dev, VERVE_I2C_ADDRESS, saddr, 1, &temp, 1); *data = (u8) temp; return status; } void initGPIO(struct cx231xx *dev) { u32 _gpio_direction = 0; u32 value = 0; u8 val = 0; _gpio_direction = _gpio_direction & 0xFC0003FF; _gpio_direction = _gpio_direction | 0x03FDFC00; cx231xx_send_gpio_cmd(dev, _gpio_direction, (u8 *)&value, 4, 0, 0); verve_read_byte(dev, 0x07, &val); cx231xx_info(" verve_read_byte address0x07=0x%x\n", val); verve_write_byte(dev, 0x07, 0xF4); verve_read_byte(dev, 0x07, &val); cx231xx_info(" verve_read_byte address0x07=0x%x\n", val); cx231xx_capture_start(dev, 1, Vbi); cx231xx_mode_register(dev, EP_MODE_SET, 0x0500FE00); cx231xx_mode_register(dev, GBULK_BIT_EN, 0xFFFDFFFF); } void uninitGPIO(struct cx231xx *dev) { u8 value[4] = { 0, 0, 0, 0 }; cx231xx_capture_start(dev, 0, Vbi); verve_write_byte(dev, 0x07, 0x14); cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, 0x68, value, 4); } /****************************************************************************** * A F E - B L O C K C O N T R O L functions * * [ANALOG FRONT END] * ******************************************************************************/ static int afe_write_byte(struct cx231xx *dev, u16 saddr, u8 data) { return cx231xx_write_i2c_data(dev, AFE_DEVICE_ADDRESS, saddr, 2, data, 1); } static int afe_read_byte(struct cx231xx *dev, u16 saddr, u8 *data) { int status; u32 temp = 0; status = cx231xx_read_i2c_data(dev, AFE_DEVICE_ADDRESS, saddr, 2, &temp, 1); *data = (u8) temp; return status; } int cx231xx_afe_init_super_block(struct cx231xx *dev, u32 ref_count) { int status = 0; u8 temp = 0; u8 afe_power_status = 0; int i = 0; /* super block initialize */ temp = (u8) (ref_count & 0xff); status = afe_write_byte(dev, SUP_BLK_TUNE2, temp); if (status < 0) return status; status = afe_read_byte(dev, SUP_BLK_TUNE2, &afe_power_status); if 
(status < 0) return status; temp = (u8) ((ref_count & 0x300) >> 8); temp |= 0x40; status = afe_write_byte(dev, SUP_BLK_TUNE1, temp); if (status < 0) return status; status = afe_write_byte(dev, SUP_BLK_PLL2, 0x0f); if (status < 0) return status; /* enable pll */ while (afe_power_status != 0x18) { status = afe_write_byte(dev, SUP_BLK_PWRDN, 0x18); if (status < 0) { cx231xx_info( ": Init Super Block failed in send cmd\n"); break; } status = afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status); afe_power_status &= 0xff; if (status < 0) { cx231xx_info( ": Init Super Block failed in receive cmd\n"); break; } i++; if (i == 10) { cx231xx_info( ": Init Super Block force break in loop !!!!\n"); status = -1; break; } } if (status < 0) return status; /* start tuning filter */ status = afe_write_byte(dev, SUP_BLK_TUNE3, 0x40); if (status < 0) return status; msleep(5); /* exit tuning */ status = afe_write_byte(dev, SUP_BLK_TUNE3, 0x00); return status; } int cx231xx_afe_init_channels(struct cx231xx *dev) { int status = 0; /* power up all 3 channels, clear pd_buffer */ status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH1, 0x00); status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, 0x00); status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, 0x00); /* Enable quantizer calibration */ status = afe_write_byte(dev, ADC_COM_QUANT, 0x02); /* channel initialize, force modulator (fb) reset */ status = afe_write_byte(dev, ADC_FB_FRCRST_CH1, 0x17); status = afe_write_byte(dev, ADC_FB_FRCRST_CH2, 0x17); status = afe_write_byte(dev, ADC_FB_FRCRST_CH3, 0x17); /* start quantilizer calibration */ status = afe_write_byte(dev, ADC_CAL_ATEST_CH1, 0x10); status = afe_write_byte(dev, ADC_CAL_ATEST_CH2, 0x10); status = afe_write_byte(dev, ADC_CAL_ATEST_CH3, 0x10); msleep(5); /* exit modulator (fb) reset */ status = afe_write_byte(dev, ADC_FB_FRCRST_CH1, 0x07); status = afe_write_byte(dev, ADC_FB_FRCRST_CH2, 0x07); status = afe_write_byte(dev, ADC_FB_FRCRST_CH3, 0x07); /* enable the pre_clamp in each channel for 
single-ended input */ status = afe_write_byte(dev, ADC_NTF_PRECLMP_EN_CH1, 0xf0); status = afe_write_byte(dev, ADC_NTF_PRECLMP_EN_CH2, 0xf0); status = afe_write_byte(dev, ADC_NTF_PRECLMP_EN_CH3, 0xf0); /* use diode instead of resistor, so set term_en to 0, res_en to 0 */ status = cx231xx_reg_mask_write(dev, AFE_DEVICE_ADDRESS, 8, ADC_QGAIN_RES_TRM_CH1, 3, 7, 0x00); status = cx231xx_reg_mask_write(dev, AFE_DEVICE_ADDRESS, 8, ADC_QGAIN_RES_TRM_CH2, 3, 7, 0x00); status = cx231xx_reg_mask_write(dev, AFE_DEVICE_ADDRESS, 8, ADC_QGAIN_RES_TRM_CH3, 3, 7, 0x00); /* dynamic element matching off */ status = afe_write_byte(dev, ADC_DCSERVO_DEM_CH1, 0x03); status = afe_write_byte(dev, ADC_DCSERVO_DEM_CH2, 0x03); status = afe_write_byte(dev, ADC_DCSERVO_DEM_CH3, 0x03); return status; } int cx231xx_afe_setup_AFE_for_baseband(struct cx231xx *dev) { u8 c_value = 0; int status = 0; status = afe_read_byte(dev, ADC_PWRDN_CLAMP_CH2, &c_value); c_value &= (~(0x50)); status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, c_value); return status; } /* The Analog Front End in Cx231xx has 3 channels. These channels are used to share between different inputs like tuner, s-video and composite inputs. 
channel 1 ----- pin 1 to pin4(in reg is 1-4) channel 2 ----- pin 5 to pin8(in reg is 5-8) channel 3 ----- pin 9 to pin 12(in reg is 9-11) */ int cx231xx_afe_set_input_mux(struct cx231xx *dev, u32 input_mux) { u8 ch1_setting = (u8) input_mux; u8 ch2_setting = (u8) (input_mux >> 8); u8 ch3_setting = (u8) (input_mux >> 16); int status = 0; u8 value = 0; if (ch1_setting != 0) { status = afe_read_byte(dev, ADC_INPUT_CH1, &value); value &= ~INPUT_SEL_MASK; value |= (ch1_setting - 1) << 4; value &= 0xff; status = afe_write_byte(dev, ADC_INPUT_CH1, value); } if (ch2_setting != 0) { status = afe_read_byte(dev, ADC_INPUT_CH2, &value); value &= ~INPUT_SEL_MASK; value |= (ch2_setting - 1) << 4; value &= 0xff; status = afe_write_byte(dev, ADC_INPUT_CH2, value); } /* For ch3_setting, the value to put in the register is 7 less than the input number */ if (ch3_setting != 0) { status = afe_read_byte(dev, ADC_INPUT_CH3, &value); value &= ~INPUT_SEL_MASK; value |= (ch3_setting - 1) << 4; value &= 0xff; status = afe_write_byte(dev, ADC_INPUT_CH3, value); } return status; } int cx231xx_afe_set_mode(struct cx231xx *dev, enum AFE_MODE mode) { int status = 0; /* * FIXME: We need to implement the AFE code for LOW IF and for HI IF. * Currently, only baseband works. 
*/ switch (mode) { case AFE_MODE_LOW_IF: cx231xx_Setup_AFE_for_LowIF(dev); break; case AFE_MODE_BASEBAND: status = cx231xx_afe_setup_AFE_for_baseband(dev); break; case AFE_MODE_EU_HI_IF: /* SetupAFEforEuHiIF(); */ break; case AFE_MODE_US_HI_IF: /* SetupAFEforUsHiIF(); */ break; case AFE_MODE_JAPAN_HI_IF: /* SetupAFEforJapanHiIF(); */ break; } if ((mode != dev->afe_mode) && (dev->video_input == CX231XX_VMUX_TELEVISION)) status = cx231xx_afe_adjust_ref_count(dev, CX231XX_VMUX_TELEVISION); dev->afe_mode = mode; return status; } int cx231xx_afe_update_power_control(struct cx231xx *dev, enum AV_MODE avmode) { u8 afe_power_status = 0; int status = 0; switch (dev->model) { case CX231XX_BOARD_CNXT_CARRAERA: case CX231XX_BOARD_CNXT_RDE_250: case CX231XX_BOARD_CNXT_SHELBY: case CX231XX_BOARD_CNXT_RDU_250: case CX231XX_BOARD_CNXT_RDE_253S: case CX231XX_BOARD_CNXT_RDU_253S: case CX231XX_BOARD_CNXT_VIDEO_GRABBER: case CX231XX_BOARD_HAUPPAUGE_EXETER: case CX231XX_BOARD_HAUPPAUGE_USBLIVE2: case CX231XX_BOARD_PV_PLAYTV_USB_HYBRID: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC: case CX231XX_BOARD_OTG102: if (avmode == POLARIS_AVMODE_ANALOGT_TV) { while (afe_power_status != (FLD_PWRDN_TUNING_BIAS | FLD_PWRDN_ENABLE_PLL)) { status = afe_write_byte(dev, SUP_BLK_PWRDN, FLD_PWRDN_TUNING_BIAS | FLD_PWRDN_ENABLE_PLL); status |= afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status); if (status < 0) break; } status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH1, 0x00); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, 0x00); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, 0x00); } else if (avmode == POLARIS_AVMODE_DIGITAL) { status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH1, 0x70); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, 0x70); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, 0x70); status |= afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status); afe_power_status |= FLD_PWRDN_PD_BANDGAP | FLD_PWRDN_PD_BIAS | FLD_PWRDN_PD_TUNECK; status |= 
afe_write_byte(dev, SUP_BLK_PWRDN, afe_power_status); } else if (avmode == POLARIS_AVMODE_ENXTERNAL_AV) { while (afe_power_status != (FLD_PWRDN_TUNING_BIAS | FLD_PWRDN_ENABLE_PLL)) { status = afe_write_byte(dev, SUP_BLK_PWRDN, FLD_PWRDN_TUNING_BIAS | FLD_PWRDN_ENABLE_PLL); status |= afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status); if (status < 0) break; } status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH1, 0x00); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, 0x00); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, 0x00); } else { cx231xx_info("Invalid AV mode input\n"); status = -1; } break; default: if (avmode == POLARIS_AVMODE_ANALOGT_TV) { while (afe_power_status != (FLD_PWRDN_TUNING_BIAS | FLD_PWRDN_ENABLE_PLL)) { status = afe_write_byte(dev, SUP_BLK_PWRDN, FLD_PWRDN_TUNING_BIAS | FLD_PWRDN_ENABLE_PLL); status |= afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status); if (status < 0) break; } status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH1, 0x40); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, 0x40); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, 0x00); } else if (avmode == POLARIS_AVMODE_DIGITAL) { status = afe_write_byte(dev, ADC_PWRDN_CLAMP_CH1, 0x70); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, 0x70); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, 0x70); status |= afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status); afe_power_status |= FLD_PWRDN_PD_BANDGAP | FLD_PWRDN_PD_BIAS | FLD_PWRDN_PD_TUNECK; status |= afe_write_byte(dev, SUP_BLK_PWRDN, afe_power_status); } else if (avmode == POLARIS_AVMODE_ENXTERNAL_AV) { while (afe_power_status != (FLD_PWRDN_TUNING_BIAS | FLD_PWRDN_ENABLE_PLL)) { status = afe_write_byte(dev, SUP_BLK_PWRDN, FLD_PWRDN_TUNING_BIAS | FLD_PWRDN_ENABLE_PLL); status |= afe_read_byte(dev, SUP_BLK_PWRDN, &afe_power_status); if (status < 0) break; } status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH1, 0x00); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH2, 0x00); status |= afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, 
0x40); } else { cx231xx_info("Invalid AV mode input\n"); status = -1; } } /* switch */ return status; } int cx231xx_afe_adjust_ref_count(struct cx231xx *dev, u32 video_input) { u8 input_mode = 0; u8 ntf_mode = 0; int status = 0; dev->video_input = video_input; if (video_input == CX231XX_VMUX_TELEVISION) { status = afe_read_byte(dev, ADC_INPUT_CH3, &input_mode); status = afe_read_byte(dev, ADC_NTF_PRECLMP_EN_CH3, &ntf_mode); } else { status = afe_read_byte(dev, ADC_INPUT_CH1, &input_mode); status = afe_read_byte(dev, ADC_NTF_PRECLMP_EN_CH1, &ntf_mode); } input_mode = (ntf_mode & 0x3) | ((input_mode & 0x6) << 1); switch (input_mode) { case SINGLE_ENDED: dev->afe_ref_count = 0x23C; break; case LOW_IF: dev->afe_ref_count = 0x24C; break; case EU_IF: dev->afe_ref_count = 0x258; break; case US_IF: dev->afe_ref_count = 0x260; break; default: break; } status = cx231xx_afe_init_super_block(dev, dev->afe_ref_count); return status; } /****************************************************************************** * V I D E O / A U D I O D E C O D E R C O N T R O L functions * ******************************************************************************/ static int vid_blk_write_byte(struct cx231xx *dev, u16 saddr, u8 data) { return cx231xx_write_i2c_data(dev, VID_BLK_I2C_ADDRESS, saddr, 2, data, 1); } static int vid_blk_read_byte(struct cx231xx *dev, u16 saddr, u8 *data) { int status; u32 temp = 0; status = cx231xx_read_i2c_data(dev, VID_BLK_I2C_ADDRESS, saddr, 2, &temp, 1); *data = (u8) temp; return status; } static int vid_blk_write_word(struct cx231xx *dev, u16 saddr, u32 data) { return cx231xx_write_i2c_data(dev, VID_BLK_I2C_ADDRESS, saddr, 2, data, 4); } static int vid_blk_read_word(struct cx231xx *dev, u16 saddr, u32 *data) { return cx231xx_read_i2c_data(dev, VID_BLK_I2C_ADDRESS, saddr, 2, data, 4); } int cx231xx_check_fw(struct cx231xx *dev) { u8 temp = 0; int status = 0; status = vid_blk_read_byte(dev, DL_CTL_ADDRESS_LOW, &temp); if (status < 0) return status; else 
return temp; } int cx231xx_set_video_input_mux(struct cx231xx *dev, u8 input) { int status = 0; switch (INPUT(input)->type) { case CX231XX_VMUX_COMPOSITE1: case CX231XX_VMUX_SVIDEO: if ((dev->current_pcb_config.type == USB_BUS_POWER) && (dev->power_mode != POLARIS_AVMODE_ENXTERNAL_AV)) { /* External AV */ status = cx231xx_set_power_mode(dev, POLARIS_AVMODE_ENXTERNAL_AV); if (status < 0) { cx231xx_errdev("%s: set_power_mode : Failed to" " set Power - errCode [%d]!\n", __func__, status); return status; } } status = cx231xx_set_decoder_video_input(dev, INPUT(input)->type, INPUT(input)->vmux); break; case CX231XX_VMUX_TELEVISION: case CX231XX_VMUX_CABLE: if ((dev->current_pcb_config.type == USB_BUS_POWER) && (dev->power_mode != POLARIS_AVMODE_ANALOGT_TV)) { /* Tuner */ status = cx231xx_set_power_mode(dev, POLARIS_AVMODE_ANALOGT_TV); if (status < 0) { cx231xx_errdev("%s: set_power_mode:Failed" " to set Power - errCode [%d]!\n", __func__, status); return status; } } if (dev->tuner_type == TUNER_NXP_TDA18271) status = cx231xx_set_decoder_video_input(dev, CX231XX_VMUX_TELEVISION, INPUT(input)->vmux); else status = cx231xx_set_decoder_video_input(dev, CX231XX_VMUX_COMPOSITE1, INPUT(input)->vmux); break; default: cx231xx_errdev("%s: set_power_mode : Unknown Input %d !\n", __func__, INPUT(input)->type); break; } /* save the selection */ dev->video_input = input; return status; } int cx231xx_set_decoder_video_input(struct cx231xx *dev, u8 pin_type, u8 input) { int status = 0; u32 value = 0; if (pin_type != dev->video_input) { status = cx231xx_afe_adjust_ref_count(dev, pin_type); if (status < 0) { cx231xx_errdev("%s: adjust_ref_count :Failed to set" "AFE input mux - errCode [%d]!\n", __func__, status); return status; } } /* call afe block to set video inputs */ status = cx231xx_afe_set_input_mux(dev, input); if (status < 0) { cx231xx_errdev("%s: set_input_mux :Failed to set" " AFE input mux - errCode [%d]!\n", __func__, status); return status; } switch (pin_type) { case 
CX231XX_VMUX_COMPOSITE1: status = vid_blk_read_word(dev, AFE_CTRL, &value); value |= (0 << 13) | (1 << 4); value &= ~(1 << 5); /* set [24:23] [22:15] to 0 */ value &= (~(0x1ff8000)); /* set FUNC_MODE[24:23] = 2 IF_MOD[22:15] = 0 */ value |= 0x1000000; status = vid_blk_write_word(dev, AFE_CTRL, value); status = vid_blk_read_word(dev, OUT_CTRL1, &value); value |= (1 << 7); status = vid_blk_write_word(dev, OUT_CTRL1, value); /* Set output mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, OUT_CTRL1, FLD_OUT_MODE, dev->board.output_mode); /* Tell DIF object to go to baseband mode */ status = cx231xx_dif_set_standard(dev, DIF_USE_BASEBAND); if (status < 0) { cx231xx_errdev("%s: cx231xx_dif set to By pass" " mode- errCode [%d]!\n", __func__, status); return status; } /* Read the DFE_CTRL1 register */ status = vid_blk_read_word(dev, DFE_CTRL1, &value); /* enable the VBI_GATE_EN */ value |= FLD_VBI_GATE_EN; /* Enable the auto-VGA enable */ value |= FLD_VGA_AUTO_EN; /* Write it back */ status = vid_blk_write_word(dev, DFE_CTRL1, value); /* Disable auto config of registers */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_ACFG_DIS, cx231xx_set_field(FLD_ACFG_DIS, 1)); /* Set CVBS input mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_INPUT_MODE, cx231xx_set_field(FLD_INPUT_MODE, INPUT_MODE_CVBS_0)); break; case CX231XX_VMUX_SVIDEO: /* Disable the use of DIF */ status = vid_blk_read_word(dev, AFE_CTRL, &value); /* set [24:23] [22:15] to 0 */ value &= (~(0x1ff8000)); /* set FUNC_MODE[24:23] = 2 IF_MOD[22:15] = 0 DCR_BYP_CH2[4:4] = 1; */ value |= 0x1000010; status = vid_blk_write_word(dev, AFE_CTRL, value); /* Tell DIF object to go to baseband mode */ status = cx231xx_dif_set_standard(dev, DIF_USE_BASEBAND); if (status < 0) { cx231xx_errdev("%s: cx231xx_dif set to By pass" " mode- errCode [%d]!\n", __func__, status); return status; } /* Read the DFE_CTRL1 register */ 
status = vid_blk_read_word(dev, DFE_CTRL1, &value); /* enable the VBI_GATE_EN */ value |= FLD_VBI_GATE_EN; /* Enable the auto-VGA enable */ value |= FLD_VGA_AUTO_EN; /* Write it back */ status = vid_blk_write_word(dev, DFE_CTRL1, value); /* Disable auto config of registers */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_ACFG_DIS, cx231xx_set_field(FLD_ACFG_DIS, 1)); /* Set YC input mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_INPUT_MODE, cx231xx_set_field(FLD_INPUT_MODE, INPUT_MODE_YC_1)); /* Chroma to ADC2 */ status = vid_blk_read_word(dev, AFE_CTRL, &value); value |= FLD_CHROMA_IN_SEL; /* set the chroma in select */ /* Clear VGA_SEL_CH2 and VGA_SEL_CH3 (bits 7 and 8) This sets them to use video rather than audio. Only one of the two will be in use. */ value &= ~(FLD_VGA_SEL_CH2 | FLD_VGA_SEL_CH3); status = vid_blk_write_word(dev, AFE_CTRL, value); status = cx231xx_afe_set_mode(dev, AFE_MODE_BASEBAND); break; case CX231XX_VMUX_TELEVISION: case CX231XX_VMUX_CABLE: default: /* TODO: Test if this is also needed for xc2028/xc3028 */ if (dev->board.tuner_type == TUNER_XC5000) { /* Disable the use of DIF */ status = vid_blk_read_word(dev, AFE_CTRL, &value); value |= (0 << 13) | (1 << 4); value &= ~(1 << 5); /* set [24:23] [22:15] to 0 */ value &= (~(0x1FF8000)); /* set FUNC_MODE[24:23] = 2 IF_MOD[22:15] = 0 */ value |= 0x1000000; status = vid_blk_write_word(dev, AFE_CTRL, value); status = vid_blk_read_word(dev, OUT_CTRL1, &value); value |= (1 << 7); status = vid_blk_write_word(dev, OUT_CTRL1, value); /* Set output mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, OUT_CTRL1, FLD_OUT_MODE, dev->board.output_mode); /* Tell DIF object to go to baseband mode */ status = cx231xx_dif_set_standard(dev, DIF_USE_BASEBAND); if (status < 0) { cx231xx_errdev("%s: cx231xx_dif set to By pass" " mode- errCode [%d]!\n", __func__, status); return status; } /* Read the 
DFE_CTRL1 register */ status = vid_blk_read_word(dev, DFE_CTRL1, &value); /* enable the VBI_GATE_EN */ value |= FLD_VBI_GATE_EN; /* Enable the auto-VGA enable */ value |= FLD_VGA_AUTO_EN; /* Write it back */ status = vid_blk_write_word(dev, DFE_CTRL1, value); /* Disable auto config of registers */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_ACFG_DIS, cx231xx_set_field(FLD_ACFG_DIS, 1)); /* Set CVBS input mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_INPUT_MODE, cx231xx_set_field(FLD_INPUT_MODE, INPUT_MODE_CVBS_0)); } else { /* Enable the DIF for the tuner */ /* Reinitialize the DIF */ status = cx231xx_dif_set_standard(dev, dev->norm); if (status < 0) { cx231xx_errdev("%s: cx231xx_dif set to By pass" " mode- errCode [%d]!\n", __func__, status); return status; } /* Make sure bypass is cleared */ status = vid_blk_read_word(dev, DIF_MISC_CTRL, &value); /* Clear the bypass bit */ value &= ~FLD_DIF_DIF_BYPASS; /* Enable the use of the DIF block */ status = vid_blk_write_word(dev, DIF_MISC_CTRL, value); /* Read the DFE_CTRL1 register */ status = vid_blk_read_word(dev, DFE_CTRL1, &value); /* Disable the VBI_GATE_EN */ value &= ~FLD_VBI_GATE_EN; /* Enable the auto-VGA enable, AGC, and set the skip count to 2 */ value |= FLD_VGA_AUTO_EN | FLD_AGC_AUTO_EN | 0x00200000; /* Write it back */ status = vid_blk_write_word(dev, DFE_CTRL1, value); /* Wait until AGC locks up */ msleep(1); /* Disable the auto-VGA enable AGC */ value &= ~(FLD_VGA_AUTO_EN); /* Write it back */ status = vid_blk_write_word(dev, DFE_CTRL1, value); /* Enable Polaris B0 AGC output */ status = vid_blk_read_word(dev, PIN_CTRL, &value); value |= (FLD_OEF_AGC_RF) | (FLD_OEF_AGC_IFVGA) | (FLD_OEF_AGC_IF); status = vid_blk_write_word(dev, PIN_CTRL, value); /* Set output mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, OUT_CTRL1, FLD_OUT_MODE, dev->board.output_mode); /* Disable auto config of 
registers */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_ACFG_DIS, cx231xx_set_field(FLD_ACFG_DIS, 1)); /* Set CVBS input mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, MODE_CTRL, FLD_INPUT_MODE, cx231xx_set_field(FLD_INPUT_MODE, INPUT_MODE_CVBS_0)); /* Set some bits in AFE_CTRL so that channel 2 or 3 * is ready to receive audio */ /* Clear clamp for channels 2 and 3 (bit 16-17) */ /* Clear droop comp (bit 19-20) */ /* Set VGA_SEL (for audio control) (bit 7-8) */ status = vid_blk_read_word(dev, AFE_CTRL, &value); /*Set Func mode:01-DIF 10-baseband 11-YUV*/ value &= (~(FLD_FUNC_MODE)); value |= 0x800000; value |= FLD_VGA_SEL_CH3 | FLD_VGA_SEL_CH2; status = vid_blk_write_word(dev, AFE_CTRL, value); if (dev->tuner_type == TUNER_NXP_TDA18271) { status = vid_blk_read_word(dev, PIN_CTRL, &value); status = vid_blk_write_word(dev, PIN_CTRL, (value & 0xFFFFFFEF)); } break; } break; } /* Set raw VBI mode */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, OUT_CTRL1, FLD_VBIHACTRAW_EN, cx231xx_set_field(FLD_VBIHACTRAW_EN, 1)); status = vid_blk_read_word(dev, OUT_CTRL1, &value); if (value & 0x02) { value |= (1 << 19); status = vid_blk_write_word(dev, OUT_CTRL1, value); } return status; } void cx231xx_enable656(struct cx231xx *dev) { u8 temp = 0; /*enable TS1 data[0:7] as output to export 656*/ vid_blk_write_byte(dev, TS1_PIN_CTL0, 0xFF); /*enable TS1 clock as output to export 656*/ vid_blk_read_byte(dev, TS1_PIN_CTL1, &temp); temp = temp|0x04; vid_blk_write_byte(dev, TS1_PIN_CTL1, temp); } EXPORT_SYMBOL_GPL(cx231xx_enable656); void cx231xx_disable656(struct cx231xx *dev) { u8 temp = 0; vid_blk_write_byte(dev, TS1_PIN_CTL0, 0x00); vid_blk_read_byte(dev, TS1_PIN_CTL1, &temp); temp = temp&0xFB; vid_blk_write_byte(dev, TS1_PIN_CTL1, temp); } EXPORT_SYMBOL_GPL(cx231xx_disable656); /* * Handle any video-mode specific overrides that are different * on a per video standards basis after 
touching the MODE_CTRL * register which resets many values for autodetect */ int cx231xx_do_mode_ctrl_overrides(struct cx231xx *dev) { int status = 0; cx231xx_info("do_mode_ctrl_overrides : 0x%x\n", (unsigned int)dev->norm); /* Change the DFE_CTRL3 bp_percent to fix flagging */ status = vid_blk_write_word(dev, DFE_CTRL3, 0xCD3F0280); if (dev->norm & (V4L2_STD_NTSC | V4L2_STD_PAL_M)) { cx231xx_info("do_mode_ctrl_overrides NTSC\n"); /* Move the close caption lines out of active video, adjust the active video start point */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_VBLANK_CNT, 0x18); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_VACTIVE_CNT, 0x1E7000); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_V656BLANK_CNT, 0x1C000000); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, HORIZ_TIM_CTRL, FLD_HBLANK_CNT, cx231xx_set_field (FLD_HBLANK_CNT, 0x79)); } else if (dev->norm & V4L2_STD_SECAM) { cx231xx_info("do_mode_ctrl_overrides SECAM\n"); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_VBLANK_CNT, 0x20); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_VACTIVE_CNT, cx231xx_set_field (FLD_VACTIVE_CNT, 0x244)); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_V656BLANK_CNT, cx231xx_set_field (FLD_V656BLANK_CNT, 0x24)); /* Adjust the active video horizontal start point */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, HORIZ_TIM_CTRL, FLD_HBLANK_CNT, cx231xx_set_field (FLD_HBLANK_CNT, 0x85)); } else { cx231xx_info("do_mode_ctrl_overrides PAL\n"); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_VBLANK_CNT, 0x20); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_VACTIVE_CNT, cx231xx_set_field 
(FLD_VACTIVE_CNT, 0x244)); status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, VERT_TIM_CTRL, FLD_V656BLANK_CNT, cx231xx_set_field (FLD_V656BLANK_CNT, 0x24)); /* Adjust the active video horizontal start point */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, HORIZ_TIM_CTRL, FLD_HBLANK_CNT, cx231xx_set_field (FLD_HBLANK_CNT, 0x85)); } return status; } int cx231xx_unmute_audio(struct cx231xx *dev) { return vid_blk_write_byte(dev, PATH1_VOL_CTL, 0x24); } EXPORT_SYMBOL_GPL(cx231xx_unmute_audio); static int stopAudioFirmware(struct cx231xx *dev) { return vid_blk_write_byte(dev, DL_CTL_CONTROL, 0x03); } static int restartAudioFirmware(struct cx231xx *dev) { return vid_blk_write_byte(dev, DL_CTL_CONTROL, 0x13); } int cx231xx_set_audio_input(struct cx231xx *dev, u8 input) { int status = 0; enum AUDIO_INPUT ainput = AUDIO_INPUT_LINE; switch (INPUT(input)->amux) { case CX231XX_AMUX_VIDEO: ainput = AUDIO_INPUT_TUNER_TV; break; case CX231XX_AMUX_LINE_IN: status = cx231xx_i2s_blk_set_audio_input(dev, input); ainput = AUDIO_INPUT_LINE; break; default: break; } status = cx231xx_set_audio_decoder_input(dev, ainput); return status; } int cx231xx_set_audio_decoder_input(struct cx231xx *dev, enum AUDIO_INPUT audio_input) { u32 dwval; int status; u8 gen_ctrl; u32 value = 0; /* Put it in soft reset */ status = vid_blk_read_byte(dev, GENERAL_CTL, &gen_ctrl); gen_ctrl |= 1; status = vid_blk_write_byte(dev, GENERAL_CTL, gen_ctrl); switch (audio_input) { case AUDIO_INPUT_LINE: /* setup AUD_IO control from Merlin paralle output */ value = cx231xx_set_field(FLD_AUD_CHAN1_SRC, AUD_CHAN_SRC_PARALLEL); status = vid_blk_write_word(dev, AUD_IO_CTRL, value); /* setup input to Merlin, SRC2 connect to AC97 bypass upsample-by-2, slave mode, sony mode, left justify adr 091c, dat 01000000 */ status = vid_blk_read_word(dev, AC97_CTL, &dwval); status = vid_blk_write_word(dev, AC97_CTL, (dwval | FLD_AC97_UP2X_BYPASS)); /* select the parallel1 and SRC3 */ status = 
vid_blk_write_word(dev, BAND_OUT_SEL, cx231xx_set_field(FLD_SRC3_IN_SEL, 0x0) | cx231xx_set_field(FLD_SRC3_CLK_SEL, 0x0) | cx231xx_set_field(FLD_PARALLEL1_SRC_SEL, 0x0)); /* unmute all, AC97 in, independence mode adr 08d0, data 0x00063073 */ status = vid_blk_write_word(dev, DL_CTL, 0x3000001); status = vid_blk_write_word(dev, PATH1_CTL1, 0x00063073); /* set AVC maximum threshold, adr 08d4, dat ffff0024 */ status = vid_blk_read_word(dev, PATH1_VOL_CTL, &dwval); status = vid_blk_write_word(dev, PATH1_VOL_CTL, (dwval | FLD_PATH1_AVC_THRESHOLD)); /* set SC maximum threshold, adr 08ec, dat ffffb3a3 */ status = vid_blk_read_word(dev, PATH1_SC_CTL, &dwval); status = vid_blk_write_word(dev, PATH1_SC_CTL, (dwval | FLD_PATH1_SC_THRESHOLD)); break; case AUDIO_INPUT_TUNER_TV: default: status = stopAudioFirmware(dev); /* Setup SRC sources and clocks */ status = vid_blk_write_word(dev, BAND_OUT_SEL, cx231xx_set_field(FLD_SRC6_IN_SEL, 0x00) | cx231xx_set_field(FLD_SRC6_CLK_SEL, 0x01) | cx231xx_set_field(FLD_SRC5_IN_SEL, 0x00) | cx231xx_set_field(FLD_SRC5_CLK_SEL, 0x02) | cx231xx_set_field(FLD_SRC4_IN_SEL, 0x02) | cx231xx_set_field(FLD_SRC4_CLK_SEL, 0x03) | cx231xx_set_field(FLD_SRC3_IN_SEL, 0x00) | cx231xx_set_field(FLD_SRC3_CLK_SEL, 0x00) | cx231xx_set_field(FLD_BASEBAND_BYPASS_CTL, 0x00) | cx231xx_set_field(FLD_AC97_SRC_SEL, 0x03) | cx231xx_set_field(FLD_I2S_SRC_SEL, 0x00) | cx231xx_set_field(FLD_PARALLEL2_SRC_SEL, 0x02) | cx231xx_set_field(FLD_PARALLEL1_SRC_SEL, 0x01)); /* Setup the AUD_IO control */ status = vid_blk_write_word(dev, AUD_IO_CTRL, cx231xx_set_field(FLD_I2S_PORT_DIR, 0x00) | cx231xx_set_field(FLD_I2S_OUT_SRC, 0x00) | cx231xx_set_field(FLD_AUD_CHAN3_SRC, 0x00) | cx231xx_set_field(FLD_AUD_CHAN2_SRC, 0x00) | cx231xx_set_field(FLD_AUD_CHAN1_SRC, 0x03)); status = vid_blk_write_word(dev, PATH1_CTL1, 0x1F063870); /* setAudioStandard(_audio_standard); */ status = vid_blk_write_word(dev, PATH1_CTL1, 0x00063870); status = restartAudioFirmware(dev); switch 
(dev->board.tuner_type) { case TUNER_XC5000: /* SIF passthrough at 28.6363 MHz sample rate */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, CHIP_CTRL, FLD_SIF_EN, cx231xx_set_field(FLD_SIF_EN, 1)); break; case TUNER_NXP_TDA18271: /* Normal mode: SIF passthrough at 14.32 MHz */ status = cx231xx_read_modify_write_i2c_dword(dev, VID_BLK_I2C_ADDRESS, CHIP_CTRL, FLD_SIF_EN, cx231xx_set_field(FLD_SIF_EN, 0)); break; default: /* This is just a casual suggestion to people adding new boards in case they use a tuner type we don't currently know about */ printk(KERN_INFO "Unknown tuner type configuring SIF"); break; } break; case AUDIO_INPUT_TUNER_FM: /* use SIF for FM radio setupFM(); setAudioStandard(_audio_standard); */ break; case AUDIO_INPUT_MUTE: status = vid_blk_write_word(dev, PATH1_CTL1, 0x1F011012); break; } /* Take it out of soft reset */ status = vid_blk_read_byte(dev, GENERAL_CTL, &gen_ctrl); gen_ctrl &= ~1; status = vid_blk_write_byte(dev, GENERAL_CTL, gen_ctrl); return status; } /****************************************************************************** * C H I P Specific C O N T R O L functions * ******************************************************************************/ int cx231xx_init_ctrl_pin_status(struct cx231xx *dev) { u32 value; int status = 0; status = vid_blk_read_word(dev, PIN_CTRL, &value); value |= (~dev->board.ctl_pin_status_mask); status = vid_blk_write_word(dev, PIN_CTRL, value); return status; } int cx231xx_set_agc_analog_digital_mux_select(struct cx231xx *dev, u8 analog_or_digital) { int status = 0; /* first set the direction to output */ status = cx231xx_set_gpio_direction(dev, dev->board. 
agc_analog_digital_select_gpio, 1); /* 0 - demod ; 1 - Analog mode */ status = cx231xx_set_gpio_value(dev, dev->board.agc_analog_digital_select_gpio, analog_or_digital); return status; } int cx231xx_enable_i2c_port_3(struct cx231xx *dev, bool is_port_3) { u8 value[4] = { 0, 0, 0, 0 }; int status = 0; bool current_is_port_3; if (dev->board.dont_use_port_3) is_port_3 = false; status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, value, 4); if (status < 0) return status; current_is_port_3 = value[0] & I2C_DEMOD_EN ? true : false; /* Just return, if already using the right port */ if (current_is_port_3 == is_port_3) return 0; if (is_port_3) value[0] |= I2C_DEMOD_EN; else value[0] &= ~I2C_DEMOD_EN; cx231xx_info("Changing the i2c master port to %d\n", is_port_3 ? 3 : 1); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); return status; } EXPORT_SYMBOL_GPL(cx231xx_enable_i2c_port_3); void update_HH_register_after_set_DIF(struct cx231xx *dev) { /* u8 status = 0; u32 value = 0; vid_blk_write_word(dev, PIN_CTRL, 0xA0FFF82F); vid_blk_write_word(dev, DIF_MISC_CTRL, 0x0A203F11); vid_blk_write_word(dev, DIF_SRC_PHASE_INC, 0x1BEFBF06); status = vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value); vid_blk_write_word(dev, AFE_CTRL_C2HH_SRC_CTRL, 0x4485D390); status = vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value); */ } void cx231xx_dump_HH_reg(struct cx231xx *dev) { u32 value = 0; u16 i = 0; value = 0x45005390; vid_blk_write_word(dev, 0x104, value); for (i = 0x100; i < 0x140; i++) { vid_blk_read_word(dev, i, &value); cx231xx_info("reg0x%x=0x%x\n", i, value); i = i+3; } for (i = 0x300; i < 0x400; i++) { vid_blk_read_word(dev, i, &value); cx231xx_info("reg0x%x=0x%x\n", i, value); i = i+3; } for (i = 0x400; i < 0x440; i++) { vid_blk_read_word(dev, i, &value); cx231xx_info("reg0x%x=0x%x\n", i, value); i = i+3; } vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value); cx231xx_info("AFE_CTRL_C2HH_SRC_CTRL=0x%x\n", value); 
vid_blk_write_word(dev, AFE_CTRL_C2HH_SRC_CTRL, 0x4485D390); vid_blk_read_word(dev, AFE_CTRL_C2HH_SRC_CTRL, &value); cx231xx_info("AFE_CTRL_C2HH_SRC_CTRL=0x%x\n", value); } void cx231xx_dump_SC_reg(struct cx231xx *dev) { u8 value[4] = { 0, 0, 0, 0 }; cx231xx_info("cx231xx_dump_SC_reg!\n"); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, BOARD_CFG_STAT, value, 4); cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", BOARD_CFG_STAT, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS_MODE_REG, value, 4); cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS_MODE_REG, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_CFG_REG, value, 4); cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS1_CFG_REG, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS1_LENGTH_REG, value, 4); cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS1_LENGTH_REG, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_CFG_REG, value, 4); cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS2_CFG_REG, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, TS2_LENGTH_REG, value, 4); cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", TS2_LENGTH_REG, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET, value, 4); cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", EP_MODE_SET, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN1, value, 4); cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN1, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN2, value, 4); cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN2, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_PTN3, value, 4); cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_PTN3, value[0], value[1], value[2], 
value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK0, value, 4); cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK0, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK1, value, 4); cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK1, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_PWR_MASK2, value, 4); cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_PWR_MASK2, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_GAIN, value, 4); cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_GAIN, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_CAR_REG, value, 4); cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_CAR_REG, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG1, value, 4); cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_OT_CFG1, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, CIR_OT_CFG2, value, 4); cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", CIR_OT_CFG2, value[0], value[1], value[2], value[3]); cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, value, 4); cx231xx_info("reg0x%x=0x%x 0x%x 0x%x 0x%x\n", PWR_CTL_EN, value[0], value[1], value[2], value[3]); } void cx231xx_Setup_AFE_for_LowIF(struct cx231xx *dev) { u8 value = 0; afe_read_byte(dev, ADC_STATUS2_CH3, &value); value = (value & 0xFE)|0x01; afe_write_byte(dev, ADC_STATUS2_CH3, value); afe_read_byte(dev, ADC_STATUS2_CH3, &value); value = (value & 0xFE)|0x00; afe_write_byte(dev, ADC_STATUS2_CH3, value); /* config colibri to lo-if mode FIXME: ntf_mode = 2'b00 by default. 
But set 0x1 would reduce the diff IF input by half, for low-if agc defect */ afe_read_byte(dev, ADC_NTF_PRECLMP_EN_CH3, &value); value = (value & 0xFC)|0x00; afe_write_byte(dev, ADC_NTF_PRECLMP_EN_CH3, value); afe_read_byte(dev, ADC_INPUT_CH3, &value); value = (value & 0xF9)|0x02; afe_write_byte(dev, ADC_INPUT_CH3, value); afe_read_byte(dev, ADC_FB_FRCRST_CH3, &value); value = (value & 0xFB)|0x04; afe_write_byte(dev, ADC_FB_FRCRST_CH3, value); afe_read_byte(dev, ADC_DCSERVO_DEM_CH3, &value); value = (value & 0xFC)|0x03; afe_write_byte(dev, ADC_DCSERVO_DEM_CH3, value); afe_read_byte(dev, ADC_CTRL_DAC1_CH3, &value); value = (value & 0xFB)|0x04; afe_write_byte(dev, ADC_CTRL_DAC1_CH3, value); afe_read_byte(dev, ADC_CTRL_DAC23_CH3, &value); value = (value & 0xF8)|0x06; afe_write_byte(dev, ADC_CTRL_DAC23_CH3, value); afe_read_byte(dev, ADC_CTRL_DAC23_CH3, &value); value = (value & 0x8F)|0x40; afe_write_byte(dev, ADC_CTRL_DAC23_CH3, value); afe_read_byte(dev, ADC_PWRDN_CLAMP_CH3, &value); value = (value & 0xDF)|0x20; afe_write_byte(dev, ADC_PWRDN_CLAMP_CH3, value); } void cx231xx_set_Colibri_For_LowIF(struct cx231xx *dev, u32 if_freq, u8 spectral_invert, u32 mode) { u32 colibri_carrier_offset = 0; u32 func_mode = 0x01; /* Device has a DIF if this function is called */ u32 standard = 0; u8 value[4] = { 0, 0, 0, 0 }; cx231xx_info("Enter cx231xx_set_Colibri_For_LowIF()\n"); value[0] = (u8) 0x6F; value[1] = (u8) 0x6F; value[2] = (u8) 0x6F; value[3] = (u8) 0x6F; cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); /*Set colibri for low IF*/ cx231xx_afe_set_mode(dev, AFE_MODE_LOW_IF); /* Set C2HH for low IF operation.*/ standard = dev->norm; cx231xx_dif_configure_C2HH_for_low_IF(dev, dev->active_mode, func_mode, standard); /* Get colibri offsets.*/ colibri_carrier_offset = cx231xx_Get_Colibri_CarrierOffset(mode, standard); cx231xx_info("colibri_carrier_offset=%d, standard=0x%x\n", colibri_carrier_offset, standard); /* Set the band Pass filter for DIF*/ 
cx231xx_set_DIF_bandpass(dev, (if_freq+colibri_carrier_offset), spectral_invert, mode); } u32 cx231xx_Get_Colibri_CarrierOffset(u32 mode, u32 standerd) { u32 colibri_carrier_offset = 0; if (mode == TUNER_MODE_FM_RADIO) { colibri_carrier_offset = 1100000; } else if (standerd & (V4L2_STD_MN | V4L2_STD_NTSC_M_JP)) { colibri_carrier_offset = 4832000; /*4.83MHz */ } else if (standerd & (V4L2_STD_PAL_B | V4L2_STD_PAL_G)) { colibri_carrier_offset = 2700000; /*2.70MHz */ } else if (standerd & (V4L2_STD_PAL_D | V4L2_STD_PAL_I | V4L2_STD_SECAM)) { colibri_carrier_offset = 2100000; /*2.10MHz */ } return colibri_carrier_offset; } void cx231xx_set_DIF_bandpass(struct cx231xx *dev, u32 if_freq, u8 spectral_invert, u32 mode) { unsigned long pll_freq_word; u32 dif_misc_ctrl_value = 0; u64 pll_freq_u64 = 0; u32 i = 0; cx231xx_info("if_freq=%d;spectral_invert=0x%x;mode=0x%x\n", if_freq, spectral_invert, mode); if (mode == TUNER_MODE_FM_RADIO) { pll_freq_word = 0x905A1CAC; vid_blk_write_word(dev, DIF_PLL_FREQ_WORD, pll_freq_word); } else /*KSPROPERTY_TUNER_MODE_TV*/{ /* Calculate the PLL frequency word based on the adjusted if_freq*/ pll_freq_word = if_freq; pll_freq_u64 = (u64)pll_freq_word << 28L; do_div(pll_freq_u64, 50000000); pll_freq_word = (u32)pll_freq_u64; /*pll_freq_word = 0x3463497;*/ vid_blk_write_word(dev, DIF_PLL_FREQ_WORD, pll_freq_word); if (spectral_invert) { if_freq -= 400000; /* Enable Spectral Invert*/ vid_blk_read_word(dev, DIF_MISC_CTRL, &dif_misc_ctrl_value); dif_misc_ctrl_value = dif_misc_ctrl_value | 0x00200000; vid_blk_write_word(dev, DIF_MISC_CTRL, dif_misc_ctrl_value); } else { if_freq += 400000; /* Disable Spectral Invert*/ vid_blk_read_word(dev, DIF_MISC_CTRL, &dif_misc_ctrl_value); dif_misc_ctrl_value = dif_misc_ctrl_value & 0xFFDFFFFF; vid_blk_write_word(dev, DIF_MISC_CTRL, dif_misc_ctrl_value); } if_freq = (if_freq/100000)*100000; if (if_freq < 3000000) if_freq = 3000000; if (if_freq > 16000000) if_freq = 16000000; } cx231xx_info("Enter IF=%zd\n", 
ARRAY_SIZE(Dif_set_array)); for (i = 0; i < ARRAY_SIZE(Dif_set_array); i++) { if (Dif_set_array[i].if_freq == if_freq) { vid_blk_write_word(dev, Dif_set_array[i].register_address, Dif_set_array[i].value); } } } /****************************************************************************** * D I F - B L O C K C O N T R O L functions * ******************************************************************************/ int cx231xx_dif_configure_C2HH_for_low_IF(struct cx231xx *dev, u32 mode, u32 function_mode, u32 standard) { int status = 0; if (mode == V4L2_TUNER_RADIO) { /* C2HH */ /* lo if big signal */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 30, 31, 0x1); /* FUNC_MODE = DIF */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 23, 24, function_mode); /* IF_MODE */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 15, 22, 0xFF); /* no inv */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 9, 9, 0x1); } else if (standard != DIF_USE_BASEBAND) { if (standard & V4L2_STD_MN) { /* lo if big signal */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 30, 31, 0x1); /* FUNC_MODE = DIF */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 23, 24, function_mode); /* IF_MODE */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 15, 22, 0xb); /* no inv */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 9, 9, 0x1); /* 0x124, AUD_CHAN1_SRC = 0x3 */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AUD_IO_CTRL, 0, 31, 0x00000003); } else if ((standard == V4L2_STD_PAL_I) | (standard & V4L2_STD_PAL_D) | (standard & V4L2_STD_SECAM)) { /* C2HH setup */ /* lo if big signal */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 30, 31, 0x1); /* 
FUNC_MODE = DIF */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 23, 24, function_mode); /* IF_MODE */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 15, 22, 0xF); /* no inv */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 9, 9, 0x1); } else { /* default PAL BG */ /* C2HH setup */ /* lo if big signal */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 30, 31, 0x1); /* FUNC_MODE = DIF */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 23, 24, function_mode); /* IF_MODE */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 15, 22, 0xE); /* no inv */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, AFE_CTRL_C2HH_SRC_CTRL, 9, 9, 0x1); } } return status; } int cx231xx_dif_set_standard(struct cx231xx *dev, u32 standard) { int status = 0; u32 dif_misc_ctrl_value = 0; u32 func_mode = 0; cx231xx_info("%s: setStandard to %x\n", __func__, standard); status = vid_blk_read_word(dev, DIF_MISC_CTRL, &dif_misc_ctrl_value); if (standard != DIF_USE_BASEBAND) dev->norm = standard; switch (dev->model) { case CX231XX_BOARD_CNXT_CARRAERA: case CX231XX_BOARD_CNXT_RDE_250: case CX231XX_BOARD_CNXT_SHELBY: case CX231XX_BOARD_CNXT_RDU_250: case CX231XX_BOARD_CNXT_VIDEO_GRABBER: case CX231XX_BOARD_HAUPPAUGE_EXETER: case CX231XX_BOARD_OTG102: func_mode = 0x03; break; case CX231XX_BOARD_CNXT_RDE_253S: case CX231XX_BOARD_CNXT_RDU_253S: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_PAL: case CX231XX_BOARD_HAUPPAUGE_USB2_FM_NTSC: func_mode = 0x01; break; default: func_mode = 0x01; } status = cx231xx_dif_configure_C2HH_for_low_IF(dev, dev->active_mode, func_mode, standard); if (standard == DIF_USE_BASEBAND) { /* base band */ /* There is a different SRC_PHASE_INC value for baseband vs. 
DIF */ status = vid_blk_write_word(dev, DIF_SRC_PHASE_INC, 0xDF7DF83); status = vid_blk_read_word(dev, DIF_MISC_CTRL, &dif_misc_ctrl_value); dif_misc_ctrl_value |= FLD_DIF_DIF_BYPASS; status = vid_blk_write_word(dev, DIF_MISC_CTRL, dif_misc_ctrl_value); } else if (standard & V4L2_STD_PAL_D) { status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL, 0, 31, 0x6503bc0c); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL1, 0, 31, 0xbd038c85); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL2, 0, 31, 0x1db4640a); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL3, 0, 31, 0x00008800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_REF, 0, 31, 0x444C1380); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_IF, 0, 31, 0xDA302600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_INT, 0, 31, 0xDA261700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_RF, 0, 31, 0xDA262600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_INT_CURRENT, 0, 31, 0x26001700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_RF_CURRENT, 0, 31, 0x00002660); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VIDEO_AGC_CTRL, 0, 31, 0x72500800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VID_AUD_OVERRIDE, 0, 31, 0x27000100); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AV_SEP_CTRL, 0, 31, 0x3F3934EA); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_COMP_FLT_CTRL, 0, 31, 0x00000000); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_PHASE_INC, 0, 31, 0x1befbf06); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_GAIN_CONTROL, 0, 31, 0x000035e8); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_RPT_VARIANCE, 0, 31, 0x00000000); /* 
Save the Spec Inversion value */ dif_misc_ctrl_value &= FLD_DIF_SPEC_INV; dif_misc_ctrl_value |= 0x3a023F11; } else if (standard & V4L2_STD_PAL_I) { status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL, 0, 31, 0x6503bc0c); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL1, 0, 31, 0xbd038c85); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL2, 0, 31, 0x1db4640a); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL3, 0, 31, 0x00008800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_REF, 0, 31, 0x444C1380); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_IF, 0, 31, 0xDA302600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_INT, 0, 31, 0xDA261700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_RF, 0, 31, 0xDA262600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_INT_CURRENT, 0, 31, 0x26001700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_RF_CURRENT, 0, 31, 0x00002660); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VIDEO_AGC_CTRL, 0, 31, 0x72500800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VID_AUD_OVERRIDE, 0, 31, 0x27000100); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AV_SEP_CTRL, 0, 31, 0x5F39A934); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_COMP_FLT_CTRL, 0, 31, 0x00000000); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_PHASE_INC, 0, 31, 0x1befbf06); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_GAIN_CONTROL, 0, 31, 0x000035e8); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_RPT_VARIANCE, 0, 31, 0x00000000); /* Save the Spec Inversion value */ dif_misc_ctrl_value &= FLD_DIF_SPEC_INV; dif_misc_ctrl_value |= 0x3a033F11; } else if (standard & 
V4L2_STD_PAL_M) { /* improved Low Frequency Phase Noise */ status = vid_blk_write_word(dev, DIF_PLL_CTRL, 0xFF01FF0C); status = vid_blk_write_word(dev, DIF_PLL_CTRL1, 0xbd038c85); status = vid_blk_write_word(dev, DIF_PLL_CTRL2, 0x1db4640a); status = vid_blk_write_word(dev, DIF_PLL_CTRL3, 0x00008800); status = vid_blk_write_word(dev, DIF_AGC_IF_REF, 0x444C1380); status = vid_blk_write_word(dev, DIF_AGC_IF_INT_CURRENT, 0x26001700); status = vid_blk_write_word(dev, DIF_AGC_RF_CURRENT, 0x00002660); status = vid_blk_write_word(dev, DIF_VIDEO_AGC_CTRL, 0x72500800); status = vid_blk_write_word(dev, DIF_VID_AUD_OVERRIDE, 0x27000100); status = vid_blk_write_word(dev, DIF_AV_SEP_CTRL, 0x012c405d); status = vid_blk_write_word(dev, DIF_COMP_FLT_CTRL, 0x009f50c1); status = vid_blk_write_word(dev, DIF_SRC_PHASE_INC, 0x1befbf06); status = vid_blk_write_word(dev, DIF_SRC_GAIN_CONTROL, 0x000035e8); status = vid_blk_write_word(dev, DIF_SOFT_RST_CTRL_REVB, 0x00000000); /* Save the Spec Inversion value */ dif_misc_ctrl_value &= FLD_DIF_SPEC_INV; dif_misc_ctrl_value |= 0x3A0A3F10; } else if (standard & (V4L2_STD_PAL_N | V4L2_STD_PAL_Nc)) { /* improved Low Frequency Phase Noise */ status = vid_blk_write_word(dev, DIF_PLL_CTRL, 0xFF01FF0C); status = vid_blk_write_word(dev, DIF_PLL_CTRL1, 0xbd038c85); status = vid_blk_write_word(dev, DIF_PLL_CTRL2, 0x1db4640a); status = vid_blk_write_word(dev, DIF_PLL_CTRL3, 0x00008800); status = vid_blk_write_word(dev, DIF_AGC_IF_REF, 0x444C1380); status = vid_blk_write_word(dev, DIF_AGC_IF_INT_CURRENT, 0x26001700); status = vid_blk_write_word(dev, DIF_AGC_RF_CURRENT, 0x00002660); status = vid_blk_write_word(dev, DIF_VIDEO_AGC_CTRL, 0x72500800); status = vid_blk_write_word(dev, DIF_VID_AUD_OVERRIDE, 0x27000100); status = vid_blk_write_word(dev, DIF_AV_SEP_CTRL, 0x012c405d); status = vid_blk_write_word(dev, DIF_COMP_FLT_CTRL, 0x009f50c1); status = vid_blk_write_word(dev, DIF_SRC_PHASE_INC, 0x1befbf06); status = vid_blk_write_word(dev, 
DIF_SRC_GAIN_CONTROL, 0x000035e8); status = vid_blk_write_word(dev, DIF_SOFT_RST_CTRL_REVB, 0x00000000); /* Save the Spec Inversion value */ dif_misc_ctrl_value &= FLD_DIF_SPEC_INV; dif_misc_ctrl_value = 0x3A093F10; } else if (standard & (V4L2_STD_SECAM_B | V4L2_STD_SECAM_D | V4L2_STD_SECAM_G | V4L2_STD_SECAM_K | V4L2_STD_SECAM_K1)) { status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL, 0, 31, 0x6503bc0c); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL1, 0, 31, 0xbd038c85); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL2, 0, 31, 0x1db4640a); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL3, 0, 31, 0x00008800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_REF, 0, 31, 0x888C0380); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_IF, 0, 31, 0xe0262600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_INT, 0, 31, 0xc2171700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_RF, 0, 31, 0xc2262600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_INT_CURRENT, 0, 31, 0x26001700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_RF_CURRENT, 0, 31, 0x00002660); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VID_AUD_OVERRIDE, 0, 31, 0x27000100); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AV_SEP_CTRL, 0, 31, 0x3F3530ec); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_COMP_FLT_CTRL, 0, 31, 0x00000000); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_PHASE_INC, 0, 31, 0x1befbf06); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_GAIN_CONTROL, 0, 31, 0x000035e8); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_RPT_VARIANCE, 0, 31, 0x00000000); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, 
/*
 * NOTE(review): this span is the interior of the DIF "set standard"
 * routine whose head lies before this chunk.  Each branch below loads a
 * per-TV-standard table of DIF (digital IF demodulator) registers over
 * I2C via cx231xx_reg_mask_write(..., 0, 31, val) == full 32-bit write.
 * The hex constants are opaque vendor register values -- do not alter.
 * Every branch also folds its standard-specific bits into
 * dif_misc_ctrl_value (written to DIF_MISC_CTRL later), preserving only
 * the FLD_DIF_SPEC_INV (spectral inversion) bit from the previous value.
 * Code is byte-identical to the original; only comments were added.
 */
DIF_VIDEO_AGC_CTRL, 0, 31, 0xf4000000); /* Save the Spec Inversion value */ dif_misc_ctrl_value &= FLD_DIF_SPEC_INV; dif_misc_ctrl_value |= 0x3a023F11; } else if (standard & (V4L2_STD_SECAM_L | V4L2_STD_SECAM_LC)) { /* Is it SECAM_L1? */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL, 0, 31, 0x6503bc0c); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL1, 0, 31, 0xbd038c85); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL2, 0, 31, 0x1db4640a); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL3, 0, 31, 0x00008800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_REF, 0, 31, 0x888C0380); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_IF, 0, 31, 0xe0262600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_INT, 0, 31, 0xc2171700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_RF, 0, 31, 0xc2262600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_INT_CURRENT, 0, 31, 0x26001700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_RF_CURRENT, 0, 31, 0x00002660); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VID_AUD_OVERRIDE, 0, 31, 0x27000100); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AV_SEP_CTRL, 0, 31, 0x3F3530ec); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_COMP_FLT_CTRL, 0, 31, 0x00000000); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_PHASE_INC, 0, 31, 0x1befbf06); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_GAIN_CONTROL, 0, 31, 0x000035e8); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_RPT_VARIANCE, 0, 31, 0x00000000); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VIDEO_AGC_CTRL, 0, 31, 0xf2560000); /* Save the Spec Inversion value */ dif_misc_ctrl_value &=
FLD_DIF_SPEC_INV; dif_misc_ctrl_value |= 0x3a023F11;
/* NTSC-M branch: individual-register writes via vid_blk_write_word */
} else if (standard & V4L2_STD_NTSC_M) { /* V4L2_STD_NTSC_M (75 IRE Setup) Or V4L2_STD_NTSC_M_JP (Japan, 0 IRE Setup) */ /* For NTSC the centre frequency of video coming out of sidewinder is around 7.1MHz or 3.6MHz depending on the spectral inversion. so for a non spectrally inverted channel the pll freq word is 0x03420c49 */ status = vid_blk_write_word(dev, DIF_PLL_CTRL, 0x6503BC0C); status = vid_blk_write_word(dev, DIF_PLL_CTRL1, 0xBD038C85); status = vid_blk_write_word(dev, DIF_PLL_CTRL2, 0x1DB4640A); status = vid_blk_write_word(dev, DIF_PLL_CTRL3, 0x00008800); status = vid_blk_write_word(dev, DIF_AGC_IF_REF, 0x444C0380); status = vid_blk_write_word(dev, DIF_AGC_IF_INT_CURRENT, 0x26001700); status = vid_blk_write_word(dev, DIF_AGC_RF_CURRENT, 0x00002660); status = vid_blk_write_word(dev, DIF_VIDEO_AGC_CTRL, 0x04000800); status = vid_blk_write_word(dev, DIF_VID_AUD_OVERRIDE, 0x27000100); status = vid_blk_write_word(dev, DIF_AV_SEP_CTRL, 0x01296e1f); status = vid_blk_write_word(dev, DIF_COMP_FLT_CTRL, 0x009f50c1); status = vid_blk_write_word(dev, DIF_SRC_PHASE_INC, 0x1befbf06); status = vid_blk_write_word(dev, DIF_SRC_GAIN_CONTROL, 0x000035e8); status = vid_blk_write_word(dev, DIF_AGC_CTRL_IF, 0xC2262600); status = vid_blk_write_word(dev, DIF_AGC_CTRL_INT, 0xC2262600); status = vid_blk_write_word(dev, DIF_AGC_CTRL_RF, 0xC2262600); /* Save the Spec Inversion value */ dif_misc_ctrl_value &= FLD_DIF_SPEC_INV; dif_misc_ctrl_value |= 0x3a003F10;
/* Fallback branch: PAL B/G register table */
} else { /* default PAL BG */ status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL, 0, 31, 0x6503bc0c); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL1, 0, 31, 0xbd038c85); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL2, 0, 31, 0x1db4640a); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_PLL_CTRL3, 0, 31, 0x00008800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32,
DIF_AGC_IF_REF, 0, 31, 0x444C1380); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_IF, 0, 31, 0xDA302600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_INT, 0, 31, 0xDA261700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_CTRL_RF, 0, 31, 0xDA262600); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_IF_INT_CURRENT, 0, 31, 0x26001700); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AGC_RF_CURRENT, 0, 31, 0x00002660); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VIDEO_AGC_CTRL, 0, 31, 0x72500800); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_VID_AUD_OVERRIDE, 0, 31, 0x27000100); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_AV_SEP_CTRL, 0, 31, 0x3F3530EC); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_COMP_FLT_CTRL, 0, 31, 0x00A653A8); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_PHASE_INC, 0, 31, 0x1befbf06); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_SRC_GAIN_CONTROL, 0, 31, 0x000035e8); status = cx231xx_reg_mask_write(dev, VID_BLK_I2C_ADDRESS, 32, DIF_RPT_VARIANCE, 0, 31, 0x00000000); /* Save the Spec Inversion value */ dif_misc_ctrl_value &= FLD_DIF_SPEC_INV; dif_misc_ctrl_value |= 0x3a013F11; } /* The AGC values should be the same for all standards, AUD_SRC_SEL[19] should always be disabled */ dif_misc_ctrl_value &= ~FLD_DIF_AUD_SRC_SEL; /* It is still possible to get Set Standard calls even when we are in FM mode. This is done to override the value for FM.
*/ if (dev->active_mode == V4L2_TUNER_RADIO) dif_misc_ctrl_value = 0x7a080000; /* Write the calculated value for misc ontrol register */ status = vid_blk_write_word(dev, DIF_MISC_CTRL, dif_misc_ctrl_value); return status; } int cx231xx_tuner_pre_channel_change(struct cx231xx *dev) { int status = 0; u32 dwval; /* Set the RF and IF k_agc values to 3 */ status = vid_blk_read_word(dev, DIF_AGC_IF_REF, &dwval); dwval &= ~(FLD_DIF_K_AGC_RF | FLD_DIF_K_AGC_IF); dwval |= 0x33000000; status = vid_blk_write_word(dev, DIF_AGC_IF_REF, dwval); return status; } int cx231xx_tuner_post_channel_change(struct cx231xx *dev) { int status = 0; u32 dwval; cx231xx_info("cx231xx_tuner_post_channel_change dev->tuner_type =0%d\n", dev->tuner_type); /* Set the RF and IF k_agc values to 4 for PAL/NTSC and 8 for * SECAM L/B/D standards */ status = vid_blk_read_word(dev, DIF_AGC_IF_REF, &dwval); dwval &= ~(FLD_DIF_K_AGC_RF | FLD_DIF_K_AGC_IF); if (dev->norm & (V4L2_STD_SECAM_L | V4L2_STD_SECAM_B | V4L2_STD_SECAM_D)) { if (dev->tuner_type == TUNER_NXP_TDA18271) { dwval &= ~FLD_DIF_IF_REF; dwval |= 0x88000300; } else dwval |= 0x88000000; } else { if (dev->tuner_type == TUNER_NXP_TDA18271) { dwval &= ~FLD_DIF_IF_REF; dwval |= 0xCC000300; } else dwval |= 0x44000000; } status = vid_blk_write_word(dev, DIF_AGC_IF_REF, dwval); return status == sizeof(dwval) ? 
0 : -EIO; } /****************************************************************************** * I 2 S - B L O C K C O N T R O L functions * ******************************************************************************/ int cx231xx_i2s_blk_initialize(struct cx231xx *dev) { int status = 0; u32 value; status = cx231xx_read_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL1, 1, &value, 1); /* enables clock to delta-sigma and decimation filter */ value |= 0x80; status = cx231xx_write_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL1, 1, value, 1); /* power up all channel */ status = cx231xx_write_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL2, 1, 0x00, 1); return status; } int cx231xx_i2s_blk_update_power_control(struct cx231xx *dev, enum AV_MODE avmode) { int status = 0; u32 value = 0; if (avmode != POLARIS_AVMODE_ENXTERNAL_AV) { status = cx231xx_read_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL2, 1, &value, 1); value |= 0xfe; status = cx231xx_write_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL2, 1, value, 1); } else { status = cx231xx_write_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL2, 1, 0x00, 1); } return status; } /* set i2s_blk for audio input types */ int cx231xx_i2s_blk_set_audio_input(struct cx231xx *dev, u8 audio_input) { int status = 0; switch (audio_input) { case CX231XX_AMUX_LINE_IN: status = cx231xx_write_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL2, 1, 0x00, 1); status = cx231xx_write_i2c_data(dev, I2S_BLK_DEVICE_ADDRESS, CH_PWR_CTRL1, 1, 0x80, 1); break; case CX231XX_AMUX_VIDEO: default: break; } dev->ctl_ainput = audio_input; return status; } /****************************************************************************** * P O W E R C O N T R O L functions * ******************************************************************************/ int cx231xx_set_power_mode(struct cx231xx *dev, enum AV_MODE mode) { u8 value[4] = { 0, 0, 0, 0 }; u32 tmp = 0; int status = 0; if (dev->power_mode != mode) dev->power_mode = mode; else { 
/*
 * NOTE(review): continuation of cx231xx_set_power_mode().  Per-AV-mode
 * power sequencing against PWR_CTL_EN: each step serializes the 32-bit
 * 'tmp' into value[0..3] little-endian, writes it, then sleeps
 * PWR_SLEEP_INTERVAL to let the hardware settle.  The write order
 * (demod -> tuner -> AV -> ISO -> mode bits) is deliberate; do not
 * reorder.  Code is byte-identical to the original; only comments added.
 */
cx231xx_info(" setPowerMode::mode = %d, No Change req.\n", mode); return 0; } status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, value, 4); if (status < 0) return status; tmp = le32_to_cpu(*((u32 *) value)); switch (mode) { case POLARIS_AVMODE_ENXTERNAL_AV: tmp &= (~PWR_MODE_MASK); tmp |= PWR_AV_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); tmp |= PWR_ISO_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); tmp |= POLARIS_AVMODE_ENXTERNAL_AV; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); /* reset state of xceive tuner */ dev->xc_fw_load_done = 0; break; case POLARIS_AVMODE_ANALOGT_TV: tmp |= PWR_DEMOD_EN; tmp |= (I2C_DEMOD_EN); value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); if (!(tmp & PWR_TUNER_EN)) { tmp |= (PWR_TUNER_EN); value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } if (!(tmp & PWR_AV_EN)) { tmp |= PWR_AV_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } if (!(tmp & PWR_ISO_EN)) { tmp |= PWR_ISO_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] =
(u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } if (!(tmp & POLARIS_AVMODE_ANALOGT_TV)) { tmp |= POLARIS_AVMODE_ANALOGT_TV; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } if (dev->board.tuner_type != TUNER_ABSENT) { /* Enable tuner */ cx231xx_enable_i2c_port_3(dev, true); /* reset the Tuner */ if (dev->board.tuner_gpio) cx231xx_gpio_set(dev, dev->board.tuner_gpio); if (dev->cx231xx_reset_analog_tuner) dev->cx231xx_reset_analog_tuner(dev); } break; case POLARIS_AVMODE_DIGITAL: if (!(tmp & PWR_TUNER_EN)) { tmp |= (PWR_TUNER_EN); value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } if (!(tmp & PWR_AV_EN)) { tmp |= PWR_AV_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } if (!(tmp & PWR_ISO_EN)) { tmp |= PWR_ISO_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } tmp &= (~PWR_AV_MODE); tmp |= POLARIS_AVMODE_DIGITAL | I2C_DEMOD_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); if (!(tmp & PWR_DEMOD_EN)) { tmp |= PWR_DEMOD_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev,
VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } if (dev->board.tuner_type != TUNER_ABSENT) { /* * Enable tuner * Hauppauge Exeter seems to need to do something different! */ if (dev->model == CX231XX_BOARD_HAUPPAUGE_EXETER) cx231xx_enable_i2c_port_3(dev, false); else cx231xx_enable_i2c_port_3(dev, true); /* reset the Tuner */ if (dev->board.tuner_gpio) cx231xx_gpio_set(dev, dev->board.tuner_gpio); if (dev->cx231xx_reset_analog_tuner) dev->cx231xx_reset_analog_tuner(dev); } break; default: break; } msleep(PWR_SLEEP_INTERVAL); /* For power saving, only enable Pwr_resetout_n when digital TV is selected. */ if (mode == POLARIS_AVMODE_DIGITAL) { tmp |= PWR_RESETOUT_EN; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); msleep(PWR_SLEEP_INTERVAL); } /* update power control for afe */ status = cx231xx_afe_update_power_control(dev, mode); /* update power control for i2s_blk */ status = cx231xx_i2s_blk_update_power_control(dev, mode); status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, value, 4); return status; }
/*
 * cx231xx_power_suspend()
 * Clear the PWR_MODE_MASK bits of PWR_CTL_EN to drop the device into
 * its low-power state.
 * NOTE(review): the early-exit checks "status > 0", yet the read helper
 * elsewhere in this file is treated as returning < 0 on error -- the
 * polarity looks inverted; confirm intended semantics before changing.
 */
int cx231xx_power_suspend(struct cx231xx *dev) { u8 value[4] = { 0, 0, 0, 0 }; u32 tmp = 0; int status = 0; status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, PWR_CTL_EN, value, 4); if (status > 0) return status; tmp = le32_to_cpu(*((u32 *) value)); tmp &= (~PWR_MODE_MASK); value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, PWR_CTL_EN, value, 4); return status; } /****************************************************************************** * S T R E A M    C O N T R O L functions * ******************************************************************************/
/*
 * cx231xx_start_stream()
 * OR the given endpoint mask into EP_MODE_SET to enable streaming on
 * those endpoints (continues on the next original line).
 */
int cx231xx_start_stream(struct cx231xx *dev, u32 ep_mask) { u8 value[4] = { 0x0, 0x0, 0x0, 0x0 }; u32 tmp = 0; int status = 0;
/* Continuation of cx231xx_start_stream(): read-modify-write EP_MODE_SET. */
cx231xx_info("cx231xx_start_stream():: ep_mask = %x\n", ep_mask); status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET, value, 4); if (status < 0) return status; tmp = le32_to_cpu(*((u32 *) value)); tmp |= ep_mask; value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, EP_MODE_SET, value, 4); return status; }
/*
 * cx231xx_stop_stream()
 * Mirror of cx231xx_start_stream(): clears ep_mask bits in EP_MODE_SET.
 */
int cx231xx_stop_stream(struct cx231xx *dev, u32 ep_mask) { u8 value[4] = { 0x0, 0x0, 0x0, 0x0 }; u32 tmp = 0; int status = 0; cx231xx_info("cx231xx_stop_stream():: ep_mask = %x\n", ep_mask); status = cx231xx_read_ctrl_reg(dev, VRT_GET_REGISTER, EP_MODE_SET, value, 4); if (status < 0) return status; tmp = le32_to_cpu(*((u32 *) value)); tmp &= (~ep_mask); value[0] = (u8) tmp; value[1] = (u8) (tmp >> 8); value[2] = (u8) (tmp >> 16); value[3] = (u8) (tmp >> 24); status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, EP_MODE_SET, value, 4); return status; }
/*
 * cx231xx_initialize_stream_xfer()
 * Program TS_MODE_REG (and for MPEG boards TS1_CFG/TS1_LENGTH) per the
 * media type.  High-speed USB gets per-type values; full speed always
 * uses 0x101.  Register constants are opaque hardware values.
 */
int cx231xx_initialize_stream_xfer(struct cx231xx *dev, u32 media_type) { int status = 0; u32 value = 0; u8 val[4] = { 0, 0, 0, 0 }; if (dev->udev->speed == USB_SPEED_HIGH) { switch (media_type) { case Audio: cx231xx_info("%s: Audio enter HANC\n", __func__); status = cx231xx_mode_register(dev, TS_MODE_REG, 0x9300); break; case Vbi: cx231xx_info("%s: set vanc registers\n", __func__); status = cx231xx_mode_register(dev, TS_MODE_REG, 0x300); break; case Sliced_cc: cx231xx_info("%s: set hanc registers\n", __func__); status = cx231xx_mode_register(dev, TS_MODE_REG, 0x1300); break; case Raw_Video: cx231xx_info("%s: set video registers\n", __func__); status = cx231xx_mode_register(dev, TS_MODE_REG, 0x100); break; case TS1_serial_mode: cx231xx_info("%s: set ts1 registers", __func__); if (dev->board.has_417) { cx231xx_info(" MPEG\n"); value &= 0xFFFFFFFC; value |= 0x3; status = cx231xx_mode_register(dev, TS_MODE_REG, value); val[0] = 0x04; val[1] = 0xA3; val[2] = 0x3B; val[3] = 0x00; status =
cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, TS1_CFG_REG, val, 4); val[0] = 0x00; val[1] = 0x08; val[2] = 0x00; val[3] = 0x08; status = cx231xx_write_ctrl_reg(dev, VRT_SET_REGISTER, TS1_LENGTH_REG, val, 4); } else { cx231xx_info(" BDA\n"); status = cx231xx_mode_register(dev, TS_MODE_REG, 0x101); status = cx231xx_mode_register(dev, TS1_CFG_REG, 0x010); } break; case TS1_parallel_mode: cx231xx_info("%s: set ts1 parallel mode registers\n", __func__); status = cx231xx_mode_register(dev, TS_MODE_REG, 0x100); status = cx231xx_mode_register(dev, TS1_CFG_REG, 0x400); break; } } else { status = cx231xx_mode_register(dev, TS_MODE_REG, 0x101); } return status; }
/*
 * cx231xx_capture_start()
 * Map the media type to its USB endpoint enable bit, then start or
 * stop streaming on that endpoint.
 * NOTE(review): ep_mask is u32 initialised to -1 (all bits set), so the
 * later "if (ep_mask > 0)" guard is always true even when no case
 * matched -- presumably intentional fallback, but confirm.
 */
int cx231xx_capture_start(struct cx231xx *dev, int start, u8 media_type) { int rc = -1; u32 ep_mask = -1; struct pcb_config *pcb_config; /* get EP for media type */ pcb_config = (struct pcb_config *)&dev->current_pcb_config; if (pcb_config->config_num) { switch (media_type) { case Raw_Video: ep_mask = ENABLE_EP4; /* ep4 [00:1000] */ break; case Audio: ep_mask = ENABLE_EP3; /* ep3 [00:0100] */ break; case Vbi: ep_mask = ENABLE_EP5; /* ep5 [01:0000] */ break; case Sliced_cc: ep_mask = ENABLE_EP6; /* ep6 [10:0000] */ break; case TS1_serial_mode: case TS1_parallel_mode: ep_mask = ENABLE_EP1; /* ep1 [00:0001] */ break; case TS2: ep_mask = ENABLE_EP2; /* ep2 [00:0010] */ break; } } if (start) { rc = cx231xx_initialize_stream_xfer(dev, media_type); if (rc < 0) return rc; /* enable video capture */ if (ep_mask > 0) rc = cx231xx_start_stream(dev, ep_mask); } else { /* disable video capture */ if (ep_mask > 0) rc = cx231xx_stop_stream(dev, ep_mask); } return rc; } EXPORT_SYMBOL_GPL(cx231xx_capture_start); /***************************************************************************** * G P I O   B I T control functions * ******************************************************************************/
/* Write the 4-byte gpio_val (converted to little-endian) for gpio_bit. */
static int cx231xx_set_gpio_bit(struct cx231xx *dev, u32 gpio_bit, u32 gpio_val) { int status = 0; gpio_val = cpu_to_le32(gpio_val);
status = cx231xx_send_gpio_cmd(dev, gpio_bit, (u8 *)&gpio_val, 4, 0, 0); return status; }
/* Read the 4-byte GPIO value for gpio_bit back into *gpio_val (host order). */
static int cx231xx_get_gpio_bit(struct cx231xx *dev, u32 gpio_bit, u32 *gpio_val) { u32 tmp; int status = 0; status = cx231xx_send_gpio_cmd(dev, gpio_bit, (u8 *)&tmp, 4, 0, 1); *gpio_val = le32_to_cpu(tmp); return status; } /* * cx231xx_set_gpio_direction * Sets the direction of the GPIO pin to input or output * * Parameters : * pin_number : The GPIO Pin number to program the direction for * from 0 to 31 * pin_value : The Direction of the GPIO Pin under reference. * 0 = Input direction * 1 = Output direction */ int cx231xx_set_gpio_direction(struct cx231xx *dev, int pin_number, int pin_value) { int status = 0; u32 value = 0; /* Check for valid pin_number - if 32 , bail out */ if (pin_number >= 32) return -EINVAL; /* input */ if (pin_value == 0) value = dev->gpio_dir & (~(1 << pin_number)); /* clear */ else value = dev->gpio_dir | (1 << pin_number); status = cx231xx_set_gpio_bit(dev, value, dev->gpio_val); /* cache the value for future */ dev->gpio_dir = value; return status; } /* * cx231xx_set_gpio_value * Sets the value of the GPIO pin to Logic high or low. The Pin under * reference should ALREADY BE SET IN OUTPUT MODE !!!!!!!!! * * Parameters : * pin_number : The GPIO Pin number to program the direction for * pin_value : The value of the GPIO Pin under reference.
* 0 = set it to 0 * 1 = set it to 1 */ int cx231xx_set_gpio_value(struct cx231xx *dev, int pin_number, int pin_value) { int status = 0; u32 value = 0; /* Check for valid pin_number - if 0xFF , bail out */ if (pin_number >= 32) return -EINVAL; /* first do a sanity check - if the Pin is not output, make it output */ if ((dev->gpio_dir & (1 << pin_number)) == 0x00) { /* It was in input mode */ value = dev->gpio_dir | (1 << pin_number); dev->gpio_dir = value; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); value = 0; } if (pin_value == 0) value = dev->gpio_val & (~(1 << pin_number)); else value = dev->gpio_val | (1 << pin_number); /* store the value */ dev->gpio_val = value; /* toggle bit0 of GP_IO */ status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); return status; } /***************************************************************************** * G P I O I2C related functions * ******************************************************************************/
/*
 * cx231xx_gpio_i2c_start()
 * Bit-banged I2C START condition on the tuner SCL/SDA GPIO pins:
 * both high, then SDA falls while SCL is high, then SCL low.
 * dev->gpio_dir / dev->gpio_val are the cached direction/value shadow
 * registers; every transition is pushed to hardware via
 * cx231xx_set_gpio_bit().  The exact transition order is the protocol;
 * do not reorder.
 */
int cx231xx_gpio_i2c_start(struct cx231xx *dev) { int status = 0; /* set SCL to output 1 ; set SDA to output 1 */ dev->gpio_dir |= 1 << dev->board.tuner_scl_gpio; dev->gpio_dir |= 1 << dev->board.tuner_sda_gpio; dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; dev->gpio_val |= 1 << dev->board.tuner_sda_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); if (status < 0) return -EINVAL; /* set SCL to output 1; set SDA to output 0 */ dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; dev->gpio_val &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); if (status < 0) return -EINVAL; /* set SCL to output 0; set SDA to output 0 */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); dev->gpio_val &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); if (status < 0) return -EINVAL; return status; }
/*
 * cx231xx_gpio_i2c_end()
 * Bit-banged I2C STOP condition: SDA low, SCL rises, then SDA rises,
 * finally both pins are released to input (bus idle).
 */
int cx231xx_gpio_i2c_end(struct cx231xx *dev) { int status = 0; /* set SCL to
output 0; set SDA to output 0 */ dev->gpio_dir |= 1 << dev->board.tuner_scl_gpio; dev->gpio_dir |= 1 << dev->board.tuner_sda_gpio; dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); dev->gpio_val &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); if (status < 0) return -EINVAL; /* set SCL to output 1; set SDA to output 0 */ dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; dev->gpio_val &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); if (status < 0) return -EINVAL; /* set SCL to input ,release SCL cable control set SDA to input ,release SDA cable control */ dev->gpio_dir &= ~(1 << dev->board.tuner_scl_gpio); dev->gpio_dir &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); if (status < 0) return -EINVAL; return status; }
/*
 * cx231xx_gpio_i2c_write_byte()
 * Clock out one byte MSB-first: for each bit, SDA is set while SCL is
 * low, then SCL is pulsed high and back low.
 */
int cx231xx_gpio_i2c_write_byte(struct cx231xx *dev, u8 data) { int status = 0; u8 i; /* set SCL to output ; set SDA to output */ dev->gpio_dir |= 1 << dev->board.tuner_scl_gpio; dev->gpio_dir |= 1 << dev->board.tuner_sda_gpio; for (i = 0; i < 8; i++) { if (((data << i) & 0x80) == 0) { /* set SCL to output 0; set SDA to output 0 */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); dev->gpio_val &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SCL to output 1; set SDA to output 0 */ dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SCL to output 0; set SDA to output 0 */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); } else { /* set SCL to output 0; set SDA to output 1 */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); dev->gpio_val |= 1 << dev->board.tuner_sda_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SCL to output 1; set SDA to output 1 */
dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SCL to output 0; set SDA to output 1 */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); } } return status; }
/*
 * cx231xx_gpio_i2c_read_byte()
 * Clock in one byte MSB-first: pulse SCL and sample SDA on the high
 * phase; dev->gpio_val is saved/restored around each hardware read
 * because cx231xx_get_gpio_bit() overwrites it.
 */
int cx231xx_gpio_i2c_read_byte(struct cx231xx *dev, u8 *buf) { u8 value = 0; int status = 0; u32 gpio_logic_value = 0; u8 i; /* read byte */ for (i = 0; i < 8; i++) { /* send write I2c addr */ /* set SCL to output 0; set SDA to input */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SCL to output 1; set SDA to input */ dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* get SDA data bit */ gpio_logic_value = dev->gpio_val; status = cx231xx_get_gpio_bit(dev, dev->gpio_dir, &dev->gpio_val); if ((dev->gpio_val & (1 << dev->board.tuner_sda_gpio)) != 0) value |= (1 << (8 - i - 1)); dev->gpio_val = gpio_logic_value; } /* set SCL to output 0,finish the read latest SCL signal.
!!!set SDA to input, never to modify SDA direction at the same times */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* store the value */ *buf = value & 0xff; return status; }
/*
 * cx231xx_gpio_i2c_read_ack()
 * Wait (clock-stretch) for the slave to raise SCL, then sample SDA:
 * low SDA = ACK (returns 0), high SDA = NAK.  Finishes by driving SCL
 * low again so SDA direction may be changed safely.
 * NOTE(review): the timeout message reports nInit * 10 msec, but the
 * poll loop sleeps msleep(2) per iteration (~20 ms total for 10
 * iterations) -- the printed figure looks off; confirm before relying
 * on it.
 */
int cx231xx_gpio_i2c_read_ack(struct cx231xx *dev) { int status = 0; u32 gpio_logic_value = 0; int nCnt = 10; int nInit = nCnt; /* clock stretch; set SCL to input; set SDA to input; get SCL value till SCL = 1 */ dev->gpio_dir &= ~(1 << dev->board.tuner_sda_gpio); dev->gpio_dir &= ~(1 << dev->board.tuner_scl_gpio); gpio_logic_value = dev->gpio_val; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); do { msleep(2); status = cx231xx_get_gpio_bit(dev, dev->gpio_dir, &dev->gpio_val); nCnt--; } while (((dev->gpio_val & (1 << dev->board.tuner_scl_gpio)) == 0) && (nCnt > 0)); if (nCnt == 0) cx231xx_info("No ACK after %d msec -GPIO I2C failed!", nInit * 10); /* * readAck * through clock stretch, slave has given a SCL signal, * so the SDA data can be directly read. */ status = cx231xx_get_gpio_bit(dev, dev->gpio_dir, &dev->gpio_val); if ((dev->gpio_val & 1 << dev->board.tuner_sda_gpio) == 0) { dev->gpio_val = gpio_logic_value; dev->gpio_val &= ~(1 << dev->board.tuner_sda_gpio); status = 0; } else { dev->gpio_val = gpio_logic_value; dev->gpio_val |= (1 << dev->board.tuner_sda_gpio); } /* read SDA end, set the SCL to output 0, after this operation, SDA direction can be changed.
*/ dev->gpio_val = gpio_logic_value; dev->gpio_dir |= (1 << dev->board.tuner_scl_gpio); dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); return status; }
/*
 * cx231xx_gpio_i2c_write_ack()
 * Send an ACK bit as master (after each byte read except the last):
 * pull SDA low, pulse SCL, then release SDA to input for the slave.
 */
int cx231xx_gpio_i2c_write_ack(struct cx231xx *dev) { int status = 0; /* set SDA to ouput */ dev->gpio_dir |= 1 << dev->board.tuner_sda_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SCL = 0 (output); set SDA = 0 (output) */ dev->gpio_val &= ~(1 << dev->board.tuner_sda_gpio); dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SCL = 1 (output); set SDA = 0 (output) */ dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SCL = 0 (output); set SDA = 0 (output) */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set SDA to input,and then the slave will read data from SDA.
*/ dev->gpio_dir &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); return status; }
/*
 * cx231xx_gpio_i2c_write_nak()
 * Send a NAK bit (SDA left floating high via input mode) while pulsing
 * SCL -- tells the slave the read transfer is over.
 */
int cx231xx_gpio_i2c_write_nak(struct cx231xx *dev) { int status = 0; /* set scl to output ; set sda to input */ dev->gpio_dir |= 1 << dev->board.tuner_scl_gpio; dev->gpio_dir &= ~(1 << dev->board.tuner_sda_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set scl to output 0; set sda to input */ dev->gpio_val &= ~(1 << dev->board.tuner_scl_gpio); status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); /* set scl to output 1; set sda to input */ dev->gpio_val |= 1 << dev->board.tuner_scl_gpio; status = cx231xx_set_gpio_bit(dev, dev->gpio_dir, dev->gpio_val); return status; } /***************************************************************************** * G P I O I2C related functions * ******************************************************************************/ /* cx231xx_gpio_i2c_read * Function to read data from gpio based I2C interface */
/*
 * Full read transaction under dev->gpio_i2c_lock:
 * START, address byte with R bit ((dev_addr << 1) + 1), ACK, then len
 * data bytes (master ACKs every byte except the last), NAK, STOP.
 */
int cx231xx_gpio_i2c_read(struct cx231xx *dev, u8 dev_addr, u8 *buf, u8 len) { int status = 0; int i = 0; /* get the lock */ mutex_lock(&dev->gpio_i2c_lock); /* start */ status = cx231xx_gpio_i2c_start(dev); /* write dev_addr */ status = cx231xx_gpio_i2c_write_byte(dev, (dev_addr << 1) + 1); /* readAck */ status = cx231xx_gpio_i2c_read_ack(dev); /* read data */ for (i = 0; i < len; i++) { /* read data */ buf[i] = 0; status = cx231xx_gpio_i2c_read_byte(dev, &buf[i]); if ((i + 1) != len) { /* only do write ack if we more length */ status = cx231xx_gpio_i2c_write_ack(dev); } } /* write NAK - inform reads are complete */ status = cx231xx_gpio_i2c_write_nak(dev); /* write end */ status = cx231xx_gpio_i2c_end(dev); /* release the lock */ mutex_unlock(&dev->gpio_i2c_lock); return status; } /* cx231xx_gpio_i2c_write * Function to write data to gpio based I2C interface */
/*
 * Full write transaction under dev->gpio_i2c_lock:
 * START, address byte with W bit (dev_addr << 1), then each data byte
 * followed by slave ACK, STOP.  Always returns 0 (per-step statuses
 * are not propagated here).
 */
int cx231xx_gpio_i2c_write(struct cx231xx *dev, u8 dev_addr, u8 *buf, u8 len) { int i = 0;
/* get the lock */ mutex_lock(&dev->gpio_i2c_lock); /* start */ cx231xx_gpio_i2c_start(dev); /* write dev_addr */ cx231xx_gpio_i2c_write_byte(dev, dev_addr << 1); /* read Ack */ cx231xx_gpio_i2c_read_ack(dev); for (i = 0; i < len; i++) { /* Write data */ cx231xx_gpio_i2c_write_byte(dev, buf[i]); /* read Ack */ cx231xx_gpio_i2c_read_ack(dev); } /* write End */ cx231xx_gpio_i2c_end(dev); /* release the lock */ mutex_unlock(&dev->gpio_i2c_lock); return 0; }
gpl-2.0
bigzz/eas-backports
lib/digsig.c
2233
5611
/* * Copyright (C) 2011 Nokia Corporation * Copyright (C) 2011 Intel Corporation * * Author: * Dmitry Kasatkin <dmitry.kasatkin@nokia.com> * <dmitry.kasatkin@intel.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, version 2 of the License. * * File: sign.c * implements signature (RSA) verification * pkcs decoding is based on LibTomCrypt code */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/err.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/key.h> #include <linux/crypto.h> #include <crypto/hash.h> #include <crypto/sha.h> #include <keys/user-type.h> #include <linux/mpi.h> #include <linux/digsig.h> static struct crypto_shash *shash; static const char *pkcs_1_v1_5_decode_emsa(const unsigned char *msg, unsigned long msglen, unsigned long modulus_bitlen, unsigned long *outlen) { unsigned long modulus_len, ps_len, i; modulus_len = (modulus_bitlen >> 3) + (modulus_bitlen & 7 ? 1 : 0); /* test message size */ if ((msglen > modulus_len) || (modulus_len < 11)) return NULL; /* separate encoded message */ if (msg[0] != 0x00 || msg[1] != 0x01) return NULL; for (i = 2; i < modulus_len - 1; i++) if (msg[i] != 0xFF) break; /* separator check */ if (msg[i] != 0) /* There was no octet with hexadecimal value 0x00 to separate ps from m. 
*/ return NULL; ps_len = i - 2; *outlen = (msglen - (2 + ps_len + 1)); return msg + 2 + ps_len + 1; } /* * RSA Signature verification with public key */ static int digsig_verify_rsa(struct key *key, const char *sig, int siglen, const char *h, int hlen) { int err = -EINVAL; unsigned long len; unsigned long mlen, mblen; unsigned nret, l; int head, i; unsigned char *out1 = NULL; const char *m; MPI in = NULL, res = NULL, pkey[2]; uint8_t *p, *datap, *endp; struct user_key_payload *ukp; struct pubkey_hdr *pkh; down_read(&key->sem); ukp = key->payload.data; if (ukp->datalen < sizeof(*pkh)) goto err1; pkh = (struct pubkey_hdr *)ukp->data; if (pkh->version != 1) goto err1; if (pkh->algo != PUBKEY_ALGO_RSA) goto err1; if (pkh->nmpi != 2) goto err1; datap = pkh->mpi; endp = ukp->data + ukp->datalen; err = -ENOMEM; for (i = 0; i < pkh->nmpi; i++) { unsigned int remaining = endp - datap; pkey[i] = mpi_read_from_buffer(datap, &remaining); if (!pkey[i]) goto err; datap += remaining; } mblen = mpi_get_nbits(pkey[0]); mlen = DIV_ROUND_UP(mblen, 8); if (mlen == 0) goto err; out1 = kzalloc(mlen, GFP_KERNEL); if (!out1) goto err; nret = siglen; in = mpi_read_from_buffer(sig, &nret); if (!in) goto err; res = mpi_alloc(mpi_get_nlimbs(in) * 2); if (!res) goto err; err = mpi_powm(res, in, pkey[1], pkey[0]); if (err) goto err; if (mpi_get_nlimbs(res) * BYTES_PER_MPI_LIMB > mlen) { err = -EINVAL; goto err; } p = mpi_get_buffer(res, &l, NULL); if (!p) { err = -EINVAL; goto err; } len = mlen; head = len - l; memset(out1, 0, head); memcpy(out1 + head, p, l); kfree(p); m = pkcs_1_v1_5_decode_emsa(out1, len, mblen, &len); if (!m || len != hlen || memcmp(m, h, hlen)) err = -EINVAL; err: mpi_free(in); mpi_free(res); kfree(out1); while (--i >= 0) mpi_free(pkey[i]); err1: up_read(&key->sem); return err; } /** * digsig_verify() - digital signature verification with public key * @keyring: keyring to search key in * @sig: digital signature * @sigen: length of the signature * @data: data * @datalen: 
length of the data * @return: 0 on success, -EINVAL otherwise * * Verifies data integrity against digital signature. * Currently only RSA is supported. * Normally hash of the content is used as a data for this function. * */ int digsig_verify(struct key *keyring, const char *sig, int siglen, const char *data, int datalen) { int err = -ENOMEM; struct signature_hdr *sh = (struct signature_hdr *)sig; struct shash_desc *desc = NULL; unsigned char hash[SHA1_DIGEST_SIZE]; struct key *key; char name[20]; if (siglen < sizeof(*sh) + 2) return -EINVAL; if (sh->algo != PUBKEY_ALGO_RSA) return -ENOTSUPP; sprintf(name, "%llX", __be64_to_cpup((uint64_t *)sh->keyid)); if (keyring) { /* search in specific keyring */ key_ref_t kref; kref = keyring_search(make_key_ref(keyring, 1UL), &key_type_user, name); if (IS_ERR(kref)) key = ERR_PTR(PTR_ERR(kref)); else key = key_ref_to_ptr(kref); } else { key = request_key(&key_type_user, name, NULL); } if (IS_ERR(key)) { pr_err("key not found, id: %s\n", name); return PTR_ERR(key); } desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(shash), GFP_KERNEL); if (!desc) goto err; desc->tfm = shash; desc->flags = CRYPTO_TFM_REQ_MAY_SLEEP; crypto_shash_init(desc); crypto_shash_update(desc, data, datalen); crypto_shash_update(desc, sig, sizeof(*sh)); crypto_shash_final(desc, hash); kfree(desc); /* pass signature mpis address */ err = digsig_verify_rsa(key, sig + sizeof(*sh), siglen - sizeof(*sh), hash, sizeof(hash)); err: key_put(key); return err ? -EINVAL : 0; } EXPORT_SYMBOL_GPL(digsig_verify); static int __init digsig_init(void) { shash = crypto_alloc_shash("sha1", 0, 0); if (IS_ERR(shash)) { pr_err("shash allocation failed\n"); return PTR_ERR(shash); } return 0; } static void __exit digsig_cleanup(void) { crypto_free_shash(shash); } module_init(digsig_init); module_exit(digsig_cleanup); MODULE_LICENSE("GPL");
gpl-2.0
cdesjardins/DTS-Eagle-Integration_CAF-Android-kernel
arch/x86/crypto/ablk_helper.c
2489
4316
/*
 * Shared async block cipher helpers
 *
 * Copyright (c) 2012 Jussi Kivilinna <jussi.kivilinna@mbnet.fi>
 *
 * Based on aesni-intel_glue.c by:
 *  Copyright (C) 2008, Intel Corp.
 *    Author: Huang Ying <ying.huang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307
 * USA
 *
 */

#include <linux/kernel.h>
#include <linux/crypto.h>
#include <linux/init.h>
#include <linux/module.h>
#include <crypto/algapi.h>
#include <crypto/cryptd.h>
#include <asm/i387.h>
#include <asm/crypto/ablk_helper.h>

/*
 * ablk_set_key - forward a key change to the underlying cryptd transform.
 *
 * Copies the caller's CRYPTO_TFM_REQ_* flags onto the cryptd child, sets
 * the key there, then mirrors the child's CRYPTO_TFM_RES_* result flags
 * back onto the outer tfm so the caller can inspect them.  Returns the
 * child's setkey result.
 */
int ablk_set_key(struct crypto_ablkcipher *tfm, const u8 *key,
		 unsigned int key_len)
{
	struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct crypto_ablkcipher *child = &ctx->cryptd_tfm->base;
	int err;

	crypto_ablkcipher_clear_flags(child, CRYPTO_TFM_REQ_MASK);
	crypto_ablkcipher_set_flags(child, crypto_ablkcipher_get_flags(tfm)
				    & CRYPTO_TFM_REQ_MASK);
	err = crypto_ablkcipher_setkey(child, key, key_len);
	crypto_ablkcipher_set_flags(tfm, crypto_ablkcipher_get_flags(child)
				    & CRYPTO_TFM_RES_MASK);
	return err;
}
EXPORT_SYMBOL_GPL(ablk_set_key);

/*
 * __ablk_encrypt - synchronous encrypt path.
 *
 * Runs the cryptd child's blkcipher encrypt directly in the caller's
 * context.  Callers are expected to have verified that the FPU is usable
 * (see ablk_encrypt below); no deferral happens here.
 */
int __ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);
	struct blkcipher_desc desc;

	desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
	desc.info = req->info;
	desc.flags = 0;

	return crypto_blkcipher_crt(desc.tfm)->encrypt(
		&desc, req->dst, req->src, req->nbytes);
}
EXPORT_SYMBOL_GPL(__ablk_encrypt);

/*
 * ablk_encrypt - encrypt, deferring to cryptd when the FPU is unusable.
 *
 * If we are in a context where the FPU cannot be used (e.g. IRQ context
 * per irq_fpu_usable()), clone the request onto the request context and
 * hand it to the async cryptd transform; otherwise encrypt synchronously.
 */
int ablk_encrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);

		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);

		return crypto_ablkcipher_encrypt(cryptd_req);
	} else {
		return __ablk_encrypt(req);
	}
}
EXPORT_SYMBOL_GPL(ablk_encrypt);

/*
 * ablk_decrypt - decrypt counterpart of ablk_encrypt.
 *
 * Same deferral logic as ablk_encrypt; the synchronous path is inlined
 * here rather than split into a __ablk_decrypt helper.
 */
int ablk_decrypt(struct ablkcipher_request *req)
{
	struct crypto_ablkcipher *tfm = crypto_ablkcipher_reqtfm(req);
	struct async_helper_ctx *ctx = crypto_ablkcipher_ctx(tfm);

	if (!irq_fpu_usable()) {
		struct ablkcipher_request *cryptd_req =
			ablkcipher_request_ctx(req);

		memcpy(cryptd_req, req, sizeof(*req));
		ablkcipher_request_set_tfm(cryptd_req, &ctx->cryptd_tfm->base);

		return crypto_ablkcipher_decrypt(cryptd_req);
	} else {
		struct blkcipher_desc desc;

		desc.tfm = cryptd_ablkcipher_child(ctx->cryptd_tfm);
		desc.info = req->info;
		desc.flags = 0;

		return crypto_blkcipher_crt(desc.tfm)->decrypt(
			&desc, req->dst, req->src, req->nbytes);
	}
}
EXPORT_SYMBOL_GPL(ablk_decrypt);

/* Release the cryptd transform allocated by ablk_init_common(). */
void ablk_exit(struct crypto_tfm *tfm)
{
	struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm);

	cryptd_free_ablkcipher(ctx->cryptd_tfm);
}
EXPORT_SYMBOL_GPL(ablk_exit);

/*
 * ablk_init_common - allocate the backing cryptd transform.
 *
 * Allocates the cryptd ablkcipher named @drv_name, stores it in the tfm
 * context, and sizes the request context so a full ablkcipher_request can
 * be cloned into it by the deferral paths above.  Returns 0 or a negative
 * errno from cryptd_alloc_ablkcipher().
 */
int ablk_init_common(struct crypto_tfm *tfm, const char *drv_name)
{
	struct async_helper_ctx *ctx = crypto_tfm_ctx(tfm);
	struct cryptd_ablkcipher *cryptd_tfm;

	cryptd_tfm = cryptd_alloc_ablkcipher(drv_name, 0, 0);
	if (IS_ERR(cryptd_tfm))
		return PTR_ERR(cryptd_tfm);

	ctx->cryptd_tfm = cryptd_tfm;
	tfm->crt_ablkcipher.reqsize = sizeof(struct ablkcipher_request) +
		crypto_ablkcipher_reqsize(&cryptd_tfm->base);

	return 0;
}
EXPORT_SYMBOL_GPL(ablk_init_common);

/*
 * ablk_init - default init: wrap the "__driver-<name>" internal cipher.
 *
 * Builds the conventional "__driver-" prefixed driver name from the
 * algorithm's own driver name and delegates to ablk_init_common().
 */
int ablk_init(struct crypto_tfm *tfm)
{
	char drv_name[CRYPTO_MAX_ALG_NAME];

	snprintf(drv_name, sizeof(drv_name), "__driver-%s",
					crypto_tfm_alg_driver_name(tfm));

	return ablk_init_common(tfm, drv_name);
}
EXPORT_SYMBOL_GPL(ablk_init);

MODULE_LICENSE("GPL");
gpl-2.0
dianlujitao/android_kernel_zte_msm8994
arch/mips/txx9/generic/setup_tx4938.c
3001
14094
/*
 * TX4938/4937 setup routines
 * Based on linux/arch/mips/txx9/rbtx4938/setup.c,
 * and RBTX49xx patch from CELF patch archive.
 *
 * 2003-2005 (c) MontaVista Software, Inc.
 * (C) Copyright TOSHIBA CORPORATION 2000-2001, 2004-2007
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/init.h>
#include <linux/ioport.h>
#include <linux/delay.h>
#include <linux/param.h>
#include <linux/ptrace.h>
#include <linux/mtd/physmap.h>
#include <linux/platform_device.h>
#include <asm/reboot.h>
#include <asm/traps.h>
#include <asm/txx9irq.h>
#include <asm/txx9tmr.h>
#include <asm/txx9pio.h>
#include <asm/txx9/generic.h>
#include <asm/txx9/ndfmc.h>
#include <asm/txx9/dmac.h>
#include <asm/txx9/tx4938.h>

/*
 * Report and clear any pending watchdog-reset status in CCFG, then arm
 * the chip to reset on the next watchdog expiry.  WDRST is write-1-clear.
 */
static void __init tx4938_wdr_init(void)
{
	/* report watchdog reset status */
	if (____raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_WDRST)
		pr_warning("Watchdog reset detected at 0x%lx\n",
			   read_c0_errorepc());
	/* clear WatchDogReset (W1C) */
	tx4938_ccfg_set(TX4938_CCFG_WDRST);
	/* do reset on watchdog */
	tx4938_ccfg_set(TX4938_CCFG_WR);
}

/* Register timer channel 2 as the watchdog device. */
void __init tx4938_wdt_init(void)
{
	txx9_wdt_init(TX4938_TMR_REG(2) & 0xfffffffffULL);
}

/*
 * Machine restart hook: fire the watchdog immediately and wait for the
 * reset to take effect.  If the external reset line (WDREXEN) did not
 * actually reset us, fall back to an internal watchdog reset, and as a
 * last resort halt the machine.
 */
static void tx4938_machine_restart(char *command)
{
	local_irq_disable();
	pr_emerg("Rebooting (with %s watchdog reset)...\n",
		 (____raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_WDREXEN) ?
		 "external" : "internal");
	/* clear watchdog status */
	tx4938_ccfg_set(TX4938_CCFG_WDRST);	/* W1C */
	txx9_wdt_now(TX4938_TMR_REG(2) & 0xfffffffffULL);
	/* busy-wait until the watchdog status bit latches */
	while (!(____raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_WDRST))
		;
	mdelay(10);
	if (____raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_WDREXEN) {
		pr_emerg("Rebooting (with internal watchdog reset)...\n");
		/* External WDRST failed.  Do internal watchdog reset */
		tx4938_ccfg_clear(TX4938_CCFG_WDREXEN);
	}
	/* fallback */
	(*_machine_halt)();
}

void show_registers(struct pt_regs *regs);

/*
 * Bus-error exception handler: dump CCFG/TOEA (and PCIC state if PCI is
 * configured) and panic.  Cause bit 2 distinguishes data vs instruction
 * bus errors.  Never returns (panic).
 */
static int tx4938_be_handler(struct pt_regs *regs, int is_fixup)
{
	int data = regs->cp0_cause & 4;
	console_verbose();
	pr_err("%cBE exception at %#lx\n",
	       data ? 'D' : 'I', regs->cp0_epc);
	pr_err("ccfg:%llx, toea:%llx\n",
	       (unsigned long long)____raw_readq(&tx4938_ccfgptr->ccfg),
	       (unsigned long long)____raw_readq(&tx4938_ccfgptr->toea));
#ifdef CONFIG_PCI
	tx4927_report_pcic_status();
#endif
	show_registers(regs);
	panic("BusError!");
}

/* Install the bus-error handler above. */
static void __init tx4938_be_init(void)
{
	board_be_handler = tx4938_be_handler;
}

static struct resource tx4938_sdram_resource[4];
static struct resource tx4938_sram_resource;

#define TX4938_SRAM_SIZE 0x800

/*
 * Main SoC setup: claim controller/memory resources, derive the CPU/GBUS/
 * master clock tree from the CCFG DIVMODE field, initialize watchdog
 * status, timers, PIO, and (on the 4938) gate off unused PCIC1/ETH blocks
 * according to the pin-configuration register.  Ordering of the register
 * writes follows the PROM-configured hardware state and should not be
 * rearranged.
 */
void __init tx4938_setup(void)
{
	int i;
	__u32 divmode;
	unsigned int cpuclk = 0;
	u64 ccfg;

	txx9_reg_res_init(TX4938_REV_PCODE(), TX4938_REG_BASE,
			  TX4938_REG_SIZE);
	set_c0_config(TX49_CONF_CWFON);

	/* SDRAMC,EBUSC are configured by PROM */
	for (i = 0; i < 8; i++) {
		if (!(TX4938_EBUSC_CR(i) & 0x8))
			continue;	/* disabled */
		txx9_ce_res[i].start = (unsigned long)TX4938_EBUSC_BA(i);
		txx9_ce_res[i].end =
			txx9_ce_res[i].start + TX4938_EBUSC_SIZE(i) - 1;
		request_resource(&iomem_resource, &txx9_ce_res[i]);
	}

	/* clocks */
	ccfg = ____raw_readq(&tx4938_ccfgptr->ccfg);
	if (txx9_master_clock) {
		/* calculate gbus_clock and cpu_clock from master_clock */
		divmode = (__u32)ccfg & TX4938_CCFG_DIVMODE_MASK;
		/* DIVMODE_8..18 run GBUS at 4x the master clock */
		switch (divmode) {
		case TX4938_CCFG_DIVMODE_8:
		case TX4938_CCFG_DIVMODE_10:
		case TX4938_CCFG_DIVMODE_12:
		case TX4938_CCFG_DIVMODE_16:
		case TX4938_CCFG_DIVMODE_18:
			txx9_gbus_clock = txx9_master_clock * 4; break;
		default:
			txx9_gbus_clock = txx9_master_clock;
		}
		/* CPU clock is a fixed multiple of the GBUS clock */
		switch (divmode) {
		case TX4938_CCFG_DIVMODE_2:
		case TX4938_CCFG_DIVMODE_8:
			cpuclk = txx9_gbus_clock * 2; break;
		case TX4938_CCFG_DIVMODE_2_5:
		case TX4938_CCFG_DIVMODE_10:
			cpuclk = txx9_gbus_clock * 5 / 2; break;
		case TX4938_CCFG_DIVMODE_3:
		case TX4938_CCFG_DIVMODE_12:
			cpuclk = txx9_gbus_clock * 3; break;
		case TX4938_CCFG_DIVMODE_4:
		case TX4938_CCFG_DIVMODE_16:
			cpuclk = txx9_gbus_clock * 4; break;
		case TX4938_CCFG_DIVMODE_4_5:
		case TX4938_CCFG_DIVMODE_18:
			cpuclk = txx9_gbus_clock * 9 / 2; break;
		}
		txx9_cpu_clock = cpuclk;
	} else {
		if (txx9_cpu_clock == 0)
			txx9_cpu_clock = 300000000;	/* 300MHz */
		/* calculate gbus_clock and master_clock from cpu_clock */
		cpuclk = txx9_cpu_clock;
		divmode = (__u32)ccfg & TX4938_CCFG_DIVMODE_MASK;
		/* inverse of the CPU-from-GBUS ratios above */
		switch (divmode) {
		case TX4938_CCFG_DIVMODE_2:
		case TX4938_CCFG_DIVMODE_8:
			txx9_gbus_clock = cpuclk / 2; break;
		case TX4938_CCFG_DIVMODE_2_5:
		case TX4938_CCFG_DIVMODE_10:
			txx9_gbus_clock = cpuclk * 2 / 5; break;
		case TX4938_CCFG_DIVMODE_3:
		case TX4938_CCFG_DIVMODE_12:
			txx9_gbus_clock = cpuclk / 3; break;
		case TX4938_CCFG_DIVMODE_4:
		case TX4938_CCFG_DIVMODE_16:
			txx9_gbus_clock = cpuclk / 4; break;
		case TX4938_CCFG_DIVMODE_4_5:
		case TX4938_CCFG_DIVMODE_18:
			txx9_gbus_clock = cpuclk * 2 / 9; break;
		}
		switch (divmode) {
		case TX4938_CCFG_DIVMODE_8:
		case TX4938_CCFG_DIVMODE_10:
		case TX4938_CCFG_DIVMODE_12:
		case TX4938_CCFG_DIVMODE_16:
		case TX4938_CCFG_DIVMODE_18:
			txx9_master_clock = txx9_gbus_clock / 4; break;
		default:
			txx9_master_clock = txx9_gbus_clock;
		}
	}
	/* change default value to udelay/mdelay take reasonable time */
	loops_per_jiffy = txx9_cpu_clock / HZ / 2;

	/* CCFG */
	tx4938_wdr_init();
	/* clear BusErrorOnWrite flag (W1C) */
	tx4938_ccfg_set(TX4938_CCFG_BEOW);
	/* enable Timeout BusError */
	if (txx9_ccfg_toeon)
		tx4938_ccfg_set(TX4938_CCFG_TOE);

	/* DMA selection */
	txx9_clear64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_DMASEL_ALL);

	/* Use external clock for external arbiter */
	if (!(____raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_PCIARB))
		txx9_clear64(&tx4938_ccfgptr->pcfg, TX4938_PCFG_PCICLKEN_ALL);

	printk(KERN_INFO "%s -- %dMHz(M%dMHz) CRIR:%08x CCFG:%llx PCFG:%llx\n",
	       txx9_pcode_str,
	       (cpuclk + 500000) / 1000000,
	       (txx9_master_clock + 500000) / 1000000,
	       (__u32)____raw_readq(&tx4938_ccfgptr->crir),
	       (unsigned long long)____raw_readq(&tx4938_ccfgptr->ccfg),
	       (unsigned long long)____raw_readq(&tx4938_ccfgptr->pcfg));

	/* Decode the four SDRAMC channel registers into memory resources. */
	printk(KERN_INFO "%s SDRAMC --", txx9_pcode_str);
	for (i = 0; i < 4; i++) {
		__u64 cr = TX4938_SDRAMC_CR(i);
		unsigned long base, size;
		if (!((__u32)cr & 0x00000400))
			continue;	/* disabled */
		/* base/size live in the top bits of CR, in 2MB (<<21) units */
		base = (unsigned long)(cr >> 49) << 21;
		size = (((unsigned long)(cr >> 33) & 0x7fff) + 1) << 21;
		printk(" CR%d:%016llx", i, (unsigned long long)cr);
		tx4938_sdram_resource[i].name = "SDRAM";
		tx4938_sdram_resource[i].start = base;
		tx4938_sdram_resource[i].end = base + size - 1;
		tx4938_sdram_resource[i].flags = IORESOURCE_MEM;
		request_resource(&iomem_resource, &tx4938_sdram_resource[i]);
	}
	printk(" TR:%09llx\n",
	       (unsigned long long)____raw_readq(&tx4938_sdramcptr->tr));

	/* SRAM */
	if (txx9_pcode == 0x4938 && ____raw_readq(&tx4938_sramcptr->cr) & 1) {
		unsigned int size = TX4938_SRAM_SIZE;
		tx4938_sram_resource.name = "SRAM";
		tx4938_sram_resource.start =
			(____raw_readq(&tx4938_sramcptr->cr) >> (39-11))
			& ~(size - 1);
		tx4938_sram_resource.end =
			tx4938_sram_resource.start + TX4938_SRAM_SIZE - 1;
		tx4938_sram_resource.flags = IORESOURCE_MEM;
		request_resource(&iomem_resource, &tx4938_sram_resource);
	}

	/* TMR */
	/* disable all timers */
	for (i = 0; i < TX4938_NR_TMR; i++)
		txx9_tmr_init(TX4938_TMR_REG(i) & 0xfffffffffULL);

	/* PIO */
	txx9_gpio_init(TX4938_PIO_REG & 0xfffffffffULL, 0, TX4938_NUM_PIO);
	__raw_writel(0, &tx4938_pioptr->maskcpu);
	__raw_writel(0, &tx4938_pioptr->maskext);

	if (txx9_pcode == 0x4938) {
		__u64 pcfg = ____raw_readq(&tx4938_ccfgptr->pcfg);
		/* set PCIC1 reset */
		txx9_set64(&tx4938_ccfgptr->clkctr, TX4938_CLKCTR_PCIC1RST);
		if (pcfg & (TX4938_PCFG_ETH0_SEL | TX4938_PCFG_ETH1_SEL)) {
			mdelay(1);	/* at least 128 cpu clock */
			/* clear PCIC1 reset */
			txx9_clear64(&tx4938_ccfgptr->clkctr,
				     TX4938_CLKCTR_PCIC1RST);
		} else {
			printk(KERN_INFO "%s: stop PCIC1\n", txx9_pcode_str);
			/* stop PCIC1 */
			txx9_set64(&tx4938_ccfgptr->clkctr,
				   TX4938_CLKCTR_PCIC1CKD);
		}
		/* hold unused Ethernet blocks in reset with clocks gated */
		if (!(pcfg & TX4938_PCFG_ETH0_SEL)) {
			printk(KERN_INFO "%s: stop ETH0\n", txx9_pcode_str);
			txx9_set64(&tx4938_ccfgptr->clkctr,
				   TX4938_CLKCTR_ETH0RST);
			txx9_set64(&tx4938_ccfgptr->clkctr,
				   TX4938_CLKCTR_ETH0CKD);
		}
		if (!(pcfg & TX4938_PCFG_ETH1_SEL)) {
			printk(KERN_INFO "%s: stop ETH1\n", txx9_pcode_str);
			txx9_set64(&tx4938_ccfgptr->clkctr,
				   TX4938_CLKCTR_ETH1RST);
			txx9_set64(&tx4938_ccfgptr->clkctr,
				   TX4938_CLKCTR_ETH1CKD);
		}
	}

	_machine_restart = tx4938_machine_restart;
	board_be_init = tx4938_be_init;
}

/*
 * Register timer channel @tmrnr as the clockevent source, but only when
 * CCFG.TINTDIS is set (presumably meaning the dedicated timer interrupt
 * path is disabled and the generic TMR IRQ must be used -- confirm
 * against the TX4938 datasheet).
 */
void __init tx4938_time_init(unsigned int tmrnr)
{
	if (____raw_readq(&tx4938_ccfgptr->ccfg) & TX4938_CCFG_TINTDIS)
		txx9_clockevent_init(TX4938_TMR_REG(tmrnr) & 0xfffffffffULL,
				     TXX9_IRQ_BASE + TX4938_IR_TMR(tmrnr),
				     TXX9_IMCLK);
}

/*
 * Initialize the serial ports.  SIO1 shares pins with ETH0, so it is
 * skipped when PCFG selects the Ethernet function.  @cts_mask selects
 * which channels have CTS wired.
 */
void __init tx4938_sio_init(unsigned int sclk, unsigned int cts_mask)
{
	int i;
	unsigned int ch_mask = 0;

	if (__raw_readq(&tx4938_ccfgptr->pcfg) & TX4938_PCFG_ETH0_SEL)
		ch_mask |= 1 << 1; /* disable SIO1 by PCFG setting */
	for (i = 0; i < 2; i++) {
		if ((1 << i) & ch_mask)
			continue;
		txx9_sio_init(TX4938_SIO_REG(i) & 0xfffffffffULL,
			      TXX9_IRQ_BASE + TX4938_IR_SIO(i),
			      i, sclk, (1 << i) & cts_mask);
	}
}

/* Register the SPI controller on bus @busid. */
void __init tx4938_spi_init(int busid)
{
	txx9_spi_init(busid, TX4938_SPI_REG & 0xfffffffffULL,
		      TXX9_IRQ_BASE + TX4938_IR_SPI);
}

/*
 * Register MAC addresses for the Ethernet controllers that are actually
 * pin-selected in PCFG; a NULL address or deselected port is skipped.
 */
void __init tx4938_ethaddr_init(unsigned char *addr0, unsigned char *addr1)
{
	u64 pcfg = __raw_readq(&tx4938_ccfgptr->pcfg);

	if (addr0 && (pcfg & TX4938_PCFG_ETH0_SEL))
		txx9_ethaddr_init(TXX9_IRQ_BASE + TX4938_IR_ETH0, addr0);
	if (addr1 && (pcfg & TX4938_PCFG_ETH1_SEL))
		txx9_ethaddr_init(TXX9_IRQ_BASE + TX4938_IR_ETH1, addr1);
}

/*
 * Register a physmap flash device on external bus channel @ch, using the
 * bus width and address window that the PROM programmed into EBUSC.
 */
void __init tx4938_mtd_init(int ch)
{
	struct physmap_flash_data pdata = {
		.width = TX4938_EBUSC_WIDTH(ch) / 8,
	};
	unsigned long start = txx9_ce_res[ch].start;
	unsigned long size = txx9_ce_res[ch].end - start + 1;

	if (!(TX4938_EBUSC_CR(ch) & 0x8))
		return;	/* disabled */
	txx9_physmap_flash_init(ch, start, size, &pdata);
}

/*
 * Register the on-chip IDE controller when PCFG selects the ATA function
 * (and not NDF).  Scans the EBUSC channels for the one configured for
 * ATA (ISA mode, 16-bit, enabled) and derives the MMIO window from its
 * base address.  @tune non-zero lets the driver retune bus timings.
 */
void __init tx4938_ata_init(unsigned int irq, unsigned int shift, int tune)
{
	struct platform_device *pdev;
	struct resource res[] = {
		{
			/* .start and .end are filled in later */
			.flags = IORESOURCE_MEM,
		}, {
			.start = irq,
			.flags = IORESOURCE_IRQ,
		},
	};
	struct tx4938ide_platform_info pdata = {
		.ioport_shift = shift,
		/*
		 * The IDE driver should not change bus timings if other ISA
		 * devices existed.
		 */
		.gbus_clock = tune ? txx9_gbus_clock : 0,
	};
	u64 ebccr;
	int i;

	if ((__raw_readq(&tx4938_ccfgptr->pcfg) &
	     (TX4938_PCFG_ATA_SEL | TX4938_PCFG_NDF_SEL))
	    != TX4938_PCFG_ATA_SEL)
		return;
	for (i = 0; i < 8; i++) {
		/* check EBCCRn.ISA, EBCCRn.BSZ, EBCCRn.ME */
		ebccr = __raw_readq(&tx4938_ebuscptr->cr[i]);
		if ((ebccr & 0x00f00008) == 0x00e00008)
			break;
	}
	if (i == 8)
		return;
	pdata.ebus_ch = i;
	res[0].start = ((ebccr >> 48) << 20) + 0x10000;
	res[0].end = res[0].start + 0x20000 - 1;
	pdev = platform_device_alloc("tx4938ide", -1);
	if (!pdev ||
	    platform_device_add_resources(pdev, res, ARRAY_SIZE(res)) ||
	    platform_device_add_data(pdev, &pdata, sizeof(pdata)) ||
	    platform_device_add(pdev))
		platform_device_put(pdev);
}

/*
 * Register the NAND flash controller when PCFG selects the NDF function
 * exclusively (neither ATA nor ISA).  On big-endian systems the data
 * port sits 4 bytes into the register window.
 */
void __init tx4938_ndfmc_init(unsigned int hold, unsigned int spw)
{
	struct txx9ndfmc_platform_data plat_data = {
		.shift = 1,
		.gbus_clock = txx9_gbus_clock,
		.hold = hold,
		.spw = spw,
		.ch_mask = 1,
	};
	unsigned long baseaddr = TX4938_NDFMC_REG & 0xfffffffffULL;

#ifdef __BIG_ENDIAN
	baseaddr += 4;
#endif
	if ((__raw_readq(&tx4938_ccfgptr->pcfg) &
	     (TX4938_PCFG_ATA_SEL|TX4938_PCFG_ISA_SEL|TX4938_PCFG_NDF_SEL)) ==
	    TX4938_PCFG_NDF_SEL)
		txx9_ndfmc_init(baseaddr, &plat_data);
}

/*
 * Register both DMA controllers, designating one memcpy channel per
 * controller (@memcpy_chan0 for DMAC0, @memcpy_chan1 for DMAC1).
 */
void __init tx4938_dmac_init(int memcpy_chan0, int memcpy_chan1)
{
	struct txx9dmac_platform_data plat_data = {
		.have_64bit_regs = true,
	};
	int i;

	for (i = 0; i < 2; i++) {
		plat_data.memcpy_chan = i ? memcpy_chan1 : memcpy_chan0;
		txx9_dmac_init(i, TX4938_DMA_REG(i) & 0xfffffffffULL,
			       TXX9_IRQ_BASE + TX4938_IR_DMA(i, 0),
			       &plat_data);
	}
}

/*
 * Register the AC-link controller when the ACLC function is pin-selected
 * (SEL2 set and ETH0 not selected, since they share pins).
 */
void __init tx4938_aclc_init(void)
{
	u64 pcfg = __raw_readq(&tx4938_ccfgptr->pcfg);

	if ((pcfg & TX4938_PCFG_SEL2) &&
	    !(pcfg & TX4938_PCFG_ETH0_SEL))
		txx9_aclc_init(TX4938_ACLC_REG & 0xfffffffffULL,
			       TXX9_IRQ_BASE + TX4938_IR_ACLC,
			       1, 0, 1);
}

/* Register the on-chip SRAM resource discovered by tx4938_setup(). */
void __init tx4938_sramc_init(void)
{
	if (tx4938_sram_resource.start)
		txx9_sramc_init(&tx4938_sram_resource);
}

/*
 * Put every pin-deselected on-chip module (ACLC, NDFMC, SPI) into reset
 * with its clock gated, per chip variant, and log which were stopped.
 * Runs with IRQs disabled around the CLKCTR read-modify-writes.
 */
static void __init tx4938_stop_unused_modules(void)
{
	__u64 pcfg, rst = 0, ckd = 0;
	char buf[128];

	buf[0] = '\0';
	local_irq_disable();
	pcfg = ____raw_readq(&tx4938_ccfgptr->pcfg);
	switch (txx9_pcode) {
	case 0x4937:
		if (!(pcfg & TX4938_PCFG_SEL2)) {
			rst |= TX4938_CLKCTR_ACLRST;
			ckd |= TX4938_CLKCTR_ACLCKD;
			strcat(buf, " ACLC");
		}
		break;
	case 0x4938:
		if (!(pcfg & TX4938_PCFG_SEL2) ||
		    (pcfg & TX4938_PCFG_ETH0_SEL)) {
			rst |= TX4938_CLKCTR_ACLRST;
			ckd |= TX4938_CLKCTR_ACLCKD;
			strcat(buf, " ACLC");
		}
		if ((pcfg &
		     (TX4938_PCFG_ATA_SEL | TX4938_PCFG_ISA_SEL |
		      TX4938_PCFG_NDF_SEL))
		    != TX4938_PCFG_NDF_SEL) {
			rst |= TX4938_CLKCTR_NDFRST;
			ckd |= TX4938_CLKCTR_NDFCKD;
			strcat(buf, " NDFMC");
		}
		if (!(pcfg & TX4938_PCFG_SPI_SEL)) {
			rst |= TX4938_CLKCTR_SPIRST;
			ckd |= TX4938_CLKCTR_SPICKD;
			strcat(buf, " SPI");
		}
		break;
	}
	if (rst | ckd) {
		txx9_set64(&tx4938_ccfgptr->clkctr, rst);
		txx9_set64(&tx4938_ccfgptr->clkctr, ckd);
	}
	local_irq_enable();
	if (buf[0])
		pr_info("%s: stop%s\n", txx9_pcode_str, buf);
}

/* Late initcall: gate unused modules once all drivers had a chance to probe. */
static int __init tx4938_late_init(void)
{
	if (txx9_pcode != 0x4937 && txx9_pcode != 0x4938)
		return -ENODEV;
	tx4938_stop_unused_modules();
	return 0;
}
late_initcall(tx4938_late_init);
gpl-2.0
kerneldevs/fusX-univa-kernel
sound/drivers/vx/vx_hwdep.c
4025
6162
/*
 * Driver for Digigram VX soundcards
 *
 * DSP firmware management
 *
 * Copyright (c) 2002 by Takashi Iwai <tiwai@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 */

#include <linux/device.h>
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <sound/core.h>
#include <sound/hwdep.h>
#include <sound/vx_core.h>

#ifdef SND_VX_FW_LOADER

MODULE_FIRMWARE("vx/bx_1_vxp.b56");
MODULE_FIRMWARE("vx/bx_1_vp4.b56");
MODULE_FIRMWARE("vx/x1_1_vx2.xlx");
MODULE_FIRMWARE("vx/x1_2_v22.xlx");
MODULE_FIRMWARE("vx/x1_1_vxp.xlx");
MODULE_FIRMWARE("vx/x1_1_vp4.xlx");
MODULE_FIRMWARE("vx/bd56002.boot");
MODULE_FIRMWARE("vx/bd563v2.boot");
MODULE_FIRMWARE("vx/bd563s3.boot");
MODULE_FIRMWARE("vx/l_1_vx2.d56");
MODULE_FIRMWARE("vx/l_1_v22.d56");
MODULE_FIRMWARE("vx/l_1_vxp.d56");
MODULE_FIRMWARE("vx/l_1_vp4.d56");

/*
 * snd_vx_setup_firmware - load all DSP images via request_firmware().
 *
 * Loads up to four stage images for the card type (a NULL slot is
 * skipped), handing each to the chip's load_dsp callback.  Stage 1 is
 * the Xilinx image (VX_STAT_XILINX_LOADED is flagged once loaded).
 * With CONFIG_PM the firmware blobs are kept for resume; otherwise they
 * are released immediately.  Afterwards the PCM/mixer devices and any
 * chip-specific controls are created and the card is registered.
 * Returns 0 or a negative errno.
 */
int snd_vx_setup_firmware(struct vx_core *chip)
{
	static char *fw_files[VX_TYPE_NUMS][4] = {
		[VX_TYPE_BOARD] = {
			NULL, "x1_1_vx2.xlx", "bd56002.boot", "l_1_vx2.d56",
		},
		[VX_TYPE_V2] = {
			NULL, "x1_2_v22.xlx", "bd563v2.boot", "l_1_v22.d56",
		},
		[VX_TYPE_MIC] = {
			NULL, "x1_2_v22.xlx", "bd563v2.boot", "l_1_v22.d56",
		},
		[VX_TYPE_VXPOCKET] = {
			"bx_1_vxp.b56", "x1_1_vxp.xlx", "bd563s3.boot", "l_1_vxp.d56"
		},
		[VX_TYPE_VXP440] = {
			"bx_1_vp4.b56", "x1_1_vp4.xlx", "bd563s3.boot", "l_1_vp4.d56"
		},
	};
	int i, err;

	for (i = 0; i < 4; i++) {
		char path[32];
		const struct firmware *fw;
		if (! fw_files[chip->type][i])
			continue;
		sprintf(path, "vx/%s", fw_files[chip->type][i]);
		if (request_firmware(&fw, path, chip->dev)) {
			snd_printk(KERN_ERR "vx: can't load firmware %s\n", path);
			return -ENOENT;
		}
		err = chip->ops->load_dsp(chip, i, fw);
		if (err < 0) {
			release_firmware(fw);
			return err;
		}
		if (i == 1)
			chip->chip_status |= VX_STAT_XILINX_LOADED;
#ifdef CONFIG_PM
		chip->firmware[i] = fw;
#else
		release_firmware(fw);
#endif
	}

	/* ok, we reached to the last one */
	/* create the devices if not built yet */
	if ((err = snd_vx_pcm_new(chip)) < 0)
		return err;
	if ((err = snd_vx_mixer_new(chip)) < 0)
		return err;
	if (chip->ops->add_controls)
		if ((err = chip->ops->add_controls(chip)) < 0)
			return err;
	chip->chip_status |= VX_STAT_DEVICE_INIT;
	chip->chip_status |= VX_STAT_CHIP_INIT;
	return snd_card_register(chip->card);
}

/* exported */
/* Release firmware blobs kept around for PM resume (no-op without PM). */
void snd_vx_free_firmware(struct vx_core *chip)
{
#ifdef CONFIG_PM
	int i;
	for (i = 0; i < 4; i++)
		release_firmware(chip->firmware[i]);
#endif
}

#else /* old style firmware loading */

/*
 * hwdep dsp_status callback: report the loader id string for the card
 * type, the number of DSP images user space must feed us (4 for PCMCIA
 * cards which need an extra boot image, else 3), and whether the chip
 * is fully initialized.
 */
static int vx_hwdep_dsp_status(struct snd_hwdep *hw,
			       struct snd_hwdep_dsp_status *info)
{
	static char *type_ids[VX_TYPE_NUMS] = {
		[VX_TYPE_BOARD] = "vxboard",
		[VX_TYPE_V2] = "vx222",
		[VX_TYPE_MIC] = "vx222",
		[VX_TYPE_VXPOCKET] = "vxpocket",
		[VX_TYPE_VXP440] = "vxp440",
	};
	struct vx_core *vx = hw->private_data;

	if (snd_BUG_ON(!type_ids[vx->type]))
		return -EINVAL;
	strcpy(info->id, type_ids[vx->type]);
	if (vx_is_pcmcia(vx))
		info->num_dsps = 4;
	else
		info->num_dsps = 3;
	if (vx->chip_status & VX_STAT_CHIP_INIT)
		info->chip_ready = 1;
	info->version = VX_DRIVER_VERSION;
	return 0;
}

/* Free a firmware struct we built ourselves (vmalloc'ed data + kmalloc'ed
 * container) -- must not be passed to release_firmware(). */
static void free_fw(const struct firmware *fw)
{
	if (fw) {
		vfree(fw->data);
		kfree(fw);
	}
}

/*
 * hwdep dsp_load callback: copy a user-supplied DSP image into a
 * hand-built firmware struct and feed it to the chip's load_dsp.
 * Non-PCMCIA cards have no boot image, so the user's index is shifted
 * by one to line up with the 4-slot numbering.  Once the last image
 * (index 3) is in, the PCM/mixer devices are created and the card
 * registered, mirroring the new-style loader above.
 */
static int vx_hwdep_dsp_load(struct snd_hwdep *hw,
			     struct snd_hwdep_dsp_image *dsp)
{
	struct vx_core *vx = hw->private_data;
	int index, err;
	struct firmware *fw;

	if (snd_BUG_ON(!vx->ops->load_dsp))
		return -ENXIO;

	fw = kmalloc(sizeof(*fw), GFP_KERNEL);
	if (! fw) {
		snd_printk(KERN_ERR "cannot allocate firmware\n");
		return -ENOMEM;
	}
	fw->size = dsp->length;
	fw->data = vmalloc(fw->size);
	if (! fw->data) {
		snd_printk(KERN_ERR "cannot allocate firmware image (length=%d)\n",
			   (int)fw->size);
		kfree(fw);
		return -ENOMEM;
	}
	if (copy_from_user((void *)fw->data, dsp->image, dsp->length)) {
		free_fw(fw);
		return -EFAULT;
	}

	index = dsp->index;
	if (! vx_is_pcmcia(vx))
		index++;
	err = vx->ops->load_dsp(vx, index, fw);
	if (err < 0) {
		free_fw(fw);
		return err;
	}
#ifdef CONFIG_PM
	vx->firmware[index] = fw;
#else
	free_fw(fw);
#endif

	if (index == 1)
		vx->chip_status |= VX_STAT_XILINX_LOADED;
	if (index < 3)
		return 0;

	/* ok, we reached to the last one */
	/* create the devices if not built yet */
	if (! (vx->chip_status & VX_STAT_DEVICE_INIT)) {
		if ((err = snd_vx_pcm_new(vx)) < 0)
			return err;
		if ((err = snd_vx_mixer_new(vx)) < 0)
			return err;
		if (vx->ops->add_controls)
			if ((err = vx->ops->add_controls(vx)) < 0)
				return err;
		if ((err = snd_card_register(vx->card)) < 0)
			return err;
		vx->chip_status |= VX_STAT_DEVICE_INIT;
	}
	vx->chip_status |= VX_STAT_CHIP_INIT;
	return 0;
}

/* exported */
/*
 * Old-style setup: expose an exclusive hwdep device through which user
 * space pushes the DSP images (via the dsp_load callback above), then
 * register the card.
 */
int snd_vx_setup_firmware(struct vx_core *chip)
{
	int err;
	struct snd_hwdep *hw;

	if ((err = snd_hwdep_new(chip->card, SND_VX_HWDEP_ID, 0, &hw)) < 0)
		return err;

	hw->iface = SNDRV_HWDEP_IFACE_VX;
	hw->private_data = chip;
	hw->ops.dsp_status = vx_hwdep_dsp_status;
	hw->ops.dsp_load = vx_hwdep_dsp_load;
	hw->exclusive = 1;
	sprintf(hw->name, "VX Loader (%s)", chip->card->driver);
	chip->hwdep = hw;

	return snd_card_register(chip->card);
}

/* exported */
/* Release PM-retained images built by vx_hwdep_dsp_load(). */
void snd_vx_free_firmware(struct vx_core *chip)
{
#ifdef CONFIG_PM
	int i;
	for (i = 0; i < 4; i++)
		free_fw(chip->firmware[i]);
#endif
}

#endif /* SND_VX_FW_LOADER */

EXPORT_SYMBOL(snd_vx_setup_firmware);
EXPORT_SYMBOL(snd_vx_free_firmware);
gpl-2.0
bilalliberty/android_kernel_htc_zaraul
drivers/media/video/mt9m032.c
4793
23750
/* * Driver for MT9M032 CMOS Image Sensor from Micron * * Copyright (C) 2010-2011 Lund Engineering * Contact: Gil Lund <gwlund@lundeng.com> * Author: Martin Hostettler <martin@neutronstar.dyndns.org> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <linux/delay.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/math64.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/slab.h> #include <linux/v4l2-mediabus.h> #include <media/media-entity.h> #include <media/mt9m032.h> #include <media/v4l2-ctrls.h> #include <media/v4l2-device.h> #include <media/v4l2-subdev.h> #include "aptina-pll.h" /* * width and height include active boundary and black parts * * column 0- 15 active boundary * column 16-1455 image * column 1456-1471 active boundary * column 1472-1599 black * * row 0- 51 black * row 53- 59 active boundary * row 60-1139 image * row 1140-1147 active boundary * row 1148-1151 black */ #define MT9M032_PIXEL_ARRAY_WIDTH 1600 #define MT9M032_PIXEL_ARRAY_HEIGHT 1152 #define MT9M032_CHIP_VERSION 0x00 #define MT9M032_CHIP_VERSION_VALUE 0x1402 #define MT9M032_ROW_START 0x01 #define MT9M032_ROW_START_MIN 0 #define MT9M032_ROW_START_MAX 1152 #define MT9M032_ROW_START_DEF 60 #define MT9M032_COLUMN_START 0x02 #define MT9M032_COLUMN_START_MIN 0 #define MT9M032_COLUMN_START_MAX 1600 #define MT9M032_COLUMN_START_DEF 16 #define 
MT9M032_ROW_SIZE 0x03 #define MT9M032_ROW_SIZE_MIN 32 #define MT9M032_ROW_SIZE_MAX 1152 #define MT9M032_ROW_SIZE_DEF 1080 #define MT9M032_COLUMN_SIZE 0x04 #define MT9M032_COLUMN_SIZE_MIN 32 #define MT9M032_COLUMN_SIZE_MAX 1600 #define MT9M032_COLUMN_SIZE_DEF 1440 #define MT9M032_HBLANK 0x05 #define MT9M032_VBLANK 0x06 #define MT9M032_VBLANK_MAX 0x7ff #define MT9M032_SHUTTER_WIDTH_HIGH 0x08 #define MT9M032_SHUTTER_WIDTH_LOW 0x09 #define MT9M032_SHUTTER_WIDTH_MIN 1 #define MT9M032_SHUTTER_WIDTH_MAX 1048575 #define MT9M032_SHUTTER_WIDTH_DEF 1943 #define MT9M032_PIX_CLK_CTRL 0x0a #define MT9M032_PIX_CLK_CTRL_INV_PIXCLK 0x8000 #define MT9M032_RESTART 0x0b #define MT9M032_RESET 0x0d #define MT9M032_PLL_CONFIG1 0x11 #define MT9M032_PLL_CONFIG1_OUTDIV_MASK 0x3f #define MT9M032_PLL_CONFIG1_MUL_SHIFT 8 #define MT9M032_READ_MODE1 0x1e #define MT9M032_READ_MODE2 0x20 #define MT9M032_READ_MODE2_VFLIP_SHIFT 15 #define MT9M032_READ_MODE2_HFLIP_SHIFT 14 #define MT9M032_READ_MODE2_ROW_BLC 0x40 #define MT9M032_GAIN_GREEN1 0x2b #define MT9M032_GAIN_BLUE 0x2c #define MT9M032_GAIN_RED 0x2d #define MT9M032_GAIN_GREEN2 0x2e /* write only */ #define MT9M032_GAIN_ALL 0x35 #define MT9M032_GAIN_DIGITAL_MASK 0x7f #define MT9M032_GAIN_DIGITAL_SHIFT 8 #define MT9M032_GAIN_AMUL_SHIFT 6 #define MT9M032_GAIN_ANALOG_MASK 0x3f #define MT9M032_FORMATTER1 0x9e #define MT9M032_FORMATTER2 0x9f #define MT9M032_FORMATTER2_DOUT_EN 0x1000 #define MT9M032_FORMATTER2_PIXCLK_EN 0x2000 /* * The available MT9M032 datasheet is missing documentation for register 0x10 * MT9P031 seems to be close enough, so use constants from that datasheet for * now. * But keep the name MT9P031 to remind us, that this isn't really confirmed * for this sensor. 
*/ #define MT9P031_PLL_CONTROL 0x10 #define MT9P031_PLL_CONTROL_PWROFF 0x0050 #define MT9P031_PLL_CONTROL_PWRON 0x0051 #define MT9P031_PLL_CONTROL_USEPLL 0x0052 #define MT9P031_PLL_CONFIG2 0x11 #define MT9P031_PLL_CONFIG2_P1_DIV_MASK 0x1f struct mt9m032 { struct v4l2_subdev subdev; struct media_pad pad; struct mt9m032_platform_data *pdata; unsigned int pix_clock; struct v4l2_ctrl_handler ctrls; struct { struct v4l2_ctrl *hflip; struct v4l2_ctrl *vflip; }; struct mutex lock; /* Protects streaming, format, interval and crop */ bool streaming; struct v4l2_mbus_framefmt format; struct v4l2_rect crop; struct v4l2_fract frame_interval; }; #define to_mt9m032(sd) container_of(sd, struct mt9m032, subdev) #define to_dev(sensor) \ (&((struct i2c_client *)v4l2_get_subdevdata(&(sensor)->subdev))->dev) static int mt9m032_read(struct i2c_client *client, u8 reg) { return i2c_smbus_read_word_swapped(client, reg); } static int mt9m032_write(struct i2c_client *client, u8 reg, const u16 data) { return i2c_smbus_write_word_swapped(client, reg, data); } static u32 mt9m032_row_time(struct mt9m032 *sensor, unsigned int width) { unsigned int effective_width; u32 ns; effective_width = width + 716; /* empirical value */ ns = div_u64(1000000000ULL * effective_width, sensor->pix_clock); dev_dbg(to_dev(sensor), "MT9M032 line time: %u ns\n", ns); return ns; } static int mt9m032_update_timing(struct mt9m032 *sensor, struct v4l2_fract *interval) { struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev); struct v4l2_rect *crop = &sensor->crop; unsigned int min_vblank; unsigned int vblank; u32 row_time; if (!interval) interval = &sensor->frame_interval; row_time = mt9m032_row_time(sensor, crop->width); vblank = div_u64(1000000000ULL * interval->numerator, (u64)row_time * interval->denominator) - crop->height; if (vblank > MT9M032_VBLANK_MAX) { /* hardware limits to 11 bit values */ interval->denominator = 1000; interval->numerator = div_u64((crop->height + MT9M032_VBLANK_MAX) * 
(u64)row_time * interval->denominator, 1000000000ULL); vblank = div_u64(1000000000ULL * interval->numerator, (u64)row_time * interval->denominator) - crop->height; } /* enforce minimal 1.6ms blanking time. */ min_vblank = 1600000 / row_time; vblank = clamp_t(unsigned int, vblank, min_vblank, MT9M032_VBLANK_MAX); return mt9m032_write(client, MT9M032_VBLANK, vblank); } static int mt9m032_update_geom_timing(struct mt9m032 *sensor) { struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev); int ret; ret = mt9m032_write(client, MT9M032_COLUMN_SIZE, sensor->crop.width - 1); if (!ret) ret = mt9m032_write(client, MT9M032_ROW_SIZE, sensor->crop.height - 1); if (!ret) ret = mt9m032_write(client, MT9M032_COLUMN_START, sensor->crop.left); if (!ret) ret = mt9m032_write(client, MT9M032_ROW_START, sensor->crop.top); if (!ret) ret = mt9m032_update_timing(sensor, NULL); return ret; } static int update_formatter2(struct mt9m032 *sensor, bool streaming) { struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev); u16 reg_val = MT9M032_FORMATTER2_DOUT_EN | 0x0070; /* parts reserved! 
*/ /* possibly for changing to 14-bit mode */ if (streaming) reg_val |= MT9M032_FORMATTER2_PIXCLK_EN; /* pixclock enable */ return mt9m032_write(client, MT9M032_FORMATTER2, reg_val); } static int mt9m032_setup_pll(struct mt9m032 *sensor) { static const struct aptina_pll_limits limits = { .ext_clock_min = 8000000, .ext_clock_max = 16500000, .int_clock_min = 2000000, .int_clock_max = 24000000, .out_clock_min = 322000000, .out_clock_max = 693000000, .pix_clock_max = 99000000, .n_min = 1, .n_max = 64, .m_min = 16, .m_max = 255, .p1_min = 1, .p1_max = 128, }; struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev); struct mt9m032_platform_data *pdata = sensor->pdata; struct aptina_pll pll; int ret; pll.ext_clock = pdata->ext_clock; pll.pix_clock = pdata->pix_clock; ret = aptina_pll_calculate(&client->dev, &limits, &pll); if (ret < 0) return ret; sensor->pix_clock = pdata->pix_clock; ret = mt9m032_write(client, MT9M032_PLL_CONFIG1, (pll.m << MT9M032_PLL_CONFIG1_MUL_SHIFT) | (pll.p1 - 1)); if (!ret) ret = mt9m032_write(client, MT9P031_PLL_CONFIG2, pll.n - 1); if (!ret) ret = mt9m032_write(client, MT9P031_PLL_CONTROL, MT9P031_PLL_CONTROL_PWRON | MT9P031_PLL_CONTROL_USEPLL); if (!ret) /* more reserved, Continuous, Master Mode */ ret = mt9m032_write(client, MT9M032_READ_MODE1, 0x8006); if (!ret) /* Set 14-bit mode, select 7 divider */ ret = mt9m032_write(client, MT9M032_FORMATTER1, 0x111e); return ret; } /* ----------------------------------------------------------------------------- * Subdev pad operations */ static int mt9m032_enum_mbus_code(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh, struct v4l2_subdev_mbus_code_enum *code) { if (code->index != 0) return -EINVAL; code->code = V4L2_MBUS_FMT_Y8_1X8; return 0; } static int mt9m032_enum_frame_size(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh, struct v4l2_subdev_frame_size_enum *fse) { if (fse->index != 0 || fse->code != V4L2_MBUS_FMT_Y8_1X8) return -EINVAL; fse->min_width = 
MT9M032_COLUMN_SIZE_DEF; fse->max_width = MT9M032_COLUMN_SIZE_DEF; fse->min_height = MT9M032_ROW_SIZE_DEF; fse->max_height = MT9M032_ROW_SIZE_DEF; return 0; } /** * __mt9m032_get_pad_crop() - get crop rect * @sensor: pointer to the sensor struct * @fh: file handle for getting the try crop rect from * @which: select try or active crop rect * * Returns a pointer the current active or fh relative try crop rect */ static struct v4l2_rect * __mt9m032_get_pad_crop(struct mt9m032 *sensor, struct v4l2_subdev_fh *fh, enum v4l2_subdev_format_whence which) { switch (which) { case V4L2_SUBDEV_FORMAT_TRY: return v4l2_subdev_get_try_crop(fh, 0); case V4L2_SUBDEV_FORMAT_ACTIVE: return &sensor->crop; default: return NULL; } } /** * __mt9m032_get_pad_format() - get format * @sensor: pointer to the sensor struct * @fh: file handle for getting the try format from * @which: select try or active format * * Returns a pointer the current active or fh relative try format */ static struct v4l2_mbus_framefmt * __mt9m032_get_pad_format(struct mt9m032 *sensor, struct v4l2_subdev_fh *fh, enum v4l2_subdev_format_whence which) { switch (which) { case V4L2_SUBDEV_FORMAT_TRY: return v4l2_subdev_get_try_format(fh, 0); case V4L2_SUBDEV_FORMAT_ACTIVE: return &sensor->format; default: return NULL; } } static int mt9m032_get_pad_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh, struct v4l2_subdev_format *fmt) { struct mt9m032 *sensor = to_mt9m032(subdev); mutex_lock(&sensor->lock); fmt->format = *__mt9m032_get_pad_format(sensor, fh, fmt->which); mutex_unlock(&sensor->lock); return 0; } static int mt9m032_set_pad_format(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh, struct v4l2_subdev_format *fmt) { struct mt9m032 *sensor = to_mt9m032(subdev); int ret; mutex_lock(&sensor->lock); if (sensor->streaming && fmt->which == V4L2_SUBDEV_FORMAT_ACTIVE) { ret = -EBUSY; goto done; } /* Scaling is not supported, the format is thus fixed. 
*/ fmt->format = *__mt9m032_get_pad_format(sensor, fh, fmt->which); ret = 0; done: mutex_unlock(&sensor->lock); return ret; } static int mt9m032_get_pad_crop(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh, struct v4l2_subdev_crop *crop) { struct mt9m032 *sensor = to_mt9m032(subdev); mutex_lock(&sensor->lock); crop->rect = *__mt9m032_get_pad_crop(sensor, fh, crop->which); mutex_unlock(&sensor->lock); return 0; } static int mt9m032_set_pad_crop(struct v4l2_subdev *subdev, struct v4l2_subdev_fh *fh, struct v4l2_subdev_crop *crop) { struct mt9m032 *sensor = to_mt9m032(subdev); struct v4l2_mbus_framefmt *format; struct v4l2_rect *__crop; struct v4l2_rect rect; int ret = 0; mutex_lock(&sensor->lock); if (sensor->streaming && crop->which == V4L2_SUBDEV_FORMAT_ACTIVE) { ret = -EBUSY; goto done; } /* Clamp the crop rectangle boundaries and align them to a multiple of 2 * pixels to ensure a GRBG Bayer pattern. */ rect.left = clamp(ALIGN(crop->rect.left, 2), MT9M032_COLUMN_START_MIN, MT9M032_COLUMN_START_MAX); rect.top = clamp(ALIGN(crop->rect.top, 2), MT9M032_ROW_START_MIN, MT9M032_ROW_START_MAX); rect.width = clamp(ALIGN(crop->rect.width, 2), MT9M032_COLUMN_SIZE_MIN, MT9M032_COLUMN_SIZE_MAX); rect.height = clamp(ALIGN(crop->rect.height, 2), MT9M032_ROW_SIZE_MIN, MT9M032_ROW_SIZE_MAX); rect.width = min(rect.width, MT9M032_PIXEL_ARRAY_WIDTH - rect.left); rect.height = min(rect.height, MT9M032_PIXEL_ARRAY_HEIGHT - rect.top); __crop = __mt9m032_get_pad_crop(sensor, fh, crop->which); if (rect.width != __crop->width || rect.height != __crop->height) { /* Reset the output image size if the crop rectangle size has * been modified. 
*/ format = __mt9m032_get_pad_format(sensor, fh, crop->which); format->width = rect.width; format->height = rect.height; } *__crop = rect; crop->rect = rect; if (crop->which == V4L2_SUBDEV_FORMAT_ACTIVE) ret = mt9m032_update_geom_timing(sensor); done: mutex_unlock(&sensor->lock); return ret; } static int mt9m032_get_frame_interval(struct v4l2_subdev *subdev, struct v4l2_subdev_frame_interval *fi) { struct mt9m032 *sensor = to_mt9m032(subdev); mutex_lock(&sensor->lock); memset(fi, 0, sizeof(*fi)); fi->interval = sensor->frame_interval; mutex_unlock(&sensor->lock); return 0; } static int mt9m032_set_frame_interval(struct v4l2_subdev *subdev, struct v4l2_subdev_frame_interval *fi) { struct mt9m032 *sensor = to_mt9m032(subdev); int ret; mutex_lock(&sensor->lock); if (sensor->streaming) { ret = -EBUSY; goto done; } /* Avoid divisions by 0. */ if (fi->interval.denominator == 0) fi->interval.denominator = 1; ret = mt9m032_update_timing(sensor, &fi->interval); if (!ret) sensor->frame_interval = fi->interval; done: mutex_unlock(&sensor->lock); return ret; } static int mt9m032_s_stream(struct v4l2_subdev *subdev, int streaming) { struct mt9m032 *sensor = to_mt9m032(subdev); int ret; mutex_lock(&sensor->lock); ret = update_formatter2(sensor, streaming); if (!ret) sensor->streaming = streaming; mutex_unlock(&sensor->lock); return ret; } /* ----------------------------------------------------------------------------- * V4L2 subdev core operations */ #ifdef CONFIG_VIDEO_ADV_DEBUG static int mt9m032_g_register(struct v4l2_subdev *sd, struct v4l2_dbg_register *reg) { struct mt9m032 *sensor = to_mt9m032(sd); struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev); int val; if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) return -EINVAL; if (reg->match.addr != client->addr) return -ENODEV; val = mt9m032_read(client, reg->reg); if (val < 0) return -EIO; reg->size = 2; reg->val = val; return 0; } static int mt9m032_s_register(struct v4l2_subdev *sd, struct 
v4l2_dbg_register *reg) { struct mt9m032 *sensor = to_mt9m032(sd); struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev); if (reg->match.type != V4L2_CHIP_MATCH_I2C_ADDR || reg->reg > 0xff) return -EINVAL; if (reg->match.addr != client->addr) return -ENODEV; return mt9m032_write(client, reg->reg, reg->val); } #endif /* ----------------------------------------------------------------------------- * V4L2 subdev control operations */ static int update_read_mode2(struct mt9m032 *sensor, bool vflip, bool hflip) { struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev); int reg_val = (vflip << MT9M032_READ_MODE2_VFLIP_SHIFT) | (hflip << MT9M032_READ_MODE2_HFLIP_SHIFT) | MT9M032_READ_MODE2_ROW_BLC | 0x0007; return mt9m032_write(client, MT9M032_READ_MODE2, reg_val); } static int mt9m032_set_gain(struct mt9m032 *sensor, s32 val) { struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev); int digital_gain_val; /* in 1/8th (0..127) */ int analog_mul; /* 0 or 1 */ int analog_gain_val; /* in 1/16th. 
(0..63) */ u16 reg_val; digital_gain_val = 51; /* from setup example */ if (val < 63) { analog_mul = 0; analog_gain_val = val; } else { analog_mul = 1; analog_gain_val = val / 2; } /* a_gain = (1 + analog_mul) + (analog_gain_val + 1) / 16 */ /* overall_gain = a_gain * (1 + digital_gain_val / 8) */ reg_val = ((digital_gain_val & MT9M032_GAIN_DIGITAL_MASK) << MT9M032_GAIN_DIGITAL_SHIFT) | ((analog_mul & 1) << MT9M032_GAIN_AMUL_SHIFT) | (analog_gain_val & MT9M032_GAIN_ANALOG_MASK); return mt9m032_write(client, MT9M032_GAIN_ALL, reg_val); } static int mt9m032_try_ctrl(struct v4l2_ctrl *ctrl) { if (ctrl->id == V4L2_CID_GAIN && ctrl->val >= 63) { /* round because of multiplier used for values >= 63 */ ctrl->val &= ~1; } return 0; } static int mt9m032_set_ctrl(struct v4l2_ctrl *ctrl) { struct mt9m032 *sensor = container_of(ctrl->handler, struct mt9m032, ctrls); struct i2c_client *client = v4l2_get_subdevdata(&sensor->subdev); int ret; switch (ctrl->id) { case V4L2_CID_GAIN: return mt9m032_set_gain(sensor, ctrl->val); case V4L2_CID_HFLIP: /* case V4L2_CID_VFLIP: -- In the same cluster */ return update_read_mode2(sensor, sensor->vflip->val, sensor->hflip->val); case V4L2_CID_EXPOSURE: ret = mt9m032_write(client, MT9M032_SHUTTER_WIDTH_HIGH, (ctrl->val >> 16) & 0xffff); if (ret < 0) return ret; return mt9m032_write(client, MT9M032_SHUTTER_WIDTH_LOW, ctrl->val & 0xffff); } return 0; } static struct v4l2_ctrl_ops mt9m032_ctrl_ops = { .s_ctrl = mt9m032_set_ctrl, .try_ctrl = mt9m032_try_ctrl, }; /* -------------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops mt9m032_core_ops = { #ifdef CONFIG_VIDEO_ADV_DEBUG .g_register = mt9m032_g_register, .s_register = mt9m032_s_register, #endif }; static const struct v4l2_subdev_video_ops mt9m032_video_ops = { .s_stream = mt9m032_s_stream, .g_frame_interval = mt9m032_get_frame_interval, .s_frame_interval = mt9m032_set_frame_interval, }; static const struct v4l2_subdev_pad_ops 
mt9m032_pad_ops = { .enum_mbus_code = mt9m032_enum_mbus_code, .enum_frame_size = mt9m032_enum_frame_size, .get_fmt = mt9m032_get_pad_format, .set_fmt = mt9m032_set_pad_format, .set_crop = mt9m032_set_pad_crop, .get_crop = mt9m032_get_pad_crop, }; static const struct v4l2_subdev_ops mt9m032_ops = { .core = &mt9m032_core_ops, .video = &mt9m032_video_ops, .pad = &mt9m032_pad_ops, }; /* ----------------------------------------------------------------------------- * Driver initialization and probing */ static int mt9m032_probe(struct i2c_client *client, const struct i2c_device_id *devid) { struct i2c_adapter *adapter = client->adapter; struct mt9m032 *sensor; int chip_version; int ret; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WORD_DATA)) { dev_warn(&client->dev, "I2C-Adapter doesn't support I2C_FUNC_SMBUS_WORD\n"); return -EIO; } if (!client->dev.platform_data) return -ENODEV; sensor = kzalloc(sizeof(*sensor), GFP_KERNEL); if (sensor == NULL) return -ENOMEM; mutex_init(&sensor->lock); sensor->pdata = client->dev.platform_data; v4l2_i2c_subdev_init(&sensor->subdev, client, &mt9m032_ops); sensor->subdev.flags |= V4L2_SUBDEV_FL_HAS_DEVNODE; chip_version = mt9m032_read(client, MT9M032_CHIP_VERSION); if (chip_version != MT9M032_CHIP_VERSION_VALUE) { dev_err(&client->dev, "MT9M032 not detected, wrong version " "0x%04x\n", chip_version); ret = -ENODEV; goto error_sensor; } dev_info(&client->dev, "MT9M032 detected at address 0x%02x\n", client->addr); sensor->frame_interval.numerator = 1; sensor->frame_interval.denominator = 30; sensor->crop.left = MT9M032_COLUMN_START_DEF; sensor->crop.top = MT9M032_ROW_START_DEF; sensor->crop.width = MT9M032_COLUMN_SIZE_DEF; sensor->crop.height = MT9M032_ROW_SIZE_DEF; sensor->format.width = sensor->crop.width; sensor->format.height = sensor->crop.height; sensor->format.code = V4L2_MBUS_FMT_Y8_1X8; sensor->format.field = V4L2_FIELD_NONE; sensor->format.colorspace = V4L2_COLORSPACE_SRGB; v4l2_ctrl_handler_init(&sensor->ctrls, 4); 
v4l2_ctrl_new_std(&sensor->ctrls, &mt9m032_ctrl_ops, V4L2_CID_GAIN, 0, 127, 1, 64); sensor->hflip = v4l2_ctrl_new_std(&sensor->ctrls, &mt9m032_ctrl_ops, V4L2_CID_HFLIP, 0, 1, 1, 0); sensor->vflip = v4l2_ctrl_new_std(&sensor->ctrls, &mt9m032_ctrl_ops, V4L2_CID_VFLIP, 0, 1, 1, 0); v4l2_ctrl_new_std(&sensor->ctrls, &mt9m032_ctrl_ops, V4L2_CID_EXPOSURE, MT9M032_SHUTTER_WIDTH_MIN, MT9M032_SHUTTER_WIDTH_MAX, 1, MT9M032_SHUTTER_WIDTH_DEF); if (sensor->ctrls.error) { ret = sensor->ctrls.error; dev_err(&client->dev, "control initialization error %d\n", ret); goto error_ctrl; } v4l2_ctrl_cluster(2, &sensor->hflip); sensor->subdev.ctrl_handler = &sensor->ctrls; sensor->pad.flags = MEDIA_PAD_FL_SOURCE; ret = media_entity_init(&sensor->subdev.entity, 1, &sensor->pad, 0); if (ret < 0) goto error_ctrl; ret = mt9m032_write(client, MT9M032_RESET, 1); /* reset on */ if (ret < 0) goto error_entity; mt9m032_write(client, MT9M032_RESET, 0); /* reset off */ if (ret < 0) goto error_entity; ret = mt9m032_setup_pll(sensor); if (ret < 0) goto error_entity; usleep_range(10000, 11000); ret = v4l2_ctrl_handler_setup(&sensor->ctrls); if (ret < 0) goto error_entity; /* SIZE */ ret = mt9m032_update_geom_timing(sensor); if (ret < 0) goto error_entity; ret = mt9m032_write(client, 0x41, 0x0000); /* reserved !!! */ if (ret < 0) goto error_entity; ret = mt9m032_write(client, 0x42, 0x0003); /* reserved !!! */ if (ret < 0) goto error_entity; ret = mt9m032_write(client, 0x43, 0x0003); /* reserved !!! */ if (ret < 0) goto error_entity; ret = mt9m032_write(client, 0x7f, 0x0000); /* reserved !!! 
*/ if (ret < 0) goto error_entity; if (sensor->pdata->invert_pixclock) { ret = mt9m032_write(client, MT9M032_PIX_CLK_CTRL, MT9M032_PIX_CLK_CTRL_INV_PIXCLK); if (ret < 0) goto error_entity; } ret = mt9m032_write(client, MT9M032_RESTART, 1); /* Restart on */ if (ret < 0) goto error_entity; msleep(100); ret = mt9m032_write(client, MT9M032_RESTART, 0); /* Restart off */ if (ret < 0) goto error_entity; msleep(100); ret = update_formatter2(sensor, false); if (ret < 0) goto error_entity; return ret; error_entity: media_entity_cleanup(&sensor->subdev.entity); error_ctrl: v4l2_ctrl_handler_free(&sensor->ctrls); error_sensor: mutex_destroy(&sensor->lock); kfree(sensor); return ret; } static int mt9m032_remove(struct i2c_client *client) { struct v4l2_subdev *subdev = i2c_get_clientdata(client); struct mt9m032 *sensor = to_mt9m032(subdev); v4l2_device_unregister_subdev(&sensor->subdev); v4l2_ctrl_handler_free(&sensor->ctrls); media_entity_cleanup(&sensor->subdev.entity); mutex_destroy(&sensor->lock); kfree(sensor); return 0; } static const struct i2c_device_id mt9m032_id_table[] = { { MT9M032_NAME, 0 }, { } }; MODULE_DEVICE_TABLE(i2c, mt9m032_id_table); static struct i2c_driver mt9m032_i2c_driver = { .driver = { .name = MT9M032_NAME, }, .probe = mt9m032_probe, .remove = mt9m032_remove, .id_table = mt9m032_id_table, }; module_i2c_driver(mt9m032_i2c_driver); MODULE_AUTHOR("Martin Hostettler <martin@neutronstar.dyndns.org>"); MODULE_DESCRIPTION("MT9M032 camera sensor driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
copolii/android_kernel_lge_msm8974
drivers/gpu/drm/nouveau/nouveau_gpio.c
5305
9852
/*
 * Copyright 2011 Red Hat Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Ben Skeggs
 */

#include "drmP.h"
#include "nouveau_drv.h"
#include "nouveau_i2c.h"
#include "nouveau_gpio.h"

/*
 * Locate the GPIO table inside the VBIOS DCB, or NULL if absent.
 *
 * DCB 3.0+ stores a pointer to the GPIO table at byte 0x0a of the DCB
 * header.  For DCB 2.2/2.3 the pointer lives at a fixed negative offset
 * before the DCB signature (dcb[-15], gated on the byte at dcb[-1]).
 * NOTE(review): the negative offsets are inherited VBIOS layout lore —
 * verify against the envytools/nouveau DCB documentation if touched.
 */
static u8 *
dcb_gpio_table(struct drm_device *dev)
{
	u8 *dcb = dcb_table(dev);
	if (dcb) {
		if (dcb[0] >= 0x30 && dcb[1] >= 0x0c)
			return ROMPTR(dev, dcb[0x0a]);
		if (dcb[0] >= 0x22 && dcb[-1] >= 0x13)
			return ROMPTR(dev, dcb[-15]);
	}
	return NULL;
}

/*
 * Return a pointer to GPIO table entry @ent, or NULL when out of range.
 * Stores the table version byte in *@version so callers can decode the
 * entry format.  Pre-3.0 tables have a 3-byte header and table[1]-sized
 * entries; 3.0+ tables have a table[1]-sized header and table[3]-sized
 * entries, with the entry count in table[2] for both layouts.
 */
static u8 *
dcb_gpio_entry(struct drm_device *dev, int idx, int ent, u8 *version)
{
	u8 *table = dcb_gpio_table(dev);
	if (table) {
		*version = table[0];
		if (*version < 0x30 && ent < table[2])
			return table + 3 + (ent * table[1]);
		else if (ent < table[2])
			return table + table[1] + (ent * table[3]);
	}
	return NULL;
}

/*
 * Drive a GPIO line via the chipset-specific backend.
 * Returns the backend's result, or -ENODEV when no driver hook exists.
 * Note: @idx is accepted for API symmetry but not forwarded to the hook.
 */
int
nouveau_gpio_drive(struct drm_device *dev, int idx, int line, int dir, int out)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;

	return pgpio->drive ? pgpio->drive(dev, line, dir, out) : -ENODEV;
}

/*
 * Sample the raw state of a GPIO line via the chipset-specific backend.
 * Returns the sensed level (>= 0) or -ENODEV when no sense hook exists.
 */
int
nouveau_gpio_sense(struct drm_device *dev, int idx, int line)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;

	return pgpio->sense ? pgpio->sense(dev, line) : -ENODEV;
}

/*
 * Look up a GPIO function descriptor by function tag and/or line number
 * (0xff in either acts as a wildcard; both wild is rejected).  Fills
 * *@gpio from the version-specific entry encoding.  Falls back to the
 * fixed DCB 2.2 TVDAC0 encoding, then to a hardcoded quirk for the
 * Apple iMac G4 NV18 board.  Returns 0 on success, -EINVAL otherwise.
 */
int
nouveau_gpio_find(struct drm_device *dev, int idx, u8 func, u8 line,
		  struct gpio_func *gpio)
{
	u8 *table, *entry, version;
	int i = -1;

	if (line == 0xff && func == 0xff)
		return -EINVAL;

	while ((entry = dcb_gpio_entry(dev, idx, ++i, &version))) {
		if (version < 0x40) {
			/* pre-4.0: packed 16-bit line/func/logic-state field */
			u16 data = ROM16(entry[0]);
			*gpio = (struct gpio_func) {
				.line = (data & 0x001f) >> 0,
				.func = (data & 0x07e0) >> 5,
				.log[0] = (data & 0x1800) >> 11,
				.log[1] = (data & 0x6000) >> 13,
			};
		} else if (version < 0x41) {
			*gpio = (struct gpio_func) {
				.line = entry[0] & 0x1f,
				.func = entry[1],
				.log[0] = (entry[3] & 0x18) >> 3,
				.log[1] = (entry[3] & 0x60) >> 5,
			};
		} else {
			/* 4.1+: wider line field, logic states moved to byte 4 */
			*gpio = (struct gpio_func) {
				.line = entry[0] & 0x3f,
				.func = entry[1],
				.log[0] = (entry[4] & 0x30) >> 4,
				.log[1] = (entry[4] & 0xc0) >> 6,
			};
		}

		if ((line == 0xff || line == gpio->line) &&
		    (func == 0xff || func == gpio->func))
			return 0;
	}

	/* DCB 2.2, fixed TVDAC GPIO data */
	if ((table = dcb_table(dev)) && table[0] >= 0x22) {
		if (func == DCB_GPIO_TVDAC0) {
			*gpio = (struct gpio_func) {
				.func = DCB_GPIO_TVDAC0,
				.line = table[-4] >> 4,
				.log[0] = !!(table[-5] & 2),
				.log[1] = !(table[-5] & 2),
			};
			return 0;
		}
	}

	/* Apple iMac G4 NV18 */
	if (nv_match_device(dev, 0x0189, 0x10de, 0x0010)) {
		if (func == DCB_GPIO_TVDAC0) {
			*gpio = (struct gpio_func) {
				.func = DCB_GPIO_TVDAC0,
				.line = 4,
				.log[0] = 0,
				.log[1] = 1,
			};
			return 0;
		}
	}

	return -EINVAL;
}

/*
 * Set a GPIO function to logical @state (0/1).  The DCB log[] entry for
 * the requested state encodes direction in bit 1 and output level in
 * bit 0.  Returns 0 on success or a negative errno from lookup/drive.
 */
int
nouveau_gpio_set(struct drm_device *dev, int idx, u8 tag, u8 line, int state)
{
	struct gpio_func gpio;
	int ret;

	ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
	if (ret == 0) {
		int dir = !!(gpio.log[state] & 0x02);
		int out = !!(gpio.log[state] & 0x01);
		ret = nouveau_gpio_drive(dev, idx, gpio.line, dir, out);
	}

	return ret;
}

/*
 * Read the logical state of a GPIO function: senses the raw line and
 * compares it with the level that encodes logical "1".  Returns 0/1,
 * or a negative errno from lookup/sense.
 */
int
nouveau_gpio_get(struct drm_device *dev, int idx, u8 tag, u8 line)
{
	struct gpio_func gpio;
	int ret;

	ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
	if (ret == 0) {
		ret = nouveau_gpio_sense(dev, idx, gpio.line);
		if (ret >= 0)
			ret = (ret == (gpio.log[1] & 1));
	}

	return ret;
}

/*
 * Enable/disable the hardware interrupt for a GPIO function.  Only GPIO
 * set 0 supports interrupts; other sets (or a missing irq_enable hook)
 * yield -ENODEV.
 */
int
nouveau_gpio_irq(struct drm_device *dev, int idx, u8 tag, u8 line, bool on)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
	struct gpio_func gpio;
	int ret;

	ret = nouveau_gpio_find(dev, idx, tag, line, &gpio);
	if (ret == 0) {
		if (idx == 0 && pgpio->irq_enable)
			pgpio->irq_enable(dev, gpio.line, on);
		else
			ret = -ENODEV;
	}

	return ret;
}

/*
 * One registered GPIO interrupt handler.  @inhibit, protected by
 * pgpio->lock, prevents the work item from being queued again while a
 * previous invocation is still pending/running.
 */
struct gpio_isr {
	struct drm_device *dev;
	struct list_head head;
	struct work_struct work;
	int idx;
	struct gpio_func func;
	void (*handler)(void *, int);
	void *data;
	bool inhibit;
};

/*
 * Bottom half for a GPIO interrupt: re-reads the logical line state in
 * process context, invokes the user handler, then clears the inhibit
 * flag so the line may be scheduled again.
 */
static void
nouveau_gpio_isr_bh(struct work_struct *work)
{
	struct gpio_isr *isr = container_of(work, struct gpio_isr, work);
	struct drm_device *dev = isr->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
	unsigned long flags;
	int state;

	state = nouveau_gpio_get(dev, isr->idx, isr->func.func, isr->func.line);
	if (state >= 0)
		isr->handler(isr->data, state);

	spin_lock_irqsave(&pgpio->lock, flags);
	isr->inhibit = false;
	spin_unlock_irqrestore(&pgpio->lock, flags);
}

/*
 * Top half, called from the IRQ path with a mask of lines that fired.
 * Schedules the bottom half for every matching, non-inhibited handler.
 * Only GPIO set 0 generates interrupts.
 */
void
nouveau_gpio_isr(struct drm_device *dev, int idx, u32 line_mask)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
	struct gpio_isr *isr;

	if (idx != 0)
		return;

	spin_lock(&pgpio->lock);
	list_for_each_entry(isr, &pgpio->isr, head) {
		if (line_mask & (1 << isr->func.line)) {
			if (isr->inhibit)
				continue;
			isr->inhibit = true;
			schedule_work(&isr->work);
		}
	}
	spin_unlock(&pgpio->lock);
}

/*
 * Register @handler/@data for interrupts on the GPIO function matching
 * @tag/@line.  The allocated record is owned by this module and freed
 * by nouveau_gpio_isr_del().  Returns 0, -ENOMEM, or a lookup error.
 */
int
nouveau_gpio_isr_add(struct drm_device *dev, int idx, u8 tag, u8 line,
		     void (*handler)(void *, int), void *data)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
	struct gpio_isr *isr;
	unsigned long flags;
	int ret;

	isr = kzalloc(sizeof(*isr), GFP_KERNEL);
	if (!isr)
		return -ENOMEM;

	ret = nouveau_gpio_find(dev, idx, tag, line, &isr->func);
	if (ret) {
		kfree(isr);
		return ret;
	}

	INIT_WORK(&isr->work, nouveau_gpio_isr_bh);
	isr->dev = dev;
	isr->handler = handler;
	isr->data = data;
	isr->idx = idx;

	spin_lock_irqsave(&pgpio->lock, flags);
	list_add(&isr->head, &pgpio->isr);
	spin_unlock_irqrestore(&pgpio->lock, flags);
	return 0;
}

/*
 * Unregister handler(s) matching @tag/@line/@handler/@data.  Matching
 * entries are moved off the live list under the lock, then each pending
 * work item is flushed outside the lock before the record is freed, so
 * no bottom half can run on freed memory.
 */
void
nouveau_gpio_isr_del(struct drm_device *dev, int idx, u8 tag, u8 line,
		     void (*handler)(void *, int), void *data)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
	struct gpio_isr *isr, *tmp;
	struct gpio_func func;
	unsigned long flags;
	LIST_HEAD(tofree);
	int ret;

	ret = nouveau_gpio_find(dev, idx, tag, line, &func);
	if (ret == 0) {
		spin_lock_irqsave(&pgpio->lock, flags);
		list_for_each_entry_safe(isr, tmp, &pgpio->isr, head) {
			if (memcmp(&isr->func, &func, sizeof(func)) ||
			    isr->idx != idx ||
			    isr->handler != handler || isr->data != data)
				continue;
			list_move(&isr->head, &tofree);
		}
		spin_unlock_irqrestore(&pgpio->lock, flags);

		list_for_each_entry_safe(isr, tmp, &tofree, head) {
			flush_work_sync(&isr->work);
			kfree(isr);
		}
	}
}

/* One-time construction of the GPIO engine state, then hardware init. */
int
nouveau_gpio_create(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;

	INIT_LIST_HEAD(&pgpio->isr);
	spin_lock_init(&pgpio->lock);
	return nouveau_gpio_init(dev);
}

/*
 * Tear down the GPIO engine.  All handlers must have been removed via
 * nouveau_gpio_isr_del() first; a leftover entry is a driver bug.
 */
void
nouveau_gpio_destroy(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;

	nouveau_gpio_fini(dev);
	BUG_ON(!list_empty(&pgpio->isr));
}

/* Chipset-specific hardware init, if the backend provides one. */
int
nouveau_gpio_init(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;
	int ret = 0;

	if (pgpio->init)
		ret = pgpio->init(dev);

	return ret;
}

/* Chipset-specific hardware teardown, if the backend provides one. */
void
nouveau_gpio_fini(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpio_engine *pgpio = &dev_priv->engine.gpio;

	if (pgpio->fini)
		pgpio->fini(dev);
}

/*
 * Program every DCB 4.0+ GPIO entry back to its default state.  The
 * unk0/unk1 fields drive undocumented per-line registers on NV_D0+ and
 * NV_50+ hardware — NOTE(review): register semantics (0x00d610/0x00d640,
 * 0xe100/0xe28c) are not documented here; treat as opaque VBIOS defaults.
 */
void
nouveau_gpio_reset(struct drm_device *dev)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	u8 *entry, version;
	int ent = -1;

	while ((entry = dcb_gpio_entry(dev, 0, ++ent, &version))) {
		u8 func = 0xff, line, defs, unk0, unk1;
		if (version >= 0x41) {
			defs = !!(entry[0] & 0x80);
			line = entry[0] & 0x3f;
			func = entry[1];
			unk0 = entry[2];
			unk1 = entry[3] & 0x1f;
		} else if (version >= 0x40) {
			line = entry[0] & 0x1f;
			func = entry[1];
			defs = !!(entry[3] & 0x01);
			unk0 = !!(entry[3] & 0x02);
			unk1 = !!(entry[3] & 0x04);
		} else {
			/* pre-4.0 entries carry no reset defaults */
			break;
		}

		if (func == 0xff)
			continue;

		nouveau_gpio_func_set(dev, func, defs);

		if (dev_priv->card_type >= NV_D0) {
			nv_mask(dev, 0x00d610 + (line * 4), 0xff, unk0);
			if (unk1--)
				nv_mask(dev, 0x00d640 + (unk1 * 4), 0xff, line);
		} else if (dev_priv->card_type >= NV_50) {
			static const u32 regs[] = { 0xe100, 0xe28c };
			u32 val = (unk1 << 16) | unk0;
			u32 reg = regs[line >> 4];
			line &= 0x0f;

			nv_mask(dev, reg, 0x00010001 << line, val << line);
		}
	}
}
gpl-2.0
msfkonsole/android_kernel_xiaomi_dior
drivers/mtd/nand/bcm_umi_bch.c
9145
6980
/*****************************************************************************
* Copyright 2004 - 2009 Broadcom Corporation.  All rights reserved.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available at
* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*****************************************************************************/

/* ---- Include Files ---------------------------------------------------- */
#include "nand_bcm_umi.h"

/* ---- External Variable Declarations ----------------------------------- */
/* ---- External Function Prototypes ------------------------------------- */
/* ---- Public Variables ------------------------------------------------- */
/* ---- Private Constants and Types -------------------------------------- */
/* ---- Private Function Prototypes -------------------------------------- */
static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
	struct nand_chip *chip, uint8_t *buf, int page);
static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
	struct nand_chip *chip, const uint8_t *buf);

/* ---- Private Variables ------------------------------------------------ */

/*
** nand_hw_eccoob
** New oob placement block for use with hardware ecc generation.
**
** The free-OOB regions below depend on NAND_ECC_NUM_BYTES (set by the
** nand_bcm_umi.h configuration): larger ECC leaves fewer spare bytes.
*/
static struct nand_ecclayout nand_hw_eccoob_512 = {
	/* Reserve 5 for BI indicator */
	.oobfree = {
#if (NAND_ECC_NUM_BYTES > 3)
		{.offset = 0, .length = 2}
#else
		{.offset = 0, .length = 5},
		{.offset = 6, .length = 7}
#endif
	}
};

/*
** We treat the OOB for a 2K page as if it were 4 512 byte oobs,
** except the BI is at byte 0.
*/
static struct nand_ecclayout nand_hw_eccoob_2048 = {
	/* Reserve 0 as BI indicator */
	.oobfree = {
#if (NAND_ECC_NUM_BYTES > 10)
		{.offset = 1, .length = 2},
#elif (NAND_ECC_NUM_BYTES > 7)
		{.offset = 1, .length = 5},
		{.offset = 16, .length = 6},
		{.offset = 32, .length = 6},
		{.offset = 48, .length = 6}
#else
		{.offset = 1, .length = 8},
		{.offset = 16, .length = 9},
		{.offset = 32, .length = 9},
		{.offset = 48, .length = 9}
#endif
	}
};

/* We treat the OOB for a 4K page as if it were 8 512 byte oobs,
 * except the BI is at byte 0. */
static struct nand_ecclayout nand_hw_eccoob_4096 = {
	/* Reserve 0 as BI indicator */
	.oobfree = {
#if (NAND_ECC_NUM_BYTES > 10)
		{.offset = 1, .length = 2},
		{.offset = 16, .length = 3},
		{.offset = 32, .length = 3},
		{.offset = 48, .length = 3},
		{.offset = 64, .length = 3},
		{.offset = 80, .length = 3},
		{.offset = 96, .length = 3},
		{.offset = 112, .length = 3}
#else
		{.offset = 1, .length = 5},
		{.offset = 16, .length = 6},
		{.offset = 32, .length = 6},
		{.offset = 48, .length = 6},
		{.offset = 64, .length = 6},
		{.offset = 80, .length = 6},
		{.offset = 96, .length = 6},
		{.offset = 112, .length = 6}
#endif
	}
};

/* ---- Private Functions ------------------------------------------------ */
/* ==== Public Functions ================================================= */

/****************************************************************************
*
*  bcm_umi_bch_read_page_hwecc - hardware ecc based page read function
*  @mtd:	mtd info structure
*  @chip:	nand chip info structure
*  @buf:	buffer to store read data
*  @page:	page number (unused here; the caller has already issued
*		the read command for the target page)
*
*  Reads the page one ECC sector at a time, letting the UMI hardware
*  accumulate the BCH syndrome over the data, then reads the stored OOB
*  ECC and corrects the sector in place.  Always returns 0; uncorrectable
*  sectors are reported through mtd->ecc_stats.failed instead, per the
*  MTD read_page convention.
*
***************************************************************************/
static int bcm_umi_bch_read_page_hwecc(struct mtd_info *mtd,
				       struct nand_chip *chip, uint8_t *buf,
				       int page)
{
	int sectorIdx = 0;
	int eccsize = chip->ecc.size;
	int eccsteps = chip->ecc.steps;
	uint8_t *datap = buf;
	uint8_t eccCalc[NAND_ECC_NUM_BYTES];
	int sectorOobSize = mtd->oobsize / eccsteps;
	int stat;

	for (sectorIdx = 0; sectorIdx < eccsteps;
	     sectorIdx++, datap += eccsize) {
		if (sectorIdx > 0) {
			/* Seek to page location within sector */
			chip->cmdfunc(mtd, NAND_CMD_RNDOUT, sectorIdx * eccsize,
				      -1);
		}

		/* Enable hardware ECC before reading the buf */
		nand_bcm_umi_bch_enable_read_hwecc();

		/* Read in data */
		bcm_umi_nand_read_buf(mtd, datap, eccsize);

		/* Pause hardware ECC after reading the buf */
		nand_bcm_umi_bch_pause_read_ecc_calc();

		/* Read the OOB ECC */
		chip->cmdfunc(mtd, NAND_CMD_RNDOUT,
			      mtd->writesize + sectorIdx * sectorOobSize, -1);
		nand_bcm_umi_bch_read_oobEcc(mtd->writesize, eccCalc,
					     NAND_ECC_NUM_BYTES,
					     chip->oob_poi +
					     sectorIdx * sectorOobSize);

		/* Correct any ECC detected errors */
		stat = nand_bcm_umi_bch_correct_page(datap, eccCalc,
						     NAND_ECC_NUM_BYTES);

		/* Update Stats: stat < 0 means the sector is uncorrectable;
		 * stat >= 0 is the number of corrected bit errors. */
		if (stat < 0) {
#if defined(NAND_BCM_UMI_DEBUG)
			printk(KERN_WARNING "%s uncorr_err sectorIdx=%d\n",
			       __func__, sectorIdx);
			printk(KERN_WARNING
			       "%s data %02x %02x %02x %02x "
			       "%02x %02x %02x %02x\n",
			       __func__, datap[0], datap[1], datap[2], datap[3],
			       datap[4], datap[5], datap[6], datap[7]);
			printk(KERN_WARNING
			       "%s ecc %02x %02x %02x %02x "
			       "%02x %02x %02x %02x %02x %02x "
			       "%02x %02x %02x\n",
			       __func__, eccCalc[0], eccCalc[1], eccCalc[2],
			       eccCalc[3], eccCalc[4], eccCalc[5], eccCalc[6],
			       eccCalc[7], eccCalc[8], eccCalc[9], eccCalc[10],
			       eccCalc[11], eccCalc[12]);
			BUG();
#endif
			mtd->ecc_stats.failed++;
		} else {
#if defined(NAND_BCM_UMI_DEBUG)
			if (stat > 0) {
				printk(KERN_INFO
				       "%s %d correctable_errors detected\n",
				       __func__, stat);
			}
#endif
			mtd->ecc_stats.corrected += stat;
		}
	}
	return 0;
}

/****************************************************************************
*
*  bcm_umi_bch_write_page_hwecc - hardware ecc based page write function
*  @mtd:	mtd info structure
*  @chip:	nand chip info structure
*  @buf:	data buffer
*
*  Writes the page one ECC sector at a time: the UMI hardware computes
*  the BCH ECC while the data is streamed out, and the resulting ECC
*  bytes are placed into the per-sector slice of chip->oob_poi before
*  the whole OOB area is written at the end.
*
***************************************************************************/
static void bcm_umi_bch_write_page_hwecc(struct mtd_info *mtd,
					 struct nand_chip *chip,
					 const uint8_t *buf)
{
	int sectorIdx = 0;
	int eccsize = chip->ecc.size;
	int eccsteps = chip->ecc.steps;
	const uint8_t *datap = buf;
	uint8_t *oobp = chip->oob_poi;
	int sectorOobSize = mtd->oobsize / eccsteps;

	for (sectorIdx = 0; sectorIdx < eccsteps;
	     sectorIdx++, datap += eccsize, oobp += sectorOobSize) {
		/* Enable hardware ECC before writing the buf */
		nand_bcm_umi_bch_enable_write_hwecc();
		bcm_umi_nand_write_buf(mtd, datap, eccsize);
		nand_bcm_umi_bch_write_oobEcc(mtd->writesize, oobp,
					      NAND_ECC_NUM_BYTES);
	}

	bcm_umi_nand_write_buf(mtd, chip->oob_poi, mtd->oobsize);
}
gpl-2.0
5y/linux
drivers/mfd/wm8350-gpio.c
10425
6211
/* * wm8350-core.c -- Device access for Wolfson WM8350 * * Copyright 2007, 2008 Wolfson Microelectronics PLC. * * Author: Liam Girdwood * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/mfd/wm8350/core.h> #include <linux/mfd/wm8350/gpio.h> #include <linux/mfd/wm8350/pmic.h> static int gpio_set_dir(struct wm8350 *wm8350, int gpio, int dir) { int ret; wm8350_reg_unlock(wm8350); if (dir == WM8350_GPIO_DIR_OUT) ret = wm8350_clear_bits(wm8350, WM8350_GPIO_CONFIGURATION_I_O, 1 << gpio); else ret = wm8350_set_bits(wm8350, WM8350_GPIO_CONFIGURATION_I_O, 1 << gpio); wm8350_reg_lock(wm8350); return ret; } static int wm8350_gpio_set_debounce(struct wm8350 *wm8350, int gpio, int db) { if (db == WM8350_GPIO_DEBOUNCE_ON) return wm8350_set_bits(wm8350, WM8350_GPIO_DEBOUNCE, 1 << gpio); else return wm8350_clear_bits(wm8350, WM8350_GPIO_DEBOUNCE, 1 << gpio); } static int gpio_set_func(struct wm8350 *wm8350, int gpio, int func) { u16 reg; wm8350_reg_unlock(wm8350); switch (gpio) { case 0: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1) & ~WM8350_GP0_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1, reg | ((func & 0xf) << 0)); break; case 1: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1) & ~WM8350_GP1_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1, reg | ((func & 0xf) << 4)); break; case 2: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1) & ~WM8350_GP2_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1, reg | ((func & 0xf) << 8)); break; case 3: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_1) & ~WM8350_GP3_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_1, reg | ((func & 0xf) << 
12)); break; case 4: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2) & ~WM8350_GP4_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2, reg | ((func & 0xf) << 0)); break; case 5: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2) & ~WM8350_GP5_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2, reg | ((func & 0xf) << 4)); break; case 6: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2) & ~WM8350_GP6_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2, reg | ((func & 0xf) << 8)); break; case 7: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_2) & ~WM8350_GP7_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_2, reg | ((func & 0xf) << 12)); break; case 8: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3) & ~WM8350_GP8_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3, reg | ((func & 0xf) << 0)); break; case 9: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3) & ~WM8350_GP9_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3, reg | ((func & 0xf) << 4)); break; case 10: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3) & ~WM8350_GP10_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3, reg | ((func & 0xf) << 8)); break; case 11: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_3) & ~WM8350_GP11_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_3, reg | ((func & 0xf) << 12)); break; case 12: reg = wm8350_reg_read(wm8350, WM8350_GPIO_FUNCTION_SELECT_4) & ~WM8350_GP12_FN_MASK; wm8350_reg_write(wm8350, WM8350_GPIO_FUNCTION_SELECT_4, reg | ((func & 0xf) << 0)); break; default: wm8350_reg_lock(wm8350); return -EINVAL; } wm8350_reg_lock(wm8350); return 0; } static int gpio_set_pull_up(struct wm8350 *wm8350, int gpio, int up) { if (up) return wm8350_set_bits(wm8350, WM8350_GPIO_PIN_PULL_UP_CONTROL, 1 << gpio); else return wm8350_clear_bits(wm8350, WM8350_GPIO_PIN_PULL_UP_CONTROL, 1 << gpio); } 
static int gpio_set_pull_down(struct wm8350 *wm8350, int gpio, int down) { if (down) return wm8350_set_bits(wm8350, WM8350_GPIO_PULL_DOWN_CONTROL, 1 << gpio); else return wm8350_clear_bits(wm8350, WM8350_GPIO_PULL_DOWN_CONTROL, 1 << gpio); } static int gpio_set_polarity(struct wm8350 *wm8350, int gpio, int pol) { if (pol == WM8350_GPIO_ACTIVE_HIGH) return wm8350_set_bits(wm8350, WM8350_GPIO_PIN_POLARITY_TYPE, 1 << gpio); else return wm8350_clear_bits(wm8350, WM8350_GPIO_PIN_POLARITY_TYPE, 1 << gpio); } static int gpio_set_invert(struct wm8350 *wm8350, int gpio, int invert) { if (invert == WM8350_GPIO_INVERT_ON) return wm8350_set_bits(wm8350, WM8350_GPIO_INT_MODE, 1 << gpio); else return wm8350_clear_bits(wm8350, WM8350_GPIO_INT_MODE, 1 << gpio); } int wm8350_gpio_config(struct wm8350 *wm8350, int gpio, int dir, int func, int pol, int pull, int invert, int debounce) { /* make sure we never pull up and down at the same time */ if (pull == WM8350_GPIO_PULL_NONE) { if (gpio_set_pull_up(wm8350, gpio, 0)) goto err; if (gpio_set_pull_down(wm8350, gpio, 0)) goto err; } else if (pull == WM8350_GPIO_PULL_UP) { if (gpio_set_pull_down(wm8350, gpio, 0)) goto err; if (gpio_set_pull_up(wm8350, gpio, 1)) goto err; } else if (pull == WM8350_GPIO_PULL_DOWN) { if (gpio_set_pull_up(wm8350, gpio, 0)) goto err; if (gpio_set_pull_down(wm8350, gpio, 1)) goto err; } if (gpio_set_invert(wm8350, gpio, invert)) goto err; if (gpio_set_polarity(wm8350, gpio, pol)) goto err; if (wm8350_gpio_set_debounce(wm8350, gpio, debounce)) goto err; if (gpio_set_dir(wm8350, gpio, dir)) goto err; return gpio_set_func(wm8350, gpio, func); err: return -EIO; } EXPORT_SYMBOL_GPL(wm8350_gpio_config);
gpl-2.0
maxnet/linux-allwinner-aufs34
arch/m68k/sun3/dvma.c
11705
1265
/* * linux/arch/m68k/sun3/dvma.c * * Written by Sam Creasey * * Sun3 IOMMU routines used for dvma accesses. * */ #include <linux/kernel.h> #include <linux/mm.h> #include <linux/bootmem.h> #include <linux/list.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/sun3mmu.h> #include <asm/dvma.h> static unsigned long ptelist[120]; static unsigned long dvma_page(unsigned long kaddr, unsigned long vaddr) { unsigned long pte; unsigned long j; pte_t ptep; j = *(volatile unsigned long *)kaddr; *(volatile unsigned long *)kaddr = j; ptep = pfn_pte(virt_to_pfn(kaddr), PAGE_KERNEL); pte = pte_val(ptep); // printk("dvma_remap: addr %lx -> %lx pte %08lx len %x\n", // kaddr, vaddr, pte, len); if(ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] != pte) { sun3_put_pte(vaddr, pte); ptelist[(vaddr & 0xff000) >> PAGE_SHIFT] = pte; } return (vaddr + (kaddr & ~PAGE_MASK)); } int dvma_map_iommu(unsigned long kaddr, unsigned long baddr, int len) { unsigned long end; unsigned long vaddr; vaddr = dvma_btov(baddr); end = vaddr + len; while(vaddr < end) { dvma_page(kaddr, vaddr); kaddr += PAGE_SIZE; vaddr += PAGE_SIZE; } return 0; } void sun3_dvma_init(void) { memset(ptelist, 0, sizeof(ptelist)); }
gpl-2.0
pavel-odintsov/openvz_rhel6_kernel_mirror
drivers/scsi/bnx2fc/bnx2fc_hwi.c
442
63487
/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver.
 * This file contains the low level functions that interact with the
 * 57712 FCoE firmware.
 *
 * Copyright (c) 2008 - 2013 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
 */

#include "bnx2fc.h"

DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);

static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
					 struct fcoe_kcqe *new_cqe_kcqe);
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
				     struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
					    struct fcoe_kcqe *ofld_kcqe);
static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
					     struct fcoe_kcqe *destroy_kcqe);

/* Split a 64-bit DMA address into the lo/hi u32 pair used by the KWQEs. */
static void bnx2fc_split_addr(u32 *lo, u32 *hi, u64 addr)
{
	*lo = (u32) addr;
	*hi = (u32) (addr >> 32);
}

/* Store the three address bytes of an FC_ID, least-significant first. */
static void bnx2fc_copy_fc_id(u8 *id, u32 port_id)
{
	id[0] = port_id & 0x000000FF;
	id[1] = (port_id & 0x0000FF00) >> 8;
	id[2] = (port_id & 0x00FF0000) >> 16;
}

/* The firmware wants MAC addresses byte-reversed in three 2-byte pieces. */
static void bnx2fc_copy_mac(u8 *lo, u8 *mid, u8 *hi, const u8 *mac)
{
	lo[0] = mac[5];
	lo[1] = mac[4];
	mid[0] = mac[3];
	mid[1] = mac[2];
	hi[0] = mac[1];
	hi[1] = mac[0];
}

/**
 * bnx2fc_send_stat_req - asks the firmware to DMA the statistics block
 *
 * @hba: adapter structure pointer
 */
int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_stat stat_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
	stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
	stat_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	bnx2fc_split_addr(&stat_req.stat_params_addr_lo,
			  &stat_req.stat_params_addr_hi, hba->stats_buf_dma);

	kwqe_arr[0] = (struct kwqe *) &stat_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
 *
 * @hba: adapter structure pointer
 *
 * Send down FCoE firmware init KWQEs which initiates the initial handshake
 * with the f/w.
 */
int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_init1 fcoe_init1;
	struct fcoe_kwqe_init2 fcoe_init2;
	struct fcoe_kwqe_init3 fcoe_init3;
	struct kwqe *kwqe_arr[3];
	int num_kwqes = 3;
	int rc = 0;

	if (!hba->cnic) {
		printk(KERN_ERR PFX "hba->cnic NULL during fcoe fw init\n");
		return -ENODEV;
	}

	/* INIT1: task and queue geometry */
	memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
	fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
	fcoe_init1.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	fcoe_init1.num_tasks = hba->max_tasks;
	fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
	fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
	fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
	fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
	bnx2fc_split_addr(&fcoe_init1.dummy_buffer_addr_lo,
			  &fcoe_init1.dummy_buffer_addr_hi,
			  hba->dummy_buf_dma);
	bnx2fc_split_addr(&fcoe_init1.task_list_pbl_addr_lo,
			  &fcoe_init1.task_list_pbl_addr_hi,
			  hba->task_ctx_bd_dma);
	fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU;
	fcoe_init1.flags = (PAGE_SHIFT << FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);
	fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;

	/* INIT2: HSI version and hash table addresses */
	memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
	fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
	fcoe_init2.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	fcoe_init2.hsi_major_version = FCOE_HSI_MAJOR_VERSION;
	fcoe_init2.hsi_minor_version = FCOE_HSI_MINOR_VERSION;
	bnx2fc_split_addr(&fcoe_init2.hash_tbl_pbl_addr_lo,
			  &fcoe_init2.hash_tbl_pbl_addr_hi,
			  hba->hash_tbl_pbl_dma);
	bnx2fc_split_addr(&fcoe_init2.t2_hash_tbl_addr_lo,
			  &fcoe_init2.t2_hash_tbl_addr_hi,
			  hba->t2_hash_tbl_dma);
	bnx2fc_split_addr(&fcoe_init2.t2_ptr_hash_tbl_addr_lo,
			  &fcoe_init2.t2_ptr_hash_tbl_addr_hi,
			  hba->t2_hash_tbl_ptr_dma);
	fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;

	/* INIT3: error reporting and performance mode */
	memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
	fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
	fcoe_init3.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	fcoe_init3.error_bit_map_lo = 0xffffffff;
	fcoe_init3.error_bit_map_hi = 0xffffffff;
	/*
	 * enable both cached connection and cached tasks
	 * 0 = none, 1 = cached connection, 2 = cached tasks, 3 = both
	 */
	fcoe_init3.perf_config = 3;

	kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
	kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
	kwqe_arr[2] = (struct kwqe *) &fcoe_init3;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_fw_fcoe_destroy_msg - tears down the firmware FCoE state
 *
 * @hba: adapter structure pointer
 */
int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
{
	struct fcoe_kwqe_destroy fcoe_destroy;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = -1;

	/* fill destroy KWQE */
	memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
	fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
	fcoe_destroy.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
	kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
 *
 * @port: port structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
					struct bnx2fc_rport *tgt)
{
	struct fc_lport *lport = port->lport;
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
	struct bnx2fc_hba *hba = interface->hba;
	struct kwqe *kwqe_arr[4];
	struct fcoe_kwqe_conn_offload1 ofld_req1;
	struct fcoe_kwqe_conn_offload2 ofld_req2;
	struct fcoe_kwqe_conn_offload3 ofld_req3;
	struct fcoe_kwqe_conn_offload4 ofld_req4;
	struct fc_rport_priv *rdata = tgt->rdata;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 4;
	u32 port_id;
	int rc = 0;
	u16 conn_id;

	/* OFFLOAD1: connection id and queue DMA addresses */
	memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));
	ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
	ofld_req1.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	conn_id = (u16)tgt->fcoe_conn_id;
	ofld_req1.fcoe_conn_id = conn_id;

	bnx2fc_split_addr(&ofld_req1.sq_addr_lo, &ofld_req1.sq_addr_hi,
			  tgt->sq_dma);
	bnx2fc_split_addr(&ofld_req1.rq_pbl_addr_lo, &ofld_req1.rq_pbl_addr_hi,
			  tgt->rq_pbl_dma);
	bnx2fc_split_addr(&ofld_req1.rq_first_pbe_addr_lo,
			  &ofld_req1.rq_first_pbe_addr_hi, tgt->rq_dma);

	ofld_req1.rq_prod = 0x8000;

	/* OFFLOAD2: frame size and completion queue addresses */
	memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));
	ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
	ofld_req2.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;

	bnx2fc_split_addr(&ofld_req2.cq_addr_lo, &ofld_req2.cq_addr_hi,
			  tgt->cq_dma);
	bnx2fc_split_addr(&ofld_req2.xferq_addr_lo, &ofld_req2.xferq_addr_hi,
			  tgt->xferq_dma);
	bnx2fc_split_addr(&ofld_req2.conn_db_addr_lo,
			  &ofld_req2.conn_db_addr_hi, tgt->conn_db_dma);

	/* OFFLOAD3: addressing, VLAN and sequence parameters */
	memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));
	ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
	ofld_req3.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	ofld_req3.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
	ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;

	port_id = fc_host_port_id(lport->host);
	if (port_id == 0) {
		BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
		return -EINVAL;
	}

	/*
	 * Store s_id of the initiator for further reference. This will
	 * be used during disable/destroy during linkdown processing as
	 * when the lport is reset, the port_id also is reset to 0
	 */
	tgt->sid = port_id;
	bnx2fc_copy_fc_id(ofld_req3.s_id, port_id);
	bnx2fc_copy_fc_id(ofld_req3.d_id, rport->port_id);

	ofld_req3.tx_total_conc_seqs = rdata->max_seq;

	ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;

	ofld_req3.rx_max_fc_pay_len  = lport->mfs;

	ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
	ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
	ofld_req3.rx_open_seqs_exch_c3 = 1;

	ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
	ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);

	/*
	 * Multiple N_Port IDs are not supported yet, so the
	 * B_MUL_N_PORT_IDS flag is left clear.
	 */
	ofld_req3.flags = 0;

	/* Info from PLOGI response */
	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
			     FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);

	ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
			     FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);

	/*
	 * Info from PRLI response, this info is used for sequence level error
	 * recovery support
	 */
	if (tgt->dev_type == TYPE_TAPE) {
		ofld_req3.flags |= 1 <<
				    FCOE_KWQE_CONN_OFFLOAD3_B_CONF_REQ_SHIFT;
		ofld_req3.flags |= (((rdata->flags & FC_RP_FLAGS_REC_SUPPORTED)
				    ? 1 : 0) <<
				    FCOE_KWQE_CONN_OFFLOAD3_B_REC_VALID_SHIFT);
	}

	/* vlan flag */
	ofld_req3.flags |= (interface->vlan_enabled <<
			    FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);

	/* C2_VALID and ACK flags are not set as they are not supported */

	/* OFFLOAD4: MAC addresses and remaining queue addresses */
	memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
	ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
	ofld_req4.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	/* NOTE(review): presumably E_D_TOV in firmware tick units; confirm */
	ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;

	/* local mac */
	bnx2fc_copy_mac(ofld_req4.src_mac_addr_lo, ofld_req4.src_mac_addr_mid,
			ofld_req4.src_mac_addr_hi, port->data_src_addr);
	/* fcf mac */
	bnx2fc_copy_mac(ofld_req4.dst_mac_addr_lo, ofld_req4.dst_mac_addr_mid,
			ofld_req4.dst_mac_addr_hi, ctlr->dest_addr);

	bnx2fc_split_addr(&ofld_req4.lcq_addr_lo, &ofld_req4.lcq_addr_hi,
			  tgt->lcq_dma);
	bnx2fc_split_addr(&ofld_req4.confq_pbl_base_addr_lo,
			  &ofld_req4.confq_pbl_base_addr_hi,
			  tgt->confq_pbl_dma);

	kwqe_arr[0] = (struct kwqe *) &ofld_req1;
	kwqe_arr[1] = (struct kwqe *) &ofld_req2;
	kwqe_arr[2] = (struct kwqe *) &ofld_req3;
	kwqe_arr[3] = (struct kwqe *) &ofld_req4;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
 *
 * @port: port structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_enable_req(struct fcoe_port *port,
					struct bnx2fc_rport *tgt)
{
	struct kwqe *kwqe_arr[2];
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
	struct bnx2fc_hba *hba = interface->hba;
	struct fcoe_kwqe_conn_enable_disable enbl_req;
	struct fc_lport *lport = port->lport;
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&enbl_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
	enbl_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	/* local mac */
	bnx2fc_copy_mac(enbl_req.src_mac_addr_lo, enbl_req.src_mac_addr_mid,
			enbl_req.src_mac_addr_hi, port->data_src_addr);
	memcpy(tgt->src_addr, port->data_src_addr, ETH_ALEN);

	bnx2fc_copy_mac(enbl_req.dst_mac_addr_lo, enbl_req.dst_mac_addr_mid,
			enbl_req.dst_mac_addr_hi, ctlr->dest_addr);

	port_id = fc_host_port_id(lport->host);
	if (port_id != tgt->sid) {
		printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x,"
				"sid = 0x%x\n", port_id, tgt->sid);
		port_id = tgt->sid;
	}
	bnx2fc_copy_fc_id(enbl_req.s_id, port_id);
	bnx2fc_copy_fc_id(enbl_req.d_id, rport->port_id);

	enbl_req.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	enbl_req.vlan_flag = interface->vlan_enabled;
	enbl_req.context_id = tgt->context_id;
	enbl_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &enbl_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_disable_req - initiates FCoE Session disable
 *
 * @port: port structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_disable_req(struct fcoe_port *port,
				    struct bnx2fc_rport *tgt)
{
	struct bnx2fc_interface *interface = port->priv;
	struct fcoe_ctlr *ctlr = bnx2fc_to_ctlr(interface);
	struct bnx2fc_hba *hba = interface->hba;
	struct fcoe_kwqe_conn_enable_disable disable_req;
	struct kwqe *kwqe_arr[2];
	struct fc_rport *rport = tgt->rport;
	int num_kwqes = 1;
	int rc = 0;
	u32 port_id;

	memset(&disable_req, 0x00,
	       sizeof(struct fcoe_kwqe_conn_enable_disable));
	disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
	disable_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	/*
	 * Use the MAC captured at enable time, since port->data_src_addr
	 * may already have changed by the time the session is disabled.
	 */
	bnx2fc_copy_mac(disable_req.src_mac_addr_lo,
			disable_req.src_mac_addr_mid,
			disable_req.src_mac_addr_hi, tgt->src_addr);

	bnx2fc_copy_mac(disable_req.dst_mac_addr_lo,
			disable_req.dst_mac_addr_mid,
			disable_req.dst_mac_addr_hi, ctlr->dest_addr);

	/* tgt->sid was stored at offload time; lport may be reset by now */
	bnx2fc_copy_fc_id(disable_req.s_id, tgt->sid);
	bnx2fc_copy_fc_id(disable_req.d_id, rport->port_id);

	disable_req.context_id = tgt->context_id;
	disable_req.conn_id = tgt->fcoe_conn_id;
	disable_req.vlan_tag = interface->vlan_id <<
				FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
	disable_req.vlan_tag |=
			3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
	disable_req.vlan_flag = interface->vlan_enabled;

	kwqe_arr[0] = (struct kwqe *) &disable_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/**
 * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
 *
 * @hba: adapter structure pointer
 * @tgt: bnx2fc_rport structure pointer
 */
int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
					struct bnx2fc_rport *tgt)
{
	struct fcoe_kwqe_conn_destroy destroy_req;
	struct kwqe *kwqe_arr[2];
	int num_kwqes = 1;
	int rc = 0;

	memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
	destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
	destroy_req.hdr.flags =
		(FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);

	destroy_req.context_id = tgt->context_id;
	destroy_req.conn_id = tgt->fcoe_conn_id;

	kwqe_arr[0] = (struct kwqe *) &destroy_req;

	if (hba->cnic && hba->cnic->submit_kwqes)
		rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);

	return rc;
}

/* Check, under hba_lock, whether @lport is still registered with @hba. */
static bool is_valid_lport(struct bnx2fc_hba *hba, struct fc_lport *lport)
{
	struct bnx2fc_lport *blport;
	bool found = false;

	spin_lock_bh(&hba->hba_lock);
	list_for_each_entry(blport, &hba->vports, list) {
		if (blport->lport == lport) {
			found = true;
			break;
		}
	}
	spin_unlock_bh(&hba->hba_lock);

	return found;
}

/*
 * Deferred-work handler for unsolicited ELS frames: hands the frame to
 * libfc only if the lport is still valid, then frees the work item.
 */
static void bnx2fc_unsol_els_work(struct work_struct *work)
{
	struct bnx2fc_unsol_els *unsol_els =
		container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
	struct fc_lport *lport = unsol_els->lport;
	struct bnx2fc_hba *hba = unsol_els->hba;
	struct fc_frame *fp = unsol_els->fp;

	if (is_valid_lport(hba, lport))
		fc_exch_recv(lport, fp);
	kfree(unsol_els);
}

void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
				   unsigned char *buf,
				   u32 frame_len, u16 l2_oxid)
{
	struct fcoe_port *port = tgt->port;
	struct fc_lport *lport =
port->lport; struct bnx2fc_interface *interface = port->priv; struct bnx2fc_unsol_els *unsol_els; struct fc_frame_header *fh; struct fc_frame *fp; struct sk_buff *skb; u32 payload_len; u32 crc; u8 op; unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC); if (!unsol_els) { BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n"); return; } BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n", l2_oxid, frame_len); payload_len = frame_len - sizeof(struct fc_frame_header); fp = fc_frame_alloc(lport, payload_len); if (!fp) { printk(KERN_ERR PFX "fc_frame_alloc failure\n"); kfree(unsol_els); return; } fh = (struct fc_frame_header *) fc_frame_header_get(fp); /* Copy FC Frame header and payload into the frame */ memcpy(fh, buf, frame_len); if (l2_oxid != FC_XID_UNKNOWN) fh->fh_ox_id = htons(l2_oxid); skb = fp_skb(fp); if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) || (fh->fh_r_ctl == FC_RCTL_ELS_REP)) { if (fh->fh_type == FC_TYPE_ELS) { op = fc_frame_payload_op(fp); if ((op == ELS_TEST) || (op == ELS_ESTC) || (op == ELS_FAN) || (op == ELS_CSU)) { /* * No need to reply for these * ELS requests */ printk(KERN_ERR PFX "dropping ELS 0x%x\n", op); kfree_skb(skb); kfree(unsol_els); return; } } crc = fcoe_fc_crc(fp); fc_frame_init(fp); fr_dev(fp) = lport; fr_sof(fp) = FC_SOF_I3; fr_eof(fp) = FC_EOF_T; fr_crc(fp) = cpu_to_le32(~crc); unsol_els->lport = lport; unsol_els->hba = interface->hba; unsol_els->fp = fp; INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work); queue_work(bnx2fc_wq, &unsol_els->unsol_els_work); } else { BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl); kfree_skb(skb); kfree(unsol_els); } } static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe) { u8 num_rq; struct fcoe_err_report_entry *err_entry; unsigned char *rq_data; unsigned char *buf = NULL, *buf1; int i; u16 xid; u32 frame_len, len; struct bnx2fc_cmd *io_req = NULL; struct fcoe_task_ctx_entry *task, *task_page; struct bnx2fc_interface *interface = tgt->port->priv; 
struct bnx2fc_hba *hba = interface->hba;
    int task_idx, index;
    int rc = 0;
    u64 err_warn_bit_map;
    u8 err_warn = 0xff;        /* 0xff = no error/warning bit found */

    BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
    switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
    case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
        frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
                     FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;

        /* number of RQ buffers the frame spans, rounded up */
        num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;

        spin_lock_bh(&tgt->tgt_lock);
        rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
        spin_unlock_bh(&tgt->tgt_lock);

        if (rq_data) {
            /* frame is contiguous in the RQ - use it in place */
            buf = rq_data;
        } else {
            /*
             * Frame wraps the RQ ring: gather the pieces one RQE at
             * a time into a temporary linear buffer.
             */
            buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
                                  GFP_ATOMIC);

            if (!buf1) {
                BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
                break;
            }

            for (i = 0; i < num_rq; i++) {
                spin_lock_bh(&tgt->tgt_lock);
                rq_data = (unsigned char *)
                           bnx2fc_get_next_rqe(tgt, 1);
                spin_unlock_bh(&tgt->tgt_lock);
                len = BNX2FC_RQ_BUF_SZ;
                memcpy(buf1, rq_data, len);
                buf1 += len;
            }
        }
        bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
                                      FC_XID_UNKNOWN);

        /* free only if we linearized into a temporary buffer */
        if (buf != rq_data)
            kfree(buf);
        spin_lock_bh(&tgt->tgt_lock);
        bnx2fc_return_rqe(tgt, num_rq);
        spin_unlock_bh(&tgt->tgt_lock);
        break;

    case FCOE_ERROR_DETECTION_CQE_TYPE:
        /*
         * In case of error reporting CQE a single RQ entry
         * is consumed.
         */
        spin_lock_bh(&tgt->tgt_lock);
        num_rq = 1;
        err_entry = (struct fcoe_err_report_entry *)
                     bnx2fc_get_next_rqe(tgt, 1);
        /*
         * NOTE(review): ox_id is read raw here, while the warning path
         * below applies cpu_to_be16() - confirm which byte order the
         * firmware reports this field in.
         */
        xid = err_entry->fc_hdr.ox_id;
        BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
        BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
            err_entry->data.err_warn_bitmap_hi,
            err_entry->data.err_warn_bitmap_lo);
        BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
            err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);

        if (xid > hba->max_xid) {
            BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
                   xid);
            goto ret_err_rqe;
        }

        task_idx = xid / BNX2FC_TASKS_PER_PAGE;
        index = xid % BNX2FC_TASKS_PER_PAGE;
        task_page = (struct fcoe_task_ctx_entry *)
                    hba->task_ctx[task_idx];
        task = &(task_page[index]);

        io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
        if (!io_req)
            goto ret_err_rqe;

        if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
            printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
            goto ret_err_rqe;
        }

        /* a cleanup for this IO is already underway - ignore the error */
        if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
                               &io_req->req_flags)) {
            BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
                        "progress.. ignore unsol err\n");
            goto ret_err_rqe;
        }

        /* find the lowest set bit = reported error code */
        err_warn_bit_map = (u64)
            ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
            (u64)err_entry->data.err_warn_bitmap_lo;
        for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
            if (err_warn_bit_map & (u64)((u64)1 << i)) {
                err_warn = i;
                break;
            }
        }

        /*
         * If ABTS is already in progress, and FW error is
         * received after that, do not cancel the timeout_work
         * and let the error recovery continue by explicitly
         * logging out the target, when the ABTS eventually
         * times out.
         */
        if (test_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags)) {
            printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
                        "in ABTS processing\n", xid);
            goto ret_err_rqe;
        }
        BNX2FC_TGT_DBG(tgt, "err = 0x%x\n", err_warn);

        /* REC-based recovery applies to tape devices only */
        if (tgt->dev_type != TYPE_TAPE)
            goto skip_rec;
        switch (err_warn) {
        case FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION:
        case FCOE_ERROR_CODE_DATA_OOO_RO:
        case FCOE_ERROR_CODE_COMMON_INCORRECT_SEQ_CNT:
        case FCOE_ERROR_CODE_DATA_SOFI3_SEQ_ACTIVE_SET:
        case FCOE_ERROR_CODE_FCP_RSP_OPENED_SEQ:
        case FCOE_ERROR_CODE_DATA_SOFN_SEQ_ACTIVE_RESET:
            BNX2FC_TGT_DBG(tgt, "REC TOV popped for xid - 0x%x\n",
                   xid);
            /* preserve the error report for the recovery path */
            memcpy(&io_req->err_entry, err_entry,
                   sizeof(struct fcoe_err_report_entry));
            if (!test_bit(BNX2FC_FLAG_SRR_SENT,
                          &io_req->req_flags)) {
                /* bnx2fc_send_rec() must run without tgt_lock */
                spin_unlock_bh(&tgt->tgt_lock);
                rc = bnx2fc_send_rec(io_req);
                spin_lock_bh(&tgt->tgt_lock);

                if (rc)
                    goto skip_rec;
            } else
                printk(KERN_ERR PFX "SRR in progress\n");
            goto ret_err_rqe;
            break;
        default:
            break;
        }

skip_rec:
        /* fall back to aborting the exchange */
        set_bit(BNX2FC_FLAG_ISSUE_ABTS, &io_req->req_flags);
        /*
         * Cancel the timeout_work, as we received IO
         * completion with FW error.
         */
        if (cancel_delayed_work(&io_req->timeout_work))
            kref_put(&io_req->refcount, bnx2fc_cmd_release);

        rc = bnx2fc_initiate_abts(io_req);
        if (rc != SUCCESS) {
            printk(KERN_ERR PFX "err_warn: initiate_abts "
                "failed xid = 0x%x. issue cleanup\n",
                io_req->xid);
            bnx2fc_initiate_cleanup(io_req);
        }
ret_err_rqe:
        bnx2fc_return_rqe(tgt, 1);
        spin_unlock_bh(&tgt->tgt_lock);
        break;

    case FCOE_WARNING_DETECTION_CQE_TYPE:
        /*
         * In case of warning reporting CQE a single RQ entry
         * is consumed.
         */
        spin_lock_bh(&tgt->tgt_lock);
        num_rq = 1;
        err_entry = (struct fcoe_err_report_entry *)
                     bnx2fc_get_next_rqe(tgt, 1);
        /*
         * NOTE(review): here ox_id is byte-swapped with cpu_to_be16(),
         * unlike the error path above which reads it raw - verify the
         * intended byte order with the firmware interface.
         */
        xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
        BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
        BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
            err_entry->data.err_warn_bitmap_hi,
            err_entry->data.err_warn_bitmap_lo);
        BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
            err_entry->data.tx_buf_off, err_entry->data.rx_buf_off);

        if (xid > hba->max_xid) {
            BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n", xid);
            goto ret_warn_rqe;
        }

        /* find the lowest set bit = reported warning code */
        err_warn_bit_map = (u64)
            ((u64)err_entry->data.err_warn_bitmap_hi << 32) |
            (u64)err_entry->data.err_warn_bitmap_lo;
        for (i = 0; i < BNX2FC_NUM_ERR_BITS; i++) {
            if (err_warn_bit_map & (u64) (1 << i)) {
                err_warn = i;
                break;
            }
        }
        BNX2FC_TGT_DBG(tgt, "warn = 0x%x\n", err_warn);

        task_idx = xid / BNX2FC_TASKS_PER_PAGE;
        index = xid % BNX2FC_TASKS_PER_PAGE;
        task_page = (struct fcoe_task_ctx_entry *)
                     interface->hba->task_ctx[task_idx];
        task = &(task_page[index]);
        io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
        if (!io_req)
            goto ret_warn_rqe;

        if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
            printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
            goto ret_warn_rqe;
        }

        /* preserve the report for later recovery processing */
        memcpy(&io_req->err_entry, err_entry,
               sizeof(struct fcoe_err_report_entry));

        if (err_warn == FCOE_ERROR_CODE_REC_TOV_TIMER_EXPIRATION)
            /* REC_TOV is not a warning code */
            BUG_ON(1);
        else
            BNX2FC_TGT_DBG(tgt, "Unsolicited warning\n");
ret_warn_rqe:
        bnx2fc_return_rqe(tgt, 1);
        spin_unlock_bh(&tgt->tgt_lock);
        break;

    default:
        printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
        break;
    }
}

/*
 * bnx2fc_process_cq_compl - process a fastpath CQE for a pending work
 * request; dispatches on the io_req command type and the task's RX state.
 */
void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
{
    struct fcoe_task_ctx_entry *task;
    struct fcoe_task_ctx_entry *task_page;
    struct fcoe_port *port = tgt->port;
    struct bnx2fc_interface *interface = port->priv;
    struct bnx2fc_hba *hba = interface->hba;
    struct bnx2fc_cmd *io_req;
    int task_idx, index;
    u16 xid;
    u8  cmd_type;
    u8 rx_state = 0;
    u8 num_rq;
spin_lock_bh(&tgt->tgt_lock);
    xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
    if (xid >= hba->max_tasks) {
        printk(KERN_ERR PFX "ERROR:xid out of range\n");
        spin_unlock_bh(&tgt->tgt_lock);
        return;
    }
    /* locate the task context entry for this xid */
    task_idx = xid / BNX2FC_TASKS_PER_PAGE;
    index = xid % BNX2FC_TASKS_PER_PAGE;
    task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
    task = &(task_page[index]);

    num_rq = ((task->rxwr_txrd.var_ctx.rx_flags &
               FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE) >>
               FCOE_TCE_RX_WR_TX_RD_VAR_NUM_RQ_WQE_SHIFT);

    io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];

    if (io_req == NULL) {
        printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
        spin_unlock_bh(&tgt->tgt_lock);
        return;
    }

    /* Timestamp IO completion time */
    cmd_type = io_req->cmd_type;

    rx_state = ((task->rxwr_txrd.var_ctx.rx_flags &
                FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE) >>
                FCOE_TCE_RX_WR_TX_RD_VAR_RX_STATE_SHIFT);

    /* Process other IO completion types */
    switch (cmd_type) {
    case BNX2FC_SCSI_CMD:
        if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
            /* scsi_cmd_compl drops tgt_lock itself via early return */
            bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
            spin_unlock_bh(&tgt->tgt_lock);
            return;
        }

        if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
            bnx2fc_process_abts_compl(io_req, task, num_rq);
        else if (rx_state ==
             FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
            bnx2fc_process_cleanup_compl(io_req, task, num_rq);
        else
            printk(KERN_ERR PFX "Invalid rx state - %d\n",
                rx_state);
        break;

    case BNX2FC_TASK_MGMT_CMD:
        BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
        bnx2fc_process_tm_compl(io_req, task, num_rq);
        break;

    case BNX2FC_ABTS:
        /*
         * ABTS request received by firmware.
         ABTS response
         * will be delivered to the task belonging to the IO
         * that was aborted
         */
        BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
        kref_put(&io_req->refcount, bnx2fc_cmd_release);
        break;

    case BNX2FC_ELS:
        if (rx_state == FCOE_TASK_RX_STATE_COMPLETED)
            bnx2fc_process_els_compl(io_req, task, num_rq);
        else if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
            bnx2fc_process_abts_compl(io_req, task, num_rq);
        else if (rx_state ==
             FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
            bnx2fc_process_cleanup_compl(io_req, task, num_rq);
        else
            printk(KERN_ERR PFX "Invalid rx state = %d\n",
                rx_state);
        break;

    case BNX2FC_CLEANUP:
        BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
        kref_put(&io_req->refcount, bnx2fc_cmd_release);
        break;

    case BNX2FC_SEQ_CLEANUP:
        BNX2FC_IO_DBG(io_req, "cq_compl(0x%x) - seq cleanup resp\n",
                      io_req->xid);
        bnx2fc_process_seq_cleanup_compl(io_req, task, rx_state);
        kref_put(&io_req->refcount, bnx2fc_cmd_release);
        break;

    default:
        printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
        break;
    }
    spin_unlock_bh(&tgt->tgt_lock);
}

/*
 * bnx2fc_arm_cq - ring the RX doorbell with the current CQ consumer
 * index and toggle bit so the chip raises the next CQ event.
 */
void bnx2fc_arm_cq(struct bnx2fc_rport *tgt)
{
    struct b577xx_fcoe_rx_doorbell *rx_db = &tgt->rx_db;
    u32 msg;

    /* order CQ index updates before the doorbell write */
    wmb();
    rx_db->doorbell_cq_cons = tgt->cq_cons_idx | (tgt->cq_curr_toggle_bit <<
            FCOE_CQE_TOGGLE_BIT_SHIFT);
    msg = *((u32 *)rx_db);
    writel(cpu_to_le32(msg), tgt->ctx_base);
    mmiowb();

}

/*
 * bnx2fc_alloc_work - allocate a work item carrying one CQE for
 * deferred processing by a per-CPU I/O thread.  Returns NULL on
 * allocation failure.
 */
struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
{
    struct bnx2fc_work *work;
    work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
    if (!work)
        return NULL;

    INIT_LIST_HEAD(&work->list);
    work->tgt = tgt;
    work->wqe = wqe;
    return work;
}

/*
 * bnx2fc_process_new_cqes - drain new entries from the target's CQ.
 * Unsolicited CQEs are handled inline; pending-work CQEs are handed to
 * the per-CPU iothread when available, otherwise processed inline.
 */
int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
{
    struct fcoe_cqe *cq;
    u32 cq_cons;
    struct fcoe_cqe *cqe;
    u32 num_free_sqes = 0;
    u32 num_cqes = 0;
    u16 wqe;

    /*
     * cq_lock is a low contention lock used to protect
     * the CQ data structure from being freed up during
     * the upload operation
     */
    spin_lock_bh(&tgt->cq_lock);

    if (!tgt->cq) {
        printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
        spin_unlock_bh(&tgt->cq_lock);
        return 0;
    }
    cq = tgt->cq;
    cq_cons = tgt->cq_cons_idx;
    cqe = &cq[cq_cons];

    /* a CQE is "new" while its toggle bit matches our current phase */
    while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
           (tgt->cq_curr_toggle_bit <<
           FCOE_CQE_TOGGLE_BIT_SHIFT)) {

        /* new entry on the cq */
        if (wqe & FCOE_CQE_CQE_TYPE) {
            /* Unsolicited event notification */
            bnx2fc_process_unsol_compl(tgt, wqe);
        } else {
            /* Pending work request completion */
            struct bnx2fc_work *work = NULL;
            struct bnx2fc_percpu_s *fps = NULL;
            unsigned int cpu = wqe % num_possible_cpus();

            fps = &per_cpu(bnx2fc_percpu, cpu);
            spin_lock_bh(&fps->fp_work_lock);
            if (unlikely(!fps->iothread))
                goto unlock;

            work = bnx2fc_alloc_work(tgt, wqe);
            if (work)
                list_add_tail(&work->list,
                              &fps->work_list);
unlock:
            spin_unlock_bh(&fps->fp_work_lock);

            /* Pending work request completion */
            if (fps->iothread && work)
                wake_up_process(fps->iothread);
            else
                bnx2fc_process_cq_compl(tgt, wqe);
            num_free_sqes++;
        }
        cqe++;
        tgt->cq_cons_idx++;
        num_cqes++;

        /* wrap the ring and flip the expected toggle phase */
        if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
            tgt->cq_cons_idx = 0;
            cqe = cq;
            tgt->cq_curr_toggle_bit =
                1 - tgt->cq_curr_toggle_bit;
        }
    }
    if (num_cqes) {
        /* Arm CQ only if doorbell is mapped */
        if (tgt->ctx_base)
            bnx2fc_arm_cq(tgt);
        atomic_add(num_free_sqes, &tgt->free_sqes);
    }
    spin_unlock_bh(&tgt->cq_lock);
    return 0;
}

/**
 * bnx2fc_fastpath_notification - process global event queue (KCQ)
 *
 * @hba:		adapter structure pointer
 * @new_cqe_kcqe:	pointer to newly DMA'd KCQ entry
 *
 * Fast path event notification handler
 */
static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
                    struct fcoe_kcqe *new_cqe_kcqe)
{
    u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
    struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];

    if (!tgt) {
        printk(KERN_ERR PFX "conn_id 0x%x not valid\n", conn_id);
        return;
    }

    bnx2fc_process_new_cqes(tgt);
}

/**
 * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
 *
 * @hba:	adapter structure pointer
 * @ofld_kcqe:	connection offload kcqe pointer
 *
 * handle session offload completion, enable the
 * session if offload is
 * successful.
 */
static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
                    struct fcoe_kcqe *ofld_kcqe)
{
    struct bnx2fc_rport        *tgt;
    struct fcoe_port        *port;
    struct bnx2fc_interface        *interface;
    u32 conn_id;
    u32 context_id;

    conn_id = ofld_kcqe->fcoe_conn_id;
    context_id = ofld_kcqe->fcoe_conn_context_id;
    tgt = hba->tgt_ofld_list[conn_id];
    if (!tgt) {
        printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
        return;
    }
    BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
        ofld_kcqe->fcoe_conn_context_id);
    port = tgt->port;
    interface = tgt->port->priv;
    if (hba != interface->hba) {
        printk(KERN_ERR PFX "ERROR:ofld_cmpl: HBA mis-match\n");
        goto ofld_cmpl_err;
    }
    /*
     * cnic has allocated a context_id for this session; use this
     * while enabling the session.
     */
    tgt->context_id = context_id;
    if (ofld_kcqe->completion_status) {
        if (ofld_kcqe->completion_status ==
                FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
            printk(KERN_ERR PFX "unable to allocate FCoE context "
                "resources\n");
            set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
        }
    } else {
        /* FW offload request successfully completed */
        set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
    }
ofld_cmpl_err:
    /* in all cases, signal the waiter that the offload request finished */
    set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
    wake_up_interruptible(&tgt->ofld_wait);
}

/**
 * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
 *
 * @hba:	adapter structure pointer
 * @ofld_kcqe:	connection offload kcqe pointer
 *
 * handle session enable completion, mark the rport as ready
 */
static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
                        struct fcoe_kcqe *ofld_kcqe)
{
    struct bnx2fc_rport        *tgt;
    struct bnx2fc_interface        *interface;
    u32 conn_id;
    u32 context_id;

    context_id = ofld_kcqe->fcoe_conn_context_id;
    conn_id = ofld_kcqe->fcoe_conn_id;
    tgt = hba->tgt_ofld_list[conn_id];
    if (!tgt) {
        printk(KERN_ERR PFX "ERROR:enbl_cmpl: No pending ofld req\n");
        return;
    }

    BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
        ofld_kcqe->fcoe_conn_context_id);

    /*
     *
     * context_id should be the same for this target during offload
     * and enable
     */
    if (tgt->context_id != context_id) {
        printk(KERN_ERR PFX "context id mis-match\n");
        return;
    }
    interface = tgt->port->priv;
    if (hba != interface->hba) {
        printk(KERN_ERR PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
        goto enbl_cmpl_err;
    }
    if (!ofld_kcqe->completion_status)
        /* enable successful - rport ready for issuing IOs */
        set_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);

enbl_cmpl_err:
    /* signal the waiter regardless of success or failure */
    set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
    wake_up_interruptible(&tgt->ofld_wait);
}

/*
 * bnx2fc_process_conn_disable_cmpl - handle session disable completion;
 * update the target flags and wake the upload waiter.
 */
static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
                    struct fcoe_kcqe *disable_kcqe)
{

    struct bnx2fc_rport        *tgt;
    u32 conn_id;

    conn_id = disable_kcqe->fcoe_conn_id;
    tgt = hba->tgt_ofld_list[conn_id];
    if (!tgt) {
        printk(KERN_ERR PFX "ERROR: disable_cmpl: No disable req\n");
        return;
    }

    BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);

    if (disable_kcqe->completion_status) {
        printk(KERN_ERR PFX "Disable failed with cmpl status %d\n",
            disable_kcqe->completion_status);
        set_bit(BNX2FC_FLAG_DISABLE_FAILED, &tgt->flags);
        set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
        wake_up_interruptible(&tgt->upld_wait);
    } else {
        /* disable successful */
        BNX2FC_TGT_DBG(tgt, "disable successful\n");
        clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
        clear_bit(BNX2FC_FLAG_ENABLED, &tgt->flags);
        set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
        set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
        wake_up_interruptible(&tgt->upld_wait);
    }
}

/*
 * bnx2fc_process_conn_destroy_cmpl - handle session destroy completion;
 * on success mark the target destroyed and wake the upload waiter.
 */
static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
                    struct fcoe_kcqe *destroy_kcqe)
{
    struct bnx2fc_rport        *tgt;
    u32 conn_id;

    conn_id = destroy_kcqe->fcoe_conn_id;
    tgt = hba->tgt_ofld_list[conn_id];
    if (!tgt) {
        printk(KERN_ERR PFX "destroy_cmpl: No destroy req\n");
        return;
    }

    BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);

    if (destroy_kcqe->completion_status) {
        printk(KERN_ERR PFX "Destroy conn failed, cmpl status %d\n",
            destroy_kcqe->completion_status);
        return;
    } else {
        /*
         destroy successful */
        BNX2FC_TGT_DBG(tgt, "upload successful\n");
        clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
        set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
        set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
        wake_up_interruptible(&tgt->upld_wait);
    }
}

/*
 * bnx2fc_init_failure - log a human-readable reason for an INIT_FUNC
 * KCQE failure status.
 */
static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
{
    switch (err_code) {
    case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
        printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
        break;

    case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
        printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
        break;

    case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
        printk(KERN_ERR PFX "init_failure due to NIC error\n");
        break;
    case FCOE_KCQE_COMPLETION_STATUS_ERROR:
        printk(KERN_ERR PFX "init failure due to compl status err\n");
        break;
    case FCOE_KCQE_COMPLETION_STATUS_WRONG_HSI_VERSION:
        printk(KERN_ERR PFX "init failure due to HSI mismatch\n");
        break;
    default:
        printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
    }
}

/**
 * bnx2fc_indicate_kcqe - process KCQE
 *
 * @context:	adapter structure pointer (struct bnx2fc_hba *)
 * @kcq:	array of kcqe pointers
 * @num_cqe:	Number of completion queue elements
 *
 * Generic KCQ event handler
 */
void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
                    u32 num_cqe)
{
    struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
    int i = 0;
    struct fcoe_kcqe *kcqe = NULL;

    while (i < num_cqe) {
        kcqe = (struct fcoe_kcqe *) kcq[i++];

        switch (kcqe->op_code) {
        case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
            bnx2fc_fastpath_notification(hba, kcqe);
            break;

        case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
            bnx2fc_process_ofld_cmpl(hba, kcqe);
            break;

        case FCOE_KCQE_OPCODE_ENABLE_CONN:
            bnx2fc_process_enable_conn_cmpl(hba, kcqe);
            break;

        case FCOE_KCQE_OPCODE_INIT_FUNC:
            if (kcqe->completion_status !=
                    FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
                bnx2fc_init_failure(hba,
                        kcqe->completion_status);
            } else {
                set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
                bnx2fc_get_link_state(hba);
                printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
(u8)hba->pcidev->bus->number);
            }
            break;

        case FCOE_KCQE_OPCODE_DESTROY_FUNC:
            if (kcqe->completion_status !=
                    FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
                printk(KERN_ERR PFX "DESTROY failed\n");
            } else {
                printk(KERN_ERR PFX "DESTROY success\n");
            }
            set_bit(BNX2FC_FLAG_DESTROY_CMPL, &hba->flags);
            wake_up_interruptible(&hba->destroy_wait);
            break;

        case FCOE_KCQE_OPCODE_DISABLE_CONN:
            bnx2fc_process_conn_disable_cmpl(hba, kcqe);
            break;

        case FCOE_KCQE_OPCODE_DESTROY_CONN:
            bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
            break;

        case FCOE_KCQE_OPCODE_STAT_FUNC:
            if (kcqe->completion_status !=
                FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
                printk(KERN_ERR PFX "STAT failed\n");
            complete(&hba->stat_req_done);
            break;

        case FCOE_KCQE_OPCODE_FCOE_ERROR:
            /* fall thru */
        default:
            printk(KERN_ERR PFX "unknown opcode 0x%x\n",
                                kcqe->op_code);
        }
    }
}

/*
 * bnx2fc_add_2_sq - post one work request (by xid) to the send queue
 * and advance the producer index, flipping the toggle bit on wrap.
 */
void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
{
    struct fcoe_sqe *sqe;

    sqe = &tgt->sq[tgt->sq_prod_idx];

    /* Fill SQ WQE */
    sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
    sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;

    /* Advance SQ Prod Idx */
    if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
        tgt->sq_prod_idx = 0;
        tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
    }
}

/*
 * bnx2fc_ring_doorbell - notify the chip of new SQ entries by writing
 * the producer index (with toggle bit) to the mapped doorbell register.
 */
void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
{
    struct b577xx_doorbell_set_prod *sq_db = &tgt->sq_db;
    u32 msg;

    /* order SQE writes before the doorbell write */
    wmb();
    sq_db->prod = tgt->sq_prod_idx |
                (tgt->sq_curr_toggle_bit << 15);
    msg = *((u32 *)sq_db);
    writel(cpu_to_le32(msg), tgt->ctx_base);
    mmiowb();

}

/*
 * bnx2fc_map_doorbell - ioremap the 4-byte doorbell register for this
 * connection's context id.  Returns 0 on success, -ENOMEM on failure.
 */
int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
{
    u32 context_id = tgt->context_id;
    struct fcoe_port *port = tgt->port;
    u32 reg_off;
    resource_size_t reg_base;
    struct bnx2fc_interface *interface = port->priv;
    struct bnx2fc_hba *hba = interface->hba;

    reg_base = pci_resource_start(hba->pcidev,
                    BNX2X_DOORBELL_PCI_BAR);
    reg_off = (1 << BNX2X_DB_SHIFT) * (context_id & 0x1FFFF);
    tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
    if (!tgt->ctx_base)
        return -ENOMEM;
    return 0;
}

char *bnx2fc_get_next_rqe(struct
bnx2fc_rport *tgt, u8 num_items)
{
    /*
     * Return a pointer to @num_items contiguous RQ buffers at the
     * current consumer index, or NULL if the request would cross the
     * end of the ring (caller must then fetch one buffer at a time).
     */
    char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);

    if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
        return NULL;

    tgt->rq_cons_idx += num_items;

    if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
        tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;

    return buf;
}

/*
 * bnx2fc_return_rqe - give @num_items RQ buffers back to firmware by
 * advancing the producer index and mirroring it into the connection DB.
 */
void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
{
    /* return the rq buffer */
    u32 next_prod_idx = tgt->rq_prod_idx + num_items;
    if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
        /* Wrap around RQ */
        next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
    }
    tgt->rq_prod_idx = next_prod_idx;
    tgt->conn_db->rq_prod = tgt->rq_prod_idx;
}

/*
 * bnx2fc_init_seq_cleanup_task - build a SEQUENCE_CLEANUP task context
 * for @seq_clnp_req, targeting the original IO @orig_io_req at relative
 * data @offset.
 */
void bnx2fc_init_seq_cleanup_task(struct bnx2fc_cmd *seq_clnp_req,
                  struct fcoe_task_ctx_entry *task,
                  struct bnx2fc_cmd *orig_io_req,
                  u32 offset)
{
    struct scsi_cmnd *sc_cmd = orig_io_req->sc_cmd;
    struct bnx2fc_rport *tgt = seq_clnp_req->tgt;
    struct bnx2fc_interface *interface = tgt->port->priv;
    struct fcoe_bd_ctx *bd = orig_io_req->bd_tbl->bd_tbl;
    struct fcoe_task_ctx_entry *orig_task;
    struct fcoe_task_ctx_entry *task_page;
    struct fcoe_ext_mul_sges_ctx *sgl;
    u8 task_type = FCOE_TASK_TYPE_SEQUENCE_CLEANUP;
    u8 orig_task_type;
    u16 orig_xid = orig_io_req->xid;
    u32 context_id = tgt->context_id;
    u64 phys_addr = (u64)orig_io_req->bd_tbl->bd_tbl_dma;
    u32 orig_offset = offset;
    int bd_count;
    int orig_task_idx, index;
    int i;

    memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

    if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
        orig_task_type = FCOE_TASK_TYPE_WRITE;
    else
        orig_task_type = FCOE_TASK_TYPE_READ;

    /* Tx flags */
    task->txwr_rxrd.const_ctx.tx_flags =
        FCOE_TASK_TX_STATE_SEQUENCE_CLEANUP <<
        FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;
    /* init flags */
    task->txwr_rxrd.const_ctx.init_flags = task_type <<
        FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
    task->txwr_rxrd.const_ctx.init_flags |=
        FCOE_TASK_CLASS_TYPE_3 <<
        FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
    task->rxwr_txrd.const_ctx.init_flags = context_id <<
        FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
task->rxwr_txrd.const_ctx.init_flags = context_id << FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT; task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid; task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_seq_cnt = 0; task->txwr_rxrd.union_ctx.cleanup.ctx.rolled_tx_data_offset = offset; bd_count = orig_io_req->bd_tbl->bd_valid; /* obtain the appropriate bd entry from relative offset */ for (i = 0; i < bd_count; i++) { if (offset < bd[i].buf_len) break; offset -= bd[i].buf_len; } phys_addr += (i * sizeof(struct fcoe_bd_ctx)); if (orig_task_type == FCOE_TASK_TYPE_WRITE) { task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = (u32)phys_addr; task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32); task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = bd_count; task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_off = offset; /* adjusted offset */ task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_idx = i; } else { orig_task_idx = orig_xid / BNX2FC_TASKS_PER_PAGE; index = orig_xid % BNX2FC_TASKS_PER_PAGE; task_page = (struct fcoe_task_ctx_entry *) interface->hba->task_ctx[orig_task_idx]; orig_task = &(task_page[index]); /* Multiple SGEs were used for this IO */ sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl; sgl->mul_sgl.cur_sge_addr.lo = (u32)phys_addr; sgl->mul_sgl.cur_sge_addr.hi = (u32)((u64)phys_addr >> 32); sgl->mul_sgl.sgl_size = bd_count; sgl->mul_sgl.cur_sge_off = offset; /*adjusted offset */ sgl->mul_sgl.cur_sge_idx = i; memset(&task->rxwr_only.rx_seq_ctx, 0, sizeof(struct fcoe_rx_seq_ctx)); task->rxwr_only.rx_seq_ctx.low_exp_ro = orig_offset; task->rxwr_only.rx_seq_ctx.high_exp_ro = orig_offset; } } void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req, struct fcoe_task_ctx_entry *task, u16 orig_xid) { u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP; struct bnx2fc_rport *tgt = io_req->tgt; u32 context_id = tgt->context_id; memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); /* Tx Write Rx Read */ /* init flags */ 
     */
    task->txwr_rxrd.const_ctx.init_flags = task_type <<
                FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
    task->txwr_rxrd.const_ctx.init_flags |=
                FCOE_TASK_CLASS_TYPE_3 <<
                FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;
    /* device type selects tape vs. disk firmware handling */
    if (tgt->dev_type == TYPE_TAPE)
        task->txwr_rxrd.const_ctx.init_flags |=
                FCOE_TASK_DEV_TYPE_TAPE <<
                FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
    else
        task->txwr_rxrd.const_ctx.init_flags |=
                FCOE_TASK_DEV_TYPE_DISK <<
                FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
    task->txwr_rxrd.union_ctx.cleanup.ctx.cleaned_task_id = orig_xid;

    /* Tx flags */
    task->txwr_rxrd.const_ctx.tx_flags =
                FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
                FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;

    /* Rx Read Tx Write */
    task->rxwr_txrd.const_ctx.init_flags = context_id <<
                FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;
    task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
                FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;
}

/*
 * bnx2fc_init_mp_task - build a middle-path (ELS/TM) or ABTS task
 * context for @io_req, including the FC header copied into the task's
 * TX frame buffer.
 */
void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
                struct fcoe_task_ctx_entry *task)
{
    struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
    struct bnx2fc_rport *tgt = io_req->tgt;
    struct fc_frame_header *fc_hdr;
    struct fcoe_ext_mul_sges_ctx *sgl;
    u8 task_type = 0;
    u64 *hdr;
    u64 temp_hdr[3];
    u32 context_id;


    /* Obtain task_type */
    if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
        (io_req->cmd_type == BNX2FC_ELS)) {
        task_type = FCOE_TASK_TYPE_MIDPATH;
    } else if (io_req->cmd_type == BNX2FC_ABTS) {
        task_type = FCOE_TASK_TYPE_ABTS;
    }

    memset(task, 0, sizeof(struct fcoe_task_ctx_entry));

    /* Setup the task from io_req for easy reference */
    io_req->task = task;

    BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
        io_req->cmd_type, task_type);

    /* Tx only */
    if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
        (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
        task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo =
                (u32)mp_req->mp_req_bd_dma;
        task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi =
                (u32)((u64)mp_req->mp_req_bd_dma >> 32);
        task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = 1;
    }

    /* Tx Write Rx Read */
    /* init flags
     */
    task->txwr_rxrd.const_ctx.init_flags = task_type <<
                FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT;
    if (tgt->dev_type == TYPE_TAPE)
        task->txwr_rxrd.const_ctx.init_flags |=
                FCOE_TASK_DEV_TYPE_TAPE <<
                FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
    else
        task->txwr_rxrd.const_ctx.init_flags |=
                FCOE_TASK_DEV_TYPE_DISK <<
                FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT;
    task->txwr_rxrd.const_ctx.init_flags |=
                FCOE_TASK_CLASS_TYPE_3 <<
                FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT;

    /* tx flags */
    task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_INIT <<
                FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT;

    /* Rx Write Tx Read */
    task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;

    /* rx flags */
    task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
                FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;

    context_id = tgt->context_id;
    task->rxwr_txrd.const_ctx.init_flags = context_id <<
                FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;

    fc_hdr = &(mp_req->req_fc_hdr);
    /* midpath owns the OX_ID; unsolicited owns the RX_ID */
    if (task_type == FCOE_TASK_TYPE_MIDPATH) {
        fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
        fc_hdr->fh_rx_id = htons(0xffff);
        task->rxwr_txrd.var_ctx.rx_id = 0xffff;
    } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
        fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
    }

    /* Fill FC Header into middle path buffer */
    hdr = (u64 *) &task->txwr_rxrd.union_ctx.tx_frame.fc_hdr;
    /* copy through a temp buffer, then store big-endian 64-bit words */
    memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
    hdr[0] = cpu_to_be64(temp_hdr[0]);
    hdr[1] = cpu_to_be64(temp_hdr[1]);
    hdr[2] = cpu_to_be64(temp_hdr[2]);

    /* Rx Only */
    if (task_type == FCOE_TASK_TYPE_MIDPATH) {
        sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;

        sgl->mul_sgl.cur_sge_addr.lo = (u32)mp_req->mp_resp_bd_dma;
        sgl->mul_sgl.cur_sge_addr.hi =
                (u32)((u64)mp_req->mp_resp_bd_dma >> 32);
        sgl->mul_sgl.sgl_size = 1;
    }
}

/*
 * bnx2fc_init_task - build the task context for a normal SCSI read or
 * write command, including the FCP_CMND IU and the TX/RX SGLs.
 */
void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
                      struct fcoe_task_ctx_entry *task)
{
    u8 task_type;
    struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
    struct io_bdt *bd_tbl = io_req->bd_tbl;
    struct bnx2fc_rport *tgt = io_req->tgt;
    struct fcoe_cached_sge_ctx *cached_sge;
    struct
fcoe_ext_mul_sges_ctx *sgl; int dev_type = tgt->dev_type; u64 *fcp_cmnd; u64 tmp_fcp_cmnd[4]; u32 context_id; int cnt, i; int bd_count; memset(task, 0, sizeof(struct fcoe_task_ctx_entry)); /* Setup the task from io_req for easy reference */ io_req->task = task; if (sc_cmd->sc_data_direction == DMA_TO_DEVICE) task_type = FCOE_TASK_TYPE_WRITE; else task_type = FCOE_TASK_TYPE_READ; /* Tx only */ bd_count = bd_tbl->bd_valid; cached_sge = &task->rxwr_only.union_ctx.read_info.sgl_ctx.cached_sge; if (task_type == FCOE_TASK_TYPE_WRITE) { if ((dev_type == TYPE_DISK) && (bd_count == 1)) { struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl; task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.lo = cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo; task->txwr_only.sgl_ctx.cached_sge.cur_buf_addr.hi = cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi; task->txwr_only.sgl_ctx.cached_sge.cur_buf_rem = cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len; task->txwr_rxrd.const_ctx.init_flags |= 1 << FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT; } else { task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma; task->txwr_only.sgl_ctx.sgl.mul_sgl.cur_sge_addr.hi = (u32)((u64)bd_tbl->bd_tbl_dma >> 32); task->txwr_only.sgl_ctx.sgl.mul_sgl.sgl_size = bd_tbl->bd_valid; } } /*Tx Write Rx Read */ /* Init state to NORMAL */ task->txwr_rxrd.const_ctx.init_flags |= task_type << FCOE_TCE_TX_WR_RX_RD_CONST_TASK_TYPE_SHIFT; if (dev_type == TYPE_TAPE) { task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_DEV_TYPE_TAPE << FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; io_req->rec_retry = 0; io_req->rec_retry = 0; } else task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_DEV_TYPE_DISK << FCOE_TCE_TX_WR_RX_RD_CONST_DEV_TYPE_SHIFT; task->txwr_rxrd.const_ctx.init_flags |= FCOE_TASK_CLASS_TYPE_3 << FCOE_TCE_TX_WR_RX_RD_CONST_CLASS_TYPE_SHIFT; /* tx flags */ task->txwr_rxrd.const_ctx.tx_flags = FCOE_TASK_TX_STATE_NORMAL << FCOE_TCE_TX_WR_RX_RD_CONST_TX_STATE_SHIFT; /* Set initial seq counter 
     */
    task->txwr_rxrd.union_ctx.tx_seq.ctx.seq_cnt = 1;

    /* Fill FCP_CMND IU */
    fcp_cmnd = (u64 *)
            task->txwr_rxrd.union_ctx.fcp_cmd.opaque;
    bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);

    /* swap fcp_cmnd */
    cnt = sizeof(struct fcp_cmnd) / sizeof(u64);

    for (i = 0; i < cnt; i++) {
        *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
        fcp_cmnd++;
    }

    /* Rx Write Tx Read */
    task->rxwr_txrd.const_ctx.data_2_trns = io_req->data_xfer_len;

    context_id = tgt->context_id;
    task->rxwr_txrd.const_ctx.init_flags = context_id <<
                FCOE_TCE_RX_WR_TX_RD_CONST_CID_SHIFT;

    /* rx flags */
    /* Set state to "waiting for the first packet" */
    task->rxwr_txrd.var_ctx.rx_flags |= 1 <<
                FCOE_TCE_RX_WR_TX_RD_VAR_EXP_FIRST_FRAME_SHIFT;

    task->rxwr_txrd.var_ctx.rx_id = 0xffff;

    /* Rx Only */
    if (task_type != FCOE_TASK_TYPE_READ)
        return;

    sgl = &task->rxwr_only.union_ctx.read_info.sgl_ctx.sgl;
    bd_count = bd_tbl->bd_valid;

    if (dev_type == TYPE_DISK) {
        if (bd_count == 1) {
            /* one BD: single cached SGE fast path */

            struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;

            cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
            cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
            cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;
            task->txwr_rxrd.const_ctx.init_flags |= 1 <<
                FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
        } else if (bd_count == 2) {
            /* two BDs: both fit in the cached SGE context */
            struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;

            cached_sge->cur_buf_addr.lo = fcoe_bd_tbl->buf_addr_lo;
            cached_sge->cur_buf_addr.hi = fcoe_bd_tbl->buf_addr_hi;
            cached_sge->cur_buf_rem = fcoe_bd_tbl->buf_len;

            fcoe_bd_tbl++;
            cached_sge->second_buf_addr.lo =
                         fcoe_bd_tbl->buf_addr_lo;
            cached_sge->second_buf_addr.hi =
                        fcoe_bd_tbl->buf_addr_hi;
            cached_sge->second_buf_rem = fcoe_bd_tbl->buf_len;
            task->txwr_rxrd.const_ctx.init_flags |= 1 <<
                FCOE_TCE_TX_WR_RX_RD_CONST_CACHED_SGE_SHIFT;
        } else {
            /* more than two BDs: use the external SGL */

            sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
            sgl->mul_sgl.cur_sge_addr.hi =
                    (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
            sgl->mul_sgl.sgl_size = bd_count;
        }
    } else {
        /* tape devices always use the external SGL */
        sgl->mul_sgl.cur_sge_addr.lo = (u32)bd_tbl->bd_tbl_dma;
sgl->mul_sgl.cur_sge_addr.hi =
                (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
        sgl->mul_sgl.sgl_size = bd_count;
    }
}

/**
 * bnx2fc_setup_task_ctx - allocate and map task context
 *
 * @hba:	pointer to adapter structure
 *
 * allocate memory for task context, and associated BD table to be used
 * by firmware
 *
 */
int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
{
    int rc = 0;
    struct regpair *task_ctx_bdt;
    dma_addr_t addr;
    int task_ctx_arr_sz;
    int i;

    /*
     * Allocate task context bd table. A page size of bd table
     * can map 256 buffers. Each buffer contains 32 task context
     * entries. Hence the limit with one page is 8192 task context
     * entries.
     */
    hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
                          PAGE_SIZE,
                          &hba->task_ctx_bd_dma,
                          GFP_KERNEL);
    if (!hba->task_ctx_bd_tbl) {
        printk(KERN_ERR PFX "unable to allocate task context BDT\n");
        rc = -1;
        goto out;
    }
    memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);

    /*
     * Allocate task_ctx which is an array of pointers pointing to
     * a page containing 32 task contexts
     */
    task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE);
    hba->task_ctx = kzalloc((task_ctx_arr_sz * sizeof(void *)),
                 GFP_KERNEL);
    if (!hba->task_ctx) {
        printk(KERN_ERR PFX "unable to allocate task context array\n");
        rc = -1;
        goto out1;
    }

    /*
     * Allocate task_ctx_dma which is an array of dma addresses
     */
    hba->task_ctx_dma = kmalloc((task_ctx_arr_sz *
                    sizeof(dma_addr_t)), GFP_KERNEL);
    if (!hba->task_ctx_dma) {
        printk(KERN_ERR PFX "unable to alloc context mapping array\n");
        rc = -1;
        goto out2;
    }

    /* fill the BD table with the DMA address of each context page */
    task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
    for (i = 0; i < task_ctx_arr_sz; i++) {

        hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
                              PAGE_SIZE,
                              &hba->task_ctx_dma[i],
                              GFP_KERNEL);
        if (!hba->task_ctx[i]) {
            printk(KERN_ERR PFX "unable to alloc task context\n");
            rc = -1;
            goto out3;
        }
        memset(hba->task_ctx[i], 0, PAGE_SIZE);
        addr = (u64)hba->task_ctx_dma[i];
        task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
        task_ctx_bdt->lo = cpu_to_le32((u32)addr);
        task_ctx_bdt++;
    }
    return 0;
out3: for (i = 0; i < task_ctx_arr_sz; i++) { if (hba->task_ctx[i]) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->task_ctx[i], hba->task_ctx_dma[i]); hba->task_ctx[i] = NULL; } } kfree(hba->task_ctx_dma); hba->task_ctx_dma = NULL; out2: kfree(hba->task_ctx); hba->task_ctx = NULL; out1: dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma); hba->task_ctx_bd_tbl = NULL; out: return rc; } void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba) { int task_ctx_arr_sz; int i; if (hba->task_ctx_bd_tbl) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma); hba->task_ctx_bd_tbl = NULL; } task_ctx_arr_sz = (hba->max_tasks / BNX2FC_TASKS_PER_PAGE); if (hba->task_ctx) { for (i = 0; i < task_ctx_arr_sz; i++) { if (hba->task_ctx[i]) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->task_ctx[i], hba->task_ctx_dma[i]); hba->task_ctx[i] = NULL; } } kfree(hba->task_ctx); hba->task_ctx = NULL; } kfree(hba->task_ctx_dma); hba->task_ctx_dma = NULL; } static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba) { int i; int segment_count; int hash_table_size; u32 *pbl; segment_count = hba->hash_tbl_segment_count; hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL * sizeof(struct fcoe_hash_table_entry); pbl = hba->hash_tbl_pbl; for (i = 0; i < segment_count; ++i) { dma_addr_t dma_address; dma_address = le32_to_cpu(*pbl); ++pbl; dma_address += ((u64)le32_to_cpu(*pbl)) << 32; ++pbl; dma_free_coherent(&hba->pcidev->dev, BNX2FC_HASH_TBL_CHUNK_SIZE, hba->hash_tbl_segments[i], dma_address); } if (hba->hash_tbl_pbl) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->hash_tbl_pbl, hba->hash_tbl_pbl_dma); hba->hash_tbl_pbl = NULL; } } static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba) { int i; int hash_table_size; int segment_count; int segment_array_size; int dma_segment_array_size; dma_addr_t *dma_segment_array; u32 *pbl; hash_table_size = BNX2FC_NUM_MAX_SESS * 
BNX2FC_MAX_ROWS_IN_HASH_TBL * sizeof(struct fcoe_hash_table_entry); segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1; segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE; hba->hash_tbl_segment_count = segment_count; segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments); hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL); if (!hba->hash_tbl_segments) { printk(KERN_ERR PFX "hash table pointers alloc failed\n"); return -ENOMEM; } dma_segment_array_size = segment_count * sizeof(*dma_segment_array); dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL); if (!dma_segment_array) { printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n"); return -ENOMEM; } for (i = 0; i < segment_count; ++i) { hba->hash_tbl_segments[i] = dma_alloc_coherent(&hba->pcidev->dev, BNX2FC_HASH_TBL_CHUNK_SIZE, &dma_segment_array[i], GFP_KERNEL); if (!hba->hash_tbl_segments[i]) { printk(KERN_ERR PFX "hash segment alloc failed\n"); while (--i >= 0) { dma_free_coherent(&hba->pcidev->dev, BNX2FC_HASH_TBL_CHUNK_SIZE, hba->hash_tbl_segments[i], dma_segment_array[i]); hba->hash_tbl_segments[i] = NULL; } kfree(dma_segment_array); return -ENOMEM; } memset(hba->hash_tbl_segments[i], 0, BNX2FC_HASH_TBL_CHUNK_SIZE); } hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, &hba->hash_tbl_pbl_dma, GFP_KERNEL); if (!hba->hash_tbl_pbl) { printk(KERN_ERR PFX "hash table pbl alloc failed\n"); kfree(dma_segment_array); return -ENOMEM; } memset(hba->hash_tbl_pbl, 0, PAGE_SIZE); pbl = hba->hash_tbl_pbl; for (i = 0; i < segment_count; ++i) { u64 paddr = dma_segment_array[i]; *pbl = cpu_to_le32((u32) paddr); ++pbl; *pbl = cpu_to_le32((u32) (paddr >> 32)); ++pbl; } pbl = hba->hash_tbl_pbl; i = 0; while (*pbl && *(pbl + 1)) { u32 lo; u32 hi; lo = *pbl; ++pbl; hi = *pbl; ++pbl; ++i; } kfree(dma_segment_array); return 0; } /** * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer * * @hba: Pointer to adapter structure * */ int 
bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba) { u64 addr; u32 mem_size; int i; if (bnx2fc_allocate_hash_table(hba)) return -ENOMEM; mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair); hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size, &hba->t2_hash_tbl_ptr_dma, GFP_KERNEL); if (!hba->t2_hash_tbl_ptr) { printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n"); bnx2fc_free_fw_resc(hba); return -ENOMEM; } memset(hba->t2_hash_tbl_ptr, 0x00, mem_size); mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct fcoe_t2_hash_table_entry); hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size, &hba->t2_hash_tbl_dma, GFP_KERNEL); if (!hba->t2_hash_tbl) { printk(KERN_ERR PFX "unable to allocate t2 hash table\n"); bnx2fc_free_fw_resc(hba); return -ENOMEM; } memset(hba->t2_hash_tbl, 0x00, mem_size); for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) { addr = (unsigned long) hba->t2_hash_tbl_dma + ((i+1) * sizeof(struct fcoe_t2_hash_table_entry)); hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff; hba->t2_hash_tbl[i].next.hi = addr >> 32; } hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, &hba->dummy_buf_dma, GFP_KERNEL); if (!hba->dummy_buffer) { printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n"); bnx2fc_free_fw_resc(hba); return -ENOMEM; } hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev, PAGE_SIZE, &hba->stats_buf_dma, GFP_KERNEL); if (!hba->stats_buffer) { printk(KERN_ERR PFX "unable to alloc Stats Buffer\n"); bnx2fc_free_fw_resc(hba); return -ENOMEM; } memset(hba->stats_buffer, 0x00, PAGE_SIZE); return 0; } void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba) { u32 mem_size; if (hba->stats_buffer) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->stats_buffer, hba->stats_buf_dma); hba->stats_buffer = NULL; } if (hba->dummy_buffer) { dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE, hba->dummy_buffer, hba->dummy_buf_dma); hba->dummy_buffer = NULL; } if (hba->t2_hash_tbl_ptr) { mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct 
regpair); dma_free_coherent(&hba->pcidev->dev, mem_size, hba->t2_hash_tbl_ptr, hba->t2_hash_tbl_ptr_dma); hba->t2_hash_tbl_ptr = NULL; } if (hba->t2_hash_tbl) { mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct fcoe_t2_hash_table_entry); dma_free_coherent(&hba->pcidev->dev, mem_size, hba->t2_hash_tbl, hba->t2_hash_tbl_dma); hba->t2_hash_tbl = NULL; } bnx2fc_free_hash_table(hba); }
gpl-2.0
CarbonROM/android_kernel_htc_msm8960
fs/ext4/move_extent.c
1722
40292
/* * Copyright (c) 2008,2009 NEC Software Tohoku, Ltd. * Written by Takashi Sato <t-sato@yk.jp.nec.com> * Akira Fujita <a-fujita@rs.jp.nec.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2.1 of the GNU Lesser General Public License * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/fs.h> #include <linux/quotaops.h> #include <linux/slab.h> #include "ext4_jbd2.h" #include "ext4.h" /** * get_ext_path - Find an extent path for designated logical block number. * * @inode: an inode which is searched * @lblock: logical block number to find an extent path * @path: pointer to an extent path pointer (for output) * * ext4_ext_find_extent wrapper. Return 0 on success, or a negative error value * on failure. 
*/ static inline int get_ext_path(struct inode *inode, ext4_lblk_t lblock, struct ext4_ext_path **path) { int ret = 0; *path = ext4_ext_find_extent(inode, lblock, *path); if (IS_ERR(*path)) { ret = PTR_ERR(*path); *path = NULL; } else if ((*path)[ext_depth(inode)].p_ext == NULL) ret = -ENODATA; return ret; } /** * copy_extent_status - Copy the extent's initialization status * * @src: an extent for getting initialize status * @dest: an extent to be set the status */ static void copy_extent_status(struct ext4_extent *src, struct ext4_extent *dest) { if (ext4_ext_is_uninitialized(src)) ext4_ext_mark_uninitialized(dest); else dest->ee_len = cpu_to_le16(ext4_ext_get_actual_len(dest)); } /** * mext_next_extent - Search for the next extent and set it to "extent" * * @inode: inode which is searched * @path: this will obtain data for the next extent * @extent: pointer to the next extent we have just gotten * * Search the next extent in the array of ext4_ext_path structure (@path) * and set it to ext4_extent structure (@extent). In addition, the member of * @path (->p_ext) also points the next extent. Return 0 on success, 1 if * ext4_ext_path structure refers to the last extent, or a negative error * value on failure. 
*/ static int mext_next_extent(struct inode *inode, struct ext4_ext_path *path, struct ext4_extent **extent) { struct ext4_extent_header *eh; int ppos, leaf_ppos = path->p_depth; ppos = leaf_ppos; if (EXT_LAST_EXTENT(path[ppos].p_hdr) > path[ppos].p_ext) { /* leaf block */ *extent = ++path[ppos].p_ext; path[ppos].p_block = ext4_ext_pblock(path[ppos].p_ext); return 0; } while (--ppos >= 0) { if (EXT_LAST_INDEX(path[ppos].p_hdr) > path[ppos].p_idx) { int cur_ppos = ppos; /* index block */ path[ppos].p_idx++; path[ppos].p_block = ext4_idx_pblock(path[ppos].p_idx); if (path[ppos+1].p_bh) brelse(path[ppos+1].p_bh); path[ppos+1].p_bh = sb_bread(inode->i_sb, path[ppos].p_block); if (!path[ppos+1].p_bh) return -EIO; path[ppos+1].p_hdr = ext_block_hdr(path[ppos+1].p_bh); /* Halfway index block */ while (++cur_ppos < leaf_ppos) { path[cur_ppos].p_idx = EXT_FIRST_INDEX(path[cur_ppos].p_hdr); path[cur_ppos].p_block = ext4_idx_pblock(path[cur_ppos].p_idx); if (path[cur_ppos+1].p_bh) brelse(path[cur_ppos+1].p_bh); path[cur_ppos+1].p_bh = sb_bread(inode->i_sb, path[cur_ppos].p_block); if (!path[cur_ppos+1].p_bh) return -EIO; path[cur_ppos+1].p_hdr = ext_block_hdr(path[cur_ppos+1].p_bh); } path[leaf_ppos].p_ext = *extent = NULL; eh = path[leaf_ppos].p_hdr; if (le16_to_cpu(eh->eh_entries) == 0) /* empty leaf is found */ return -ENODATA; /* leaf block */ path[leaf_ppos].p_ext = *extent = EXT_FIRST_EXTENT(path[leaf_ppos].p_hdr); path[leaf_ppos].p_block = ext4_ext_pblock(path[leaf_ppos].p_ext); return 0; } } /* We found the last extent */ return 1; } /** * double_down_write_data_sem - Acquire two inodes' write lock of i_data_sem * * Acquire write lock of i_data_sem of the two inodes */ static void double_down_write_data_sem(struct inode *first, struct inode *second) { if (first < second) { down_write(&EXT4_I(first)->i_data_sem); down_write_nested(&EXT4_I(second)->i_data_sem, SINGLE_DEPTH_NESTING); } else { down_write(&EXT4_I(second)->i_data_sem); 
down_write_nested(&EXT4_I(first)->i_data_sem, SINGLE_DEPTH_NESTING); } } /** * double_up_write_data_sem - Release two inodes' write lock of i_data_sem * * @orig_inode: original inode structure to be released its lock first * @donor_inode: donor inode structure to be released its lock second * Release write lock of i_data_sem of two inodes (orig and donor). */ static void double_up_write_data_sem(struct inode *orig_inode, struct inode *donor_inode) { up_write(&EXT4_I(orig_inode)->i_data_sem); up_write(&EXT4_I(donor_inode)->i_data_sem); } /** * mext_insert_across_blocks - Insert extents across leaf block * * @handle: journal handle * @orig_inode: original inode * @o_start: first original extent to be changed * @o_end: last original extent to be changed * @start_ext: first new extent to be inserted * @new_ext: middle of new extent to be inserted * @end_ext: last new extent to be inserted * * Allocate a new leaf block and insert extents into it. Return 0 on success, * or a negative error value on failure. 
*/ static int mext_insert_across_blocks(handle_t *handle, struct inode *orig_inode, struct ext4_extent *o_start, struct ext4_extent *o_end, struct ext4_extent *start_ext, struct ext4_extent *new_ext, struct ext4_extent *end_ext) { struct ext4_ext_path *orig_path = NULL; ext4_lblk_t eblock = 0; int new_flag = 0; int end_flag = 0; int err = 0; if (start_ext->ee_len && new_ext->ee_len && end_ext->ee_len) { if (o_start == o_end) { /* start_ext new_ext end_ext * donor |---------|-----------|--------| * orig |------------------------------| */ end_flag = 1; } else { /* start_ext new_ext end_ext * donor |---------|----------|---------| * orig |---------------|--------------| */ o_end->ee_block = end_ext->ee_block; o_end->ee_len = end_ext->ee_len; ext4_ext_store_pblock(o_end, ext4_ext_pblock(end_ext)); } o_start->ee_len = start_ext->ee_len; eblock = le32_to_cpu(start_ext->ee_block); new_flag = 1; } else if (start_ext->ee_len && new_ext->ee_len && !end_ext->ee_len && o_start == o_end) { /* start_ext new_ext * donor |--------------|---------------| * orig |------------------------------| */ o_start->ee_len = start_ext->ee_len; eblock = le32_to_cpu(start_ext->ee_block); new_flag = 1; } else if (!start_ext->ee_len && new_ext->ee_len && end_ext->ee_len && o_start == o_end) { /* new_ext end_ext * donor |--------------|---------------| * orig |------------------------------| */ o_end->ee_block = end_ext->ee_block; o_end->ee_len = end_ext->ee_len; ext4_ext_store_pblock(o_end, ext4_ext_pblock(end_ext)); /* * Set 0 to the extent block if new_ext was * the first block. 
*/ if (new_ext->ee_block) eblock = le32_to_cpu(new_ext->ee_block); new_flag = 1; } else { ext4_debug("ext4 move extent: Unexpected insert case\n"); return -EIO; } if (new_flag) { err = get_ext_path(orig_inode, eblock, &orig_path); if (err) goto out; if (ext4_ext_insert_extent(handle, orig_inode, orig_path, new_ext, 0)) goto out; } if (end_flag) { err = get_ext_path(orig_inode, le32_to_cpu(end_ext->ee_block) - 1, &orig_path); if (err) goto out; if (ext4_ext_insert_extent(handle, orig_inode, orig_path, end_ext, 0)) goto out; } out: if (orig_path) { ext4_ext_drop_refs(orig_path); kfree(orig_path); } return err; } /** * mext_insert_inside_block - Insert new extent to the extent block * * @o_start: first original extent to be moved * @o_end: last original extent to be moved * @start_ext: first new extent to be inserted * @new_ext: middle of new extent to be inserted * @end_ext: last new extent to be inserted * @eh: extent header of target leaf block * @range_to_move: used to decide how to insert extent * * Insert extents into the leaf block. The extent (@o_start) is overwritten * by inserted extents. 
*/ static void mext_insert_inside_block(struct ext4_extent *o_start, struct ext4_extent *o_end, struct ext4_extent *start_ext, struct ext4_extent *new_ext, struct ext4_extent *end_ext, struct ext4_extent_header *eh, int range_to_move) { int i = 0; unsigned long len; /* Move the existing extents */ if (range_to_move && o_end < EXT_LAST_EXTENT(eh)) { len = (unsigned long)(EXT_LAST_EXTENT(eh) + 1) - (unsigned long)(o_end + 1); memmove(o_end + 1 + range_to_move, o_end + 1, len); } /* Insert start entry */ if (start_ext->ee_len) o_start[i++].ee_len = start_ext->ee_len; /* Insert new entry */ if (new_ext->ee_len) { o_start[i] = *new_ext; ext4_ext_store_pblock(&o_start[i++], ext4_ext_pblock(new_ext)); } /* Insert end entry */ if (end_ext->ee_len) o_start[i] = *end_ext; /* Increment the total entries counter on the extent block */ le16_add_cpu(&eh->eh_entries, range_to_move); } /** * mext_insert_extents - Insert new extent * * @handle: journal handle * @orig_inode: original inode * @orig_path: path indicates first extent to be changed * @o_start: first original extent to be changed * @o_end: last original extent to be changed * @start_ext: first new extent to be inserted * @new_ext: middle of new extent to be inserted * @end_ext: last new extent to be inserted * * Call the function to insert extents. If we cannot add more extents into * the leaf block, we call mext_insert_across_blocks() to create a * new leaf block. Otherwise call mext_insert_inside_block(). Return 0 * on success, or a negative error value on failure. */ static int mext_insert_extents(handle_t *handle, struct inode *orig_inode, struct ext4_ext_path *orig_path, struct ext4_extent *o_start, struct ext4_extent *o_end, struct ext4_extent *start_ext, struct ext4_extent *new_ext, struct ext4_extent *end_ext) { struct ext4_extent_header *eh; unsigned long need_slots, slots_range; int range_to_move, depth, ret; /* * The extents need to be inserted * start_extent + new_extent + end_extent. 
*/ need_slots = (start_ext->ee_len ? 1 : 0) + (end_ext->ee_len ? 1 : 0) + (new_ext->ee_len ? 1 : 0); /* The number of slots between start and end */ slots_range = ((unsigned long)(o_end + 1) - (unsigned long)o_start + 1) / sizeof(struct ext4_extent); /* Range to move the end of extent */ range_to_move = need_slots - slots_range; depth = orig_path->p_depth; orig_path += depth; eh = orig_path->p_hdr; if (depth) { /* Register to journal */ ret = ext4_journal_get_write_access(handle, orig_path->p_bh); if (ret) return ret; } /* Expansion */ if (range_to_move > 0 && (range_to_move > le16_to_cpu(eh->eh_max) - le16_to_cpu(eh->eh_entries))) { ret = mext_insert_across_blocks(handle, orig_inode, o_start, o_end, start_ext, new_ext, end_ext); if (ret < 0) return ret; } else mext_insert_inside_block(o_start, o_end, start_ext, new_ext, end_ext, eh, range_to_move); if (depth) { ret = ext4_handle_dirty_metadata(handle, orig_inode, orig_path->p_bh); if (ret) return ret; } else { ret = ext4_mark_inode_dirty(handle, orig_inode); if (ret < 0) return ret; } return 0; } /** * mext_leaf_block - Move one leaf extent block into the inode. * * @handle: journal handle * @orig_inode: original inode * @orig_path: path indicates first extent to be changed * @dext: donor extent * @from: start offset on the target file * * In order to insert extents into the leaf block, we must divide the extent * in the leaf block into three extents. The one is located to be inserted * extents, and the others are located around it. * * Therefore, this function creates structures to save extents of the leaf * block, and inserts extents by calling mext_insert_extents() with * created extents. Return 0 on success, or a negative error value on failure. 
*/ static int mext_leaf_block(handle_t *handle, struct inode *orig_inode, struct ext4_ext_path *orig_path, struct ext4_extent *dext, ext4_lblk_t *from) { struct ext4_extent *oext, *o_start, *o_end, *prev_ext; struct ext4_extent new_ext, start_ext, end_ext; ext4_lblk_t new_ext_end; int oext_alen, new_ext_alen, end_ext_alen; int depth = ext_depth(orig_inode); int ret; start_ext.ee_block = end_ext.ee_block = 0; o_start = o_end = oext = orig_path[depth].p_ext; oext_alen = ext4_ext_get_actual_len(oext); start_ext.ee_len = end_ext.ee_len = 0; new_ext.ee_block = cpu_to_le32(*from); ext4_ext_store_pblock(&new_ext, ext4_ext_pblock(dext)); new_ext.ee_len = dext->ee_len; new_ext_alen = ext4_ext_get_actual_len(&new_ext); new_ext_end = le32_to_cpu(new_ext.ee_block) + new_ext_alen - 1; /* * Case: original extent is first * oext |--------| * new_ext |--| * start_ext |--| */ if (le32_to_cpu(oext->ee_block) < le32_to_cpu(new_ext.ee_block) && le32_to_cpu(new_ext.ee_block) < le32_to_cpu(oext->ee_block) + oext_alen) { start_ext.ee_len = cpu_to_le16(le32_to_cpu(new_ext.ee_block) - le32_to_cpu(oext->ee_block)); start_ext.ee_block = oext->ee_block; copy_extent_status(oext, &start_ext); } else if (oext > EXT_FIRST_EXTENT(orig_path[depth].p_hdr)) { prev_ext = oext - 1; /* * We can merge new_ext into previous extent, * if these are contiguous and same extent type. 
*/ if (ext4_can_extents_be_merged(orig_inode, prev_ext, &new_ext)) { o_start = prev_ext; start_ext.ee_len = cpu_to_le16( ext4_ext_get_actual_len(prev_ext) + new_ext_alen); start_ext.ee_block = oext->ee_block; copy_extent_status(prev_ext, &start_ext); new_ext.ee_len = 0; } } /* * Case: new_ext_end must be less than oext * oext |-----------| * new_ext |-------| */ if (le32_to_cpu(oext->ee_block) + oext_alen - 1 < new_ext_end) { EXT4_ERROR_INODE(orig_inode, "new_ext_end(%u) should be less than or equal to " "oext->ee_block(%u) + oext_alen(%d) - 1", new_ext_end, le32_to_cpu(oext->ee_block), oext_alen); ret = -EIO; goto out; } /* * Case: new_ext is smaller than original extent * oext |---------------| * new_ext |-----------| * end_ext |---| */ if (le32_to_cpu(oext->ee_block) <= new_ext_end && new_ext_end < le32_to_cpu(oext->ee_block) + oext_alen - 1) { end_ext.ee_len = cpu_to_le16(le32_to_cpu(oext->ee_block) + oext_alen - 1 - new_ext_end); copy_extent_status(oext, &end_ext); end_ext_alen = ext4_ext_get_actual_len(&end_ext); ext4_ext_store_pblock(&end_ext, (ext4_ext_pblock(o_end) + oext_alen - end_ext_alen)); end_ext.ee_block = cpu_to_le32(le32_to_cpu(o_end->ee_block) + oext_alen - end_ext_alen); } ret = mext_insert_extents(handle, orig_inode, orig_path, o_start, o_end, &start_ext, &new_ext, &end_ext); out: return ret; } /** * mext_calc_swap_extents - Calculate extents for extent swapping. * * @tmp_dext: the extent that will belong to the original inode * @tmp_oext: the extent that will belong to the donor inode * @orig_off: block offset of original inode * @donor_off: block offset of donor inode * @max_count: the maximum length of extents * * Return 0 on success, or a negative error value on failure. 
*/ static int mext_calc_swap_extents(struct ext4_extent *tmp_dext, struct ext4_extent *tmp_oext, ext4_lblk_t orig_off, ext4_lblk_t donor_off, ext4_lblk_t max_count) { ext4_lblk_t diff, orig_diff; struct ext4_extent dext_old, oext_old; BUG_ON(orig_off != donor_off); /* original and donor extents have to cover the same block offset */ if (orig_off < le32_to_cpu(tmp_oext->ee_block) || le32_to_cpu(tmp_oext->ee_block) + ext4_ext_get_actual_len(tmp_oext) - 1 < orig_off) return -ENODATA; if (orig_off < le32_to_cpu(tmp_dext->ee_block) || le32_to_cpu(tmp_dext->ee_block) + ext4_ext_get_actual_len(tmp_dext) - 1 < orig_off) return -ENODATA; dext_old = *tmp_dext; oext_old = *tmp_oext; /* When tmp_dext is too large, pick up the target range. */ diff = donor_off - le32_to_cpu(tmp_dext->ee_block); ext4_ext_store_pblock(tmp_dext, ext4_ext_pblock(tmp_dext) + diff); tmp_dext->ee_block = cpu_to_le32(le32_to_cpu(tmp_dext->ee_block) + diff); tmp_dext->ee_len = cpu_to_le16(le16_to_cpu(tmp_dext->ee_len) - diff); if (max_count < ext4_ext_get_actual_len(tmp_dext)) tmp_dext->ee_len = cpu_to_le16(max_count); orig_diff = orig_off - le32_to_cpu(tmp_oext->ee_block); ext4_ext_store_pblock(tmp_oext, ext4_ext_pblock(tmp_oext) + orig_diff); /* Adjust extent length if donor extent is larger than orig */ if (ext4_ext_get_actual_len(tmp_dext) > ext4_ext_get_actual_len(tmp_oext) - orig_diff) tmp_dext->ee_len = cpu_to_le16(le16_to_cpu(tmp_oext->ee_len) - orig_diff); tmp_oext->ee_len = cpu_to_le16(ext4_ext_get_actual_len(tmp_dext)); copy_extent_status(&oext_old, tmp_dext); copy_extent_status(&dext_old, tmp_oext); return 0; } /** * mext_replace_branches - Replace original extents with new extents * * @handle: journal handle * @orig_inode: original inode * @donor_inode: donor inode * @from: block offset of orig_inode * @count: block count to be replaced * @err: pointer to save return value * * Replace original inode extents and donor inode extents page by page. 
* We implement this replacement in the following three steps: * 1. Save the block information of original and donor inodes into * dummy extents. * 2. Change the block information of original inode to point at the * donor inode blocks. * 3. Change the block information of donor inode to point at the saved * original inode blocks in the dummy extents. * * Return replaced block count. */ static int mext_replace_branches(handle_t *handle, struct inode *orig_inode, struct inode *donor_inode, ext4_lblk_t from, ext4_lblk_t count, int *err) { struct ext4_ext_path *orig_path = NULL; struct ext4_ext_path *donor_path = NULL; struct ext4_extent *oext, *dext; struct ext4_extent tmp_dext, tmp_oext; ext4_lblk_t orig_off = from, donor_off = from; int depth; int replaced_count = 0; int dext_alen; /* Protect extent trees against block allocations via delalloc */ double_down_write_data_sem(orig_inode, donor_inode); /* Get the original extent for the block "orig_off" */ *err = get_ext_path(orig_inode, orig_off, &orig_path); if (*err) goto out; /* Get the donor extent for the head */ *err = get_ext_path(donor_inode, donor_off, &donor_path); if (*err) goto out; depth = ext_depth(orig_inode); oext = orig_path[depth].p_ext; tmp_oext = *oext; depth = ext_depth(donor_inode); dext = donor_path[depth].p_ext; tmp_dext = *dext; *err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off, donor_off, count); if (*err) goto out; /* Loop for the donor extents */ while (1) { /* The extent for donor must be found. 
*/ if (!dext) { EXT4_ERROR_INODE(donor_inode, "The extent for donor must be found"); *err = -EIO; goto out; } else if (donor_off != le32_to_cpu(tmp_dext.ee_block)) { EXT4_ERROR_INODE(donor_inode, "Donor offset(%u) and the first block of donor " "extent(%u) should be equal", donor_off, le32_to_cpu(tmp_dext.ee_block)); *err = -EIO; goto out; } /* Set donor extent to orig extent */ *err = mext_leaf_block(handle, orig_inode, orig_path, &tmp_dext, &orig_off); if (*err) goto out; /* Set orig extent to donor extent */ *err = mext_leaf_block(handle, donor_inode, donor_path, &tmp_oext, &donor_off); if (*err) goto out; dext_alen = ext4_ext_get_actual_len(&tmp_dext); replaced_count += dext_alen; donor_off += dext_alen; orig_off += dext_alen; /* Already moved the expected blocks */ if (replaced_count >= count) break; if (orig_path) ext4_ext_drop_refs(orig_path); *err = get_ext_path(orig_inode, orig_off, &orig_path); if (*err) goto out; depth = ext_depth(orig_inode); oext = orig_path[depth].p_ext; tmp_oext = *oext; if (donor_path) ext4_ext_drop_refs(donor_path); *err = get_ext_path(donor_inode, donor_off, &donor_path); if (*err) goto out; depth = ext_depth(donor_inode); dext = donor_path[depth].p_ext; tmp_dext = *dext; *err = mext_calc_swap_extents(&tmp_dext, &tmp_oext, orig_off, donor_off, count - replaced_count); if (*err) goto out; } out: if (orig_path) { ext4_ext_drop_refs(orig_path); kfree(orig_path); } if (donor_path) { ext4_ext_drop_refs(donor_path); kfree(donor_path); } ext4_ext_invalidate_cache(orig_inode); ext4_ext_invalidate_cache(donor_inode); double_up_write_data_sem(orig_inode, donor_inode); return replaced_count; } /** * move_extent_per_page - Move extent data per page * * @o_filp: file structure of original file * @donor_inode: donor inode * @orig_page_offset: page index on original file * @data_offset_in_page: block index where data swapping starts * @block_len_in_page: the number of blocks to be swapped * @uninit: orig extent is uninitialized or not * @err: 
pointer to save return value * * Save the data in original inode blocks and replace original inode extents * with donor inode extents by calling mext_replace_branches(). * Finally, write out the saved data in new original inode blocks. Return * replaced block count. */ static int move_extent_per_page(struct file *o_filp, struct inode *donor_inode, pgoff_t orig_page_offset, int data_offset_in_page, int block_len_in_page, int uninit, int *err) { struct inode *orig_inode = o_filp->f_dentry->d_inode; struct address_space *mapping = orig_inode->i_mapping; struct buffer_head *bh; struct page *page = NULL; const struct address_space_operations *a_ops = mapping->a_ops; handle_t *handle; ext4_lblk_t orig_blk_offset; long long offs = orig_page_offset << PAGE_CACHE_SHIFT; unsigned long blocksize = orig_inode->i_sb->s_blocksize; unsigned int w_flags = 0; unsigned int tmp_data_size, data_size, replaced_size; void *fsdata; int i, jblocks; int err2 = 0; int replaced_count = 0; int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits; /* * It needs twice the amount of ordinary journal buffers because * inode and donor_inode may change each different metadata blocks. */ jblocks = ext4_writepage_trans_blocks(orig_inode) * 2; handle = ext4_journal_start(orig_inode, jblocks); if (IS_ERR(handle)) { *err = PTR_ERR(handle); return 0; } if (segment_eq(get_fs(), KERNEL_DS)) w_flags |= AOP_FLAG_UNINTERRUPTIBLE; orig_blk_offset = orig_page_offset * blocks_per_page + data_offset_in_page; /* * If orig extent is uninitialized one, * it's not necessary force the page into memory * and then force it to be written out again. * Just swap data blocks between orig and donor. 
*/ if (uninit) { replaced_count = mext_replace_branches(handle, orig_inode, donor_inode, orig_blk_offset, block_len_in_page, err); goto out2; } offs = (long long)orig_blk_offset << orig_inode->i_blkbits; /* Calculate data_size */ if ((orig_blk_offset + block_len_in_page - 1) == ((orig_inode->i_size - 1) >> orig_inode->i_blkbits)) { /* Replace the last block */ tmp_data_size = orig_inode->i_size & (blocksize - 1); /* * If data_size equal zero, it shows data_size is multiples of * blocksize. So we set appropriate value. */ if (tmp_data_size == 0) tmp_data_size = blocksize; data_size = tmp_data_size + ((block_len_in_page - 1) << orig_inode->i_blkbits); } else data_size = block_len_in_page << orig_inode->i_blkbits; replaced_size = data_size; *err = a_ops->write_begin(o_filp, mapping, offs, data_size, w_flags, &page, &fsdata); if (unlikely(*err < 0)) goto out; if (!PageUptodate(page)) { mapping->a_ops->readpage(o_filp, page); lock_page(page); } /* * try_to_release_page() doesn't call releasepage in writeback mode. * We should care about the order of writing to the same file * by multiple move extent processes. * It needs to call wait_on_page_writeback() to wait for the * writeback of the page. 
*/ wait_on_page_writeback(page); /* Release old bh and drop refs */ try_to_release_page(page, 0); replaced_count = mext_replace_branches(handle, orig_inode, donor_inode, orig_blk_offset, block_len_in_page, &err2); if (err2) { if (replaced_count) { block_len_in_page = replaced_count; replaced_size = block_len_in_page << orig_inode->i_blkbits; } else goto out; } if (!page_has_buffers(page)) create_empty_buffers(page, 1 << orig_inode->i_blkbits, 0); bh = page_buffers(page); for (i = 0; i < data_offset_in_page; i++) bh = bh->b_this_page; for (i = 0; i < block_len_in_page; i++) { *err = ext4_get_block(orig_inode, (sector_t)(orig_blk_offset + i), bh, 0); if (*err < 0) goto out; if (bh->b_this_page != NULL) bh = bh->b_this_page; } *err = a_ops->write_end(o_filp, mapping, offs, data_size, replaced_size, page, fsdata); page = NULL; out: if (unlikely(page)) { if (PageLocked(page)) unlock_page(page); page_cache_release(page); ext4_journal_stop(handle); } out2: ext4_journal_stop(handle); if (err2) *err = err2; return replaced_count; } /** * mext_check_arguments - Check whether move extent can be done * * @orig_inode: original inode * @donor_inode: donor inode * @orig_start: logical start offset in block for orig * @donor_start: logical start offset in block for donor * @len: the number of blocks to be moved * * Check the arguments of ext4_move_extents() whether the files can be * exchanged with each other. * Return 0 on success, or a negative error value on failure. 
*/
static int
mext_check_arguments(struct inode *orig_inode,
		     struct inode *donor_inode, __u64 orig_start,
		     __u64 donor_start, __u64 *len)
{
	ext4_lblk_t	orig_blocks, donor_blocks;
	unsigned int	blkbits = orig_inode->i_blkbits;
	unsigned int	blocksize = 1 << blkbits;

	/* Refuse a donor carrying setuid/setgid bits; swapping blocks into
	 * such a file is rejected outright. */
	if (donor_inode->i_mode & (S_ISUID|S_ISGID)) {
		ext4_debug("ext4 move extent: suid or sgid is set"
			   " to donor file [ino:orig %lu, donor %lu]\n",
			   orig_inode->i_ino, donor_inode->i_ino);
		return -EINVAL;
	}

	if (IS_IMMUTABLE(donor_inode) || IS_APPEND(donor_inode))
		return -EPERM;

	/* Ext4 move extent does not support swapfile */
	if (IS_SWAPFILE(orig_inode) || IS_SWAPFILE(donor_inode)) {
		ext4_debug("ext4 move extent: The argument files should "
			"not be swapfile [ino:orig %lu, donor %lu]\n",
			orig_inode->i_ino, donor_inode->i_ino);
		return -EINVAL;
	}

	/* Ext4 move extent supports only extent based file */
	if (!(ext4_test_inode_flag(orig_inode, EXT4_INODE_EXTENTS))) {
		ext4_debug("ext4 move extent: orig file is not extents "
			"based file [ino:orig %lu]\n", orig_inode->i_ino);
		return -EOPNOTSUPP;
	} else if (!(ext4_test_inode_flag(donor_inode, EXT4_INODE_EXTENTS))) {
		ext4_debug("ext4 move extent: donor file is not extents "
			"based file [ino:donor %lu]\n", donor_inode->i_ino);
		return -EOPNOTSUPP;
	}

	if ((!orig_inode->i_size) || (!donor_inode->i_size)) {
		ext4_debug("ext4 move extent: File size is 0 byte\n");
		return -EINVAL;
	}

	/* Start offset should be same */
	if (orig_start != donor_start) {
		ext4_debug("ext4 move extent: orig and donor's start "
			"offset are not same [ino:orig %lu, donor %lu]\n",
			orig_inode->i_ino, donor_inode->i_ino);
		return -EINVAL;
	}

	/* Reject any range touching blocks at or beyond EXT_MAX_BLOCKS. */
	if ((orig_start >= EXT_MAX_BLOCKS) ||
	    (donor_start >= EXT_MAX_BLOCKS) ||
	    (*len > EXT_MAX_BLOCKS) ||
	    (orig_start + *len >= EXT_MAX_BLOCKS)) {
		ext4_debug("ext4 move extent: Can't handle over [%u] blocks "
			"[ino:orig %lu, donor %lu]\n", EXT_MAX_BLOCKS,
			orig_inode->i_ino, donor_inode->i_ino);
		return -EINVAL;
	}

	/* Clamp *len so the moved range stays inside the smaller file;
	 * both branches compute the block count by rounding i_size up. */
	if (orig_inode->i_size > donor_inode->i_size) {
		donor_blocks = (donor_inode->i_size + blocksize - 1) >> blkbits;
		/* TODO: eliminate this artificial restriction */
		if (orig_start >= donor_blocks) {
			ext4_debug("ext4 move extent: orig start offset "
			"[%llu] should be less than donor file blocks "
			"[%u] [ino:orig %lu, donor %lu]\n",
			orig_start, donor_blocks,
			orig_inode->i_ino, donor_inode->i_ino);
			return -EINVAL;
		}

		/* TODO: eliminate this artificial restriction */
		if (orig_start + *len > donor_blocks) {
			ext4_debug("ext4 move extent: End offset [%llu] should "
				"be less than donor file blocks [%u]."
				"So adjust length from %llu to %llu "
				"[ino:orig %lu, donor %lu]\n",
				orig_start + *len, donor_blocks,
				*len, donor_blocks - orig_start,
				orig_inode->i_ino, donor_inode->i_ino);
			*len = donor_blocks - orig_start;
		}
	} else {
		orig_blocks = (orig_inode->i_size + blocksize - 1) >> blkbits;
		if (orig_start >= orig_blocks) {
			ext4_debug("ext4 move extent: start offset [%llu] "
				"should be less than original file blocks "
				"[%u] [ino:orig %lu, donor %lu]\n",
				 orig_start, orig_blocks,
				orig_inode->i_ino, donor_inode->i_ino);
			return -EINVAL;
		}

		if (orig_start + *len > orig_blocks) {
			ext4_debug("ext4 move extent: Adjust length "
				"from %llu to %llu. Because it should be "
				"less than original file blocks "
				"[ino:orig %lu, donor %lu]\n",
				*len, orig_blocks - orig_start,
				orig_inode->i_ino, donor_inode->i_ino);
			*len = orig_blocks - orig_start;
		}
	}

	if (!*len) {
		ext4_debug("ext4 move extent: len should not be 0 "
			"[ino:orig %lu, donor %lu]\n",
			orig_inode->i_ino, donor_inode->i_ino);
		return -EINVAL;
	}

	return 0;
}

/**
 * mext_inode_double_lock - Lock i_mutex on both @inode1 and @inode2
 *
 * @inode1:	the inode structure
 * @inode2:	the inode structure
 *
 * Lock two inodes' i_mutex
 */
static void
mext_inode_double_lock(struct inode *inode1, struct inode *inode2)
{
	BUG_ON(inode1 == inode2);
	/* Always lock in address order so concurrent callers can never
	 * deadlock against each other; the nested annotations keep
	 * lockdep happy about taking two i_mutexes. */
	if (inode1 < inode2) {
		mutex_lock_nested(&inode1->i_mutex, I_MUTEX_PARENT);
		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_CHILD);
	} else {
		mutex_lock_nested(&inode2->i_mutex, I_MUTEX_PARENT);
		mutex_lock_nested(&inode1->i_mutex, I_MUTEX_CHILD);
	}
}

/**
 * mext_inode_double_unlock - Release i_mutex on both @inode1 and @inode2
 *
 * @inode1:     the inode that is released first
 * @inode2:     the inode that is released second
 *
 */
static void
mext_inode_double_unlock(struct inode *inode1, struct inode *inode2)
{
	mutex_unlock(&inode1->i_mutex);
	mutex_unlock(&inode2->i_mutex);
}

/**
 * ext4_move_extents - Exchange the specified range of a file
 *
 * @o_filp:		file structure of the original file
 * @d_filp:		file structure of the donor file
 * @orig_start:		start offset in block for orig
 * @donor_start:	start offset in block for donor
 * @len:		the number of blocks to be moved
 * @moved_len:		moved block length
 *
 * This function returns 0 and moved block length is set in moved_len
 * if succeed, otherwise returns error value.
 *
 * Note: ext4_move_extents() proceeds the following order.
 * 1:ext4_move_extents() calculates the last block number of moving extent
 *   function by the start block number (orig_start) and the number of blocks
 *   to be moved (len) specified as arguments.
 *   If the {orig, donor}_start points a hole, the extent's start offset
 *   pointed by ext_cur (current extent), holecheck_path, orig_path are set
 *   after hole behind.
 * 2:Continue step 3 to step 5, until the holecheck_path points to last_extent
 *   or the ext_cur exceeds the block_end which is last logical block number.
 * 3:To get the length of continues area, call mext_next_extent()
 *   specified with the ext_cur (initial value is holecheck_path) re-cursive,
 *   until find un-continuous extent, the start logical block number exceeds
 *   the block_end or the extent points to the last extent.
 * 4:Exchange the original inode data with donor inode data
 *   from orig_page_offset to seq_end_page.
 *   The start indexes of data are specified as arguments.
 *   That of the original inode is orig_page_offset,
 *   and the donor inode is also orig_page_offset
 *   (To easily handle blocksize != pagesize case, the offset for the
 *   donor inode is block unit).
 * 5:Update holecheck_path and orig_path to points a next proceeding extent,
 *   then returns to step 2.
 * 6:Release holecheck_path, orig_path and set the len to moved_len
 *   which shows the number of moved blocks.
 *   The moved_len is useful for the command to calculate the file offset
 *   for starting next move extent ioctl.
 * 7:Return 0 on success, or a negative error value on failure.
 */
int
ext4_move_extents(struct file *o_filp, struct file *d_filp,
		 __u64 orig_start, __u64 donor_start, __u64 len,
		 __u64 *moved_len)
{
	struct inode *orig_inode = o_filp->f_dentry->d_inode;
	struct inode *donor_inode = d_filp->f_dentry->d_inode;
	struct ext4_ext_path *orig_path = NULL, *holecheck_path = NULL;
	struct ext4_extent *ext_prev, *ext_cur, *ext_dummy;
	ext4_lblk_t block_start = orig_start;
	ext4_lblk_t block_end, seq_start, add_blocks, file_end, seq_blocks = 0;
	ext4_lblk_t rest_blocks;
	pgoff_t orig_page_offset = 0, seq_end_page;
	int ret, depth, last_extent = 0;
	int blocks_per_page = PAGE_CACHE_SIZE >> orig_inode->i_blkbits;
	int data_offset_in_page;
	int block_len_in_page;
	int uninit;

	/* orig and donor must live on the same filesystem. */
	if (orig_inode->i_sb != donor_inode->i_sb) {
		ext4_debug("ext4 move extent: The argument files "
			"should be in same FS [ino:orig %lu, donor %lu]\n",
			orig_inode->i_ino, donor_inode->i_ino);
		return -EINVAL;
	}

	/* orig and donor should be different inodes */
	if (orig_inode == donor_inode) {
		ext4_debug("ext4 move extent: The argument files should not "
			"be same inode [ino:orig %lu, donor %lu]\n",
			orig_inode->i_ino, donor_inode->i_ino);
		return -EINVAL;
	}

	/* Regular file check */
	if (!S_ISREG(orig_inode->i_mode) || !S_ISREG(donor_inode->i_mode)) {
		ext4_debug("ext4 move extent: The argument files should be "
			"regular file [ino:orig %lu, donor %lu]\n",
			orig_inode->i_ino, donor_inode->i_ino);
		return -EINVAL;
	}

	/* TODO: This is non obvious task to swap blocks for inodes with full
	   jornaling enabled */
	if (ext4_should_journal_data(orig_inode) ||
	    ext4_should_journal_data(donor_inode)) {
		return -EINVAL;
	}

	/* Protect orig and donor inodes against a truncate */
	mext_inode_double_lock(orig_inode, donor_inode);

	/* Protect extent tree against block allocations via delalloc */
	double_down_write_data_sem(orig_inode, donor_inode);

	/* Check the filesystem environment whether move_extent can be done */
	ret = mext_check_arguments(orig_inode, donor_inode, orig_start,
				   donor_start, &len);
	if (ret)
		goto out;

	/* Clamp len so [block_start, block_end] never reaches past EOF. */
	file_end = (i_size_read(orig_inode) - 1) >> orig_inode->i_blkbits;
	block_end = block_start + len - 1;
	if (file_end < block_end)
		len -= block_end - file_end;

	ret = get_ext_path(orig_inode, block_start, &orig_path);
	if (ret)
		goto out;

	/* Get path structure to check the hole */
	ret = get_ext_path(orig_inode, block_start, &holecheck_path);
	if (ret)
		goto out;

	depth = ext_depth(orig_inode);
	ext_cur = holecheck_path[depth].p_ext;

	/*
	 * Get proper starting location of block replacement if block_start was
	 * within the hole.
	 */
	if (le32_to_cpu(ext_cur->ee_block) +
		ext4_ext_get_actual_len(ext_cur) - 1 < block_start) {
		/*
		 * The hole exists between extents or the tail of
		 * original file.
		 */
		last_extent = mext_next_extent(orig_inode,
					holecheck_path, &ext_cur);
		if (last_extent < 0) {
			ret = last_extent;
			goto out;
		}
		/* Advance orig_path in lock step with holecheck_path. */
		last_extent = mext_next_extent(orig_inode, orig_path,
							&ext_dummy);
		if (last_extent < 0) {
			ret = last_extent;
			goto out;
		}
		seq_start = le32_to_cpu(ext_cur->ee_block);
	} else if (le32_to_cpu(ext_cur->ee_block) > block_start)
		/* The hole exists at the beginning of original file. */
		seq_start = le32_to_cpu(ext_cur->ee_block);
	else
		seq_start = block_start;

	/* No blocks within the specified range. */
	if (le32_to_cpu(ext_cur->ee_block) > block_end) {
		ext4_debug("ext4 move extent: The specified range of file "
							"may be the hole\n");
		ret = -EINVAL;
		goto out;
	}

	/* Adjust start blocks */
	add_blocks = min(le32_to_cpu(ext_cur->ee_block) +
			 ext4_ext_get_actual_len(ext_cur), block_end + 1) -
		     max(le32_to_cpu(ext_cur->ee_block), block_start);

	/* Walk the extent tree, swapping one contiguous run per iteration. */
	while (!last_extent && le32_to_cpu(ext_cur->ee_block) <= block_end) {
		seq_blocks += add_blocks;

		/* Adjust tail blocks */
		if (seq_start + seq_blocks - 1 > block_end)
			seq_blocks = block_end - seq_start + 1;

		ext_prev = ext_cur;
		last_extent = mext_next_extent(orig_inode, holecheck_path,
						&ext_cur);
		if (last_extent < 0) {
			ret = last_extent;
			break;
		}
		add_blocks = ext4_ext_get_actual_len(ext_cur);

		/*
		 * Extend the length of contiguous block (seq_blocks)
		 * if extents are contiguous.
		 */
		if (ext4_can_extents_be_merged(orig_inode,
					       ext_prev, ext_cur) &&
		    block_end >= le32_to_cpu(ext_cur->ee_block) &&
		    !last_extent)
			continue;

		/* Is original extent is uninitialized */
		uninit = ext4_ext_is_uninitialized(ext_prev);

		data_offset_in_page = seq_start % blocks_per_page;

		/*
		 * Calculate data blocks count that should be swapped
		 * at the first page.
		 */
		if (data_offset_in_page + seq_blocks > blocks_per_page) {
			/* Swapped blocks are across pages */
			block_len_in_page =
					blocks_per_page - data_offset_in_page;
		} else {
			/* Swapped blocks are in a page */
			block_len_in_page = seq_blocks;
		}

		orig_page_offset = seq_start >>
				(PAGE_CACHE_SHIFT - orig_inode->i_blkbits);
		seq_end_page = (seq_start + seq_blocks - 1) >>
				(PAGE_CACHE_SHIFT - orig_inode->i_blkbits);
		seq_start = le32_to_cpu(ext_cur->ee_block);
		rest_blocks = seq_blocks;

		/*
		 * Up semaphore to avoid following problems:
		 * a. transaction deadlock among ext4_journal_start,
		 *    ->write_begin via pagefault, and jbd2_journal_commit
		 * b. racing with ->readpage, ->write_begin, and ext4_get_block
		 *    in move_extent_per_page
		 */
		double_up_write_data_sem(orig_inode, donor_inode);

		while (orig_page_offset <= seq_end_page) {

			/* Swap original branches with new branches */
			block_len_in_page = move_extent_per_page(
						o_filp, donor_inode,
						orig_page_offset,
						data_offset_in_page,
						block_len_in_page, uninit,
						&ret);

			/* Count how many blocks we have exchanged */
			*moved_len += block_len_in_page;
			if (ret < 0)
				break;
			if (*moved_len > len) {
				EXT4_ERROR_INODE(orig_inode,
					"We replaced blocks too much! "
					"sum of replaced: %llu requested: %llu",
					*moved_len, len);
				ret = -EIO;
				break;
			}

			orig_page_offset++;
			data_offset_in_page = 0;
			rest_blocks -= block_len_in_page;
			if (rest_blocks > blocks_per_page)
				block_len_in_page = blocks_per_page;
			else
				block_len_in_page = rest_blocks;
		}

		/* Re-take the data sem dropped above before touching the
		 * extent tree again; paths must then be re-read since the
		 * tree may have changed while it was unlocked. */
		double_down_write_data_sem(orig_inode, donor_inode);
		if (ret < 0)
			break;

		/* Decrease buffer counter */
		if (holecheck_path)
			ext4_ext_drop_refs(holecheck_path);
		ret = get_ext_path(orig_inode, seq_start, &holecheck_path);
		if (ret)
			break;
		depth = holecheck_path->p_depth;

		/* Decrease buffer counter */
		if (orig_path)
			ext4_ext_drop_refs(orig_path);
		ret = get_ext_path(orig_inode, seq_start, &orig_path);
		if (ret)
			break;

		ext_cur = holecheck_path[depth].p_ext;
		add_blocks = ext4_ext_get_actual_len(ext_cur);
		seq_blocks = 0;

	}
out:
	/* Drop preallocations if anything was actually exchanged. */
	if (*moved_len) {
		ext4_discard_preallocations(orig_inode);
		ext4_discard_preallocations(donor_inode);
	}

	if (orig_path) {
		ext4_ext_drop_refs(orig_path);
		kfree(orig_path);
	}
	if (holecheck_path) {
		ext4_ext_drop_refs(holecheck_path);
		kfree(holecheck_path);
	}
	double_up_write_data_sem(orig_inode, donor_inode);
	mext_inode_double_unlock(orig_inode, donor_inode);

	return ret;
}
gpl-2.0
mv0/kvm
arch/mips/loongson/common/early_printk.c
3002
1035
/* early printk support * * Copyright (c) 2009 Philippe Vachon <philippe@cowpig.ca> * Copyright (c) 2009 Lemote Inc. * Author: Wu Zhangjin, wuzhangjin@gmail.com * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/serial_reg.h> #include <loongson.h> #define PORT(base, offset) (u8 *)(base + offset) static inline unsigned int serial_in(unsigned char *base, int offset) { return readb(PORT(base, offset)); } static inline void serial_out(unsigned char *base, int offset, int value) { writeb(value, PORT(base, offset)); } void prom_putchar(char c) { int timeout; unsigned char *uart_base; uart_base = (unsigned char *)_loongson_uart_base; timeout = 1024; while (((serial_in(uart_base, UART_LSR) & UART_LSR_THRE) == 0) && (timeout-- > 0)) ; serial_out(uart_base, UART_TX, c); }
gpl-2.0
adknight87/android_kernel_samsung_afyonltetmo
drivers/gpu/drm/i915/i915_drv.c
3258
29286
/* i915_drv.c -- i830,i845,i855,i865,i915 driver -*- linux-c -*- */ /* * * Copyright 2003 Tungsten Graphics, Inc., Cedar Park, Texas. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sub license, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial portions * of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. * IN NO EVENT SHALL TUNGSTEN GRAPHICS AND/OR ITS SUPPLIERS BE LIABLE FOR * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* */ #include <linux/device.h> #include "drmP.h" #include "drm.h" #include "i915_drm.h" #include "i915_drv.h" #include "intel_drv.h" #include <linux/console.h> #include <linux/module.h> #include "drm_crtc_helper.h" static int i915_modeset __read_mostly = -1; module_param_named(modeset, i915_modeset, int, 0400); MODULE_PARM_DESC(modeset, "Use kernel modesetting [KMS] (0=DRM_I915_KMS from .config, " "1=on, -1=force vga console preference [default])"); unsigned int i915_fbpercrtc __always_unused = 0; module_param_named(fbpercrtc, i915_fbpercrtc, int, 0400); int i915_panel_ignore_lid __read_mostly = 0; module_param_named(panel_ignore_lid, i915_panel_ignore_lid, int, 0600); MODULE_PARM_DESC(panel_ignore_lid, "Override lid status (0=autodetect [default], 1=lid open, " "-1=lid closed)"); unsigned int i915_powersave __read_mostly = 1; module_param_named(powersave, i915_powersave, int, 0600); MODULE_PARM_DESC(powersave, "Enable powersavings, fbc, downclocking, etc. (default: true)"); int i915_semaphores __read_mostly = -1; module_param_named(semaphores, i915_semaphores, int, 0600); MODULE_PARM_DESC(semaphores, "Use semaphores for inter-ring sync (default: -1 (use per-chip defaults))"); int i915_enable_rc6 __read_mostly = -1; module_param_named(i915_enable_rc6, i915_enable_rc6, int, 0400); MODULE_PARM_DESC(i915_enable_rc6, "Enable power-saving render C-state 6. " "Different stages can be selected via bitmask values " "(0 = disable; 1 = enable rc6; 2 = enable deep rc6; 4 = enable deepest rc6). " "For example, 3 would enable rc6 and deep rc6, and 7 would enable everything. 
" "default: -1 (use per-chip default)"); int i915_enable_fbc __read_mostly = -1; module_param_named(i915_enable_fbc, i915_enable_fbc, int, 0600); MODULE_PARM_DESC(i915_enable_fbc, "Enable frame buffer compression for power savings " "(default: -1 (use per-chip default))"); unsigned int i915_lvds_downclock __read_mostly = 0; module_param_named(lvds_downclock, i915_lvds_downclock, int, 0400); MODULE_PARM_DESC(lvds_downclock, "Use panel (LVDS/eDP) downclocking for power savings " "(default: false)"); int i915_panel_use_ssc __read_mostly = -1; module_param_named(lvds_use_ssc, i915_panel_use_ssc, int, 0600); MODULE_PARM_DESC(lvds_use_ssc, "Use Spread Spectrum Clock with panels [LVDS/eDP] " "(default: auto from VBT)"); int i915_vbt_sdvo_panel_type __read_mostly = -1; module_param_named(vbt_sdvo_panel_type, i915_vbt_sdvo_panel_type, int, 0600); MODULE_PARM_DESC(vbt_sdvo_panel_type, "Override selection of SDVO panel mode in the VBT " "(default: auto)"); static bool i915_try_reset __read_mostly = true; module_param_named(reset, i915_try_reset, bool, 0600); MODULE_PARM_DESC(reset, "Attempt GPU resets (default: true)"); bool i915_enable_hangcheck __read_mostly = true; module_param_named(enable_hangcheck, i915_enable_hangcheck, bool, 0644); MODULE_PARM_DESC(enable_hangcheck, "Periodically check GPU activity for detecting hangs. " "WARNING: Disabling this can cause system wide hangs. 
" "(default: true)"); int i915_enable_ppgtt __read_mostly = -1; module_param_named(i915_enable_ppgtt, i915_enable_ppgtt, int, 0600); MODULE_PARM_DESC(i915_enable_ppgtt, "Enable PPGTT (default: true)"); static struct drm_driver driver; extern int intel_agp_enabled; #define INTEL_VGA_DEVICE(id, info) { \ .class = PCI_BASE_CLASS_DISPLAY << 16, \ .class_mask = 0xff0000, \ .vendor = 0x8086, \ .device = id, \ .subvendor = PCI_ANY_ID, \ .subdevice = PCI_ANY_ID, \ .driver_data = (unsigned long) info } static const struct intel_device_info intel_i830_info = { .gen = 2, .is_mobile = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_845g_info = { .gen = 2, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i85x_info = { .gen = 2, .is_i85x = 1, .is_mobile = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i865g_info = { .gen = 2, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i915g_info = { .gen = 3, .is_i915g = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i915gm_info = { .gen = 3, .is_mobile = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, .supports_tv = 1, }; static const struct intel_device_info intel_i945g_info = { .gen = 3, .has_hotplug = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, }; static const struct intel_device_info intel_i945gm_info = { .gen = 3, .is_i945gm = 1, .is_mobile = 1, .has_hotplug = 1, .cursor_needs_physical = 1, .has_overlay = 1, .overlay_needs_physical = 1, .supports_tv = 1, }; static const struct intel_device_info intel_i965g_info = { .gen = 4, .is_broadwater = 1, .has_hotplug = 1, .has_overlay = 1, }; static const struct intel_device_info intel_i965gm_info = { .gen = 4, 
.is_crestline = 1, .is_mobile = 1, .has_fbc = 1, .has_hotplug = 1, .has_overlay = 1, .supports_tv = 1, }; static const struct intel_device_info intel_g33_info = { .gen = 3, .is_g33 = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_overlay = 1, }; static const struct intel_device_info intel_g45_info = { .gen = 4, .is_g4x = 1, .need_gfx_hws = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, .has_bsd_ring = 1, }; static const struct intel_device_info intel_gm45_info = { .gen = 4, .is_g4x = 1, .is_mobile = 1, .need_gfx_hws = 1, .has_fbc = 1, .has_pipe_cxsr = 1, .has_hotplug = 1, .supports_tv = 1, .has_bsd_ring = 1, }; static const struct intel_device_info intel_pineview_info = { .gen = 3, .is_g33 = 1, .is_pineview = 1, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_overlay = 1, }; static const struct intel_device_info intel_ironlake_d_info = { .gen = 5, .need_gfx_hws = 1, .has_hotplug = 1, .has_bsd_ring = 1, }; static const struct intel_device_info intel_ironlake_m_info = { .gen = 5, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_fbc = 1, .has_bsd_ring = 1, }; static const struct intel_device_info intel_sandybridge_d_info = { .gen = 6, .need_gfx_hws = 1, .has_hotplug = 1, .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, }; static const struct intel_device_info intel_sandybridge_m_info = { .gen = 6, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_fbc = 1, .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, }; static const struct intel_device_info intel_ivybridge_d_info = { .is_ivybridge = 1, .gen = 7, .need_gfx_hws = 1, .has_hotplug = 1, .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, }; static const struct intel_device_info intel_ivybridge_m_info = { .is_ivybridge = 1, .gen = 7, .is_mobile = 1, .need_gfx_hws = 1, .has_hotplug = 1, .has_fbc = 0, /* FBC is not enabled on Ivybridge mobile yet */ .has_bsd_ring = 1, .has_blt_ring = 1, .has_llc = 1, }; static const struct pci_device_id pciidlist[] = { /* aka */ INTEL_VGA_DEVICE(0x3577, 
&intel_i830_info), /* I830_M */ INTEL_VGA_DEVICE(0x2562, &intel_845g_info), /* 845_G */ INTEL_VGA_DEVICE(0x3582, &intel_i85x_info), /* I855_GM */ INTEL_VGA_DEVICE(0x358e, &intel_i85x_info), INTEL_VGA_DEVICE(0x2572, &intel_i865g_info), /* I865_G */ INTEL_VGA_DEVICE(0x2582, &intel_i915g_info), /* I915_G */ INTEL_VGA_DEVICE(0x258a, &intel_i915g_info), /* E7221_G */ INTEL_VGA_DEVICE(0x2592, &intel_i915gm_info), /* I915_GM */ INTEL_VGA_DEVICE(0x2772, &intel_i945g_info), /* I945_G */ INTEL_VGA_DEVICE(0x27a2, &intel_i945gm_info), /* I945_GM */ INTEL_VGA_DEVICE(0x27ae, &intel_i945gm_info), /* I945_GME */ INTEL_VGA_DEVICE(0x2972, &intel_i965g_info), /* I946_GZ */ INTEL_VGA_DEVICE(0x2982, &intel_i965g_info), /* G35_G */ INTEL_VGA_DEVICE(0x2992, &intel_i965g_info), /* I965_Q */ INTEL_VGA_DEVICE(0x29a2, &intel_i965g_info), /* I965_G */ INTEL_VGA_DEVICE(0x29b2, &intel_g33_info), /* Q35_G */ INTEL_VGA_DEVICE(0x29c2, &intel_g33_info), /* G33_G */ INTEL_VGA_DEVICE(0x29d2, &intel_g33_info), /* Q33_G */ INTEL_VGA_DEVICE(0x2a02, &intel_i965gm_info), /* I965_GM */ INTEL_VGA_DEVICE(0x2a12, &intel_i965gm_info), /* I965_GME */ INTEL_VGA_DEVICE(0x2a42, &intel_gm45_info), /* GM45_G */ INTEL_VGA_DEVICE(0x2e02, &intel_g45_info), /* IGD_E_G */ INTEL_VGA_DEVICE(0x2e12, &intel_g45_info), /* Q45_G */ INTEL_VGA_DEVICE(0x2e22, &intel_g45_info), /* G45_G */ INTEL_VGA_DEVICE(0x2e32, &intel_g45_info), /* G41_G */ INTEL_VGA_DEVICE(0x2e42, &intel_g45_info), /* B43_G */ INTEL_VGA_DEVICE(0x2e92, &intel_g45_info), /* B43_G.1 */ INTEL_VGA_DEVICE(0xa001, &intel_pineview_info), INTEL_VGA_DEVICE(0xa011, &intel_pineview_info), INTEL_VGA_DEVICE(0x0042, &intel_ironlake_d_info), INTEL_VGA_DEVICE(0x0046, &intel_ironlake_m_info), INTEL_VGA_DEVICE(0x0102, &intel_sandybridge_d_info), INTEL_VGA_DEVICE(0x0112, &intel_sandybridge_d_info), INTEL_VGA_DEVICE(0x0122, &intel_sandybridge_d_info), INTEL_VGA_DEVICE(0x0106, &intel_sandybridge_m_info), INTEL_VGA_DEVICE(0x0116, &intel_sandybridge_m_info), INTEL_VGA_DEVICE(0x0126, 
&intel_sandybridge_m_info), INTEL_VGA_DEVICE(0x010A, &intel_sandybridge_d_info), INTEL_VGA_DEVICE(0x0156, &intel_ivybridge_m_info), /* GT1 mobile */ INTEL_VGA_DEVICE(0x0166, &intel_ivybridge_m_info), /* GT2 mobile */ INTEL_VGA_DEVICE(0x0152, &intel_ivybridge_d_info), /* GT1 desktop */ INTEL_VGA_DEVICE(0x0162, &intel_ivybridge_d_info), /* GT2 desktop */ INTEL_VGA_DEVICE(0x015a, &intel_ivybridge_d_info), /* GT1 server */ INTEL_VGA_DEVICE(0x016a, &intel_ivybridge_d_info), /* GT2 server */ {0, 0, 0} }; #if defined(CONFIG_DRM_I915_KMS) MODULE_DEVICE_TABLE(pci, pciidlist); #endif #define INTEL_PCH_DEVICE_ID_MASK 0xff00 #define INTEL_PCH_IBX_DEVICE_ID_TYPE 0x3b00 #define INTEL_PCH_CPT_DEVICE_ID_TYPE 0x1c00 #define INTEL_PCH_PPT_DEVICE_ID_TYPE 0x1e00 void intel_detect_pch(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; struct pci_dev *pch; /* * The reason to probe ISA bridge instead of Dev31:Fun0 is to * make graphics device passthrough work easy for VMM, that only * need to expose ISA bridge to let driver know the real hardware * underneath. This is a requirement from virtualization team. 
*/ pch = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL); if (pch) { if (pch->vendor == PCI_VENDOR_ID_INTEL) { int id; id = pch->device & INTEL_PCH_DEVICE_ID_MASK; if (id == INTEL_PCH_IBX_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_IBX; DRM_DEBUG_KMS("Found Ibex Peak PCH\n"); } else if (id == INTEL_PCH_CPT_DEVICE_ID_TYPE) { dev_priv->pch_type = PCH_CPT; DRM_DEBUG_KMS("Found CougarPoint PCH\n"); } else if (id == INTEL_PCH_PPT_DEVICE_ID_TYPE) { /* PantherPoint is CPT compatible */ dev_priv->pch_type = PCH_CPT; DRM_DEBUG_KMS("Found PatherPoint PCH\n"); } } pci_dev_put(pch); } } void __gen6_gt_force_wake_get(struct drm_i915_private *dev_priv) { int count; count = 0; while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1)) udelay(10); I915_WRITE_NOTRACE(FORCEWAKE, 1); POSTING_READ(FORCEWAKE); count = 0; while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_ACK) & 1) == 0) udelay(10); } void __gen6_gt_force_wake_mt_get(struct drm_i915_private *dev_priv) { int count; count = 0; while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1)) udelay(10); I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 1); POSTING_READ(FORCEWAKE_MT); count = 0; while (count++ < 50 && (I915_READ_NOTRACE(FORCEWAKE_MT_ACK) & 1) == 0) udelay(10); } /* * Generally this is called implicitly by the register read function. However, * if some sequence requires the GT to not power down then this function should * be called at the beginning of the sequence followed by a call to * gen6_gt_force_wake_put() at the end of the sequence. 
 */
/* Reference-counted forcewake acquire: the chip-specific get hook runs
 * only on the 0 -> 1 transition, under gt_lock. */
void gen6_gt_force_wake_get(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
	if (dev_priv->forcewake_count++ == 0)
		dev_priv->display.force_wake_get(dev_priv);
	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}

/* Warn (and clear the sticky error bit) if the GT FIFO dropped an MMIO
 * access; the register read doubles as a posting read for callers. */
static void gen6_gt_check_fifodbg(struct drm_i915_private *dev_priv)
{
	u32 gtfifodbg;
	gtfifodbg = I915_READ_NOTRACE(GTFIFODBG);
	if (WARN(gtfifodbg & GT_FIFO_CPU_ERROR_MASK,
	     "MMIO read or write has been dropped %x\n", gtfifodbg))
		I915_WRITE_NOTRACE(GTFIFODBG, GT_FIFO_CPU_ERROR_MASK);
}

void __gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE, 0);
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}

void __gen6_gt_force_wake_mt_put(struct drm_i915_private *dev_priv)
{
	I915_WRITE_NOTRACE(FORCEWAKE_MT, (1<<16) | 0);
	/* The below doubles as a POSTING_READ */
	gen6_gt_check_fifodbg(dev_priv);
}

/*
 * see gen6_gt_force_wake_get()
 */
/* Reference-counted release: the chip-specific put hook runs only on the
 * 1 -> 0 transition, under gt_lock. */
void gen6_gt_force_wake_put(struct drm_i915_private *dev_priv)
{
	unsigned long irqflags;

	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);
	if (--dev_priv->forcewake_count == 0)
		dev_priv->display.force_wake_put(dev_priv);
	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
}

/* Reserve one GT FIFO entry before an MMIO write; when the cached free
 * count drops below the reserved threshold, poll the hardware (up to
 * 500 x 10us) for space.  Returns nonzero if the FIFO never drained. */
int __gen6_gt_wait_for_fifo(struct drm_i915_private *dev_priv)
{
	int ret = 0;

	if (dev_priv->gt_fifo_count < GT_FIFO_NUM_RESERVED_ENTRIES) {
		int loop = 500;
		u32 fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		while (fifo <= GT_FIFO_NUM_RESERVED_ENTRIES && loop--) {
			udelay(10);
			fifo = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);
		}
		if (WARN_ON(loop < 0 && fifo <= GT_FIFO_NUM_RESERVED_ENTRIES))
			++ret;
		dev_priv->gt_fifo_count = fifo;
	}
	dev_priv->gt_fifo_count--;

	return ret;
}

/* Common suspend path: quiesce GEM/IRQs (KMS only), save register and
 * opregion state, and suspend the fbdev console. */
static int i915_drm_freeze(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	drm_kms_helper_poll_disable(dev);

	pci_save_state(dev->pdev);

	/* If KMS is active, we do the leavevt stuff here */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		int error = i915_gem_idle(dev);
		if (error) {
			dev_err(&dev->pdev->dev,
				"GEM idle failed, resume might fail\n");
			return error;
		}
		drm_irq_uninstall(dev);
	}

	i915_save_state(dev);

	intel_opregion_fini(dev);

	/* Modeset on resume, not lid events */
	dev_priv->modeset_on_lid = 0;

	console_lock();
	intel_fbdev_set_suspend(dev, 1);
	console_unlock();

	return 0;
}

/* Legacy DRM suspend entry point; powers the PCI device down only for a
 * real PM_EVENT_SUSPEND (not PRETHAW), and is a no-op when vga_switcheroo
 * has already powered the GPU off. */
int i915_suspend(struct drm_device *dev, pm_message_t state)
{
	int error;

	if (!dev || !dev->dev_private) {
		DRM_ERROR("dev: %p\n", dev);
		DRM_ERROR("DRM not initialized, aborting suspend.\n");
		return -ENODEV;
	}

	if (state.event == PM_EVENT_PRETHAW)
		return 0;


	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	error = i915_drm_freeze(dev);
	if (error)
		return error;

	if (state.event == PM_EVENT_SUSPEND) {
		/* Shut down the device */
		pci_disable_device(dev->pdev);
		pci_set_power_state(dev->pdev, PCI_D3hot);
	}

	return 0;
}

/* Common resume path: restore GTT mappings and saved state, re-init the
 * hardware and mode configuration (KMS), and wake the fbdev console. */
static int i915_drm_thaw(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int error = 0;

	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);
		i915_gem_restore_gtt_mappings(dev);
		mutex_unlock(&dev->struct_mutex);
	}

	i915_restore_state(dev);
	intel_opregion_setup(dev);

	/* KMS EnterVT equivalent */
	if (drm_core_check_feature(dev, DRIVER_MODESET)) {
		mutex_lock(&dev->struct_mutex);
		dev_priv->mm.suspended = 0;

		error = i915_gem_init_hw(dev);
		mutex_unlock(&dev->struct_mutex);

		if (HAS_PCH_SPLIT(dev))
			ironlake_init_pch_refclk(dev);

		drm_mode_config_reset(dev);
		drm_irq_install(dev);

		/* Resume the modeset for every activated CRTC */
		mutex_lock(&dev->mode_config.mutex);
		drm_helper_resume_force_mode(dev);
		mutex_unlock(&dev->mode_config.mutex);

		if (IS_IRONLAKE_M(dev))
			ironlake_enable_rc6(dev);
	}

	intel_opregion_init(dev);

	dev_priv->modeset_on_lid = 0;

	console_lock();
	intel_fbdev_set_suspend(dev, 0);
	console_unlock();
	return error;
}

/* Legacy DRM resume entry point: re-enable the PCI device, thaw, and
 * restart output polling. */
int i915_resume(struct drm_device *dev)
{
	int ret;

	if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
		return 0;

	if (pci_enable_device(dev->pdev))
		return -EIO;

	pci_set_master(dev->pdev);

	ret = i915_drm_thaw(dev);
	if (ret)
		return ret;

	drm_kms_helper_poll_enable(dev);
	return 0;
}

/* GPU reset for gen2 parts via D_STATE/DEBUG_RESET registers; i85x has
 * no supported reset path. */
static int i8xx_do_reset(struct drm_device *dev, u8 flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (IS_I85X(dev))
		return -ENODEV;

	I915_WRITE(D_STATE, I915_READ(D_STATE) | DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	if (IS_I830(dev) || IS_845G(dev)) {
		I915_WRITE(DEBUG_RESET_I830,
			   DEBUG_RESET_DISPLAY |
			   DEBUG_RESET_RENDER |
			   DEBUG_RESET_FULL);
		POSTING_READ(DEBUG_RESET_I830);
		msleep(1);

		I915_WRITE(DEBUG_RESET_I830, 0);
		POSTING_READ(DEBUG_RESET_I830);
	}

	msleep(1);

	I915_WRITE(D_STATE, I915_READ(D_STATE) & ~DSTATE_GFX_RESET_I830);
	POSTING_READ(D_STATE);

	return 0;
}

/* Hardware clears GR (bit 0) of GDRST in PCI config space when the
 * reset completes. */
static int i965_reset_complete(struct drm_device *dev)
{
	u8 gdrst;
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	return gdrst & 0x1;
}

static int i965_do_reset(struct drm_device *dev, u8 flags)
{
	u8 gdrst;

	/*
	 * Set the domains we want to reset (GRDOM/bits 2 and 3) as
	 * well as the reset bit (GR/bit 0).  Setting the GR bit
	 * triggers the reset; when done, the hardware will clear it.
	 */
	pci_read_config_byte(dev->pdev, I965_GDRST, &gdrst);
	pci_write_config_byte(dev->pdev, I965_GDRST, gdrst | flags | 0x1);

	return wait_for(i965_reset_complete(dev), 500);
}

/* Gen5 reset via the MCHBAR-mirrored ILK_GDSR register; waits up to
 * 500ms for the reset bit to latch. */
static int ironlake_do_reset(struct drm_device *dev, u8 flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	u32 gdrst = I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR);
	I915_WRITE(MCHBAR_MIRROR_BASE + ILK_GDSR, gdrst | flags | 0x1);
	return wait_for(I915_READ(MCHBAR_MIRROR_BASE + ILK_GDSR) & 0x1, 500);
}

static int gen6_do_reset(struct drm_device *dev, u8 flags)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	int	ret;
	unsigned long irqflags;

	/* Hold gt_lock across reset to prevent any register access
	 * with forcewake not set correctly
	 */
	spin_lock_irqsave(&dev_priv->gt_lock, irqflags);

	/* Reset the chip */

	/* GEN6_GDRST is not in the gt power well, no need to check
	 * for fifo space for the write or forcewake the chip for
	 * the read
	 */
	I915_WRITE_NOTRACE(GEN6_GDRST, GEN6_GRDOM_FULL);

	/* Spin waiting for the device to ack the reset request */
	ret = wait_for((I915_READ_NOTRACE(GEN6_GDRST) & GEN6_GRDOM_FULL) == 0, 500);

	/* If reset with a user forcewake, try to restore, otherwise turn it off */
	if (dev_priv->forcewake_count)
		dev_priv->display.force_wake_get(dev_priv);
	else
		dev_priv->display.force_wake_put(dev_priv);

	/* Restore fifo count */
	dev_priv->gt_fifo_count = I915_READ_NOTRACE(GT_FIFO_FREE_ENTRIES);

	spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags);
	return ret;
}

/**
 * i915_reset - reset chip after a hang
 * @dev: drm device to reset
 * @flags: reset domains
 *
 * Reset the chip.  Useful if a hang is detected. Returns zero on successful
 * reset or otherwise an error code.
* * Procedure is fairly simple: * - reset the chip using the reset reg * - re-init context state * - re-init hardware status page * - re-init ring buffer * - re-init interrupt state * - re-init display */ int i915_reset(struct drm_device *dev, u8 flags) { drm_i915_private_t *dev_priv = dev->dev_private; /* * We really should only reset the display subsystem if we actually * need to */ bool need_display = true; int ret; if (!i915_try_reset) return 0; if (!mutex_trylock(&dev->struct_mutex)) return -EBUSY; i915_gem_reset(dev); ret = -ENODEV; if (get_seconds() - dev_priv->last_gpu_reset < 5) { DRM_ERROR("GPU hanging too fast, declaring wedged!\n"); } else switch (INTEL_INFO(dev)->gen) { case 7: case 6: ret = gen6_do_reset(dev, flags); break; case 5: ret = ironlake_do_reset(dev, flags); break; case 4: ret = i965_do_reset(dev, flags); break; case 2: ret = i8xx_do_reset(dev, flags); break; } dev_priv->last_gpu_reset = get_seconds(); if (ret) { DRM_ERROR("Failed to reset chip.\n"); mutex_unlock(&dev->struct_mutex); return ret; } /* Ok, now get things going again... */ /* * Everything depends on having the GTT running, so we need to start * there. Fortunately we don't need to do this unless we reset the * chip at a PCI level. * * Next we need to restore the context, but we don't use those * yet either... * * Ring buffer needs to be re-initialized in the KMS case, or if X * was running at the time of the reset (i.e. we weren't VT * switched away). 
*/ if (drm_core_check_feature(dev, DRIVER_MODESET) || !dev_priv->mm.suspended) { dev_priv->mm.suspended = 0; i915_gem_init_swizzling(dev); dev_priv->ring[RCS].init(&dev_priv->ring[RCS]); if (HAS_BSD(dev)) dev_priv->ring[VCS].init(&dev_priv->ring[VCS]); if (HAS_BLT(dev)) dev_priv->ring[BCS].init(&dev_priv->ring[BCS]); i915_gem_init_ppgtt(dev); mutex_unlock(&dev->struct_mutex); drm_irq_uninstall(dev); drm_mode_config_reset(dev); drm_irq_install(dev); mutex_lock(&dev->struct_mutex); } mutex_unlock(&dev->struct_mutex); /* * Perform a full modeset as on later generations, e.g. Ironlake, we may * need to retrain the display link and cannot just restore the register * values. */ if (need_display) { mutex_lock(&dev->mode_config.mutex); drm_helper_resume_force_mode(dev); mutex_unlock(&dev->mode_config.mutex); } return 0; } static int __devinit i915_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent) { /* Only bind to function 0 of the device. Early generations * used function 1 as a placeholder for multi-head. This causes * us confusion instead, especially on the systems where both * functions have the same PCI-ID! 
*/ if (PCI_FUNC(pdev->devfn)) return -ENODEV; return drm_get_pci_dev(pdev, ent, &driver); } static void i915_pci_remove(struct pci_dev *pdev) { struct drm_device *dev = pci_get_drvdata(pdev); drm_put_dev(dev); } static int i915_pm_suspend(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); int error; if (!drm_dev || !drm_dev->dev_private) { dev_err(dev, "DRM not initialized, aborting suspend.\n"); return -ENODEV; } if (drm_dev->switch_power_state == DRM_SWITCH_POWER_OFF) return 0; error = i915_drm_freeze(drm_dev); if (error) return error; pci_disable_device(pdev); pci_set_power_state(pdev, PCI_D3hot); return 0; } static int i915_pm_resume(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); return i915_resume(drm_dev); } static int i915_pm_freeze(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); if (!drm_dev || !drm_dev->dev_private) { dev_err(dev, "DRM not initialized, aborting suspend.\n"); return -ENODEV; } return i915_drm_freeze(drm_dev); } static int i915_pm_thaw(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); return i915_drm_thaw(drm_dev); } static int i915_pm_poweroff(struct device *dev) { struct pci_dev *pdev = to_pci_dev(dev); struct drm_device *drm_dev = pci_get_drvdata(pdev); return i915_drm_freeze(drm_dev); } static const struct dev_pm_ops i915_pm_ops = { .suspend = i915_pm_suspend, .resume = i915_pm_resume, .freeze = i915_pm_freeze, .thaw = i915_pm_thaw, .poweroff = i915_pm_poweroff, .restore = i915_pm_resume, }; static struct vm_operations_struct i915_gem_vm_ops = { .fault = i915_gem_fault, .open = drm_gem_vm_open, .close = drm_gem_vm_close, }; static const struct file_operations i915_driver_fops = { .owner = THIS_MODULE, .open = drm_open, .release = drm_release, .unlocked_ioctl = drm_ioctl, .mmap = 
drm_gem_mmap, .poll = drm_poll, .fasync = drm_fasync, .read = drm_read, #ifdef CONFIG_COMPAT .compat_ioctl = i915_compat_ioctl, #endif .llseek = noop_llseek, }; static struct drm_driver driver = { /* Don't use MTRRs here; the Xserver or userspace app should * deal with them for Intel hardware. */ .driver_features = DRIVER_USE_AGP | DRIVER_REQUIRE_AGP | /* DRIVER_USE_MTRR |*/ DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED | DRIVER_GEM, .load = i915_driver_load, .unload = i915_driver_unload, .open = i915_driver_open, .lastclose = i915_driver_lastclose, .preclose = i915_driver_preclose, .postclose = i915_driver_postclose, /* Used in place of i915_pm_ops for non-DRIVER_MODESET */ .suspend = i915_suspend, .resume = i915_resume, .device_is_agp = i915_driver_device_is_agp, .reclaim_buffers = drm_core_reclaim_buffers, .master_create = i915_master_create, .master_destroy = i915_master_destroy, #if defined(CONFIG_DEBUG_FS) .debugfs_init = i915_debugfs_init, .debugfs_cleanup = i915_debugfs_cleanup, #endif .gem_init_object = i915_gem_init_object, .gem_free_object = i915_gem_free_object, .gem_vm_ops = &i915_gem_vm_ops, .dumb_create = i915_gem_dumb_create, .dumb_map_offset = i915_gem_mmap_gtt, .dumb_destroy = i915_gem_dumb_destroy, .ioctls = i915_ioctls, .fops = &i915_driver_fops, .name = DRIVER_NAME, .desc = DRIVER_DESC, .date = DRIVER_DATE, .major = DRIVER_MAJOR, .minor = DRIVER_MINOR, .patchlevel = DRIVER_PATCHLEVEL, }; static struct pci_driver i915_pci_driver = { .name = DRIVER_NAME, .id_table = pciidlist, .probe = i915_pci_probe, .remove = i915_pci_remove, .driver.pm = &i915_pm_ops, }; static int __init i915_init(void) { if (!intel_agp_enabled) { DRM_ERROR("drm/i915 can't work without intel_agp module!\n"); return -ENODEV; } driver.num_ioctls = i915_max_ioctl; /* * If CONFIG_DRM_I915_KMS is set, default to KMS unless * explicitly disabled with the module pararmeter. * * Otherwise, just follow the parameter (defaulting to off). 
* * Allow optional vga_text_mode_force boot option to override * the default behavior. */ #if defined(CONFIG_DRM_I915_KMS) if (i915_modeset != 0) driver.driver_features |= DRIVER_MODESET; #endif if (i915_modeset == 1) driver.driver_features |= DRIVER_MODESET; #ifdef CONFIG_VGA_CONSOLE if (vgacon_text_force() && i915_modeset == -1) driver.driver_features &= ~DRIVER_MODESET; #endif if (!(driver.driver_features & DRIVER_MODESET)) driver.get_vblank_timestamp = NULL; return drm_pci_init(&driver, &i915_pci_driver); } static void __exit i915_exit(void) { drm_pci_exit(&driver, &i915_pci_driver); } module_init(i915_init); module_exit(i915_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL and additional rights"); #define __i915_read(x, y) \ u##x i915_read##x(struct drm_i915_private *dev_priv, u32 reg) { \ u##x val = 0; \ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ unsigned long irqflags; \ spin_lock_irqsave(&dev_priv->gt_lock, irqflags); \ if (dev_priv->forcewake_count == 0) \ dev_priv->display.force_wake_get(dev_priv); \ val = read##y(dev_priv->regs + reg); \ if (dev_priv->forcewake_count == 0) \ dev_priv->display.force_wake_put(dev_priv); \ spin_unlock_irqrestore(&dev_priv->gt_lock, irqflags); \ } else { \ val = read##y(dev_priv->regs + reg); \ } \ trace_i915_reg_rw(false, reg, val, sizeof(val)); \ return val; \ } __i915_read(8, b) __i915_read(16, w) __i915_read(32, l) __i915_read(64, q) #undef __i915_read #define __i915_write(x, y) \ void i915_write##x(struct drm_i915_private *dev_priv, u32 reg, u##x val) { \ u32 __fifo_ret = 0; \ trace_i915_reg_rw(true, reg, val, sizeof(val)); \ if (NEEDS_FORCE_WAKE((dev_priv), (reg))) { \ __fifo_ret = __gen6_gt_wait_for_fifo(dev_priv); \ } \ write##y(val, dev_priv->regs + reg); \ if (unlikely(__fifo_ret)) { \ gen6_gt_check_fifodbg(dev_priv); \ } \ } __i915_write(8, b) __i915_write(16, w) __i915_write(32, l) __i915_write(64, q) #undef __i915_write
gpl-2.0
detule/lge-linux-msm
drivers/scsi/aic94xx/aic94xx_task.c
4538
17434
/* * Aic94xx SAS/SATA Tasks * * Copyright (C) 2005 Adaptec, Inc. All rights reserved. * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com> * * This file is licensed under GPLv2. * * This file is part of the aic94xx driver. * * The aic94xx driver is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; version 2 of the * License. * * The aic94xx driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with the aic94xx driver; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/spinlock.h> #include "aic94xx.h" #include "aic94xx_sas.h" #include "aic94xx_hwi.h" static void asd_unbuild_ata_ascb(struct asd_ascb *a); static void asd_unbuild_smp_ascb(struct asd_ascb *a); static void asd_unbuild_ssp_ascb(struct asd_ascb *a); static void asd_can_dequeue(struct asd_ha_struct *asd_ha, int num) { unsigned long flags; spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags); asd_ha->seq.can_queue += num; spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); } /* PCI_DMA_... to our direction translation. 
*/ static const u8 data_dir_flags[] = { [PCI_DMA_BIDIRECTIONAL] = DATA_DIR_BYRECIPIENT, /* UNSPECIFIED */ [PCI_DMA_TODEVICE] = DATA_DIR_OUT, /* OUTBOUND */ [PCI_DMA_FROMDEVICE] = DATA_DIR_IN, /* INBOUND */ [PCI_DMA_NONE] = DATA_DIR_NONE, /* NO TRANSFER */ }; static int asd_map_scatterlist(struct sas_task *task, struct sg_el *sg_arr, gfp_t gfp_flags) { struct asd_ascb *ascb = task->lldd_task; struct asd_ha_struct *asd_ha = ascb->ha; struct scatterlist *sc; int num_sg, res; if (task->data_dir == PCI_DMA_NONE) return 0; if (task->num_scatter == 0) { void *p = task->scatter; dma_addr_t dma = pci_map_single(asd_ha->pcidev, p, task->total_xfer_len, task->data_dir); sg_arr[0].bus_addr = cpu_to_le64((u64)dma); sg_arr[0].size = cpu_to_le32(task->total_xfer_len); sg_arr[0].flags |= ASD_SG_EL_LIST_EOL; return 0; } /* STP tasks come from libata which has already mapped * the SG list */ if (sas_protocol_ata(task->task_proto)) num_sg = task->num_scatter; else num_sg = pci_map_sg(asd_ha->pcidev, task->scatter, task->num_scatter, task->data_dir); if (num_sg == 0) return -ENOMEM; if (num_sg > 3) { int i; ascb->sg_arr = asd_alloc_coherent(asd_ha, num_sg*sizeof(struct sg_el), gfp_flags); if (!ascb->sg_arr) { res = -ENOMEM; goto err_unmap; } for_each_sg(task->scatter, sc, num_sg, i) { struct sg_el *sg = &((struct sg_el *)ascb->sg_arr->vaddr)[i]; sg->bus_addr = cpu_to_le64((u64)sg_dma_address(sc)); sg->size = cpu_to_le32((u32)sg_dma_len(sc)); if (i == num_sg-1) sg->flags |= ASD_SG_EL_LIST_EOL; } for_each_sg(task->scatter, sc, 2, i) { sg_arr[i].bus_addr = cpu_to_le64((u64)sg_dma_address(sc)); sg_arr[i].size = cpu_to_le32((u32)sg_dma_len(sc)); } sg_arr[1].next_sg_offs = 2 * sizeof(*sg_arr); sg_arr[1].flags |= ASD_SG_EL_LIST_EOS; memset(&sg_arr[2], 0, sizeof(*sg_arr)); sg_arr[2].bus_addr=cpu_to_le64((u64)ascb->sg_arr->dma_handle); } else { int i; for_each_sg(task->scatter, sc, num_sg, i) { sg_arr[i].bus_addr = cpu_to_le64((u64)sg_dma_address(sc)); sg_arr[i].size = 
cpu_to_le32((u32)sg_dma_len(sc)); } sg_arr[i-1].flags |= ASD_SG_EL_LIST_EOL; } return 0; err_unmap: if (sas_protocol_ata(task->task_proto)) pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter, task->data_dir); return res; } static void asd_unmap_scatterlist(struct asd_ascb *ascb) { struct asd_ha_struct *asd_ha = ascb->ha; struct sas_task *task = ascb->uldd_task; if (task->data_dir == PCI_DMA_NONE) return; if (task->num_scatter == 0) { dma_addr_t dma = (dma_addr_t) le64_to_cpu(ascb->scb->ssp_task.sg_element[0].bus_addr); pci_unmap_single(ascb->ha->pcidev, dma, task->total_xfer_len, task->data_dir); return; } asd_free_coherent(asd_ha, ascb->sg_arr); if (task->task_proto != SAS_PROTOCOL_STP) pci_unmap_sg(asd_ha->pcidev, task->scatter, task->num_scatter, task->data_dir); } /* ---------- Task complete tasklet ---------- */ static void asd_get_response_tasklet(struct asd_ascb *ascb, struct done_list_struct *dl) { struct asd_ha_struct *asd_ha = ascb->ha; struct sas_task *task = ascb->uldd_task; struct task_status_struct *ts = &task->task_status; unsigned long flags; struct tc_resp_sb_struct { __le16 index_escb; u8 len_lsb; u8 flags; } __attribute__ ((packed)) *resp_sb = (void *) dl->status_block; /* int size = ((resp_sb->flags & 7) << 8) | resp_sb->len_lsb; */ int edb_id = ((resp_sb->flags & 0x70) >> 4)-1; struct asd_ascb *escb; struct asd_dma_tok *edb; void *r; spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags); escb = asd_tc_index_find(&asd_ha->seq, (int)le16_to_cpu(resp_sb->index_escb)); spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags); if (!escb) { ASD_DPRINTK("Uh-oh! 
No escb for this dl?!\n"); return; } ts->buf_valid_size = 0; edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index]; r = edb->vaddr; if (task->task_proto == SAS_PROTOCOL_SSP) { struct ssp_response_iu *iu = r + 16 + sizeof(struct ssp_frame_hdr); ts->residual = le32_to_cpu(*(__le32 *)r); sas_ssp_task_response(&asd_ha->pcidev->dev, task, iu); } else { struct ata_task_resp *resp = (void *) &ts->buf[0]; ts->residual = le32_to_cpu(*(__le32 *)r); if (SAS_STATUS_BUF_SIZE >= sizeof(*resp)) { resp->frame_len = le16_to_cpu(*(__le16 *)(r+6)); memcpy(&resp->ending_fis[0], r+16, ATA_RESP_FIS_SIZE); ts->buf_valid_size = sizeof(*resp); } } asd_invalidate_edb(escb, edb_id); } static void asd_task_tasklet_complete(struct asd_ascb *ascb, struct done_list_struct *dl) { struct sas_task *task = ascb->uldd_task; struct task_status_struct *ts = &task->task_status; unsigned long flags; u8 opcode = dl->opcode; asd_can_dequeue(ascb->ha, 1); Again: switch (opcode) { case TC_NO_ERROR: ts->resp = SAS_TASK_COMPLETE; ts->stat = SAM_STAT_GOOD; break; case TC_UNDERRUN: ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DATA_UNDERRUN; ts->residual = le32_to_cpu(*(__le32 *)dl->status_block); break; case TC_OVERRUN: ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_DATA_OVERRUN; ts->residual = 0; break; case TC_SSP_RESP: case TC_ATA_RESP: ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_PROTO_RESPONSE; asd_get_response_tasklet(ascb, dl); break; case TF_OPEN_REJECT: ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_OPEN_REJECT; if (dl->status_block[1] & 2) ts->open_rej_reason = 1 + dl->status_block[2]; else if (dl->status_block[1] & 1) ts->open_rej_reason = (dl->status_block[2] >> 4)+10; else ts->open_rej_reason = SAS_OREJ_UNKNOWN; break; case TF_OPEN_TO: ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_OPEN_TO; break; case TF_PHY_DOWN: case TU_PHY_DOWN: ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_PHY_DOWN; break; case TI_PHY_DOWN: ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_PHY_DOWN; break; case TI_BREAK: case 
TI_PROTO_ERR: case TI_NAK: case TI_ACK_NAK_TO: case TF_SMP_XMIT_RCV_ERR: case TC_ATA_R_ERR_RECV: ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_INTERRUPTED; break; case TF_BREAK: case TU_BREAK: case TU_ACK_NAK_TO: case TF_SMPRSP_TO: ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_DEV_NO_RESPONSE; break; case TF_NAK_RECV: ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_NAK_R_ERR; break; case TA_I_T_NEXUS_LOSS: opcode = dl->status_block[0]; goto Again; break; case TF_INV_CONN_HANDLE: ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_DEVICE_UNKNOWN; break; case TF_REQUESTED_N_PENDING: ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_PENDING; break; case TC_TASK_CLEARED: case TA_ON_REQ: ts->resp = SAS_TASK_COMPLETE; ts->stat = SAS_ABORTED_TASK; break; case TF_NO_SMP_CONN: case TF_TMF_NO_CTX: case TF_TMF_NO_TAG: case TF_TMF_TAG_FREE: case TF_TMF_TASK_DONE: case TF_TMF_NO_CONN_HANDLE: case TF_IRTT_TO: case TF_IU_SHORT: case TF_DATA_OFFS_ERR: ts->resp = SAS_TASK_UNDELIVERED; ts->stat = SAS_DEV_NO_RESPONSE; break; case TC_LINK_ADM_RESP: case TC_CONTROL_PHY: case TC_RESUME: case TC_PARTIAL_SG_LIST: default: ASD_DPRINTK("%s: dl opcode: 0x%x?\n", __func__, opcode); break; } switch (task->task_proto) { case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: asd_unbuild_ata_ascb(ascb); break; case SAS_PROTOCOL_SMP: asd_unbuild_smp_ascb(ascb); break; case SAS_PROTOCOL_SSP: asd_unbuild_ssp_ascb(ascb); default: break; } spin_lock_irqsave(&task->task_state_lock, flags); task->task_state_flags &= ~SAS_TASK_STATE_PENDING; task->task_state_flags &= ~SAS_TASK_AT_INITIATOR; task->task_state_flags |= SAS_TASK_STATE_DONE; if (unlikely((task->task_state_flags & SAS_TASK_STATE_ABORTED))) { struct completion *completion = ascb->completion; spin_unlock_irqrestore(&task->task_state_lock, flags); ASD_DPRINTK("task 0x%p done with opcode 0x%x resp 0x%x " "stat 0x%x but aborted by upper layer!\n", task, opcode, ts->resp, ts->stat); if (completion) complete(completion); } else { 
spin_unlock_irqrestore(&task->task_state_lock, flags); task->lldd_task = NULL; asd_ascb_free(ascb); mb(); task->task_done(task); } } /* ---------- ATA ---------- */ static int asd_build_ata_ascb(struct asd_ascb *ascb, struct sas_task *task, gfp_t gfp_flags) { struct domain_device *dev = task->dev; struct scb *scb; u8 flags; int res = 0; scb = ascb->scb; if (unlikely(task->ata_task.device_control_reg_update)) scb->header.opcode = CONTROL_ATA_DEV; else if (dev->sata_dev.command_set == ATA_COMMAND_SET) scb->header.opcode = INITIATE_ATA_TASK; else scb->header.opcode = INITIATE_ATAPI_TASK; scb->ata_task.proto_conn_rate = (1 << 5); /* STP */ if (dev->port->oob_mode == SAS_OOB_MODE) scb->ata_task.proto_conn_rate |= dev->linkrate; scb->ata_task.total_xfer_len = cpu_to_le32(task->total_xfer_len); scb->ata_task.fis = task->ata_task.fis; if (likely(!task->ata_task.device_control_reg_update)) scb->ata_task.fis.flags |= 0x80; /* C=1: update ATA cmd reg */ scb->ata_task.fis.flags &= 0xF0; /* PM_PORT field shall be 0 */ if (dev->sata_dev.command_set == ATAPI_COMMAND_SET) memcpy(scb->ata_task.atapi_packet, task->ata_task.atapi_packet, 16); scb->ata_task.sister_scb = cpu_to_le16(0xFFFF); scb->ata_task.conn_handle = cpu_to_le16( (u16)(unsigned long)dev->lldd_dev); if (likely(!task->ata_task.device_control_reg_update)) { flags = 0; if (task->ata_task.dma_xfer) flags |= DATA_XFER_MODE_DMA; if (task->ata_task.use_ncq && dev->sata_dev.command_set != ATAPI_COMMAND_SET) flags |= ATA_Q_TYPE_NCQ; flags |= data_dir_flags[task->data_dir]; scb->ata_task.ata_flags = flags; scb->ata_task.retry_count = task->ata_task.retry_count; flags = 0; if (task->ata_task.set_affil_pol) flags |= SET_AFFIL_POLICY; if (task->ata_task.stp_affil_pol) flags |= STP_AFFIL_POLICY; scb->ata_task.flags = flags; } ascb->tasklet_complete = asd_task_tasklet_complete; if (likely(!task->ata_task.device_control_reg_update)) res = asd_map_scatterlist(task, scb->ata_task.sg_element, gfp_flags); return res; } static void 
asd_unbuild_ata_ascb(struct asd_ascb *a) { asd_unmap_scatterlist(a); } /* ---------- SMP ---------- */ static int asd_build_smp_ascb(struct asd_ascb *ascb, struct sas_task *task, gfp_t gfp_flags) { struct asd_ha_struct *asd_ha = ascb->ha; struct domain_device *dev = task->dev; struct scb *scb; pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_req, 1, PCI_DMA_TODEVICE); pci_map_sg(asd_ha->pcidev, &task->smp_task.smp_resp, 1, PCI_DMA_FROMDEVICE); scb = ascb->scb; scb->header.opcode = INITIATE_SMP_TASK; scb->smp_task.proto_conn_rate = dev->linkrate; scb->smp_task.smp_req.bus_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_req)); scb->smp_task.smp_req.size = cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_req)-4); scb->smp_task.smp_resp.bus_addr = cpu_to_le64((u64)sg_dma_address(&task->smp_task.smp_resp)); scb->smp_task.smp_resp.size = cpu_to_le32((u32)sg_dma_len(&task->smp_task.smp_resp)-4); scb->smp_task.sister_scb = cpu_to_le16(0xFFFF); scb->smp_task.conn_handle = cpu_to_le16((u16) (unsigned long)dev->lldd_dev); ascb->tasklet_complete = asd_task_tasklet_complete; return 0; } static void asd_unbuild_smp_ascb(struct asd_ascb *a) { struct sas_task *task = a->uldd_task; BUG_ON(!task); pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_req, 1, PCI_DMA_TODEVICE); pci_unmap_sg(a->ha->pcidev, &task->smp_task.smp_resp, 1, PCI_DMA_FROMDEVICE); } /* ---------- SSP ---------- */ static int asd_build_ssp_ascb(struct asd_ascb *ascb, struct sas_task *task, gfp_t gfp_flags) { struct domain_device *dev = task->dev; struct scb *scb; int res = 0; scb = ascb->scb; scb->header.opcode = INITIATE_SSP_TASK; scb->ssp_task.proto_conn_rate = (1 << 4); /* SSP */ scb->ssp_task.proto_conn_rate |= dev->linkrate; scb->ssp_task.total_xfer_len = cpu_to_le32(task->total_xfer_len); scb->ssp_task.ssp_frame.frame_type = SSP_DATA; memcpy(scb->ssp_task.ssp_frame.hashed_dest_addr, dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); memcpy(scb->ssp_task.ssp_frame.hashed_src_addr, 
dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE); scb->ssp_task.ssp_frame.tptt = cpu_to_be16(0xFFFF); memcpy(scb->ssp_task.ssp_cmd.lun, task->ssp_task.LUN, 8); if (task->ssp_task.enable_first_burst) scb->ssp_task.ssp_cmd.efb_prio_attr |= EFB_MASK; scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_prio << 3); scb->ssp_task.ssp_cmd.efb_prio_attr |= (task->ssp_task.task_attr & 7); memcpy(scb->ssp_task.ssp_cmd.cdb, task->ssp_task.cdb, 16); scb->ssp_task.sister_scb = cpu_to_le16(0xFFFF); scb->ssp_task.conn_handle = cpu_to_le16( (u16)(unsigned long)dev->lldd_dev); scb->ssp_task.data_dir = data_dir_flags[task->data_dir]; scb->ssp_task.retry_count = scb->ssp_task.retry_count; ascb->tasklet_complete = asd_task_tasklet_complete; res = asd_map_scatterlist(task, scb->ssp_task.sg_element, gfp_flags); return res; } static void asd_unbuild_ssp_ascb(struct asd_ascb *a) { asd_unmap_scatterlist(a); } /* ---------- Execute Task ---------- */ static int asd_can_queue(struct asd_ha_struct *asd_ha, int num) { int res = 0; unsigned long flags; spin_lock_irqsave(&asd_ha->seq.pend_q_lock, flags); if ((asd_ha->seq.can_queue - num) < 0) res = -SAS_QUEUE_FULL; else asd_ha->seq.can_queue -= num; spin_unlock_irqrestore(&asd_ha->seq.pend_q_lock, flags); return res; } int asd_execute_task(struct sas_task *task, const int num, gfp_t gfp_flags) { int res = 0; LIST_HEAD(alist); struct sas_task *t = task; struct asd_ascb *ascb = NULL, *a; struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha; unsigned long flags; res = asd_can_queue(asd_ha, num); if (res) return res; res = num; ascb = asd_ascb_alloc_list(asd_ha, &res, gfp_flags); if (res) { res = -ENOMEM; goto out_err; } __list_add(&alist, ascb->list.prev, &ascb->list); list_for_each_entry(a, &alist, list) { a->uldd_task = t; t->lldd_task = a; t = list_entry(t->list.next, struct sas_task, list); } list_for_each_entry(a, &alist, list) { t = a->uldd_task; a->uldd_timer = 1; if (t->task_proto & SAS_PROTOCOL_STP) t->task_proto = 
SAS_PROTOCOL_STP; switch (t->task_proto) { case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: res = asd_build_ata_ascb(a, t, gfp_flags); break; case SAS_PROTOCOL_SMP: res = asd_build_smp_ascb(a, t, gfp_flags); break; case SAS_PROTOCOL_SSP: res = asd_build_ssp_ascb(a, t, gfp_flags); break; default: asd_printk("unknown sas_task proto: 0x%x\n", t->task_proto); res = -ENOMEM; break; } if (res) goto out_err_unmap; spin_lock_irqsave(&t->task_state_lock, flags); t->task_state_flags |= SAS_TASK_AT_INITIATOR; spin_unlock_irqrestore(&t->task_state_lock, flags); } list_del_init(&alist); res = asd_post_ascb_list(asd_ha, ascb, num); if (unlikely(res)) { a = NULL; __list_add(&alist, ascb->list.prev, &ascb->list); goto out_err_unmap; } return 0; out_err_unmap: { struct asd_ascb *b = a; list_for_each_entry(a, &alist, list) { if (a == b) break; t = a->uldd_task; spin_lock_irqsave(&t->task_state_lock, flags); t->task_state_flags &= ~SAS_TASK_AT_INITIATOR; spin_unlock_irqrestore(&t->task_state_lock, flags); switch (t->task_proto) { case SAS_PROTOCOL_SATA: case SAS_PROTOCOL_STP: asd_unbuild_ata_ascb(a); break; case SAS_PROTOCOL_SMP: asd_unbuild_smp_ascb(a); break; case SAS_PROTOCOL_SSP: asd_unbuild_ssp_ascb(a); default: break; } t->lldd_task = NULL; } } list_del_init(&alist); out_err: if (ascb) asd_ascb_free_list(ascb); asd_can_dequeue(asd_ha, num); return res; }
gpl-2.0
ViciousAOSP/platform_kernel_ViciousKernel
ipc/namespace.c
7098
4365
/* * linux/ipc/namespace.c * Copyright (C) 2006 Pavel Emelyanov <xemul@openvz.org> OpenVZ, SWsoft Inc. */ #include <linux/ipc.h> #include <linux/msg.h> #include <linux/ipc_namespace.h> #include <linux/rcupdate.h> #include <linux/nsproxy.h> #include <linux/slab.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/user_namespace.h> #include <linux/proc_fs.h> #include "util.h" static struct ipc_namespace *create_ipc_ns(struct task_struct *tsk, struct ipc_namespace *old_ns) { struct ipc_namespace *ns; int err; ns = kmalloc(sizeof(struct ipc_namespace), GFP_KERNEL); if (ns == NULL) return ERR_PTR(-ENOMEM); atomic_set(&ns->count, 1); err = mq_init_ns(ns); if (err) { kfree(ns); return ERR_PTR(err); } atomic_inc(&nr_ipc_ns); sem_init_ns(ns); msg_init_ns(ns); shm_init_ns(ns); /* * msgmni has already been computed for the new ipc ns. * Thus, do the ipcns creation notification before registering that * new ipcns in the chain. */ ipcns_notify(IPCNS_CREATED); register_ipcns_notifier(ns); ns->user_ns = get_user_ns(task_cred_xxx(tsk, user)->user_ns); return ns; } struct ipc_namespace *copy_ipcs(unsigned long flags, struct task_struct *tsk) { struct ipc_namespace *ns = tsk->nsproxy->ipc_ns; if (!(flags & CLONE_NEWIPC)) return get_ipc_ns(ns); return create_ipc_ns(tsk, ns); } /* * free_ipcs - free all ipcs of one type * @ns: the namespace to remove the ipcs from * @ids: the table of ipcs to free * @free: the function called to free each individual ipc * * Called for each kind of ipc when an ipc_namespace exits. 
*/ void free_ipcs(struct ipc_namespace *ns, struct ipc_ids *ids, void (*free)(struct ipc_namespace *, struct kern_ipc_perm *)) { struct kern_ipc_perm *perm; int next_id; int total, in_use; down_write(&ids->rw_mutex); in_use = ids->in_use; for (total = 0, next_id = 0; total < in_use; next_id++) { perm = idr_find(&ids->ipcs_idr, next_id); if (perm == NULL) continue; ipc_lock_by_ptr(perm); free(ns, perm); total++; } up_write(&ids->rw_mutex); } static void free_ipc_ns(struct ipc_namespace *ns) { /* * Unregistering the hotplug notifier at the beginning guarantees * that the ipc namespace won't be freed while we are inside the * callback routine. Since the blocking_notifier_chain_XXX routines * hold a rw lock on the notifier list, unregister_ipcns_notifier() * won't take the rw lock before blocking_notifier_call_chain() has * released the rd lock. */ unregister_ipcns_notifier(ns); sem_exit_ns(ns); msg_exit_ns(ns); shm_exit_ns(ns); atomic_dec(&nr_ipc_ns); /* * Do the ipcns removal notification after decrementing nr_ipc_ns in * order to have a correct value when recomputing msgmni. */ ipcns_notify(IPCNS_REMOVED); put_user_ns(ns->user_ns); kfree(ns); } /* * put_ipc_ns - drop a reference to an ipc namespace. * @ns: the namespace to put * * If this is the last task in the namespace exiting, and * it is dropping the refcount to 0, then it can race with * a task in another ipc namespace but in a mounts namespace * which has this ipcns's mqueuefs mounted, doing some action * with one of the mqueuefs files. That can raise the refcount. * So dropping the refcount, and raising the refcount when * accessing it through the VFS, are protected with mq_lock. * * (Clearly, a task raising the refcount on its own ipc_ns * needn't take mq_lock since it can't race with the last task * in the ipcns exiting). 
*/ void put_ipc_ns(struct ipc_namespace *ns) { if (atomic_dec_and_lock(&ns->count, &mq_lock)) { mq_clear_sbinfo(ns); spin_unlock(&mq_lock); mq_put_mnt(ns); free_ipc_ns(ns); } } static void *ipcns_get(struct task_struct *task) { struct ipc_namespace *ns = NULL; struct nsproxy *nsproxy; rcu_read_lock(); nsproxy = task_nsproxy(task); if (nsproxy) ns = get_ipc_ns(nsproxy->ipc_ns); rcu_read_unlock(); return ns; } static void ipcns_put(void *ns) { return put_ipc_ns(ns); } static int ipcns_install(struct nsproxy *nsproxy, void *ns) { /* Ditch state from the old ipc namespace */ exit_sem(current); put_ipc_ns(nsproxy->ipc_ns); nsproxy->ipc_ns = get_ipc_ns(ns); return 0; } const struct proc_ns_operations ipcns_operations = { .name = "ipc", .type = CLONE_NEWIPC, .get = ipcns_get, .put = ipcns_put, .install = ipcns_install, };
gpl-2.0
kondors1995/Soviet-kernel
drivers/net/irda/toim3232-sir.c
8890
12466
/********************************************************************* * * Filename: toim3232-sir.c * Version: 1.0 * Description: Implementation of dongles based on the Vishay/Temic * TOIM3232 SIR Endec chipset. Currently only the * IRWave IR320ST-2 is tested, although it should work * with any TOIM3232 or TOIM4232 chipset based RS232 * dongle with minimal modification. * Based heavily on the Tekram driver (tekram.c), * with thanks to Dag Brattli and Martin Diehl. * Status: Experimental. * Author: David Basden <davidb-irda@rcpt.to> * Created at: Thu Feb 09 23:47:32 2006 * * Copyright (c) 2006 David Basden. * Copyright (c) 1998-1999 Dag Brattli, * Copyright (c) 2002 Martin Diehl, * All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; either version 2 of * the License, or (at your option) any later version. * * Neither Dag Brattli nor University of Tromsø admit liability nor * provide warranty for any of this software. This material is * provided "AS-IS" and at no charge. * ********************************************************************/ /* * This driver has currently only been tested on the IRWave IR320ST-2 * * PROTOCOL: * * The protocol for talking to the TOIM3232 is quite easy, and is * designed to interface with RS232 with only level convertors. The * BR/~D line on the chip is brought high to signal 'command mode', * where a command byte is sent to select the baudrate of the RS232 * interface and the pulse length of the IRDA output. When BR/~D * is brought low, the dongle then changes to the selected baudrate, * and the RS232 interface is used for data until BR/~D is brought * high again. The initial speed for the TOIMx323 after RESET is * 9600 baud. The baudrate for command-mode is the last selected * baud-rate, or 9600 after a RESET. 
* * The dongle I have (below) adds some extra hardware on the front end, * but this is mostly directed towards pariasitic power from the RS232 * line rather than changing very much about how to communicate with * the TOIM3232. * * The protocol to talk to the TOIM4232 chipset seems to be almost * identical to the TOIM3232 (and the 4232 datasheet is more detailed) * so this code will probably work on that as well, although I haven't * tested it on that hardware. * * Target dongle variations that might be common: * * DTR and RTS function: * The data sheet for the 4232 has a sample implementation that hooks the * DTR and RTS lines to the RESET and BaudRate/~Data lines of the * chip (through line-converters). Given both DTR and RTS would have to * be held low in normal operation, and the TOIMx232 requires +5V to * signal ground, most dongle designers would almost certainly choose * an implementation that kept at least one of DTR or RTS high in * normal operation to provide power to the dongle, but will likely * vary between designs. * * User specified command bits: * There are two user-controllable output lines from the TOIMx232 that * can be set low or high by setting the appropriate bits in the * high-nibble of the command byte (when setting speed and pulse length). * These might be used to switch on and off added hardware or extra * dongle features. * * * Target hardware: IRWave IR320ST-2 * * The IRWave IR320ST-2 is a simple dongle based on the Vishay/Temic * TOIM3232 SIR Endec and the Vishay/Temic TFDS4500 SIR IRDA transceiver. * It uses a hex inverter and some discrete components to buffer and * line convert the RS232 down to 5V. * * The dongle is powered through a voltage regulator, fed by a large * capacitor. To switch the dongle on, DTR is brought high to charge * the capacitor and drive the voltage regulator. DTR isn't associated * with any control lines on the TOIM3232. 
Parisitic power is also taken * from the RTS, TD and RD lines when brought high, but through resistors. * When DTR is low, the circuit might lose power even with RTS high. * * RTS is inverted and attached to the BR/~D input pin. When RTS * is high, BR/~D is low, and the TOIM3232 is in the normal 'data' mode. * RTS is brought low, BR/~D is high, and the TOIM3232 is in 'command * mode'. * * For some unknown reason, the RESET line isn't actually connected * to anything. This means to reset the dongle to get it to a known * state (9600 baud) you must drop DTR and RTS low, wait for the power * capacitor to discharge, and then bring DTR (and RTS for data mode) * high again, and wait for the capacitor to charge, the power supply * to stabilise, and the oscillator clock to stabilise. * * Fortunately, if the current baudrate is known, the chipset can * easily change speed by entering command mode without having to * reset the dongle first. * * Major Components: * * - Vishay/Temic TOIM3232 SIR Endec to change RS232 pulse timings * to IRDA pulse timings * - 3.6864MHz crystal to drive TOIM3232 clock oscillator * - DM74lS04M Inverting Hex line buffer for RS232 input buffering * and level conversion * - PJ2951AC 150mA voltage regulator * - Vishay/Temic TFDS4500 SIR IRDA front-end transceiver * */ #include <linux/module.h> #include <linux/delay.h> #include <linux/init.h> #include <linux/sched.h> #include <net/irda/irda.h> #include "sir-dev.h" static int toim3232delay = 150; /* default is 150 ms */ module_param(toim3232delay, int, 0); MODULE_PARM_DESC(toim3232delay, "toim3232 dongle write complete delay"); #if 0 static int toim3232flipdtr = 0; /* default is DTR high to reset */ module_param(toim3232flipdtr, int, 0); MODULE_PARM_DESC(toim3232flipdtr, "toim3232 dongle invert DTR (Reset)"); static int toim3232fliprts = 0; /* default is RTS high for baud change */ module_param(toim3232fliptrs, int, 0); MODULE_PARM_DESC(toim3232fliprts, "toim3232 dongle invert RTS (BR/D)"); #endif 
static int toim3232_open(struct sir_dev *); static int toim3232_close(struct sir_dev *); static int toim3232_change_speed(struct sir_dev *, unsigned); static int toim3232_reset(struct sir_dev *); #define TOIM3232_115200 0x00 #define TOIM3232_57600 0x01 #define TOIM3232_38400 0x02 #define TOIM3232_19200 0x03 #define TOIM3232_9600 0x06 #define TOIM3232_2400 0x0A #define TOIM3232_PW 0x10 /* Pulse select bit */ static struct dongle_driver toim3232 = { .owner = THIS_MODULE, .driver_name = "Vishay TOIM3232", .type = IRDA_TOIM3232_DONGLE, .open = toim3232_open, .close = toim3232_close, .reset = toim3232_reset, .set_speed = toim3232_change_speed, }; static int __init toim3232_sir_init(void) { if (toim3232delay < 1 || toim3232delay > 500) toim3232delay = 200; IRDA_DEBUG(1, "%s - using %d ms delay\n", toim3232.driver_name, toim3232delay); return irda_register_dongle(&toim3232); } static void __exit toim3232_sir_cleanup(void) { irda_unregister_dongle(&toim3232); } static int toim3232_open(struct sir_dev *dev) { struct qos_info *qos = &dev->qos; IRDA_DEBUG(2, "%s()\n", __func__); /* Pull the lines high to start with. * * For the IR320ST-2, we need to charge the main supply capacitor to * switch the device on. We keep DTR high throughout to do this. * When RTS, TD and RD are high, they will also trickle-charge the * cap. RTS is high for data transmission, and low for baud rate select. * -- DGB */ sirdev_set_dtr_rts(dev, TRUE, TRUE); /* The TOI3232 supports many speeds between 1200bps and 115000bps. * We really only care about those supported by the IRDA spec, but * 38400 seems to be implemented in many places */ qos->baud_rate.bits &= IR_2400|IR_9600|IR_19200|IR_38400|IR_57600|IR_115200; /* From the tekram driver. 
Not sure what a reasonable value is -- DGB */ qos->min_turn_time.bits = 0x01; /* Needs at least 10 ms */ irda_qos_bits_to_value(qos); /* irda thread waits 50 msec for power settling */ return 0; } static int toim3232_close(struct sir_dev *dev) { IRDA_DEBUG(2, "%s()\n", __func__); /* Power off dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); return 0; } /* * Function toim3232change_speed (dev, state, speed) * * Set the speed for the TOIM3232 based dongle. Warning, this * function must be called with a process context! * * Algorithm * 1. keep DTR high but clear RTS to bring into baud programming mode * 2. wait at least 7us to enter programming mode * 3. send control word to set baud rate and timing * 4. wait at least 1us * 5. bring RTS high to enter DATA mode (RS232 is passed through to transceiver) * 6. should take effect immediately (although probably worth waiting) */ #define TOIM3232_STATE_WAIT_SPEED (SIRDEV_STATE_DONGLE_SPEED + 1) static int toim3232_change_speed(struct sir_dev *dev, unsigned speed) { unsigned state = dev->fsm.substate; unsigned delay = 0; u8 byte; static int ret = 0; IRDA_DEBUG(2, "%s()\n", __func__); switch(state) { case SIRDEV_STATE_DONGLE_SPEED: /* Figure out what we are going to send as a control byte */ switch (speed) { case 2400: byte = TOIM3232_PW|TOIM3232_2400; break; default: speed = 9600; ret = -EINVAL; /* fall thru */ case 9600: byte = TOIM3232_PW|TOIM3232_9600; break; case 19200: byte = TOIM3232_PW|TOIM3232_19200; break; case 38400: byte = TOIM3232_PW|TOIM3232_38400; break; case 57600: byte = TOIM3232_PW|TOIM3232_57600; break; case 115200: byte = TOIM3232_115200; break; } /* Set DTR, Clear RTS: Go into baud programming mode */ sirdev_set_dtr_rts(dev, TRUE, FALSE); /* Wait at least 7us */ udelay(14); /* Write control byte */ sirdev_raw_write(dev, &byte, 1); dev->speed = speed; state = TOIM3232_STATE_WAIT_SPEED; delay = toim3232delay; break; case TOIM3232_STATE_WAIT_SPEED: /* Have transmitted control byte * Wait for 'at least 1us' */ 
udelay(14); /* Set DTR, Set RTS: Go into normal data mode */ sirdev_set_dtr_rts(dev, TRUE, TRUE); /* Wait (TODO: check this is needed) */ udelay(50); break; default: printk(KERN_ERR "%s - undefined state %d\n", __func__, state); ret = -EINVAL; break; } dev->fsm.substate = state; return (delay > 0) ? delay : ret; } /* * Function toim3232reset (driver) * * This function resets the toim3232 dongle. Warning, this function * must be called with a process context!! * * What we should do is: * 0. Pull RESET high * 1. Wait for at least 7us * 2. Pull RESET low * 3. Wait for at least 7us * 4. Pull BR/~D high * 5. Wait for at least 7us * 6. Send control byte to set baud rate * 7. Wait at least 1us after stop bit * 8. Pull BR/~D low * 9. Should then be in data mode * * Because the IR320ST-2 doesn't have the RESET line connected for some reason, * we'll have to do something else. * * The default speed after a RESET is 9600, so lets try just bringing it up in * data mode after switching it off, waiting for the supply capacitor to * discharge, and then switch it back on. This isn't actually pulling RESET * high, but it seems to have the same effect. * * This behaviour will probably work on dongles that have the RESET line connected, * but if not, add a flag for the IR320ST-2, and implment the above-listed proper * behaviour. * * RTS is inverted and then fed to BR/~D, so to put it in programming mode, we * need to have pull RTS low */ static int toim3232_reset(struct sir_dev *dev) { IRDA_DEBUG(2, "%s()\n", __func__); /* Switch off both DTR and RTS to switch off dongle */ sirdev_set_dtr_rts(dev, FALSE, FALSE); /* Should sleep a while. 
This might be evil doing it this way.*/ set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(50)); /* Set DTR, Set RTS (data mode) */ sirdev_set_dtr_rts(dev, TRUE, TRUE); /* Wait at least 10 ms for power to stabilize again */ set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(msecs_to_jiffies(10)); /* Speed should now be 9600 */ dev->speed = 9600; return 0; } MODULE_AUTHOR("David Basden <davidb-linux@rcpt.to>"); MODULE_DESCRIPTION("Vishay/Temic TOIM3232 based dongle driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("irda-dongle-12"); /* IRDA_TOIM3232_DONGLE */ module_init(toim3232_sir_init); module_exit(toim3232_sir_cleanup);
gpl-2.0
xenius9/d838_kernel
arch/tile/lib/strlen_32.c
9914
1158
/* * Copyright 2010 Tilera Corporation. All Rights Reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation, version 2. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for * more details. */ #include <linux/types.h> #include <linux/string.h> #include <linux/module.h> #undef strlen size_t strlen(const char *s) { /* Get an aligned pointer. */ const uintptr_t s_int = (uintptr_t) s; const uint32_t *p = (const uint32_t *)(s_int & -4); /* Read the first word, but force bytes before the string to be nonzero. * This expression works because we know shift counts are taken mod 32. */ uint32_t v = *p | ((1 << (s_int << 3)) - 1); uint32_t bits; while ((bits = __insn_seqb(v, 0)) == 0) v = *++p; return ((const char *)p) + (__insn_ctz(bits) >> 3) - s; } EXPORT_SYMBOL(strlen);
gpl-2.0
monishk10/android_kernel_xiaomi_cancro
arch/powerpc/boot/ebony.c
14010
2504
/* * Copyright 2007 David Gibson, IBM Corporation. * * Based on earlier code: * Copyright (C) Paul Mackerras 1997. * * Matt Porter <mporter@kernel.crashing.org> * Copyright 2002-2005 MontaVista Software Inc. * * Eugene Surovegin <eugene.surovegin@zultys.com> or <ebs@ebshome.net> * Copyright (c) 2003, 2004 Zultys Technologies * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <stdarg.h> #include <stddef.h> #include "types.h" #include "elf.h" #include "string.h" #include "stdio.h" #include "page.h" #include "ops.h" #include "reg.h" #include "io.h" #include "dcr.h" #include "4xx.h" #include "44x.h" static u8 *ebony_mac0, *ebony_mac1; #define EBONY_FPGA_PATH "/plb/opb/ebc/fpga" #define EBONY_FPGA_FLASH_SEL 0x01 #define EBONY_SMALL_FLASH_PATH "/plb/opb/ebc/small-flash" static void ebony_flashsel_fixup(void) { void *devp; u32 reg[3] = {0x0, 0x0, 0x80000}; u8 *fpga; u8 fpga_reg0 = 0x0; devp = finddevice(EBONY_FPGA_PATH); if (!devp) fatal("Couldn't locate FPGA node %s\n\r", EBONY_FPGA_PATH); if (getprop(devp, "virtual-reg", &fpga, sizeof(fpga)) != sizeof(fpga)) fatal("%s has missing or invalid virtual-reg property\n\r", EBONY_FPGA_PATH); fpga_reg0 = in_8(fpga); devp = finddevice(EBONY_SMALL_FLASH_PATH); if (!devp) fatal("Couldn't locate small flash node %s\n\r", EBONY_SMALL_FLASH_PATH); if (getprop(devp, "reg", reg, sizeof(reg)) != sizeof(reg)) fatal("%s has reg property of unexpected size\n\r", EBONY_SMALL_FLASH_PATH); /* Invert address bit 14 (IBM-endian) if FLASH_SEL fpga bit is set */ if (fpga_reg0 & EBONY_FPGA_FLASH_SEL) reg[1] ^= 0x80000; setprop(devp, "reg", reg, sizeof(reg)); } static void ebony_fixups(void) { // FIXME: sysclk should be derived by reading the FPGA registers unsigned long sysclk = 33000000; ibm440gp_fixup_clocks(sysclk, 6 * 1843200); 
ibm4xx_sdram_fixup_memsize(); dt_fixup_mac_address_by_alias("ethernet0", ebony_mac0); dt_fixup_mac_address_by_alias("ethernet1", ebony_mac1); ibm4xx_fixup_ebc_ranges("/plb/opb/ebc"); ebony_flashsel_fixup(); } void ebony_init(void *mac0, void *mac1) { platform_ops.fixups = ebony_fixups; platform_ops.exit = ibm44x_dbcr_reset; ebony_mac0 = mac0; ebony_mac1 = mac1; fdt_init(_dtb_start); serial_console_init(); }
gpl-2.0
kgugala/linux
fs/fscache/proc.c
14522
1933
/* FS-Cache statistics viewing interface * * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. * Written by David Howells (dhowells@redhat.com) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #define FSCACHE_DEBUG_LEVEL OPERATION #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include "internal.h" /* * initialise the /proc/fs/fscache/ directory */ int __init fscache_proc_init(void) { _enter(""); if (!proc_mkdir("fs/fscache", NULL)) goto error_dir; #ifdef CONFIG_FSCACHE_STATS if (!proc_create("fs/fscache/stats", S_IFREG | 0444, NULL, &fscache_stats_fops)) goto error_stats; #endif #ifdef CONFIG_FSCACHE_HISTOGRAM if (!proc_create("fs/fscache/histogram", S_IFREG | 0444, NULL, &fscache_histogram_fops)) goto error_histogram; #endif #ifdef CONFIG_FSCACHE_OBJECT_LIST if (!proc_create("fs/fscache/objects", S_IFREG | 0444, NULL, &fscache_objlist_fops)) goto error_objects; #endif _leave(" = 0"); return 0; #ifdef CONFIG_FSCACHE_OBJECT_LIST error_objects: #endif #ifdef CONFIG_FSCACHE_HISTOGRAM remove_proc_entry("fs/fscache/histogram", NULL); error_histogram: #endif #ifdef CONFIG_FSCACHE_STATS remove_proc_entry("fs/fscache/stats", NULL); error_stats: #endif remove_proc_entry("fs/fscache", NULL); error_dir: _leave(" = -ENOMEM"); return -ENOMEM; } /* * clean up the /proc/fs/fscache/ directory */ void fscache_proc_cleanup(void) { #ifdef CONFIG_FSCACHE_OBJECT_LIST remove_proc_entry("fs/fscache/objects", NULL); #endif #ifdef CONFIG_FSCACHE_HISTOGRAM remove_proc_entry("fs/fscache/histogram", NULL); #endif #ifdef CONFIG_FSCACHE_STATS remove_proc_entry("fs/fscache/stats", NULL); #endif remove_proc_entry("fs/fscache", NULL); }
gpl-2.0
kefirnet/Leo-2.6.32
arch/x86/kernel/pci-dma.c
443
7332
#include <linux/dma-mapping.h> #include <linux/dma-debug.h> #include <linux/dmar.h> #include <linux/bootmem.h> #include <linux/pci.h> #include <linux/kmemleak.h> #include <asm/proto.h> #include <asm/dma.h> #include <asm/iommu.h> #include <asm/gart.h> #include <asm/calgary.h> #include <asm/amd_iommu.h> static int forbid_dac __read_mostly; struct dma_map_ops *dma_ops; EXPORT_SYMBOL(dma_ops); static int iommu_sac_force __read_mostly; #ifdef CONFIG_IOMMU_DEBUG int panic_on_overflow __read_mostly = 1; int force_iommu __read_mostly = 1; #else int panic_on_overflow __read_mostly = 0; int force_iommu __read_mostly = 0; #endif int iommu_merge __read_mostly = 0; int no_iommu __read_mostly; /* Set this to 1 if there is a HW IOMMU in the system */ int iommu_detected __read_mostly = 0; /* * This variable becomes 1 if iommu=pt is passed on the kernel command line. * If this variable is 1, IOMMU implementations do no DMA translation for * devices and allow every device to access to whole physical memory. This is * useful if a user want to use an IOMMU only for KVM device assignment to * guests and not for driver dma translation. */ int iommu_pass_through __read_mostly; dma_addr_t bad_dma_address __read_mostly = 0; EXPORT_SYMBOL(bad_dma_address); /* Dummy device used for NULL arguments (normally ISA). 
*/ struct device x86_dma_fallback_dev = { .init_name = "fallback device", .coherent_dma_mask = ISA_DMA_BIT_MASK, .dma_mask = &x86_dma_fallback_dev.coherent_dma_mask, }; EXPORT_SYMBOL(x86_dma_fallback_dev); /* Number of entries preallocated for DMA-API debugging */ #define PREALLOC_DMA_DEBUG_ENTRIES 32768 int dma_set_mask(struct device *dev, u64 mask) { if (!dev->dma_mask || !dma_supported(dev, mask)) return -EIO; *dev->dma_mask = mask; return 0; } EXPORT_SYMBOL(dma_set_mask); #ifdef CONFIG_X86_64 static __initdata void *dma32_bootmem_ptr; static unsigned long dma32_bootmem_size __initdata = (128ULL<<20); static int __init parse_dma32_size_opt(char *p) { if (!p) return -EINVAL; dma32_bootmem_size = memparse(p, &p); return 0; } early_param("dma32_size", parse_dma32_size_opt); void __init dma32_reserve_bootmem(void) { unsigned long size, align; if (max_pfn <= MAX_DMA32_PFN) return; /* * check aperture_64.c allocate_aperture() for reason about * using 512M as goal */ align = 64ULL<<20; size = roundup(dma32_bootmem_size, align); dma32_bootmem_ptr = __alloc_bootmem_nopanic(size, align, 512ULL<<20); /* * Kmemleak should not scan this block as it may not be mapped via the * kernel direct mapping. 
*/ kmemleak_ignore(dma32_bootmem_ptr); if (dma32_bootmem_ptr) dma32_bootmem_size = size; else dma32_bootmem_size = 0; } static void __init dma32_free_bootmem(void) { if (max_pfn <= MAX_DMA32_PFN) return; if (!dma32_bootmem_ptr) return; free_bootmem(__pa(dma32_bootmem_ptr), dma32_bootmem_size); dma32_bootmem_ptr = NULL; dma32_bootmem_size = 0; } #endif void __init pci_iommu_alloc(void) { #ifdef CONFIG_X86_64 /* free the range so iommu could get some range less than 4G */ dma32_free_bootmem(); #endif /* * The order of these functions is important for * fall-back/fail-over reasons */ gart_iommu_hole_init(); detect_calgary(); detect_intel_iommu(); amd_iommu_detect(); pci_swiotlb_init(); } void *dma_generic_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_addr, gfp_t flag) { unsigned long dma_mask; struct page *page; dma_addr_t addr; dma_mask = dma_alloc_coherent_mask(dev, flag); flag |= __GFP_ZERO; again: page = alloc_pages_node(dev_to_node(dev), flag, get_order(size)); if (!page) return NULL; addr = page_to_phys(page); if (addr + size > dma_mask) { __free_pages(page, get_order(size)); if (dma_mask < DMA_BIT_MASK(32) && !(flag & GFP_DMA)) { flag = (flag & ~GFP_DMA32) | GFP_DMA; goto again; } return NULL; } *dma_addr = addr; return page_address(page); } /* * See <Documentation/x86_64/boot-options.txt> for the iommu kernel parameter * documentation. 
*/ static __init int iommu_setup(char *p) { iommu_merge = 1; if (!p) return -EINVAL; while (*p) { if (!strncmp(p, "off", 3)) no_iommu = 1; /* gart_parse_options has more force support */ if (!strncmp(p, "force", 5)) force_iommu = 1; if (!strncmp(p, "noforce", 7)) { iommu_merge = 0; force_iommu = 0; } if (!strncmp(p, "biomerge", 8)) { iommu_merge = 1; force_iommu = 1; } if (!strncmp(p, "panic", 5)) panic_on_overflow = 1; if (!strncmp(p, "nopanic", 7)) panic_on_overflow = 0; if (!strncmp(p, "merge", 5)) { iommu_merge = 1; force_iommu = 1; } if (!strncmp(p, "nomerge", 7)) iommu_merge = 0; if (!strncmp(p, "forcesac", 8)) iommu_sac_force = 1; if (!strncmp(p, "allowdac", 8)) forbid_dac = 0; if (!strncmp(p, "nodac", 5)) forbid_dac = 1; if (!strncmp(p, "usedac", 6)) { forbid_dac = -1; return 1; } #ifdef CONFIG_SWIOTLB if (!strncmp(p, "soft", 4)) swiotlb = 1; #endif if (!strncmp(p, "pt", 2)) iommu_pass_through = 1; gart_parse_options(p); #ifdef CONFIG_CALGARY_IOMMU if (!strncmp(p, "calgary", 7)) use_calgary = 1; #endif /* CONFIG_CALGARY_IOMMU */ p += strcspn(p, ","); if (*p == ',') ++p; } return 0; } early_param("iommu", iommu_setup); int dma_supported(struct device *dev, u64 mask) { struct dma_map_ops *ops = get_dma_ops(dev); #ifdef CONFIG_PCI if (mask > 0xffffffff && forbid_dac > 0) { dev_info(dev, "PCI: Disallowing DAC for device\n"); return 0; } #endif if (ops->dma_supported) return ops->dma_supported(dev, mask); /* Copied from i386. Doesn't make much sense, because it will only work for pci_alloc_coherent. The caller just has to use GFP_DMA in this case. */ if (mask < DMA_BIT_MASK(24)) return 0; /* Tell the device to use SAC when IOMMU force is on. This allows the driver to use cheaper accesses in some cases. Problem with this is that if we overflow the IOMMU area and return DAC as fallback address the device may not handle it correctly. As a special case some controllers have a 39bit address mode that is as efficient as 32bit (aic79xx). Don't force SAC for these. 
Assume all masks <= 40 bits are of this type. Normally this doesn't make any difference, but gives more gentle handling of IOMMU overflow. */ if (iommu_sac_force && (mask >= DMA_BIT_MASK(40))) { dev_info(dev, "Force SAC with mask %Lx\n", mask); return 0; } return 1; } EXPORT_SYMBOL(dma_supported); static int __init pci_iommu_init(void) { dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES); #ifdef CONFIG_PCI dma_debug_add_bus(&pci_bus_type); #endif calgary_iommu_init(); intel_iommu_init(); amd_iommu_init(); gart_iommu_init(); no_iommu_init(); return 0; } void pci_iommu_shutdown(void) { gart_iommu_shutdown(); amd_iommu_shutdown(); } /* Must execute after PCI subsystem */ rootfs_initcall(pci_iommu_init); #ifdef CONFIG_PCI /* Many VIA bridges seem to corrupt data for DAC. Disable it here */ static __devinit void via_no_dac(struct pci_dev *dev) { if ((dev->class >> 8) == PCI_CLASS_BRIDGE_PCI && forbid_dac == 0) { dev_info(&dev->dev, "disabling DAC on VIA PCI bridge\n"); forbid_dac = 1; } } DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_VIA, PCI_ANY_ID, via_no_dac); #endif
gpl-2.0
ashhher3/linux
drivers/net/wireless/ath/ath10k/swap.c
443
6169
/* * Copyright (c) 2015 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* This file has implementation for code swap logic. With code swap feature, * target can run the fw binary with even smaller IRAM size by using host * memory to store some of the code segments. */ #include "core.h" #include "bmi.h" #include "debug.h" static int ath10k_swap_code_seg_fill(struct ath10k *ar, struct ath10k_swap_code_seg_info *seg_info, const void *data, size_t data_len) { u8 *virt_addr = seg_info->virt_address[0]; u8 swap_magic[ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ] = {}; const u8 *fw_data = data; union ath10k_swap_code_seg_item *swap_item; u32 length = 0; u32 payload_len; u32 total_payload_len = 0; u32 size_left = data_len; /* Parse swap bin and copy the content to host allocated memory. * The format is Address, length and value. The last 4-bytes is * target write address. Currently address field is not used. 
*/ seg_info->target_addr = -1; while (size_left >= sizeof(*swap_item)) { swap_item = (union ath10k_swap_code_seg_item *)fw_data; payload_len = __le32_to_cpu(swap_item->tlv.length); if ((payload_len > size_left) || (payload_len == 0 && size_left != sizeof(struct ath10k_swap_code_seg_tail))) { ath10k_err(ar, "refusing to parse invalid tlv length %d\n", payload_len); return -EINVAL; } if (payload_len == 0) { if (memcmp(swap_item->tail.magic_signature, swap_magic, ATH10K_SWAP_CODE_SEG_MAGIC_BYTES_SZ)) { ath10k_err(ar, "refusing an invalid swap file\n"); return -EINVAL; } seg_info->target_addr = __le32_to_cpu(swap_item->tail.bmi_write_addr); break; } memcpy(virt_addr, swap_item->tlv.data, payload_len); virt_addr += payload_len; length = payload_len + sizeof(struct ath10k_swap_code_seg_tlv); size_left -= length; fw_data += length; total_payload_len += payload_len; } if (seg_info->target_addr == -1) { ath10k_err(ar, "failed to parse invalid swap file\n"); return -EINVAL; } seg_info->seg_hw_info.swap_size = __cpu_to_le32(total_payload_len); return 0; } static void ath10k_swap_code_seg_free(struct ath10k *ar, struct ath10k_swap_code_seg_info *seg_info) { u32 seg_size; if (!seg_info) return; if (!seg_info->virt_address[0]) return; seg_size = __le32_to_cpu(seg_info->seg_hw_info.size); dma_free_coherent(ar->dev, seg_size, seg_info->virt_address[0], seg_info->paddr[0]); } static struct ath10k_swap_code_seg_info * ath10k_swap_code_seg_alloc(struct ath10k *ar, size_t swap_bin_len) { struct ath10k_swap_code_seg_info *seg_info; void *virt_addr; dma_addr_t paddr; swap_bin_len = roundup(swap_bin_len, 2); if (swap_bin_len > ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX) { ath10k_err(ar, "refusing code swap bin because it is too big %zu > %d\n", swap_bin_len, ATH10K_SWAP_CODE_SEG_BIN_LEN_MAX); return NULL; } seg_info = devm_kzalloc(ar->dev, sizeof(*seg_info), GFP_KERNEL); if (!seg_info) return NULL; virt_addr = dma_alloc_coherent(ar->dev, swap_bin_len, &paddr, GFP_KERNEL); if (!virt_addr) { 
ath10k_err(ar, "failed to allocate dma coherent memory\n"); return NULL; } seg_info->seg_hw_info.bus_addr[0] = __cpu_to_le32(paddr); seg_info->seg_hw_info.size = __cpu_to_le32(swap_bin_len); seg_info->seg_hw_info.swap_size = __cpu_to_le32(swap_bin_len); seg_info->seg_hw_info.num_segs = __cpu_to_le32(ATH10K_SWAP_CODE_SEG_NUM_SUPPORTED); seg_info->seg_hw_info.size_log2 = __cpu_to_le32(ilog2(swap_bin_len)); seg_info->virt_address[0] = virt_addr; seg_info->paddr[0] = paddr; return seg_info; } int ath10k_swap_code_seg_configure(struct ath10k *ar, enum ath10k_swap_code_seg_bin_type type) { int ret; struct ath10k_swap_code_seg_info *seg_info = NULL; switch (type) { case ATH10K_SWAP_CODE_SEG_BIN_TYPE_FW: if (!ar->swap.firmware_swap_code_seg_info) return 0; ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot found firmware code swap binary\n"); seg_info = ar->swap.firmware_swap_code_seg_info; break; default: case ATH10K_SWAP_CODE_SEG_BIN_TYPE_OTP: case ATH10K_SWAP_CODE_SEG_BIN_TYPE_UTF: ath10k_warn(ar, "ignoring unknown code swap binary type %d\n", type); return 0; } ret = ath10k_bmi_write_memory(ar, seg_info->target_addr, &seg_info->seg_hw_info, sizeof(seg_info->seg_hw_info)); if (ret) { ath10k_err(ar, "failed to write Code swap segment information (%d)\n", ret); return ret; } return 0; } void ath10k_swap_code_seg_release(struct ath10k *ar) { ath10k_swap_code_seg_free(ar, ar->swap.firmware_swap_code_seg_info); ar->swap.firmware_codeswap_data = NULL; ar->swap.firmware_codeswap_len = 0; ar->swap.firmware_swap_code_seg_info = NULL; } int ath10k_swap_code_seg_init(struct ath10k *ar) { int ret; struct ath10k_swap_code_seg_info *seg_info; if (!ar->swap.firmware_codeswap_len || !ar->swap.firmware_codeswap_data) return 0; seg_info = ath10k_swap_code_seg_alloc(ar, ar->swap.firmware_codeswap_len); if (!seg_info) { ath10k_err(ar, "failed to allocate fw code swap segment\n"); return -ENOMEM; } ret = ath10k_swap_code_seg_fill(ar, seg_info, ar->swap.firmware_codeswap_data, 
ar->swap.firmware_codeswap_len); if (ret) { ath10k_warn(ar, "failed to initialize fw code swap segment: %d\n", ret); ath10k_swap_code_seg_free(ar, seg_info); return ret; } ar->swap.firmware_swap_code_seg_info = seg_info; return 0; }
gpl-2.0
virtuous/kernel-7x30-froyo-v2
drivers/staging/otus/apdbg.c
699
12729
/* * Copyright (c) 2007-2008 Atheros Communications Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ /* */ /* Module Name : apdbg.c */ /* */ /* Abstract */ /* Debug tools */ /* */ /* NOTES */ /* None */ /* */ /************************************************************************/ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <string.h> #include <errno.h> #include <ctype.h> #include <sys/types.h> #include <sys/socket.h> #include <sys/ioctl.h> #include <net/if.h> #include <netinet/in.h> #include <linux/sockios.h> #define ZM_IOCTL_REG_READ 0x01 #define ZM_IOCTL_REG_WRITE 0x02 #define ZM_IOCTL_MEM_DUMP 0x03 #define ZM_IOCTL_REG_DUMP 0x05 #define ZM_IOCTL_TXD_DUMP 0x06 #define ZM_IOCTL_RXD_DUMP 0x07 #define ZM_IOCTL_MEM_READ 0x0B #define ZM_IOCTL_MEM_WRITE 0x0C #define ZM_IOCTL_DMA_TEST 0x10 #define ZM_IOCTL_REG_TEST 0x11 #define ZM_IOCTL_TEST 0x80 #define ZM_IOCTL_TALLY 0x81 //CWYang(+) #define ZM_IOCTL_RTS 0xA0 #define ZM_IOCTL_MIX_MODE 0xA1 #define ZM_IOCTL_FRAG 0xA2 #define ZM_IOCTL_SCAN 0xA3 #define ZM_IOCTL_KEY 0xA4 #define ZM_IOCTL_RATE 0xA5 #define ZM_IOCTL_ENCRYPTION_MODE 0xA6 #define ZM_IOCTL_GET_TXCNT 0xA7 #define ZM_IOCTL_GET_DEAGG_CNT 0xA8 #define ZM_IOCTL_DURATION_MODE 0xA9 #define ZM_IOCTL_SET_AES_KEY 0xAA #define ZM_IOCTL_SET_AES_MODE 0xAB #define 
ZM_IOCTL_SIGNAL_STRENGTH 0xAC //CWYang(+) #define ZM_IOCTL_SIGNAL_QUALITY 0xAD //CWYang(+) #define ZM_IOCTL_SET_PIBSS_MODE 0xAE #define ZDAPIOCTL SIOCDEVPRIVATE struct zdap_ioctl { unsigned short cmd; /* Command to run */ unsigned int addr; /* Length of the data buffer */ unsigned int value; /* Pointer to the data buffer */ unsigned char data[0x100]; }; /* Declaration of macro and function for handling WEP Keys */ #if 0 #define SKIP_ELEM { \ while(isxdigit(*p)) \ p++; \ } #define SKIP_DELIMETER { \ if(*p == ':' || *p == ' ') \ p++; \ } #endif char hex(char); unsigned char asctohex(char *str); char *prgname; int set_ioctl(int sock, struct ifreq *req) { if (ioctl(sock, ZDAPIOCTL, req) < 0) { fprintf(stderr, "%s: ioctl(SIOCGIFMAP): %s\n", prgname, strerror(errno)); return -1; } return 0; } int read_reg(int sock, struct ifreq *req) { struct zdap_ioctl *zdreq = 0; if (!set_ioctl(sock, req)) return -1; //zdreq = (struct zdap_ioctl *)req->ifr_data; //printf( "reg = %4x, value = %4x\n", zdreq->addr, zdreq->value); return 0; } int read_mem(int sock, struct ifreq *req) { struct zdap_ioctl *zdreq = 0; int i; if (!set_ioctl(sock, req)) return -1; /*zdreq = (struct zdap_ioctl *)req->ifr_data; printf( "dump mem from %x, length = %x\n", zdreq->addr, zdreq->value); for (i=0; i<zdreq->value; i++) { printf("%02x", zdreq->data[i]); printf(" "); if ((i>0) && ((i+1)%16 == 0)) printf("\n"); }*/ return 0; } int main(int argc, char **argv) { int sock; int addr, value; struct ifreq req; char *action = NULL; struct zdap_ioctl zdreq; prgname = argv[0]; if (argc < 3) { fprintf(stderr,"%s: usage is \"%s <ifname> <operation> [<address>] [<value>]\"\n", prgname, prgname); fprintf(stderr,"valid operation: read, write, mem, reg,\n"); fprintf(stderr," : txd, rxd, rmem, wmem\n"); fprintf(stderr," : dmat, regt, test\n"); fprintf(stderr," scan, Channel Scan\n"); fprintf(stderr," rts <decimal>, Set RTS Threshold\n"); fprintf(stderr," frag <decimal>, Set Fragment Threshold\n"); fprintf(stderr," rate 
<0-28>, 0:AUTO, 1-4:CCK, 5-12:OFDM, 13-28:HT\n"); fprintf(stderr," TBD mix <0 or 1>, Set 1 to enable mixed mode\n"); fprintf(stderr," enc, <0-3>, 0=>OPEN, 1=>WEP64, 2=>WEP128, 3=>WEP256\n"); fprintf(stderr," skey <key>, Set WEP key\n"); fprintf(stderr," txcnt, Get TxQ Cnt\n"); fprintf(stderr," dagcnt, Get Deaggregate Cnt\n"); fprintf(stderr," durmode <mode>, Set Duration Mode 0=>HW, 1=>SW\n"); fprintf(stderr," aeskey <user> <key>\n"); fprintf(stderr," aesmode <mode>\n"); fprintf(stderr," wlanmode <0,1> 0:Station mode, 1:PIBSS mode\n"); fprintf(stderr," tal <0,1>, Get Current Tally Info, 0=>read, 1=>read and reset\n"); exit(1); } strcpy(req.ifr_name, argv[1]); zdreq.addr = 0; zdreq.value = 0; /* a silly raw socket just for ioctl()ling it */ sock = socket(AF_INET, SOCK_RAW, IPPROTO_RAW); if (sock < 0) { fprintf(stderr, "%s: socket(): %s\n", argv[0], strerror(errno)); exit(1); } if (argc >= 4) { sscanf(argv[3], "%x", &addr); } if (argc >= 5) { sscanf(argv[4], "%x", &value); } zdreq.addr = addr; zdreq.value = value; if (!strcmp(argv[2], "read")) { zdreq.cmd = ZM_IOCTL_REG_READ; } else if (!strcmp(argv[2], "mem")) { zdreq.cmd = ZM_IOCTL_MEM_DUMP; } else if (!strcmp(argv[2], "write")) { zdreq.cmd = ZM_IOCTL_REG_WRITE; } else if (!strcmp(argv[2], "reg")) { zdreq.cmd = ZM_IOCTL_REG_DUMP; } else if (!strcmp(argv[2], "txd")) { zdreq.cmd = ZM_IOCTL_TXD_DUMP; } else if (!strcmp(argv[2], "rxd")) { zdreq.cmd = ZM_IOCTL_RXD_DUMP; } else if (!strcmp(argv[2], "rmem")) { zdreq.cmd = ZM_IOCTL_MEM_READ; } else if (!strcmp(argv[2], "wmem")) { zdreq.cmd = ZM_IOCTL_MEM_WRITE; } else if (!strcmp(argv[2], "dmat")) { zdreq.cmd = ZM_IOCTL_DMA_TEST; } else if (!strcmp(argv[2], "regt")) { zdreq.cmd = ZM_IOCTL_REG_TEST; } else if (!strcmp(argv[2], "test")) { zdreq.cmd = ZM_IOCTL_TEST; } else if (!strcmp(argv[2], "tal")) { sscanf(argv[3], "%d", &addr); zdreq.addr = addr; zdreq.cmd = ZM_IOCTL_TALLY; } else if (!strcmp(argv[2], "rts")) { sscanf(argv[3], "%d", &addr); zdreq.addr = addr; zdreq.cmd = 
ZM_IOCTL_RTS; } else if (!strcmp(argv[2], "mix")) { zdreq.cmd = ZM_IOCTL_MIX_MODE; } else if (!strcmp(argv[2], "frag")) { sscanf(argv[3], "%d", &addr); zdreq.addr = addr; zdreq.cmd = ZM_IOCTL_FRAG; } else if (!strcmp(argv[2], "scan")) { zdreq.cmd = ZM_IOCTL_SCAN; } else if (!strcmp(argv[2], "skey")) { zdreq.cmd = ZM_IOCTL_KEY; if (argc >= 4) { unsigned char temp[29]; int i; int keyLen; int encType; keyLen = strlen(argv[3]); if (keyLen == 10) { sscanf(argv[3], "%02x%02x%02x%02x%02x", &temp[0], &temp[1], &temp[2], &temp[3], &temp[4]); } else if (keyLen == 26) { sscanf(argv[3], "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", &temp[0], &temp[1], &temp[2], &temp[3], &temp[4], &temp[5], &temp[6], &temp[7], &temp[8], &temp[9], &temp[10], &temp[11], &temp[12]); } else if (keyLen == 58) { sscanf(argv[3], "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", &temp[0], &temp[1], &temp[2], &temp[3], &temp[4], &temp[5], &temp[6], &temp[7], &temp[8], &temp[9], &temp[10], &temp[11], &temp[12], &temp[13], &temp[14], &temp[15], &temp[16], &temp[17], &temp[18], &temp[19], &temp[20], &temp[21], &temp[22], &temp[23], &temp[24], &temp[25], &temp[26], &temp[27], &temp[28]); } else { fprintf(stderr, "Invalid key length\n"); exit(1); } zdreq.addr = keyLen/2; for(i=0; i<zdreq.addr; i++) { zdreq.data[i] = temp[i]; } } else { printf("Error : Key required!\n"); } } else if (!strcmp(argv[2], "rate")) { sscanf(argv[3], "%d", &addr); if (addr > 28) { fprintf(stderr, "Invalid rate, range:0~28\n"); exit(1); } zdreq.addr = addr; zdreq.cmd = ZM_IOCTL_RATE; } else if (!strcmp(argv[2], "enc")) { sscanf(argv[3], "%d", &addr); if (addr > 3) { fprintf(stderr, "Invalid encryption mode, range:0~3\n"); exit(1); } if (addr == 2) { addr = 5; } else if (addr == 3) { addr = 6; } zdreq.addr = addr; zdreq.cmd = ZM_IOCTL_ENCRYPTION_MODE; } else if (!strcmp(argv[2], "txcnt")) { zdreq.cmd = ZM_IOCTL_GET_TXCNT; } else if (!strcmp(argv[2], 
"dagcnt")) { sscanf(argv[3], "%d", &addr); if (addr != 0 && addr != 1) { fprintf(stderr, "The value should be 0 or 1\n"); exit(0); } zdreq.addr = addr; zdreq.cmd = ZM_IOCTL_GET_DEAGG_CNT; } else if (!strcmp(argv[2], "durmode")) { sscanf(argv[3], "%d", &addr); if (addr != 0 && addr != 1) { fprintf(stderr, "The Duration mode should be 0 or 1\n"); exit(0); } zdreq.addr = addr; zdreq.cmd = ZM_IOCTL_DURATION_MODE; } else if (!strcmp(argv[2], "aeskey")) { unsigned char temp[16]; int i; sscanf(argv[3], "%d", &addr); sscanf(argv[4], "%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x", &temp[0], &temp[1], &temp[2], &temp[3], &temp[4], &temp[5], &temp[6], &temp[7], &temp[8], &temp[9], &temp[10], &temp[11], &temp[12], &temp[13], &temp[14], &temp[15]); for(i = 0; i < 16; i++) { zdreq.data[i] = temp[i]; } zdreq.addr = addr; zdreq.cmd = ZM_IOCTL_SET_AES_KEY; } else if (!strcmp(argv[2], "aesmode")) { sscanf(argv[3], "%d", &addr); zdreq.addr = addr; zdreq.cmd = ZM_IOCTL_SET_AES_MODE; } else if (!strcmp(argv[2], "wlanmode")) { sscanf(argv[3], "%d", &addr); zdreq.addr = addr; zdreq.cmd = ZM_IOCTL_SET_PIBSS_MODE; } else { fprintf(stderr, "error action\n"); exit(1); } req.ifr_data = (char *)&zdreq; set_ioctl(sock, &req); fail: exit(0); } unsigned char asctohex(char *str) { unsigned char value; value = hex(*str) & 0x0f; value = value << 4; str++; value |= hex(*str) & 0x0f; return value; } char hex(char v) { if(isdigit(v)) return v - '0'; else if(isxdigit(v)) return (tolower(v) - 'a' + 10); else return 0; }
gpl-2.0
ytjiang/linux
drivers/gpu/drm/omapdrm/omap_irq.c
955
8395
/* * drivers/gpu/drm/omapdrm/omap_irq.c * * Copyright (C) 2012 Texas Instruments * Author: Rob Clark <rob.clark@linaro.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program. If not, see <http://www.gnu.org/licenses/>. */ #include "omap_drv.h" static DEFINE_SPINLOCK(list_lock); static void omap_irq_error_handler(struct omap_drm_irq *irq, uint32_t irqstatus) { DRM_ERROR("errors: %08x\n", irqstatus); } /* call with list_lock and dispc runtime held */ static void omap_irq_update(struct drm_device *dev) { struct omap_drm_private *priv = dev->dev_private; struct omap_drm_irq *irq; uint32_t irqmask = priv->vblank_mask; BUG_ON(!spin_is_locked(&list_lock)); list_for_each_entry(irq, &priv->irq_list, node) irqmask |= irq->irqmask; DBG("irqmask=%08x", irqmask); dispc_write_irqenable(irqmask); dispc_read_irqenable(); /* flush posted write */ } void __omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq) { struct omap_drm_private *priv = dev->dev_private; unsigned long flags; spin_lock_irqsave(&list_lock, flags); if (!WARN_ON(irq->registered)) { irq->registered = true; list_add(&irq->node, &priv->irq_list); omap_irq_update(dev); } spin_unlock_irqrestore(&list_lock, flags); } void omap_irq_register(struct drm_device *dev, struct omap_drm_irq *irq) { dispc_runtime_get(); __omap_irq_register(dev, irq); dispc_runtime_put(); } void __omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq) { unsigned long flags; spin_lock_irqsave(&list_lock, flags); if (!WARN_ON(!irq->registered)) { 
irq->registered = false; list_del(&irq->node); omap_irq_update(dev); } spin_unlock_irqrestore(&list_lock, flags); } void omap_irq_unregister(struct drm_device *dev, struct omap_drm_irq *irq) { dispc_runtime_get(); __omap_irq_unregister(dev, irq); dispc_runtime_put(); } struct omap_irq_wait { struct omap_drm_irq irq; int count; }; static DECLARE_WAIT_QUEUE_HEAD(wait_event); static void wait_irq(struct omap_drm_irq *irq, uint32_t irqstatus) { struct omap_irq_wait *wait = container_of(irq, struct omap_irq_wait, irq); wait->count--; wake_up_all(&wait_event); } struct omap_irq_wait * omap_irq_wait_init(struct drm_device *dev, uint32_t irqmask, int count) { struct omap_irq_wait *wait = kzalloc(sizeof(*wait), GFP_KERNEL); wait->irq.irq = wait_irq; wait->irq.irqmask = irqmask; wait->count = count; omap_irq_register(dev, &wait->irq); return wait; } int omap_irq_wait(struct drm_device *dev, struct omap_irq_wait *wait, unsigned long timeout) { int ret = wait_event_timeout(wait_event, (wait->count <= 0), timeout); omap_irq_unregister(dev, &wait->irq); kfree(wait); if (ret == 0) return -1; return 0; } /** * enable_vblank - enable vblank interrupt events * @dev: DRM device * @crtc: which irq to enable * * Enable vblank interrupts for @crtc. If the device doesn't have * a hardware vblank counter, this routine should be a no-op, since * interrupts will have to stay on to keep the count accurate. * * RETURNS * Zero on success, appropriate errno if the given @crtc's vblank * interrupt cannot be enabled. 
*/ int omap_irq_enable_vblank(struct drm_device *dev, int crtc_id) { struct omap_drm_private *priv = dev->dev_private; struct drm_crtc *crtc = priv->crtcs[crtc_id]; unsigned long flags; DBG("dev=%p, crtc=%d", dev, crtc_id); dispc_runtime_get(); spin_lock_irqsave(&list_lock, flags); priv->vblank_mask |= pipe2vbl(crtc); omap_irq_update(dev); spin_unlock_irqrestore(&list_lock, flags); dispc_runtime_put(); return 0; } /** * disable_vblank - disable vblank interrupt events * @dev: DRM device * @crtc: which irq to enable * * Disable vblank interrupts for @crtc. If the device doesn't have * a hardware vblank counter, this routine should be a no-op, since * interrupts will have to stay on to keep the count accurate. */ void omap_irq_disable_vblank(struct drm_device *dev, int crtc_id) { struct omap_drm_private *priv = dev->dev_private; struct drm_crtc *crtc = priv->crtcs[crtc_id]; unsigned long flags; DBG("dev=%p, crtc=%d", dev, crtc_id); dispc_runtime_get(); spin_lock_irqsave(&list_lock, flags); priv->vblank_mask &= ~pipe2vbl(crtc); omap_irq_update(dev); spin_unlock_irqrestore(&list_lock, flags); dispc_runtime_put(); } irqreturn_t omap_irq_handler(int irq, void *arg) { struct drm_device *dev = (struct drm_device *) arg; struct omap_drm_private *priv = dev->dev_private; struct omap_drm_irq *handler, *n; unsigned long flags; unsigned int id; u32 irqstatus; irqstatus = dispc_read_irqstatus(); dispc_clear_irqstatus(irqstatus); dispc_read_irqstatus(); /* flush posted write */ VERB("irqs: %08x", irqstatus); for (id = 0; id < priv->num_crtcs; id++) { struct drm_crtc *crtc = priv->crtcs[id]; if (irqstatus & pipe2vbl(crtc)) drm_handle_vblank(dev, id); } spin_lock_irqsave(&list_lock, flags); list_for_each_entry_safe(handler, n, &priv->irq_list, node) { if (handler->irqmask & irqstatus) { spin_unlock_irqrestore(&list_lock, flags); handler->irq(handler, handler->irqmask & irqstatus); spin_lock_irqsave(&list_lock, flags); } } spin_unlock_irqrestore(&list_lock, flags); return 
IRQ_HANDLED; } void omap_irq_preinstall(struct drm_device *dev) { DBG("dev=%p", dev); dispc_runtime_get(); dispc_clear_irqstatus(0xffffffff); dispc_runtime_put(); } int omap_irq_postinstall(struct drm_device *dev) { struct omap_drm_private *priv = dev->dev_private; struct omap_drm_irq *error_handler = &priv->error_handler; DBG("dev=%p", dev); INIT_LIST_HEAD(&priv->irq_list); error_handler->irq = omap_irq_error_handler; error_handler->irqmask = DISPC_IRQ_OCP_ERR; /* for now ignore DISPC_IRQ_SYNC_LOST_DIGIT.. really I think * we just need to ignore it while enabling tv-out */ error_handler->irqmask &= ~DISPC_IRQ_SYNC_LOST_DIGIT; omap_irq_register(dev, error_handler); return 0; } void omap_irq_uninstall(struct drm_device *dev) { DBG("dev=%p", dev); // TODO prolly need to call drm_irq_uninstall() somewhere too } /* * We need a special version, instead of just using drm_irq_install(), * because we need to register the irq via omapdss. Once omapdss and * omapdrm are merged together we can assign the dispc hwmod data to * ourselves and drop these and just use drm_irq_{install,uninstall}() */ int omap_drm_irq_install(struct drm_device *dev) { int ret; mutex_lock(&dev->struct_mutex); if (dev->irq_enabled) { mutex_unlock(&dev->struct_mutex); return -EBUSY; } dev->irq_enabled = true; mutex_unlock(&dev->struct_mutex); /* Before installing handler */ if (dev->driver->irq_preinstall) dev->driver->irq_preinstall(dev); ret = dispc_request_irq(dev->driver->irq_handler, dev); if (ret < 0) { mutex_lock(&dev->struct_mutex); dev->irq_enabled = false; mutex_unlock(&dev->struct_mutex); return ret; } /* After installing handler */ if (dev->driver->irq_postinstall) ret = dev->driver->irq_postinstall(dev); if (ret < 0) { mutex_lock(&dev->struct_mutex); dev->irq_enabled = false; mutex_unlock(&dev->struct_mutex); dispc_free_irq(dev); } return ret; } int omap_drm_irq_uninstall(struct drm_device *dev) { unsigned long irqflags; bool irq_enabled; int i; mutex_lock(&dev->struct_mutex); irq_enabled 
= dev->irq_enabled; dev->irq_enabled = false; mutex_unlock(&dev->struct_mutex); /* * Wake up any waiters so they don't hang. */ if (dev->num_crtcs) { spin_lock_irqsave(&dev->vbl_lock, irqflags); for (i = 0; i < dev->num_crtcs; i++) { wake_up(&dev->vblank[i].queue); dev->vblank[i].enabled = false; dev->vblank[i].last = dev->driver->get_vblank_counter(dev, i); } spin_unlock_irqrestore(&dev->vbl_lock, irqflags); } if (!irq_enabled) return -EINVAL; if (dev->driver->irq_uninstall) dev->driver->irq_uninstall(dev); dispc_free_irq(dev); return 0; }
gpl-2.0
TeamBliss-Devices/android_kernel_motorola_msm8226
net/ipv6/netfilter/nf_conntrack_reasm.c
1467
16211
/* * IPv6 fragment reassembly for connection tracking * * Copyright (C)2004 USAGI/WIDE Project * * Author: * Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp> * * Based on: net/ipv6/reassembly.c * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #define pr_fmt(fmt) "IPv6-nf: " fmt #include <linux/errno.h> #include <linux/types.h> #include <linux/string.h> #include <linux/socket.h> #include <linux/sockios.h> #include <linux/jiffies.h> #include <linux/net.h> #include <linux/list.h> #include <linux/netdevice.h> #include <linux/in6.h> #include <linux/ipv6.h> #include <linux/icmpv6.h> #include <linux/random.h> #include <linux/slab.h> #include <net/sock.h> #include <net/snmp.h> #include <net/inet_frag.h> #include <net/ipv6.h> #include <net/protocol.h> #include <net/transp_v6.h> #include <net/rawv6.h> #include <net/ndisc.h> #include <net/addrconf.h> #include <net/netfilter/ipv6/nf_conntrack_ipv6.h> #include <linux/sysctl.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv6.h> #include <linux/kernel.h> #include <linux/module.h> #include <net/netfilter/ipv6/nf_defrag_ipv6.h> struct nf_ct_frag6_skb_cb { struct inet6_skb_parm h; int offset; struct sk_buff *orig; }; #define NFCT_FRAG6_CB(skb) ((struct nf_ct_frag6_skb_cb*)((skb)->cb)) struct nf_ct_frag6_queue { struct inet_frag_queue q; __be32 id; /* fragment id */ u32 user; struct in6_addr saddr; struct in6_addr daddr; unsigned int csum; __u16 nhoffset; }; static struct inet_frags nf_frags; static struct netns_frags nf_init_frags; #ifdef CONFIG_SYSCTL static struct ctl_table nf_ct_frag6_sysctl_table[] = { { .procname = "nf_conntrack_frag6_timeout", .data = &nf_init_frags.timeout, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_frag6_low_thresh", 
.data = &nf_init_frags.low_thresh, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "nf_conntrack_frag6_high_thresh", .data = &nf_init_frags.high_thresh, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { } }; static struct ctl_table_header *nf_ct_frag6_sysctl_header; #endif static unsigned int nf_hashfn(struct inet_frag_queue *q) { const struct nf_ct_frag6_queue *nq; nq = container_of(q, struct nf_ct_frag6_queue, q); return inet6_hash_frag(nq->id, &nq->saddr, &nq->daddr, nf_frags.rnd); } static void nf_skb_free(struct sk_buff *skb) { if (NFCT_FRAG6_CB(skb)->orig) kfree_skb(NFCT_FRAG6_CB(skb)->orig); } /* Destruction primitives. */ static __inline__ void fq_put(struct nf_ct_frag6_queue *fq) { inet_frag_put(&fq->q, &nf_frags); } /* Kill fq entry. It is not destroyed immediately, * because caller (and someone more) holds reference count. */ static __inline__ void fq_kill(struct nf_ct_frag6_queue *fq) { inet_frag_kill(&fq->q, &nf_frags); } static void nf_ct_frag6_evictor(void) { local_bh_disable(); inet_frag_evictor(&nf_init_frags, &nf_frags); local_bh_enable(); } static void nf_ct_frag6_expire(unsigned long data) { struct nf_ct_frag6_queue *fq; fq = container_of((struct inet_frag_queue *)data, struct nf_ct_frag6_queue, q); spin_lock(&fq->q.lock); if (fq->q.last_in & INET_FRAG_COMPLETE) goto out; fq_kill(fq); out: spin_unlock(&fq->q.lock); fq_put(fq); } /* Creation primitives. 
*/ static __inline__ struct nf_ct_frag6_queue * fq_find(__be32 id, u32 user, struct in6_addr *src, struct in6_addr *dst) { struct inet_frag_queue *q; struct ip6_create_arg arg; unsigned int hash; arg.id = id; arg.user = user; arg.src = src; arg.dst = dst; read_lock_bh(&nf_frags.lock); hash = inet6_hash_frag(id, src, dst, nf_frags.rnd); q = inet_frag_find(&nf_init_frags, &nf_frags, &arg, hash); local_bh_enable(); if (IS_ERR_OR_NULL(q)) { inet_frag_maybe_warn_overflow(q, pr_fmt()); return NULL; } return container_of(q, struct nf_ct_frag6_queue, q); } static int nf_ct_frag6_queue(struct nf_ct_frag6_queue *fq, struct sk_buff *skb, const struct frag_hdr *fhdr, int nhoff) { struct sk_buff *prev, *next; int offset, end; if (fq->q.last_in & INET_FRAG_COMPLETE) { pr_debug("Already completed\n"); goto err; } offset = ntohs(fhdr->frag_off) & ~0x7; end = offset + (ntohs(ipv6_hdr(skb)->payload_len) - ((u8 *)(fhdr + 1) - (u8 *)(ipv6_hdr(skb) + 1))); if ((unsigned int)end > IPV6_MAXPLEN) { pr_debug("offset is too large.\n"); return -1; } if (skb->ip_summed == CHECKSUM_COMPLETE) { const unsigned char *nh = skb_network_header(skb); skb->csum = csum_sub(skb->csum, csum_partial(nh, (u8 *)(fhdr + 1) - nh, 0)); } /* Is this the final fragment? */ if (!(fhdr->frag_off & htons(IP6_MF))) { /* If we already have some bits beyond end * or have different end, the segment is corrupted. */ if (end < fq->q.len || ((fq->q.last_in & INET_FRAG_LAST_IN) && end != fq->q.len)) { pr_debug("already received last fragment\n"); goto err; } fq->q.last_in |= INET_FRAG_LAST_IN; fq->q.len = end; } else { /* Check if the fragment is rounded to 8 bytes. * Required by the RFC. */ if (end & 0x7) { /* RFC2460 says always send parameter problem in * this case. -DaveM */ pr_debug("end of fragment not rounded to 8 bytes.\n"); return -1; } if (end > fq->q.len) { /* Some bits beyond end -> corruption. 
*/ if (fq->q.last_in & INET_FRAG_LAST_IN) { pr_debug("last packet already reached.\n"); goto err; } fq->q.len = end; } } if (end == offset) goto err; /* Point into the IP datagram 'data' part. */ if (!pskb_pull(skb, (u8 *) (fhdr + 1) - skb->data)) { pr_debug("queue: message is too short.\n"); goto err; } if (pskb_trim_rcsum(skb, end - offset)) { pr_debug("Can't trim\n"); goto err; } /* Find out which fragments are in front and at the back of us * in the chain of fragments so far. We must know where to put * this fragment, right? */ prev = fq->q.fragments_tail; if (!prev || NFCT_FRAG6_CB(prev)->offset < offset) { next = NULL; goto found; } prev = NULL; for (next = fq->q.fragments; next != NULL; next = next->next) { if (NFCT_FRAG6_CB(next)->offset >= offset) break; /* bingo! */ prev = next; } found: /* RFC5722, Section 4: * When reassembling an IPv6 datagram, if * one or more its constituent fragments is determined to be an * overlapping fragment, the entire datagram (and any constituent * fragments, including those not yet received) MUST be silently * discarded. */ /* Check for overlap with preceding fragment. */ if (prev && (NFCT_FRAG6_CB(prev)->offset + prev->len) > offset) goto discard_fq; /* Look for overlap with succeeding segment. */ if (next && NFCT_FRAG6_CB(next)->offset < end) goto discard_fq; NFCT_FRAG6_CB(skb)->offset = offset; /* Insert this fragment in the chain of fragments. */ skb->next = next; if (!next) fq->q.fragments_tail = skb; if (prev) prev->next = skb; else fq->q.fragments = skb; skb->dev = NULL; fq->q.stamp = skb->tstamp; fq->q.meat += skb->len; atomic_add(skb->truesize, &nf_init_frags.mem); /* The first fragment. * nhoffset is obtained from the first fragment, of course. 
*/ if (offset == 0) { fq->nhoffset = nhoff; fq->q.last_in |= INET_FRAG_FIRST_IN; } write_lock(&nf_frags.lock); list_move_tail(&fq->q.lru_list, &nf_init_frags.lru_list); write_unlock(&nf_frags.lock); return 0; discard_fq: fq_kill(fq); err: return -1; } /* * Check if this packet is complete. * Returns NULL on failure by any reason, and pointer * to current nexthdr field in reassembled frame. * * It is called with locked fq, and caller must check that * queue is eligible for reassembly i.e. it is not COMPLETE, * the last and the first frames arrived and all the bits are here. */ static struct sk_buff * nf_ct_frag6_reasm(struct nf_ct_frag6_queue *fq, struct net_device *dev) { struct sk_buff *fp, *op, *head = fq->q.fragments; int payload_len; fq_kill(fq); WARN_ON(head == NULL); WARN_ON(NFCT_FRAG6_CB(head)->offset != 0); /* Unfragmented part is taken from the first segment. */ payload_len = ((head->data - skb_network_header(head)) - sizeof(struct ipv6hdr) + fq->q.len - sizeof(struct frag_hdr)); if (payload_len > IPV6_MAXPLEN) { pr_debug("payload len is too large.\n"); goto out_oversize; } /* Head of list must not be cloned. */ if (skb_cloned(head) && pskb_expand_head(head, 0, 0, GFP_ATOMIC)) { pr_debug("skb is cloned but can't expand head"); goto out_oom; } /* If the first fragment is fragmented itself, we split * it to two chunks: the first with data and paged part * and the second, holding only fragments. 
*/ if (skb_has_frag_list(head)) { struct sk_buff *clone; int i, plen = 0; clone = alloc_skb(0, GFP_ATOMIC); if (clone == NULL) goto out_oom; clone->next = head->next; head->next = clone; skb_shinfo(clone)->frag_list = skb_shinfo(head)->frag_list; skb_frag_list_init(head); for (i = 0; i < skb_shinfo(head)->nr_frags; i++) plen += skb_frag_size(&skb_shinfo(head)->frags[i]); clone->len = clone->data_len = head->data_len - plen; head->data_len -= clone->len; head->len -= clone->len; clone->csum = 0; clone->ip_summed = head->ip_summed; NFCT_FRAG6_CB(clone)->orig = NULL; atomic_add(clone->truesize, &nf_init_frags.mem); } /* We have to remove fragment header from datagram and to relocate * header in order to calculate ICV correctly. */ skb_network_header(head)[fq->nhoffset] = skb_transport_header(head)[0]; memmove(head->head + sizeof(struct frag_hdr), head->head, (head->data - head->head) - sizeof(struct frag_hdr)); head->mac_header += sizeof(struct frag_hdr); head->network_header += sizeof(struct frag_hdr); skb_shinfo(head)->frag_list = head->next; skb_reset_transport_header(head); skb_push(head, head->data - skb_network_header(head)); for (fp=head->next; fp; fp = fp->next) { head->data_len += fp->len; head->len += fp->len; if (head->ip_summed != fp->ip_summed) head->ip_summed = CHECKSUM_NONE; else if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_add(head->csum, fp->csum); head->truesize += fp->truesize; } atomic_sub(head->truesize, &nf_init_frags.mem); head->next = NULL; head->dev = dev; head->tstamp = fq->q.stamp; ipv6_hdr(head)->payload_len = htons(payload_len); /* Yes, and fold redundant checksum back. 
8) */ if (head->ip_summed == CHECKSUM_COMPLETE) head->csum = csum_partial(skb_network_header(head), skb_network_header_len(head), head->csum); fq->q.fragments = NULL; fq->q.fragments_tail = NULL; /* all original skbs are linked into the NFCT_FRAG6_CB(head).orig */ fp = skb_shinfo(head)->frag_list; if (fp && NFCT_FRAG6_CB(fp)->orig == NULL) /* at above code, head skb is divided into two skbs. */ fp = fp->next; op = NFCT_FRAG6_CB(head)->orig; for (; fp; fp = fp->next) { struct sk_buff *orig = NFCT_FRAG6_CB(fp)->orig; op->next = orig; op = orig; NFCT_FRAG6_CB(fp)->orig = NULL; } return head; out_oversize: if (net_ratelimit()) printk(KERN_DEBUG "nf_ct_frag6_reasm: payload len = %d\n", payload_len); goto out_fail; out_oom: if (net_ratelimit()) printk(KERN_DEBUG "nf_ct_frag6_reasm: no memory for reassembly\n"); out_fail: return NULL; } /* * find the header just before Fragment Header. * * if success return 0 and set ... * (*prevhdrp): the value of "Next Header Field" in the header * just before Fragment Header. * (*prevhoff): the offset of "Next Header Field" in the header * just before Fragment Header. * (*fhoff) : the offset of Fragment Header. 
* * Based on ipv6_skip_hdr() in net/ipv6/exthdr.c * */ static int find_prev_fhdr(struct sk_buff *skb, u8 *prevhdrp, int *prevhoff, int *fhoff) { u8 nexthdr = ipv6_hdr(skb)->nexthdr; const int netoff = skb_network_offset(skb); u8 prev_nhoff = netoff + offsetof(struct ipv6hdr, nexthdr); int start = netoff + sizeof(struct ipv6hdr); int len = skb->len - start; u8 prevhdr = NEXTHDR_IPV6; while (nexthdr != NEXTHDR_FRAGMENT) { struct ipv6_opt_hdr hdr; int hdrlen; if (!ipv6_ext_hdr(nexthdr)) { return -1; } if (nexthdr == NEXTHDR_NONE) { pr_debug("next header is none\n"); return -1; } if (len < (int)sizeof(struct ipv6_opt_hdr)) { pr_debug("too short\n"); return -1; } if (skb_copy_bits(skb, start, &hdr, sizeof(hdr))) BUG(); if (nexthdr == NEXTHDR_AUTH) hdrlen = (hdr.hdrlen+2)<<2; else hdrlen = ipv6_optlen(&hdr); prevhdr = nexthdr; prev_nhoff = start; nexthdr = hdr.nexthdr; len -= hdrlen; start += hdrlen; } if (len < 0) return -1; *prevhdrp = prevhdr; *prevhoff = prev_nhoff; *fhoff = start; return 0; } struct sk_buff *nf_ct_frag6_gather(struct sk_buff *skb, u32 user) { struct sk_buff *clone; struct net_device *dev = skb->dev; struct frag_hdr *fhdr; struct nf_ct_frag6_queue *fq; struct ipv6hdr *hdr; int fhoff, nhoff; u8 prevhdr; struct sk_buff *ret_skb = NULL; /* Jumbo payload inhibits frag. 
header */ if (ipv6_hdr(skb)->payload_len == 0) { pr_debug("payload len = 0\n"); return skb; } if (find_prev_fhdr(skb, &prevhdr, &nhoff, &fhoff) < 0) return skb; clone = skb_clone(skb, GFP_ATOMIC); if (clone == NULL) { pr_debug("Can't clone skb\n"); return skb; } NFCT_FRAG6_CB(clone)->orig = skb; if (!pskb_may_pull(clone, fhoff + sizeof(*fhdr))) { pr_debug("message is too short.\n"); goto ret_orig; } skb_set_transport_header(clone, fhoff); hdr = ipv6_hdr(clone); fhdr = (struct frag_hdr *)skb_transport_header(clone); if (atomic_read(&nf_init_frags.mem) > nf_init_frags.high_thresh) nf_ct_frag6_evictor(); fq = fq_find(fhdr->identification, user, &hdr->saddr, &hdr->daddr); if (fq == NULL) { pr_debug("Can't find and can't create new queue\n"); goto ret_orig; } spin_lock_bh(&fq->q.lock); if (nf_ct_frag6_queue(fq, clone, fhdr, nhoff) < 0) { spin_unlock_bh(&fq->q.lock); pr_debug("Can't insert skb to queue\n"); fq_put(fq); goto ret_orig; } if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && fq->q.meat == fq->q.len) { ret_skb = nf_ct_frag6_reasm(fq, dev); if (ret_skb == NULL) pr_debug("Can't reassemble fragmented packets\n"); } spin_unlock_bh(&fq->q.lock); fq_put(fq); return ret_skb; ret_orig: kfree_skb(clone); return skb; } void nf_ct_frag6_output(unsigned int hooknum, struct sk_buff *skb, struct net_device *in, struct net_device *out, int (*okfn)(struct sk_buff *)) { struct sk_buff *s, *s2; for (s = NFCT_FRAG6_CB(skb)->orig; s;) { nf_conntrack_put_reasm(s->nfct_reasm); nf_conntrack_get_reasm(skb); s->nfct_reasm = skb; s2 = s->next; s->next = NULL; NF_HOOK_THRESH(NFPROTO_IPV6, hooknum, s, in, out, okfn, NF_IP6_PRI_CONNTRACK_DEFRAG + 1); s = s2; } nf_conntrack_put_reasm(skb); } int nf_ct_frag6_init(void) { nf_frags.hashfn = nf_hashfn; nf_frags.constructor = ip6_frag_init; nf_frags.destructor = NULL; nf_frags.skb_free = nf_skb_free; nf_frags.qsize = sizeof(struct nf_ct_frag6_queue); nf_frags.match = ip6_frag_match; nf_frags.frag_expire = nf_ct_frag6_expire; 
nf_frags.secret_interval = 10 * 60 * HZ; nf_init_frags.timeout = IPV6_FRAG_TIMEOUT; nf_init_frags.high_thresh = IPV6_FRAG_HIGH_THRESH; nf_init_frags.low_thresh = IPV6_FRAG_LOW_THRESH; inet_frags_init_net(&nf_init_frags); inet_frags_init(&nf_frags); #ifdef CONFIG_SYSCTL nf_ct_frag6_sysctl_header = register_sysctl_paths(nf_net_netfilter_sysctl_path, nf_ct_frag6_sysctl_table); if (!nf_ct_frag6_sysctl_header) { inet_frags_fini(&nf_frags); return -ENOMEM; } #endif return 0; } void nf_ct_frag6_cleanup(void) { #ifdef CONFIG_SYSCTL unregister_sysctl_table(nf_ct_frag6_sysctl_header); nf_ct_frag6_sysctl_header = NULL; #endif inet_frags_fini(&nf_frags); nf_init_frags.low_thresh = 0; nf_ct_frag6_evictor(); }
gpl-2.0
TheTypoMaster/android_kernel_samsung_hlte
arch/arm/mach-msm/devices-iommu.c
1979
23376
/* Copyright (c) 2010-2013, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/kernel.h> #include <linux/platform_device.h> #include <linux/bootmem.h> #include <linux/module.h> #include <mach/irqs.h> #include <mach/iommu.h> #include <mach/socinfo.h> static struct resource msm_iommu_jpegd_resources[] = { { .start = 0x07300000, .end = 0x07300000 + SZ_1M - 1, .name = "physbase", .flags = IORESOURCE_MEM, }, { .name = "nonsecure_irq", .start = 98, .end = 98, .flags = IORESOURCE_IRQ, }, { .name = "secure_irq", .start = 97, .end = 97, .flags = IORESOURCE_IRQ, }, }; static struct resource msm_iommu_vpe_resources[] = { { .start = 0x07400000, .end = 0x07400000 + SZ_1M - 1, .name = "physbase", .flags = IORESOURCE_MEM, }, { .name = "nonsecure_irq", .start = 84, .end = 84, .flags = IORESOURCE_IRQ, }, { .name = "secure_irq", .start = 83, .end = 83, .flags = IORESOURCE_IRQ, }, }; static struct resource msm_iommu_mdp0_resources[] = { { .start = 0x07500000, .end = 0x07500000 + SZ_1M - 1, .name = "physbase", .flags = IORESOURCE_MEM, }, { .name = "nonsecure_irq", .start = 96, .end = 96, .flags = IORESOURCE_IRQ, }, { .name = "secure_irq", .start = 95, .end = 95, .flags = IORESOURCE_IRQ, }, }; static struct resource msm_iommu_mdp1_resources[] = { { .start = 0x07600000, .end = 0x07600000 + SZ_1M - 1, .name = "physbase", .flags = IORESOURCE_MEM, }, { .name = "nonsecure_irq", .start = 94, .end = 94, .flags = IORESOURCE_IRQ, }, { .name = "secure_irq", .start = 93, .end = 93, .flags = IORESOURCE_IRQ, }, }; static struct resource 
msm_iommu_rot_resources[] = { { .start = 0x07700000, .end = 0x07700000 + SZ_1M - 1, .name = "physbase", .flags = IORESOURCE_MEM, }, { .name = "nonsecure_irq", .start = 92, .end = 92, .flags = IORESOURCE_IRQ, }, { .name = "secure_irq", .start = 91, .end = 91, .flags = IORESOURCE_IRQ, }, }; static struct resource msm_iommu_ijpeg_resources[] = { { .start = 0x07800000, .end = 0x07800000 + SZ_1M - 1, .name = "physbase", .flags = IORESOURCE_MEM, }, { .name = "nonsecure_irq", .start = 100, .end = 100, .flags = IORESOURCE_IRQ, }, { .name = "secure_irq", .start = 99, .end = 99, .flags = IORESOURCE_IRQ, }, }; static struct resource msm_iommu_vfe_resources[] = { { .start = 0x07900000, .end = 0x07900000 + SZ_1M - 1, .name = "physbase", .flags = IORESOURCE_MEM, }, { .name = "nonsecure_irq", .start = 86, .end = 86, .flags = IORESOURCE_IRQ, }, { .name = "secure_irq", .start = 85, .end = 85, .flags = IORESOURCE_IRQ, }, }; static struct resource msm_iommu_vcodec_a_resources[] = { { .start = 0x07A00000, .end = 0x07A00000 + SZ_1M - 1, .name = "physbase", .flags = IORESOURCE_MEM, }, { .name = "nonsecure_irq", .start = 90, .end = 90, .flags = IORESOURCE_IRQ, }, { .name = "secure_irq", .start = 89, .end = 89, .flags = IORESOURCE_IRQ, }, }; static struct resource msm_iommu_vcodec_b_resources[] = { { .start = 0x07B00000, .end = 0x07B00000 + SZ_1M - 1, .name = "physbase", .flags = IORESOURCE_MEM, }, { .name = "nonsecure_irq", .start = 88, .end = 88, .flags = IORESOURCE_IRQ, }, { .name = "secure_irq", .start = 87, .end = 87, .flags = IORESOURCE_IRQ, }, }; static struct resource msm_iommu_gfx3d_resources[] = { { .start = 0x07C00000, .end = 0x07C00000 + SZ_1M - 1, .name = "physbase", .flags = IORESOURCE_MEM, }, { .name = "nonsecure_irq", .start = 102, .end = 102, .flags = IORESOURCE_IRQ, }, { .name = "secure_irq", .start = 101, .end = 101, .flags = IORESOURCE_IRQ, }, }; static struct resource msm_iommu_gfx3d1_resources[] = { { .start = 0x07D00000, .end = 0x07D00000 + SZ_1M - 1, .name = 
"physbase", .flags = IORESOURCE_MEM, }, { .name = "nonsecure_irq", .start = 243, .end = 243, .flags = IORESOURCE_IRQ, }, { .name = "secure_irq", .start = 242, .end = 242, .flags = IORESOURCE_IRQ, }, }; static struct resource msm_iommu_gfx2d0_resources[] = { { .start = 0x07D00000, .end = 0x07D00000 + SZ_1M - 1, .name = "physbase", .flags = IORESOURCE_MEM, }, { .name = "nonsecure_irq", .start = 104, .end = 104, .flags = IORESOURCE_IRQ, }, { .name = "secure_irq", .start = 103, .end = 103, .flags = IORESOURCE_IRQ, }, }; static struct resource msm_iommu_gfx2d1_resources[] = { { .start = 0x07E00000, .end = 0x07E00000 + SZ_1M - 1, .name = "physbase", .flags = IORESOURCE_MEM, }, { .name = "nonsecure_irq", .start = 243, .end = 243, .flags = IORESOURCE_IRQ, }, { .name = "secure_irq", .start = 242, .end = 242, .flags = IORESOURCE_IRQ, }, }; static struct resource msm_iommu_vcap_resources[] = { { .start = 0x07200000, .end = 0x07200000 + SZ_1M - 1, .name = "physbase", .flags = IORESOURCE_MEM, }, { .name = "nonsecure_irq", .start = 269, .end = 269, .flags = IORESOURCE_IRQ, }, { .name = "secure_irq", .start = 268, .end = 268, .flags = IORESOURCE_IRQ, }, }; static struct msm_iommu_dev jpegd_iommu = { .name = "jpegd", .ncb = 2, }; static struct msm_iommu_dev vpe_iommu = { .name = "vpe", .ncb = 2, }; static struct msm_iommu_dev mdp0_iommu = { .name = "mdp0", .ncb = 2, }; static struct msm_iommu_dev mdp1_iommu = { .name = "mdp1", .ncb = 2, }; static struct msm_iommu_dev rot_iommu = { .name = "rot", .ncb = 2, }; static struct msm_iommu_dev ijpeg_iommu = { .name = "ijpeg", .ncb = 2, }; static struct msm_iommu_dev vfe_iommu = { .name = "vfe", .ncb = 2, }; static struct msm_iommu_dev vcodec_a_iommu = { .name = "vcodec_a", .ncb = 2, }; static struct msm_iommu_dev vcodec_b_iommu = { .name = "vcodec_b", .ncb = 2, }; static struct msm_iommu_dev gfx3d_iommu = { .name = "gfx3d", .ncb = 3, .ttbr_split = 0, }; static struct msm_iommu_dev gfx3d1_iommu = { .name = "gfx3d1", .ncb = 3, .ttbr_split = 
0, }; static struct msm_iommu_dev gfx2d0_iommu = { .name = "gfx2d0", .ncb = 2, .ttbr_split = 0, }; static struct msm_iommu_dev gfx2d1_iommu = { .name = "gfx2d1", .ncb = 2, .ttbr_split = 0, }; static struct msm_iommu_dev vcap_iommu = { .name = "vcap", .ncb = 2, }; static struct platform_device msm_device_iommu_jpegd = { .name = "msm_iommu-v0", .id = 0, .dev = { .platform_data = &jpegd_iommu, }, .num_resources = ARRAY_SIZE(msm_iommu_jpegd_resources), .resource = msm_iommu_jpegd_resources, }; static struct platform_device msm_device_iommu_vpe = { .name = "msm_iommu-v0", .id = 1, .dev = { .platform_data = &vpe_iommu, }, .num_resources = ARRAY_SIZE(msm_iommu_vpe_resources), .resource = msm_iommu_vpe_resources, }; static struct platform_device msm_device_iommu_mdp0 = { .name = "msm_iommu-v0", .id = 2, .dev = { .platform_data = &mdp0_iommu, }, .num_resources = ARRAY_SIZE(msm_iommu_mdp0_resources), .resource = msm_iommu_mdp0_resources, }; static struct platform_device msm_device_iommu_mdp1 = { .name = "msm_iommu-v0", .id = 3, .dev = { .platform_data = &mdp1_iommu, }, .num_resources = ARRAY_SIZE(msm_iommu_mdp1_resources), .resource = msm_iommu_mdp1_resources, }; static struct platform_device msm_device_iommu_rot = { .name = "msm_iommu-v0", .id = 4, .dev = { .platform_data = &rot_iommu, }, .num_resources = ARRAY_SIZE(msm_iommu_rot_resources), .resource = msm_iommu_rot_resources, }; static struct platform_device msm_device_iommu_ijpeg = { .name = "msm_iommu-v0", .id = 5, .dev = { .platform_data = &ijpeg_iommu, }, .num_resources = ARRAY_SIZE(msm_iommu_ijpeg_resources), .resource = msm_iommu_ijpeg_resources, }; static struct platform_device msm_device_iommu_vfe = { .name = "msm_iommu-v0", .id = 6, .dev = { .platform_data = &vfe_iommu, }, .num_resources = ARRAY_SIZE(msm_iommu_vfe_resources), .resource = msm_iommu_vfe_resources, }; static struct platform_device msm_device_iommu_vcodec_a = { .name = "msm_iommu-v0", .id = 7, .dev = { .platform_data = &vcodec_a_iommu, }, 
.num_resources = ARRAY_SIZE(msm_iommu_vcodec_a_resources), .resource = msm_iommu_vcodec_a_resources, }; static struct platform_device msm_device_iommu_vcodec_b = { .name = "msm_iommu-v0", .id = 8, .dev = { .platform_data = &vcodec_b_iommu, }, .num_resources = ARRAY_SIZE(msm_iommu_vcodec_b_resources), .resource = msm_iommu_vcodec_b_resources, }; static struct platform_device msm_device_iommu_gfx3d = { .name = "msm_iommu-v0", .id = 9, .dev = { .platform_data = &gfx3d_iommu, }, .num_resources = ARRAY_SIZE(msm_iommu_gfx3d_resources), .resource = msm_iommu_gfx3d_resources, }; static struct platform_device msm_device_iommu_gfx3d1 = { .name = "msm_iommu-v0", .id = 10, .dev = { .platform_data = &gfx3d1_iommu, }, .num_resources = ARRAY_SIZE(msm_iommu_gfx3d1_resources), .resource = msm_iommu_gfx3d1_resources, }; static struct platform_device msm_device_iommu_gfx2d0 = { .name = "msm_iommu-v0", .id = 10, .dev = { .platform_data = &gfx2d0_iommu, }, .num_resources = ARRAY_SIZE(msm_iommu_gfx2d0_resources), .resource = msm_iommu_gfx2d0_resources, }; static struct platform_device msm_device_iommu_gfx2d1 = { .name = "msm_iommu-v0", .id = 11, .dev = { .platform_data = &gfx2d1_iommu, }, .num_resources = ARRAY_SIZE(msm_iommu_gfx2d1_resources), .resource = msm_iommu_gfx2d1_resources, }; static struct platform_device msm_device_iommu_vcap = { .name = "msm_iommu-v0", .id = 11, .dev = { .platform_data = &vcap_iommu, }, .num_resources = ARRAY_SIZE(msm_iommu_vcap_resources), .resource = msm_iommu_vcap_resources, }; static struct msm_iommu_ctx_dev jpegd_src_ctx = { .name = "jpegd_src", .num = 0, .mids = {0, -1} }; static struct msm_iommu_ctx_dev jpegd_dst_ctx = { .name = "jpegd_dst", .num = 1, .mids = {1, -1} }; static struct msm_iommu_ctx_dev vpe_src_ctx = { .name = "vpe_src", .num = 0, .mids = {0, -1} }; static struct msm_iommu_ctx_dev vpe_dst_ctx = { .name = "vpe_dst", .num = 1, .mids = {1, -1} }; static struct msm_iommu_ctx_dev mdp_port0_cb0_ctx = { .name = "mdp_port0_cb0", .num = 0, 
.mids = {0, 2, -1} }; static struct msm_iommu_ctx_dev mdp_port0_cb1_ctx = { .name = "mdp_port0_cb1", .num = 1, .mids = {1, 3, 4, 5, 6, 7, 8, 9, 10, -1} }; static struct msm_iommu_ctx_dev mdp_port1_cb0_ctx = { .name = "mdp_port1_cb0", .num = 0, .mids = {0, 2, -1} }; static struct msm_iommu_ctx_dev mdp_port1_cb1_ctx = { .name = "mdp_port1_cb1", .num = 1, .mids = {1, 3, 4, 5, 6, 7, 8, 9, 10, -1} }; static struct msm_iommu_ctx_dev rot_src_ctx = { .name = "rot_src", .num = 0, .mids = {0, -1} }; static struct msm_iommu_ctx_dev rot_dst_ctx = { .name = "rot_dst", .num = 1, .mids = {1, -1} }; static struct msm_iommu_ctx_dev ijpeg_src_ctx = { .name = "ijpeg_src", .num = 0, .mids = {0, -1} }; static struct msm_iommu_ctx_dev ijpeg_dst_ctx = { .name = "ijpeg_dst", .num = 1, .mids = {1, -1} }; static struct msm_iommu_ctx_dev vfe_imgwr_ctx = { .name = "vfe_imgwr", .num = 0, .mids = {2, 3, 4, 5, 6, 7, 8, -1} }; static struct msm_iommu_ctx_dev vfe_misc_ctx = { .name = "vfe_misc", .num = 1, .mids = {0, 1, 9, -1} }; static struct msm_iommu_ctx_dev vcodec_a_stream_ctx = { .name = "vcodec_a_stream", .num = 0, .mids = {2, 5, -1} }; static struct msm_iommu_ctx_dev vcodec_a_mm1_ctx = { .name = "vcodec_a_mm1", .num = 1, .mids = {0, 1, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, -1} }; static struct msm_iommu_ctx_dev vcodec_b_mm2_ctx = { .name = "vcodec_b_mm2", .num = 0, .mids = {0, 1, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, -1} }; static struct msm_iommu_ctx_dev gfx3d_user_ctx = { .name = "gfx3d_user", .num = 0, .mids = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, -1} }; static struct msm_iommu_ctx_dev gfx3d_priv_ctx = { .name = "gfx3d_priv", .num = 1, .mids = {16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, -1} }; static struct msm_iommu_ctx_dev gfx3d1_user_ctx = { .name = "gfx3d1_user", .num = 0, .mids = {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, -1} }; static struct msm_iommu_ctx_dev gfx3d1_priv_ctx = { .name = "gfx3d1_priv", .num = 1, .mids = {16, 17, 18, 19, 
20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, -1} }; static struct msm_iommu_ctx_dev gfx2d0_2d0_ctx = { .name = "gfx2d0_2d0", .num = 0, .mids = {0, 1, 2, 3, 4, 5, 6, 7, -1} }; static struct msm_iommu_ctx_dev gfx2d1_2d1_ctx = { .name = "gfx2d1_2d1", .num = 0, .mids = {0, 1, 2, 3, 4, 5, 6, 7, -1} }; static struct msm_iommu_ctx_dev vcap_vc_ctx = { .name = "vcap_vc", .num = 0, .mids = {0, -1} }; static struct msm_iommu_ctx_dev vcap_vp_ctx = { .name = "vcap_vp", .num = 1, .mids = {1, -1} }; static struct platform_device msm_device_jpegd_src_ctx = { .name = "msm_iommu_ctx", .id = 0, .dev = { .parent = &msm_device_iommu_jpegd.dev, .platform_data = &jpegd_src_ctx, }, }; static struct platform_device msm_device_jpegd_dst_ctx = { .name = "msm_iommu_ctx", .id = 1, .dev = { .parent = &msm_device_iommu_jpegd.dev, .platform_data = &jpegd_dst_ctx, }, }; static struct platform_device msm_device_vpe_src_ctx = { .name = "msm_iommu_ctx", .id = 2, .dev = { .parent = &msm_device_iommu_vpe.dev, .platform_data = &vpe_src_ctx, }, }; static struct platform_device msm_device_vpe_dst_ctx = { .name = "msm_iommu_ctx", .id = 3, .dev = { .parent = &msm_device_iommu_vpe.dev, .platform_data = &vpe_dst_ctx, }, }; static struct platform_device msm_device_mdp_port0_cb0_ctx = { .name = "msm_iommu_ctx", .id = 4, .dev = { .parent = &msm_device_iommu_mdp0.dev, .platform_data = &mdp_port0_cb0_ctx, }, }; static struct platform_device msm_device_mdp_port0_cb1_ctx = { .name = "msm_iommu_ctx", .id = 5, .dev = { .parent = &msm_device_iommu_mdp0.dev, .platform_data = &mdp_port0_cb1_ctx, }, }; static struct platform_device msm_device_mdp_port1_cb0_ctx = { .name = "msm_iommu_ctx", .id = 6, .dev = { .parent = &msm_device_iommu_mdp1.dev, .platform_data = &mdp_port1_cb0_ctx, }, }; static struct platform_device msm_device_mdp_port1_cb1_ctx = { .name = "msm_iommu_ctx", .id = 7, .dev = { .parent = &msm_device_iommu_mdp1.dev, .platform_data = &mdp_port1_cb1_ctx, }, }; static struct platform_device 
msm_device_rot_src_ctx = { .name = "msm_iommu_ctx", .id = 8, .dev = { .parent = &msm_device_iommu_rot.dev, .platform_data = &rot_src_ctx, }, }; static struct platform_device msm_device_rot_dst_ctx = { .name = "msm_iommu_ctx", .id = 9, .dev = { .parent = &msm_device_iommu_rot.dev, .platform_data = &rot_dst_ctx, }, }; static struct platform_device msm_device_ijpeg_src_ctx = { .name = "msm_iommu_ctx", .id = 10, .dev = { .parent = &msm_device_iommu_ijpeg.dev, .platform_data = &ijpeg_src_ctx, }, }; static struct platform_device msm_device_ijpeg_dst_ctx = { .name = "msm_iommu_ctx", .id = 11, .dev = { .parent = &msm_device_iommu_ijpeg.dev, .platform_data = &ijpeg_dst_ctx, }, }; static struct platform_device msm_device_vfe_imgwr_ctx = { .name = "msm_iommu_ctx", .id = 12, .dev = { .parent = &msm_device_iommu_vfe.dev, .platform_data = &vfe_imgwr_ctx, }, }; static struct platform_device msm_device_vfe_misc_ctx = { .name = "msm_iommu_ctx", .id = 13, .dev = { .parent = &msm_device_iommu_vfe.dev, .platform_data = &vfe_misc_ctx, }, }; static struct platform_device msm_device_vcodec_a_stream_ctx = { .name = "msm_iommu_ctx", .id = 14, .dev = { .parent = &msm_device_iommu_vcodec_a.dev, .platform_data = &vcodec_a_stream_ctx, }, }; static struct platform_device msm_device_vcodec_a_mm1_ctx = { .name = "msm_iommu_ctx", .id = 15, .dev = { .parent = &msm_device_iommu_vcodec_a.dev, .platform_data = &vcodec_a_mm1_ctx, }, }; static struct platform_device msm_device_vcodec_b_mm2_ctx = { .name = "msm_iommu_ctx", .id = 16, .dev = { .parent = &msm_device_iommu_vcodec_b.dev, .platform_data = &vcodec_b_mm2_ctx, }, }; static struct platform_device msm_device_gfx3d_user_ctx = { .name = "msm_iommu_ctx", .id = 17, .dev = { .parent = &msm_device_iommu_gfx3d.dev, .platform_data = &gfx3d_user_ctx, }, }; static struct platform_device msm_device_gfx3d_priv_ctx = { .name = "msm_iommu_ctx", .id = 18, .dev = { .parent = &msm_device_iommu_gfx3d.dev, .platform_data = &gfx3d_priv_ctx, }, }; static struct 
platform_device msm_device_gfx3d1_user_ctx = { .name = "msm_iommu_ctx", .id = 19, .dev = { .parent = &msm_device_iommu_gfx3d1.dev, .platform_data = &gfx3d1_user_ctx, }, }; static struct platform_device msm_device_gfx3d1_priv_ctx = { .name = "msm_iommu_ctx", .id = 20, .dev = { .parent = &msm_device_iommu_gfx3d1.dev, .platform_data = &gfx3d1_priv_ctx, }, }; static struct platform_device msm_device_gfx2d0_2d0_ctx = { .name = "msm_iommu_ctx", .id = 19, .dev = { .parent = &msm_device_iommu_gfx2d0.dev, .platform_data = &gfx2d0_2d0_ctx, }, }; static struct platform_device msm_device_gfx2d1_2d1_ctx = { .name = "msm_iommu_ctx", .id = 20, .dev = { .parent = &msm_device_iommu_gfx2d1.dev, .platform_data = &gfx2d1_2d1_ctx, }, }; static struct platform_device msm_device_vcap_vc_ctx = { .name = "msm_iommu_ctx", .id = 21, .dev = { .parent = &msm_device_iommu_vcap.dev, .platform_data = &vcap_vc_ctx, }, }; static struct platform_device msm_device_vcap_vp_ctx = { .name = "msm_iommu_ctx", .id = 22, .dev = { .parent = &msm_device_iommu_vcap.dev, .platform_data = &vcap_vp_ctx, }, }; static struct platform_device *msm_iommu_common_devs[] = { &msm_device_iommu_vpe, &msm_device_iommu_mdp0, &msm_device_iommu_mdp1, &msm_device_iommu_rot, &msm_device_iommu_ijpeg, &msm_device_iommu_vfe, &msm_device_iommu_vcodec_a, &msm_device_iommu_vcodec_b, &msm_device_iommu_gfx3d, }; static struct platform_device *msm_iommu_gfx2d_devs[] = { &msm_device_iommu_gfx2d0, &msm_device_iommu_gfx2d1, }; static struct platform_device *msm_iommu_adreno3xx_gfx_devs[] = { &msm_device_iommu_gfx3d1, }; static struct platform_device *msm_iommu_vcap_devs[] = { &msm_device_iommu_vcap, }; static struct platform_device *msm_iommu_jpegd_devs[] = { &msm_device_iommu_jpegd, }; static struct platform_device *msm_iommu_common_ctx_devs[] = { &msm_device_vpe_src_ctx, &msm_device_vpe_dst_ctx, &msm_device_mdp_port0_cb0_ctx, &msm_device_mdp_port0_cb1_ctx, &msm_device_mdp_port1_cb0_ctx, &msm_device_mdp_port1_cb1_ctx, 
&msm_device_rot_src_ctx, &msm_device_rot_dst_ctx, &msm_device_ijpeg_src_ctx, &msm_device_ijpeg_dst_ctx, &msm_device_vfe_imgwr_ctx, &msm_device_vfe_misc_ctx, &msm_device_vcodec_a_stream_ctx, &msm_device_vcodec_a_mm1_ctx, &msm_device_vcodec_b_mm2_ctx, &msm_device_gfx3d_user_ctx, &msm_device_gfx3d_priv_ctx, }; static struct platform_device *msm_iommu_gfx2d_ctx_devs[] = { &msm_device_gfx2d0_2d0_ctx, &msm_device_gfx2d1_2d1_ctx, }; static struct platform_device *msm_iommu_adreno3xx_ctx_devs[] = { &msm_device_gfx3d1_user_ctx, &msm_device_gfx3d1_priv_ctx, }; static struct platform_device *msm_iommu_vcap_ctx_devs[] = { &msm_device_vcap_vc_ctx, &msm_device_vcap_vp_ctx, }; static struct platform_device *msm_iommu_jpegd_ctx_devs[] = { &msm_device_jpegd_src_ctx, &msm_device_jpegd_dst_ctx, }; static int __init iommu_init(void) { int ret; if (!msm_soc_version_supports_iommu_v0()) { pr_err("IOMMU v0 is not supported on this SoC version.\n"); return -ENODEV; } /* Initialize common devs */ platform_add_devices(msm_iommu_common_devs, ARRAY_SIZE(msm_iommu_common_devs)); /* Initialize soc-specific devs */ if (cpu_is_msm8x60() || cpu_is_msm8960()) { platform_add_devices(msm_iommu_jpegd_devs, ARRAY_SIZE(msm_iommu_jpegd_devs)); platform_add_devices(msm_iommu_gfx2d_devs, ARRAY_SIZE(msm_iommu_gfx2d_devs)); } if (soc_class_is_apq8064() || cpu_is_msm8960ab()) { platform_add_devices(msm_iommu_jpegd_devs, ARRAY_SIZE(msm_iommu_jpegd_devs)); platform_add_devices(msm_iommu_adreno3xx_gfx_devs, ARRAY_SIZE(msm_iommu_adreno3xx_gfx_devs)); } if (soc_class_is_apq8064()) platform_add_devices(msm_iommu_vcap_devs, ARRAY_SIZE(msm_iommu_vcap_devs)); /* Initialize common ctx_devs */ ret = platform_add_devices(msm_iommu_common_ctx_devs, ARRAY_SIZE(msm_iommu_common_ctx_devs)); /* Initialize soc-specific ctx_devs */ if (cpu_is_msm8x60() || cpu_is_msm8960()) { platform_add_devices(msm_iommu_jpegd_ctx_devs, ARRAY_SIZE(msm_iommu_jpegd_ctx_devs)); platform_add_devices(msm_iommu_gfx2d_ctx_devs, 
ARRAY_SIZE(msm_iommu_gfx2d_ctx_devs)); } if (soc_class_is_apq8064() || cpu_is_msm8960ab()) { platform_add_devices(msm_iommu_jpegd_ctx_devs, ARRAY_SIZE(msm_iommu_jpegd_ctx_devs)); platform_add_devices(msm_iommu_adreno3xx_ctx_devs, ARRAY_SIZE(msm_iommu_adreno3xx_ctx_devs)); } if (soc_class_is_apq8064()) platform_add_devices(msm_iommu_vcap_ctx_devs, ARRAY_SIZE(msm_iommu_vcap_ctx_devs)); return 0; } static void __exit iommu_exit(void) { int i; /* Common ctx_devs */ for (i = 0; i < ARRAY_SIZE(msm_iommu_common_ctx_devs); i++) platform_device_unregister(msm_iommu_common_ctx_devs[i]); /* Common devs. */ for (i = 0; i < ARRAY_SIZE(msm_iommu_common_devs); ++i) platform_device_unregister(msm_iommu_common_devs[i]); if (cpu_is_msm8x60() || cpu_is_msm8960()) { for (i = 0; i < ARRAY_SIZE(msm_iommu_gfx2d_ctx_devs); i++) platform_device_unregister(msm_iommu_gfx2d_ctx_devs[i]); for (i = 0; i < ARRAY_SIZE(msm_iommu_jpegd_ctx_devs); i++) platform_device_unregister(msm_iommu_jpegd_ctx_devs[i]); for (i = 0; i < ARRAY_SIZE(msm_iommu_gfx2d_devs); i++) platform_device_unregister(msm_iommu_gfx2d_devs[i]); for (i = 0; i < ARRAY_SIZE(msm_iommu_jpegd_devs); i++) platform_device_unregister(msm_iommu_jpegd_devs[i]); } if (soc_class_is_apq8064()) { for (i = 0; i < ARRAY_SIZE(msm_iommu_vcap_ctx_devs); i++) platform_device_unregister(msm_iommu_vcap_ctx_devs[i]); } if (soc_class_is_apq8064() || cpu_is_msm8960ab()) { for (i = 0; i < ARRAY_SIZE(msm_iommu_adreno3xx_ctx_devs); i++) platform_device_unregister( msm_iommu_adreno3xx_ctx_devs[i]); for (i = 0; i < ARRAY_SIZE(msm_iommu_jpegd_ctx_devs); i++) platform_device_unregister( msm_iommu_jpegd_ctx_devs[i]); if (soc_class_is_apq8064()) { for (i = 0; i < ARRAY_SIZE(msm_iommu_vcap_devs); i++) platform_device_unregister( msm_iommu_vcap_devs[i]); } for (i = 0; i < ARRAY_SIZE(msm_iommu_adreno3xx_gfx_devs); i++) platform_device_unregister( msm_iommu_adreno3xx_gfx_devs[i]); for (i = 0; i < ARRAY_SIZE(msm_iommu_jpegd_devs); i++) 
platform_device_unregister(msm_iommu_jpegd_devs[i]); } } subsys_initcall(iommu_init); module_exit(iommu_exit); MODULE_LICENSE("GPL v2"); MODULE_AUTHOR("Stepan Moskovchenko <stepanm@codeaurora.org>");
gpl-2.0
fortunave3gxx/android_kernel_samsung_fortuna-common
drivers/input/touchscreen/atmel_tsadcc.c
2235
11334
/* * Atmel Touch Screen Driver * * Copyright (c) 2008 ATMEL * Copyright (c) 2008 Dan Liang * Copyright (c) 2008 TimeSys Corporation * Copyright (c) 2008 Justin Waters * * Based on touchscreen code from Atmel Corporation. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/err.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/input.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/clk.h> #include <linux/platform_device.h> #include <linux/io.h> #include <linux/platform_data/atmel.h> #include <mach/cpu.h> /* Register definitions based on AT91SAM9RL64 preliminary draft datasheet */ #define ATMEL_TSADCC_CR 0x00 /* Control register */ #define ATMEL_TSADCC_SWRST (1 << 0) /* Software Reset*/ #define ATMEL_TSADCC_START (1 << 1) /* Start conversion */ #define ATMEL_TSADCC_MR 0x04 /* Mode register */ #define ATMEL_TSADCC_TSAMOD (3 << 0) /* ADC mode */ #define ATMEL_TSADCC_TSAMOD_ADC_ONLY_MODE (0x0) /* ADC Mode */ #define ATMEL_TSADCC_TSAMOD_TS_ONLY_MODE (0x1) /* Touch Screen Only Mode */ #define ATMEL_TSADCC_LOWRES (1 << 4) /* Resolution selection */ #define ATMEL_TSADCC_SLEEP (1 << 5) /* Sleep mode */ #define ATMEL_TSADCC_PENDET (1 << 6) /* Pen Detect selection */ #define ATMEL_TSADCC_PRES (1 << 7) /* Pressure Measurement Selection */ #define ATMEL_TSADCC_PRESCAL (0x3f << 8) /* Prescalar Rate Selection */ #define ATMEL_TSADCC_EPRESCAL (0xff << 8) /* Prescalar Rate Selection (Extended) */ #define ATMEL_TSADCC_STARTUP (0x7f << 16) /* Start Up time */ #define ATMEL_TSADCC_SHTIM (0xf << 24) /* Sample & Hold time */ #define ATMEL_TSADCC_PENDBC (0xf << 28) /* Pen Detect debouncing time */ #define ATMEL_TSADCC_TRGR 0x08 /* Trigger register */ #define ATMEL_TSADCC_TRGMOD (7 << 0) /* Trigger mode */ #define ATMEL_TSADCC_TRGMOD_NONE (0 << 0) #define 
ATMEL_TSADCC_TRGMOD_EXT_RISING (1 << 0) #define ATMEL_TSADCC_TRGMOD_EXT_FALLING (2 << 0) #define ATMEL_TSADCC_TRGMOD_EXT_ANY (3 << 0) #define ATMEL_TSADCC_TRGMOD_PENDET (4 << 0) #define ATMEL_TSADCC_TRGMOD_PERIOD (5 << 0) #define ATMEL_TSADCC_TRGMOD_CONTINUOUS (6 << 0) #define ATMEL_TSADCC_TRGPER (0xffff << 16) /* Trigger period */ #define ATMEL_TSADCC_TSR 0x0C /* Touch Screen register */ #define ATMEL_TSADCC_TSFREQ (0xf << 0) /* TS Frequency in Interleaved mode */ #define ATMEL_TSADCC_TSSHTIM (0xf << 24) /* Sample & Hold time */ #define ATMEL_TSADCC_CHER 0x10 /* Channel Enable register */ #define ATMEL_TSADCC_CHDR 0x14 /* Channel Disable register */ #define ATMEL_TSADCC_CHSR 0x18 /* Channel Status register */ #define ATMEL_TSADCC_CH(n) (1 << (n)) /* Channel number */ #define ATMEL_TSADCC_SR 0x1C /* Status register */ #define ATMEL_TSADCC_EOC(n) (1 << ((n)+0)) /* End of conversion for channel N */ #define ATMEL_TSADCC_OVRE(n) (1 << ((n)+8)) /* Overrun error for channel N */ #define ATMEL_TSADCC_DRDY (1 << 16) /* Data Ready */ #define ATMEL_TSADCC_GOVRE (1 << 17) /* General Overrun Error */ #define ATMEL_TSADCC_ENDRX (1 << 18) /* End of RX Buffer */ #define ATMEL_TSADCC_RXBUFF (1 << 19) /* TX Buffer full */ #define ATMEL_TSADCC_PENCNT (1 << 20) /* Pen contact */ #define ATMEL_TSADCC_NOCNT (1 << 21) /* No contact */ #define ATMEL_TSADCC_LCDR 0x20 /* Last Converted Data register */ #define ATMEL_TSADCC_DATA (0x3ff << 0) /* Channel data */ #define ATMEL_TSADCC_IER 0x24 /* Interrupt Enable register */ #define ATMEL_TSADCC_IDR 0x28 /* Interrupt Disable register */ #define ATMEL_TSADCC_IMR 0x2C /* Interrupt Mask register */ #define ATMEL_TSADCC_CDR0 0x30 /* Channel Data 0 */ #define ATMEL_TSADCC_CDR1 0x34 /* Channel Data 1 */ #define ATMEL_TSADCC_CDR2 0x38 /* Channel Data 2 */ #define ATMEL_TSADCC_CDR3 0x3C /* Channel Data 3 */ #define ATMEL_TSADCC_CDR4 0x40 /* Channel Data 4 */ #define ATMEL_TSADCC_CDR5 0x44 /* Channel Data 5 */ #define ATMEL_TSADCC_XPOS 0x50 #define 
ATMEL_TSADCC_Z1DAT 0x54 #define ATMEL_TSADCC_Z2DAT 0x58 #define PRESCALER_VAL(x) ((x) >> 8) #define ADC_DEFAULT_CLOCK 100000 struct atmel_tsadcc { struct input_dev *input; char phys[32]; struct clk *clk; int irq; unsigned int prev_absx; unsigned int prev_absy; unsigned char bufferedmeasure; }; static void __iomem *tsc_base; #define atmel_tsadcc_read(reg) __raw_readl(tsc_base + (reg)) #define atmel_tsadcc_write(reg, val) __raw_writel((val), tsc_base + (reg)) static irqreturn_t atmel_tsadcc_interrupt(int irq, void *dev) { struct atmel_tsadcc *ts_dev = (struct atmel_tsadcc *)dev; struct input_dev *input_dev = ts_dev->input; unsigned int status; unsigned int reg; status = atmel_tsadcc_read(ATMEL_TSADCC_SR); status &= atmel_tsadcc_read(ATMEL_TSADCC_IMR); if (status & ATMEL_TSADCC_NOCNT) { /* Contact lost */ reg = atmel_tsadcc_read(ATMEL_TSADCC_MR) | ATMEL_TSADCC_PENDBC; atmel_tsadcc_write(ATMEL_TSADCC_MR, reg); atmel_tsadcc_write(ATMEL_TSADCC_TRGR, ATMEL_TSADCC_TRGMOD_NONE); atmel_tsadcc_write(ATMEL_TSADCC_IDR, ATMEL_TSADCC_EOC(3) | ATMEL_TSADCC_NOCNT); atmel_tsadcc_write(ATMEL_TSADCC_IER, ATMEL_TSADCC_PENCNT); input_report_key(input_dev, BTN_TOUCH, 0); ts_dev->bufferedmeasure = 0; input_sync(input_dev); } else if (status & ATMEL_TSADCC_PENCNT) { /* Pen detected */ reg = atmel_tsadcc_read(ATMEL_TSADCC_MR); reg &= ~ATMEL_TSADCC_PENDBC; atmel_tsadcc_write(ATMEL_TSADCC_IDR, ATMEL_TSADCC_PENCNT); atmel_tsadcc_write(ATMEL_TSADCC_MR, reg); atmel_tsadcc_write(ATMEL_TSADCC_IER, ATMEL_TSADCC_EOC(3) | ATMEL_TSADCC_NOCNT); atmel_tsadcc_write(ATMEL_TSADCC_TRGR, ATMEL_TSADCC_TRGMOD_PERIOD | (0x0FFF << 16)); } else if (status & ATMEL_TSADCC_EOC(3)) { /* Conversion finished */ if (ts_dev->bufferedmeasure) { /* Last measurement is always discarded, since it can * be erroneous. 
* Always report previous measurement */ input_report_abs(input_dev, ABS_X, ts_dev->prev_absx); input_report_abs(input_dev, ABS_Y, ts_dev->prev_absy); input_report_key(input_dev, BTN_TOUCH, 1); input_sync(input_dev); } else ts_dev->bufferedmeasure = 1; /* Now make new measurement */ ts_dev->prev_absx = atmel_tsadcc_read(ATMEL_TSADCC_CDR3) << 10; ts_dev->prev_absx /= atmel_tsadcc_read(ATMEL_TSADCC_CDR2); ts_dev->prev_absy = atmel_tsadcc_read(ATMEL_TSADCC_CDR1) << 10; ts_dev->prev_absy /= atmel_tsadcc_read(ATMEL_TSADCC_CDR0); } return IRQ_HANDLED; } /* * The functions for inserting/removing us as a module. */ static int atmel_tsadcc_probe(struct platform_device *pdev) { struct atmel_tsadcc *ts_dev; struct input_dev *input_dev; struct resource *res; struct at91_tsadcc_data *pdata = pdev->dev.platform_data; int err = 0; unsigned int prsc; unsigned int reg; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&pdev->dev, "no mmio resource defined.\n"); return -ENXIO; } /* Allocate memory for device */ ts_dev = kzalloc(sizeof(struct atmel_tsadcc), GFP_KERNEL); if (!ts_dev) { dev_err(&pdev->dev, "failed to allocate memory.\n"); return -ENOMEM; } platform_set_drvdata(pdev, ts_dev); input_dev = input_allocate_device(); if (!input_dev) { dev_err(&pdev->dev, "failed to allocate input device.\n"); err = -EBUSY; goto err_free_mem; } ts_dev->irq = platform_get_irq(pdev, 0); if (ts_dev->irq < 0) { dev_err(&pdev->dev, "no irq ID is designated.\n"); err = -ENODEV; goto err_free_dev; } if (!request_mem_region(res->start, resource_size(res), "atmel tsadcc regs")) { dev_err(&pdev->dev, "resources is unavailable.\n"); err = -EBUSY; goto err_free_dev; } tsc_base = ioremap(res->start, resource_size(res)); if (!tsc_base) { dev_err(&pdev->dev, "failed to map registers.\n"); err = -ENOMEM; goto err_release_mem; } err = request_irq(ts_dev->irq, atmel_tsadcc_interrupt, 0, pdev->dev.driver->name, ts_dev); if (err) { dev_err(&pdev->dev, "failed to allocate irq.\n"); goto 
err_unmap_regs; } ts_dev->clk = clk_get(&pdev->dev, "tsc_clk"); if (IS_ERR(ts_dev->clk)) { dev_err(&pdev->dev, "failed to get ts_clk\n"); err = PTR_ERR(ts_dev->clk); goto err_free_irq; } ts_dev->input = input_dev; ts_dev->bufferedmeasure = 0; snprintf(ts_dev->phys, sizeof(ts_dev->phys), "%s/input0", dev_name(&pdev->dev)); input_dev->name = "atmel touch screen controller"; input_dev->phys = ts_dev->phys; input_dev->dev.parent = &pdev->dev; __set_bit(EV_ABS, input_dev->evbit); input_set_abs_params(input_dev, ABS_X, 0, 0x3FF, 0, 0); input_set_abs_params(input_dev, ABS_Y, 0, 0x3FF, 0, 0); input_set_capability(input_dev, EV_KEY, BTN_TOUCH); /* clk_enable() always returns 0, no need to check it */ clk_enable(ts_dev->clk); prsc = clk_get_rate(ts_dev->clk); dev_info(&pdev->dev, "Master clock is set at: %d Hz\n", prsc); if (!pdata) goto err_fail; if (!pdata->adc_clock) pdata->adc_clock = ADC_DEFAULT_CLOCK; prsc = (prsc / (2 * pdata->adc_clock)) - 1; /* saturate if this value is too high */ if (cpu_is_at91sam9rl()) { if (prsc > PRESCALER_VAL(ATMEL_TSADCC_PRESCAL)) prsc = PRESCALER_VAL(ATMEL_TSADCC_PRESCAL); } else { if (prsc > PRESCALER_VAL(ATMEL_TSADCC_EPRESCAL)) prsc = PRESCALER_VAL(ATMEL_TSADCC_EPRESCAL); } dev_info(&pdev->dev, "Prescaler is set at: %d\n", prsc); reg = ATMEL_TSADCC_TSAMOD_TS_ONLY_MODE | ((0x00 << 5) & ATMEL_TSADCC_SLEEP) | /* Normal Mode */ ((0x01 << 6) & ATMEL_TSADCC_PENDET) | /* Enable Pen Detect */ (prsc << 8) | ((0x26 << 16) & ATMEL_TSADCC_STARTUP) | ((pdata->pendet_debounce << 28) & ATMEL_TSADCC_PENDBC); atmel_tsadcc_write(ATMEL_TSADCC_CR, ATMEL_TSADCC_SWRST); atmel_tsadcc_write(ATMEL_TSADCC_MR, reg); atmel_tsadcc_write(ATMEL_TSADCC_TRGR, ATMEL_TSADCC_TRGMOD_NONE); atmel_tsadcc_write(ATMEL_TSADCC_TSR, (pdata->ts_sample_hold_time << 24) & ATMEL_TSADCC_TSSHTIM); atmel_tsadcc_read(ATMEL_TSADCC_SR); atmel_tsadcc_write(ATMEL_TSADCC_IER, ATMEL_TSADCC_PENCNT); /* All went ok, so register to the input system */ err = input_register_device(input_dev); if 
(err) goto err_fail; return 0; err_fail: clk_disable(ts_dev->clk); clk_put(ts_dev->clk); err_free_irq: free_irq(ts_dev->irq, ts_dev); err_unmap_regs: iounmap(tsc_base); err_release_mem: release_mem_region(res->start, resource_size(res)); err_free_dev: input_free_device(input_dev); err_free_mem: kfree(ts_dev); return err; } static int atmel_tsadcc_remove(struct platform_device *pdev) { struct atmel_tsadcc *ts_dev = dev_get_drvdata(&pdev->dev); struct resource *res; free_irq(ts_dev->irq, ts_dev); input_unregister_device(ts_dev->input); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); iounmap(tsc_base); release_mem_region(res->start, resource_size(res)); clk_disable(ts_dev->clk); clk_put(ts_dev->clk); kfree(ts_dev); return 0; } static struct platform_driver atmel_tsadcc_driver = { .probe = atmel_tsadcc_probe, .remove = atmel_tsadcc_remove, .driver = { .name = "atmel_tsadcc", }, }; module_platform_driver(atmel_tsadcc_driver); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Atmel TouchScreen Driver"); MODULE_AUTHOR("Dan Liang <dan.liang@atmel.com>");
gpl-2.0
mythos234/AndromedaN910F-CM12
drivers/media/usb/usbvision/usbvision-core.c
2747
75405
/* * usbvision-core.c - driver for NT100x USB video capture devices * * * Copyright (c) 1999-2005 Joerg Heckenbach <joerg@heckenbach-aw.de> * Dwaine Garden <dwainegarden@rogers.com> * * This module is part of usbvision driver project. * Updates to driver completed by Dwaine P. Garden * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/gfp.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/io.h> #include <linux/videodev2.h> #include <linux/i2c.h> #include <media/saa7115.h> #include <media/v4l2-common.h> #include <media/tuner.h> #include <linux/workqueue.h> #include "usbvision.h" static unsigned int core_debug; module_param(core_debug, int, 0644); MODULE_PARM_DESC(core_debug, "enable debug messages [core]"); static int adjust_compression = 1; /* Set the compression to be adaptive */ module_param(adjust_compression, int, 0444); MODULE_PARM_DESC(adjust_compression, " Set the ADPCM compression for the device. Default: 1 (On)"); /* To help people with Black and White output with using s-video input. * Some cables and input device are wired differently. 
*/ static int switch_svideo_input; module_param(switch_svideo_input, int, 0444); MODULE_PARM_DESC(switch_svideo_input, " Set the S-Video input. Some cables and input device are wired differently. Default: 0 (Off)"); static unsigned int adjust_x_offset = -1; module_param(adjust_x_offset, int, 0644); MODULE_PARM_DESC(adjust_x_offset, "adjust X offset display [core]"); static unsigned int adjust_y_offset = -1; module_param(adjust_y_offset, int, 0644); MODULE_PARM_DESC(adjust_y_offset, "adjust Y offset display [core]"); #define ENABLE_HEXDUMP 0 /* Enable if you need it */ #ifdef USBVISION_DEBUG #define PDEBUG(level, fmt, args...) { \ if (core_debug & (level)) \ printk(KERN_INFO KBUILD_MODNAME ":[%s:%d] " fmt, \ __func__, __LINE__ , ## args); \ } #else #define PDEBUG(level, fmt, args...) do {} while (0) #endif #define DBG_HEADER (1 << 0) #define DBG_IRQ (1 << 1) #define DBG_ISOC (1 << 2) #define DBG_PARSE (1 << 3) #define DBG_SCRATCH (1 << 4) #define DBG_FUNC (1 << 5) static const int max_imgwidth = MAX_FRAME_WIDTH; static const int max_imgheight = MAX_FRAME_HEIGHT; static const int min_imgwidth = MIN_FRAME_WIDTH; static const int min_imgheight = MIN_FRAME_HEIGHT; /* The value of 'scratch_buf_size' affects quality of the picture * in many ways. Shorter buffers may cause loss of data when client * is too slow. Larger buffers are memory-consuming and take longer * to work with. This setting can be adjusted, but the default value * should be OK for most desktop users. 
*/ #define DEFAULT_SCRATCH_BUF_SIZE (0x20000) /* 128kB memory scratch buffer */ static const int scratch_buf_size = DEFAULT_SCRATCH_BUF_SIZE; /* Function prototypes */ static int usbvision_request_intra(struct usb_usbvision *usbvision); static int usbvision_unrequest_intra(struct usb_usbvision *usbvision); static int usbvision_adjust_compression(struct usb_usbvision *usbvision); static int usbvision_measure_bandwidth(struct usb_usbvision *usbvision); /*******************************/ /* Memory management functions */ /*******************************/ /* * Here we want the physical address of the memory. * This is used when initializing the contents of the area. */ static void *usbvision_rvmalloc(unsigned long size) { void *mem; unsigned long adr; size = PAGE_ALIGN(size); mem = vmalloc_32(size); if (!mem) return NULL; memset(mem, 0, size); /* Clear the ram out, no junk to the user */ adr = (unsigned long) mem; while (size > 0) { SetPageReserved(vmalloc_to_page((void *)adr)); adr += PAGE_SIZE; size -= PAGE_SIZE; } return mem; } static void usbvision_rvfree(void *mem, unsigned long size) { unsigned long adr; if (!mem) return; size = PAGE_ALIGN(size); adr = (unsigned long) mem; while ((long) size > 0) { ClearPageReserved(vmalloc_to_page((void *)adr)); adr += PAGE_SIZE; size -= PAGE_SIZE; } vfree(mem); } #if ENABLE_HEXDUMP static void usbvision_hexdump(const unsigned char *data, int len) { char tmp[80]; int i, k; for (i = k = 0; len > 0; i++, len--) { if (i > 0 && (i % 16 == 0)) { printk("%s\n", tmp); k = 0; } k += sprintf(&tmp[k], "%02x ", data[i]); } if (k > 0) printk(KERN_CONT "%s\n", tmp); } #endif /******************************** * scratch ring buffer handling ********************************/ static int scratch_len(struct usb_usbvision *usbvision) /* This returns the amount of data actually in the buffer */ { int len = usbvision->scratch_write_ptr - usbvision->scratch_read_ptr; if (len < 0) len += scratch_buf_size; PDEBUG(DBG_SCRATCH, "scratch_len() = %d\n", 
len);	/* tail of scratch_len(): reports bytes waiting in the scratch ring buffer */
	return len;
}


/* This returns the free space left in the buffer */
static int scratch_free(struct usb_usbvision *usbvision)
{
	int free = usbvision->scratch_read_ptr - usbvision->scratch_write_ptr;

	if (free <= 0)
		free += scratch_buf_size;
	if (free) {
		free -= 1;	/* at least one byte in the buffer must */
				/* left blank, otherwise there is no chance to differ between full and empty */
	}
	PDEBUG(DBG_SCRATCH, "return %d\n", free);
	return free;
}

/*
 * This puts data into the buffer
 *
 * Copies len bytes into the scratch ring buffer, wrapping at
 * scratch_buf_size.  The caller must first check scratch_free().
 */
static int scratch_put(struct usb_usbvision *usbvision, unsigned char *data, int len)
{
	int len_part;

	if (usbvision->scratch_write_ptr + len < scratch_buf_size) {
		/* fast path: the whole chunk fits without wrapping */
		memcpy(usbvision->scratch + usbvision->scratch_write_ptr, data, len);
		usbvision->scratch_write_ptr += len;
	} else {
		/* split the copy at the end of the ring buffer */
		len_part = scratch_buf_size - usbvision->scratch_write_ptr;
		memcpy(usbvision->scratch + usbvision->scratch_write_ptr, data, len_part);
		if (len == len_part) {
			usbvision->scratch_write_ptr = 0;	/* just set write_ptr to zero */
		} else {
			memcpy(usbvision->scratch, data + len_part, len - len_part);
			usbvision->scratch_write_ptr = len - len_part;
		}
	}
	PDEBUG(DBG_SCRATCH, "len=%d, new write_ptr=%d\n", len, usbvision->scratch_write_ptr);
	return len;
}

/* This marks the write_ptr as position of new frame header */
static void scratch_mark_header(struct usb_usbvision *usbvision)
{
	PDEBUG(DBG_SCRATCH, "header at write_ptr=%d\n",
	       usbvision->scratch_headermarker_write_ptr);

	/* record the current write offset in the circular header-marker list */
	usbvision->scratch_headermarker[usbvision->scratch_headermarker_write_ptr] =
		usbvision->scratch_write_ptr;
	usbvision->scratch_headermarker_write_ptr += 1;
	usbvision->scratch_headermarker_write_ptr %= USBVISION_NUM_HEADERMARKER;
}

/*
 * This gets data from the buffer at the given "ptr" position
 *
 * Peeks len bytes without moving scratch_read_ptr; only *ptr is
 * advanced (with wrap-around).
 */
static int scratch_get_extra(struct usb_usbvision *usbvision,
			     unsigned char *data, int *ptr, int len)
{
	int len_part;

	if (*ptr + len < scratch_buf_size) {
		memcpy(data, usbvision->scratch + *ptr, len);
		*ptr += len;
	} else {
		/* the read wraps around the end of the ring buffer */
		len_part = scratch_buf_size - *ptr;
		memcpy(data,
/* continuation of scratch_get_header(): inside the header-marker scan loop */
		usbvision->scratch_headermarker_read_ptr %= USBVISION_NUM_HEADERMARKER;
		scratch_get(usbvision, (unsigned char *)header, USBVISION_HEADER_LENGTH);
		/* accept only a marker that really points at the magic signature */
		if ((header->magic_1 == USBVISION_MAGIC_1) &&
		    (header->magic_2 == USBVISION_MAGIC_2) &&
		    (header->header_length == USBVISION_HEADER_LENGTH)) {
			err_code = USBVISION_HEADER_LENGTH;
			/* the frame size arrives split into lo/hi byte pairs */
			header->frame_width = header->frame_width_lo + (header->frame_width_hi << 8);
			header->frame_height = header->frame_height_lo + (header->frame_height_hi << 8);
			break;
		}
	}
	return err_code;
}

/* This removes len bytes of old data from the buffer */
static void scratch_rm_old(struct usb_usbvision *usbvision, int len)
{
	usbvision->scratch_read_ptr += len;
	usbvision->scratch_read_ptr %= scratch_buf_size;
	PDEBUG(DBG_SCRATCH, "read_ptr is now %d\n", usbvision->scratch_read_ptr);
}

/* This resets the buffer - kills all data in it too */
static void scratch_reset(struct usb_usbvision *usbvision)
{
	PDEBUG(DBG_SCRATCH, "\n");

	usbvision->scratch_read_ptr = 0;
	usbvision->scratch_write_ptr = 0;
	usbvision->scratch_headermarker_read_ptr = 0;
	usbvision->scratch_headermarker_write_ptr = 0;
	usbvision->isocstate = isoc_state_no_frame;
}

/* Allocate the scratch ring buffer; returns 0 on success or -ENOMEM */
int usbvision_scratch_alloc(struct usb_usbvision *usbvision)
{
	usbvision->scratch = vmalloc_32(scratch_buf_size);
	scratch_reset(usbvision);
	if (usbvision->scratch == NULL) {
		dev_err(&usbvision->dev->dev,
			"%s: unable to allocate %d bytes for scratch\n",
			__func__, scratch_buf_size);
		return -ENOMEM;
	}
	return 0;
}

/* Release the scratch ring buffer */
void usbvision_scratch_free(struct usb_usbvision *usbvision)
{
	vfree(usbvision->scratch);
	usbvision->scratch = NULL;
}

/*
 * usbvision_decompress_alloc()
 *
 * allocates intermediate buffer for decompression
 */
int usbvision_decompress_alloc(struct usb_usbvision *usbvision)
{
	/* YUV 4:2:0 needs 12 bits per pixel for a maximum-size frame */
	int IFB_size = MAX_FRAME_WIDTH * MAX_FRAME_HEIGHT * 3 / 2;

	usbvision->intra_frame_buffer = vmalloc_32(IFB_size);
	if (usbvision->intra_frame_buffer == NULL) {
		dev_err(&usbvision->dev->dev,
			"%s: unable to allocate %d for compr. frame buffer\n",
			__func__, IFB_size);
		return -ENOMEM;
	}
	return 0;
}

/*
 * usbvision_decompress_free()
 *
 * frees intermediate buffer for decompression
 */
void usbvision_decompress_free(struct usb_usbvision *usbvision)
{
	vfree(usbvision->intra_frame_buffer);
	usbvision->intra_frame_buffer = NULL;
}

/************************************************************
 * Here comes the data parsing stuff that is run as interrupt
 ************************************************************/
/*
 * usbvision_find_header()
 *
 * Locate one of supported header markers in the scratch buffer.
 */
static enum parse_state usbvision_find_header(struct usb_usbvision *usbvision)
{
	struct usbvision_frame *frame;
	int found_header = 0;

	frame = usbvision->cur_frame;

	while (scratch_get_header(usbvision, &frame->isoc_header) == USBVISION_HEADER_LENGTH) {
		/* found header in scratch */
		PDEBUG(DBG_HEADER, "found header: 0x%02x%02x %d %d %d %d %#x 0x%02x %u %u",
				frame->isoc_header.magic_2,
				frame->isoc_header.magic_1,
				frame->isoc_header.header_length,
				frame->isoc_header.frame_num,
				frame->isoc_header.frame_phase,
				frame->isoc_header.frame_latency,
				frame->isoc_header.data_format,
				frame->isoc_header.format_param,
				frame->isoc_header.frame_width,
				frame->isoc_header.frame_height);

		if (usbvision->request_intra) {
			/* keep scanning until an intra frame arrives (bit 7 of format_param) */
			if (frame->isoc_header.format_param & 0x80) {
				found_header = 1;
				usbvision->last_isoc_frame_num = -1; /* do not check for lost frames this time */
				usbvision_unrequest_intra(usbvision);
				break;
			}
		} else {
			found_header = 1;
			break;
		}
	}

	if (found_header) {
		/* header dimensions are sensor pixels; apply the stretch factors */
		frame->frmwidth = frame->isoc_header.frame_width * usbvision->stretch_width;
		frame->frmheight = frame->isoc_header.frame_height * usbvision->stretch_height;
		frame->v4l2_linesize = (frame->frmwidth * frame->v4l2_format.depth) >> 3;
	} else { /* no header found */
		PDEBUG(DBG_HEADER, "skipping scratch data, no header");
		scratch_reset(usbvision);
		return parse_state_end_parse;
	}

	/* found header */
	if (frame->isoc_header.data_format == ISOC_MODE_COMPRESS) {
		/*
check isoc_header.frame_num for lost frames */ if (usbvision->last_isoc_frame_num >= 0) { if (((usbvision->last_isoc_frame_num + 1) % 32) != frame->isoc_header.frame_num) { /* unexpected frame drop: need to request new intra frame */ PDEBUG(DBG_HEADER, "Lost frame before %d on USB", frame->isoc_header.frame_num); usbvision_request_intra(usbvision); return parse_state_next_frame; } } usbvision->last_isoc_frame_num = frame->isoc_header.frame_num; } usbvision->header_count++; frame->scanstate = scan_state_lines; frame->curline = 0; return parse_state_continue; } static enum parse_state usbvision_parse_lines_422(struct usb_usbvision *usbvision, long *pcopylen) { volatile struct usbvision_frame *frame; unsigned char *f; int len; int i; unsigned char yuyv[4] = { 180, 128, 10, 128 }; /* YUV components */ unsigned char rv, gv, bv; /* RGB components */ int clipmask_index, bytes_per_pixel; int stretch_bytes, clipmask_add; frame = usbvision->cur_frame; f = frame->data + (frame->v4l2_linesize * frame->curline); /* Make sure there's enough data for the entire line */ len = (frame->isoc_header.frame_width * 2) + 5; if (scratch_len(usbvision) < len) { PDEBUG(DBG_PARSE, "out of data in line %d, need %u.\n", frame->curline, len); return parse_state_out; } if ((frame->curline + 1) >= frame->frmheight) return parse_state_next_frame; bytes_per_pixel = frame->v4l2_format.bytes_per_pixel; stretch_bytes = (usbvision->stretch_width - 1) * bytes_per_pixel; clipmask_index = frame->curline * MAX_FRAME_WIDTH; clipmask_add = usbvision->stretch_width; for (i = 0; i < frame->frmwidth; i += (2 * usbvision->stretch_width)) { scratch_get(usbvision, &yuyv[0], 4); if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) { *f++ = yuyv[0]; /* Y */ *f++ = yuyv[3]; /* U */ } else { YUV_TO_RGB_BY_THE_BOOK(yuyv[0], yuyv[1], yuyv[3], rv, gv, bv); switch (frame->v4l2_format.format) { case V4L2_PIX_FMT_RGB565: *f++ = (0x1F & rv) | (0xE0 & (gv << 5)); *f++ = (0x07 & (gv >> 3)) | (0xF8 & bv); break; case 
V4L2_PIX_FMT_RGB24: *f++ = rv; *f++ = gv; *f++ = bv; break; case V4L2_PIX_FMT_RGB32: *f++ = rv; *f++ = gv; *f++ = bv; f++; break; case V4L2_PIX_FMT_RGB555: *f++ = (0x1F & rv) | (0xE0 & (gv << 5)); *f++ = (0x03 & (gv >> 3)) | (0x7C & (bv << 2)); break; } } clipmask_index += clipmask_add; f += stretch_bytes; if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) { *f++ = yuyv[2]; /* Y */ *f++ = yuyv[1]; /* V */ } else { YUV_TO_RGB_BY_THE_BOOK(yuyv[2], yuyv[1], yuyv[3], rv, gv, bv); switch (frame->v4l2_format.format) { case V4L2_PIX_FMT_RGB565: *f++ = (0x1F & rv) | (0xE0 & (gv << 5)); *f++ = (0x07 & (gv >> 3)) | (0xF8 & bv); break; case V4L2_PIX_FMT_RGB24: *f++ = rv; *f++ = gv; *f++ = bv; break; case V4L2_PIX_FMT_RGB32: *f++ = rv; *f++ = gv; *f++ = bv; f++; break; case V4L2_PIX_FMT_RGB555: *f++ = (0x1F & rv) | (0xE0 & (gv << 5)); *f++ = (0x03 & (gv >> 3)) | (0x7C & (bv << 2)); break; } } clipmask_index += clipmask_add; f += stretch_bytes; } frame->curline += usbvision->stretch_height; *pcopylen += frame->v4l2_linesize * usbvision->stretch_height; if (frame->curline >= frame->frmheight) return parse_state_next_frame; return parse_state_continue; } /* The decompression routine */ static int usbvision_decompress(struct usb_usbvision *usbvision, unsigned char *compressed, unsigned char *decompressed, int *start_pos, int *block_typestart_pos, int len) { int rest_pixel, idx, pos, extra_pos, block_len, block_type_pos, block_type_len; unsigned char block_byte, block_code, block_type, block_type_byte, integrator; integrator = 0; pos = *start_pos; block_type_pos = *block_typestart_pos; extra_pos = pos; block_len = 0; block_byte = 0; block_code = 0; block_type = 0; block_type_byte = 0; block_type_len = 0; rest_pixel = len; for (idx = 0; idx < len; idx++) { if (block_len == 0) { if (block_type_len == 0) { block_type_byte = compressed[block_type_pos]; block_type_pos++; block_type_len = 4; } block_type = (block_type_byte & 0xC0) >> 6; /* statistic: */ 
usbvision->compr_block_types[block_type]++; pos = extra_pos; if (block_type == 0) { if (rest_pixel >= 24) { idx += 23; rest_pixel -= 24; integrator = decompressed[idx]; } else { idx += rest_pixel - 1; rest_pixel = 0; } } else { block_code = compressed[pos]; pos++; if (rest_pixel >= 24) block_len = 24; else block_len = rest_pixel; rest_pixel -= block_len; extra_pos = pos + (block_len / 4); } block_type_byte <<= 2; block_type_len -= 1; } if (block_len > 0) { if ((block_len % 4) == 0) { block_byte = compressed[pos]; pos++; } if (block_type == 1) /* inter Block */ integrator = decompressed[idx]; switch (block_byte & 0xC0) { case 0x03 << 6: integrator += compressed[extra_pos]; extra_pos++; break; case 0x02 << 6: integrator += block_code; break; case 0x00: integrator -= block_code; break; } decompressed[idx] = integrator; block_byte <<= 2; block_len -= 1; } } *start_pos = extra_pos; *block_typestart_pos = block_type_pos; return idx; } /* * usbvision_parse_compress() * * Parse compressed frame from the scratch buffer, put * decoded RGB value into the current frame buffer and add the written * number of bytes (RGB) to the *pcopylen. * */ static enum parse_state usbvision_parse_compress(struct usb_usbvision *usbvision, long *pcopylen) { #define USBVISION_STRIP_MAGIC 0x5A #define USBVISION_STRIP_LEN_MAX 400 #define USBVISION_STRIP_HEADER_LEN 3 struct usbvision_frame *frame; unsigned char *f, *u = NULL, *v = NULL; unsigned char strip_data[USBVISION_STRIP_LEN_MAX]; unsigned char strip_header[USBVISION_STRIP_HEADER_LEN]; int idx, idx_end, strip_len, strip_ptr, startblock_pos, block_pos, block_type_pos; int clipmask_index; int image_size; unsigned char rv, gv, bv; static unsigned char *Y, *U, *V; frame = usbvision->cur_frame; image_size = frame->frmwidth * frame->frmheight; if ((frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) || (frame->v4l2_format.format == V4L2_PIX_FMT_YVU420)) { /* this is a planar format */ /* ... v4l2_linesize not used here. 
*/ f = frame->data + (frame->width * frame->curline); } else f = frame->data + (frame->v4l2_linesize * frame->curline); if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) { /* initialise u and v pointers */ /* get base of u and b planes add halfoffset */ u = frame->data + image_size + (frame->frmwidth >> 1) * frame->curline; v = u + (image_size >> 1); } else if (frame->v4l2_format.format == V4L2_PIX_FMT_YVU420) { v = frame->data + image_size + ((frame->curline * (frame->width)) >> 2); u = v + (image_size >> 2); } if (frame->curline == 0) usbvision_adjust_compression(usbvision); if (scratch_len(usbvision) < USBVISION_STRIP_HEADER_LEN) return parse_state_out; /* get strip header without changing the scratch_read_ptr */ scratch_set_extra_ptr(usbvision, &strip_ptr, 0); scratch_get_extra(usbvision, &strip_header[0], &strip_ptr, USBVISION_STRIP_HEADER_LEN); if (strip_header[0] != USBVISION_STRIP_MAGIC) { /* wrong strip magic */ usbvision->strip_magic_errors++; return parse_state_next_frame; } if (frame->curline != (int)strip_header[2]) { /* line number mismatch error */ usbvision->strip_line_number_errors++; } strip_len = 2 * (unsigned int)strip_header[1]; if (strip_len > USBVISION_STRIP_LEN_MAX) { /* strip overrun */ /* I think this never happens */ usbvision_request_intra(usbvision); } if (scratch_len(usbvision) < strip_len) { /* there is not enough data for the strip */ return parse_state_out; } if (usbvision->intra_frame_buffer) { Y = usbvision->intra_frame_buffer + frame->frmwidth * frame->curline; U = usbvision->intra_frame_buffer + image_size + (frame->frmwidth / 2) * (frame->curline / 2); V = usbvision->intra_frame_buffer + image_size / 4 * 5 + (frame->frmwidth / 2) * (frame->curline / 2); } else { return parse_state_next_frame; } clipmask_index = frame->curline * MAX_FRAME_WIDTH; scratch_get(usbvision, strip_data, strip_len); idx_end = frame->frmwidth; block_type_pos = USBVISION_STRIP_HEADER_LEN; startblock_pos = block_type_pos + (idx_end - 1) / 96 + (idx_end / 
/* continuation of usbvision_parse_compress(): decompress and emit one strip */
	2 - 1) / 96 + 2;
	block_pos = startblock_pos;

	usbvision->block_pos = block_pos;

	usbvision_decompress(usbvision, strip_data, Y, &block_pos, &block_type_pos, idx_end);
	if (strip_len > usbvision->max_strip_len)
		usbvision->max_strip_len = strip_len;

	/* chroma alternates per line: odd lines carry V, even lines carry U */
	if (frame->curline % 2)
		usbvision_decompress(usbvision, strip_data, V, &block_pos, &block_type_pos, idx_end / 2);
	else
		usbvision_decompress(usbvision, strip_data, U, &block_pos, &block_type_pos, idx_end / 2);

	if (block_pos > usbvision->comprblock_pos)
		usbvision->comprblock_pos = block_pos;
	if (block_pos > strip_len)
		usbvision->strip_len_errors++;

	for (idx = 0; idx < idx_end; idx++) {
		if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) {
			*f++ = Y[idx];
			*f++ = idx & 0x01 ? U[idx / 2] : V[idx / 2];
		} else if (frame->v4l2_format.format == V4L2_PIX_FMT_YUV422P) {
			*f++ = Y[idx];
			if (idx & 0x01)
				*u++ = U[idx >> 1];
			else
				*v++ = V[idx >> 1];
		} else if (frame->v4l2_format.format == V4L2_PIX_FMT_YVU420) {
			*f++ = Y[idx];
			if (!((idx & 0x01) | (frame->curline & 0x01))) {
				/* only need do this for 1 in 4 pixels */
				/* intraframe buffer is YUV420 format */
				*u++ = U[idx >> 1];
				*v++ = V[idx >> 1];
			}
		} else {
			YUV_TO_RGB_BY_THE_BOOK(Y[idx], U[idx / 2], V[idx / 2], rv, gv, bv);
			switch (frame->v4l2_format.format) {
			case V4L2_PIX_FMT_GREY:
				*f++ = Y[idx];
				break;
			case V4L2_PIX_FMT_RGB555:
				*f++ = (0x1F & rv) | (0xE0 & (gv << 5));
				*f++ = (0x03 & (gv >> 3)) | (0x7C & (bv << 2));
				break;
			case V4L2_PIX_FMT_RGB565:
				*f++ = (0x1F & rv) | (0xE0 & (gv << 5));
				*f++ = (0x07 & (gv >> 3)) | (0xF8 & bv);
				break;
			case V4L2_PIX_FMT_RGB24:
				*f++ = rv;
				*f++ = gv;
				*f++ = bv;
				break;
			case V4L2_PIX_FMT_RGB32:
				*f++ = rv;
				*f++ = gv;
				*f++ = bv;
				f++;
				break;
			}
		}
		clipmask_index++;
	}
	/* Deal with non-integer no. of bytes for YUV420P */
	if (frame->v4l2_format.format != V4L2_PIX_FMT_YVU420)
		*pcopylen += frame->v4l2_linesize;
	else
		*pcopylen += frame->curline & 0x01 ?
			frame->v4l2_linesize : frame->v4l2_linesize << 1;

	frame->curline += 1;

	if (frame->curline >= frame->frmheight)
		return parse_state_next_frame;
	return parse_state_continue;
}

/*
 * usbvision_parse_lines_420()
 *
 * Parse two lines from the scratch buffer, put
 * decoded RGB value into the current frame buffer and add the written
 * number of bytes (RGB) to the *pcopylen.
 *
 */
static enum parse_state usbvision_parse_lines_420(struct usb_usbvision *usbvision,
					   long *pcopylen)
{
	struct usbvision_frame *frame;
	unsigned char *f_even = NULL, *f_odd = NULL;
	unsigned int pixel_per_line, block;
	int pixel, block_split;
	int y_ptr, u_ptr, v_ptr, y_odd_offset;
	const int y_block_size = 128;
	const int uv_block_size = 64;
	const int sub_block_size = 32;
	const int y_step[] = { 0, 0, 0, 2 }, y_step_size = 4;
	const int uv_step[] = { 0, 0, 0, 4 }, uv_step_size = 4;
	unsigned char y[2], u, v;	/* YUV components */
	int y_, u_, v_, vb, uvg, ur;
	int r_, g_, b_;			/* RGB components */
	unsigned char g;
	int clipmask_even_index, clipmask_odd_index, bytes_per_pixel;
	int clipmask_add, stretch_bytes;

	frame = usbvision->cur_frame;
	f_even = frame->data + (frame->v4l2_linesize * frame->curline);
	f_odd = f_even + frame->v4l2_linesize * usbvision->stretch_height;

	/* Make sure there's enough data for the entire line */
	/* In this mode usbvision transfer 3 bytes for every 2 pixels */
	/* I need two lines to decode the color */
	bytes_per_pixel = frame->v4l2_format.bytes_per_pixel;
	stretch_bytes = (usbvision->stretch_width - 1) * bytes_per_pixel;
	clipmask_even_index = frame->curline * MAX_FRAME_WIDTH;
	clipmask_odd_index = clipmask_even_index + MAX_FRAME_WIDTH;
	clipmask_add = usbvision->stretch_width;
	pixel_per_line = frame->isoc_header.frame_width;

	if (scratch_len(usbvision) < (int)pixel_per_line * 3) {
		/* printk(KERN_DEBUG "out of data, need %d\n", len); */
		return parse_state_out;
	}

	if ((frame->curline + 1) >= frame->frmheight)
		return parse_state_next_frame;

	block_split = (pixel_per_line % y_block_size) ?
/* continuation of usbvision_parse_lines_420(): even/odd line decode loop */
	1 : 0;	/* are some blocks splitted into different lines? */

	y_odd_offset = (pixel_per_line / y_block_size) * (y_block_size + uv_block_size)
			+ block_split * uv_block_size;
	scratch_set_extra_ptr(usbvision, &y_ptr, y_odd_offset);
	scratch_set_extra_ptr(usbvision, &u_ptr, y_block_size);
	scratch_set_extra_ptr(usbvision, &v_ptr, y_odd_offset
			+ (4 - block_split) * sub_block_size);

	for (block = 0; block < (pixel_per_line / sub_block_size); block++) {
		for (pixel = 0; pixel < sub_block_size; pixel += 2) {
			scratch_get(usbvision, &y[0], 2);
			scratch_get_extra(usbvision, &u, &u_ptr, 1);
			scratch_get_extra(usbvision, &v, &v_ptr, 1);

			/* I don't use the YUV_TO_RGB macro for better performance */
			v_ = v - 128;
			u_ = u - 128;
			vb = 132252 * v_;
			uvg = -53281 * u_ - 25625 * v_;
			ur = 104595 * u_;

			/* even line, first pixel */
			if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) {
				*f_even++ = y[0];
				*f_even++ = v;
			} else {
				y_ = 76284 * (y[0] - 16);
				b_ = (y_ + vb) >> 16;
				g_ = (y_ + uvg) >> 16;
				r_ = (y_ + ur) >> 16;
				switch (frame->v4l2_format.format) {
				case V4L2_PIX_FMT_RGB565:
					g = LIMIT_RGB(g_);
					*f_even++ = (0x1F & LIMIT_RGB(r_)) | (0xE0 & (g << 5));
					*f_even++ = (0x07 & (g >> 3)) | (0xF8 & LIMIT_RGB(b_));
					break;
				case V4L2_PIX_FMT_RGB24:
					*f_even++ = LIMIT_RGB(r_);
					*f_even++ = LIMIT_RGB(g_);
					*f_even++ = LIMIT_RGB(b_);
					break;
				case V4L2_PIX_FMT_RGB32:
					*f_even++ = LIMIT_RGB(r_);
					*f_even++ = LIMIT_RGB(g_);
					*f_even++ = LIMIT_RGB(b_);
					f_even++;
					break;
				case V4L2_PIX_FMT_RGB555:
					g = LIMIT_RGB(g_);
					*f_even++ = (0x1F & LIMIT_RGB(r_)) | (0xE0 & (g << 5));
					*f_even++ = (0x03 & (g >> 3)) |
						    (0x7C & (LIMIT_RGB(b_) << 2));
					break;
				}
			}
			clipmask_even_index += clipmask_add;
			f_even += stretch_bytes;

			/* even line, second pixel (shares the same chroma sample) */
			if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) {
				*f_even++ = y[1];
				*f_even++ = u;
			} else {
				y_ = 76284 * (y[1] - 16);
				b_ = (y_ + vb) >> 16;
				g_ = (y_ + uvg) >> 16;
				r_ = (y_ + ur) >> 16;
				switch (frame->v4l2_format.format) {
				case V4L2_PIX_FMT_RGB565:
					g = LIMIT_RGB(g_);
					*f_even++ = (0x1F & LIMIT_RGB(r_)) | (0xE0 & (g << 5));
					*f_even++ = (0x07 & (g >> 3)) | (0xF8 & LIMIT_RGB(b_));
					break;
				case V4L2_PIX_FMT_RGB24:
					*f_even++ = LIMIT_RGB(r_);
					*f_even++ = LIMIT_RGB(g_);
					*f_even++ = LIMIT_RGB(b_);
					break;
				case V4L2_PIX_FMT_RGB32:
					*f_even++ = LIMIT_RGB(r_);
					*f_even++ = LIMIT_RGB(g_);
					*f_even++ = LIMIT_RGB(b_);
					f_even++;
					break;
				case V4L2_PIX_FMT_RGB555:
					g = LIMIT_RGB(g_);
					*f_even++ = (0x1F & LIMIT_RGB(r_)) | (0xE0 & (g << 5));
					*f_even++ = (0x03 & (g >> 3)) |
						    (0x7C & (LIMIT_RGB(b_) << 2));
					break;
				}
			}
			clipmask_even_index += clipmask_add;
			f_even += stretch_bytes;

			/* odd line Y samples live further ahead in the scratch buffer */
			scratch_get_extra(usbvision, &y[0], &y_ptr, 2);

			/* odd line, first pixel */
			if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) {
				*f_odd++ = y[0];
				*f_odd++ = v;
			} else {
				y_ = 76284 * (y[0] - 16);
				b_ = (y_ + vb) >> 16;
				g_ = (y_ + uvg) >> 16;
				r_ = (y_ + ur) >> 16;
				switch (frame->v4l2_format.format) {
				case V4L2_PIX_FMT_RGB565:
					g = LIMIT_RGB(g_);
					*f_odd++ = (0x1F & LIMIT_RGB(r_)) | (0xE0 & (g << 5));
					*f_odd++ = (0x07 & (g >> 3)) | (0xF8 & LIMIT_RGB(b_));
					break;
				case V4L2_PIX_FMT_RGB24:
					*f_odd++ = LIMIT_RGB(r_);
					*f_odd++ = LIMIT_RGB(g_);
					*f_odd++ = LIMIT_RGB(b_);
					break;
				case V4L2_PIX_FMT_RGB32:
					*f_odd++ = LIMIT_RGB(r_);
					*f_odd++ = LIMIT_RGB(g_);
					*f_odd++ = LIMIT_RGB(b_);
					f_odd++;
					break;
				case V4L2_PIX_FMT_RGB555:
					g = LIMIT_RGB(g_);
					*f_odd++ = (0x1F & LIMIT_RGB(r_)) | (0xE0 & (g << 5));
					*f_odd++ = (0x03 & (g >> 3)) |
						   (0x7C & (LIMIT_RGB(b_) << 2));
					break;
				}
			}
			clipmask_odd_index += clipmask_add;
			f_odd += stretch_bytes;

			/* odd line, second pixel */
			if (frame->v4l2_format.format == V4L2_PIX_FMT_YUYV) {
				*f_odd++ = y[1];
				*f_odd++ = u;
			} else {
				y_ = 76284 * (y[1] - 16);
				b_ = (y_ + vb) >> 16;
				g_ = (y_ + uvg) >> 16;
				r_ = (y_ + ur) >> 16;
				switch (frame->v4l2_format.format) {
				case V4L2_PIX_FMT_RGB565:
					g = LIMIT_RGB(g_);
					*f_odd++ = (0x1F & LIMIT_RGB(r_)) | (0xE0 & (g << 5));
					*f_odd++ = (0x07 & (g >> 3)) | (0xF8 & LIMIT_RGB(b_));
					break;
				case V4L2_PIX_FMT_RGB24:
					*f_odd++ = LIMIT_RGB(r_);
					*f_odd++ = LIMIT_RGB(g_);
					*f_odd++ = LIMIT_RGB(b_);
					break;
				case V4L2_PIX_FMT_RGB32:
					*f_odd++ = LIMIT_RGB(r_);
					*f_odd++ = LIMIT_RGB(g_);
					*f_odd++ =
/* continuation of usbvision_parse_lines_420(): odd-line pixel #2 tail */
					LIMIT_RGB(b_);
					f_odd++;
					break;
				case V4L2_PIX_FMT_RGB555:
					g = LIMIT_RGB(g_);
					*f_odd++ = (0x1F & LIMIT_RGB(r_)) | (0xE0 & (g << 5));
					*f_odd++ = (0x03 & (g >> 3)) |
						   (0x7C & (LIMIT_RGB(b_) << 2));
					break;
				}
			}
			clipmask_odd_index += clipmask_add;
			f_odd += stretch_bytes;
		}

		/* advance the scratch pointers to the next sub block */
		scratch_rm_old(usbvision, y_step[block % y_step_size] * sub_block_size);
		scratch_inc_extra_ptr(&y_ptr, y_step[(block + 2 * block_split) % y_step_size]
				* sub_block_size);
		scratch_inc_extra_ptr(&u_ptr, uv_step[block % uv_step_size]
				* sub_block_size);
		scratch_inc_extra_ptr(&v_ptr, uv_step[(block + 2 * block_split) % uv_step_size]
				* sub_block_size);
	}

	scratch_rm_old(usbvision, pixel_per_line * 3 / 2
			+ block_split * sub_block_size);

	frame->curline += 2 * usbvision->stretch_height;
	*pcopylen += frame->v4l2_linesize * 2 * usbvision->stretch_height;

	if (frame->curline >= frame->frmheight)
		return parse_state_next_frame;
	return parse_state_continue;
}

/*
 * usbvision_parse_data()
 *
 * Generic routine to parse the scratch buffer. It employs either
 * usbvision_find_header() or usbvision_parse_lines() to do most
 * of work.
 */
static void usbvision_parse_data(struct usb_usbvision *usbvision)
{
	struct usbvision_frame *frame;
	enum parse_state newstate;
	long copylen = 0;
	unsigned long lock_flags;

	frame = usbvision->cur_frame;

	PDEBUG(DBG_PARSE, "parsing len=%d\n", scratch_len(usbvision));

	while (1) {
		newstate = parse_state_out;
		if (scratch_len(usbvision)) {
			if (frame->scanstate == scan_state_scanning) {
				newstate = usbvision_find_header(usbvision);
			} else if (frame->scanstate == scan_state_lines) {
				/* dispatch on the active isochronous transfer mode */
				if (usbvision->isoc_mode == ISOC_MODE_YUV420)
					newstate = usbvision_parse_lines_420(usbvision, &copylen);
				else if (usbvision->isoc_mode == ISOC_MODE_YUV422)
					newstate = usbvision_parse_lines_422(usbvision, &copylen);
				else if (usbvision->isoc_mode == ISOC_MODE_COMPRESS)
					newstate = usbvision_parse_compress(usbvision, &copylen);
			}
		}
		if (newstate == parse_state_continue)
			continue;
		if ((newstate == parse_state_next_frame) || (newstate == parse_state_out))
			break;
		return;	/* parse_state_end_parse */
	}

	if (newstate == parse_state_next_frame) {
		/* frame complete: move it to the outqueue and wake any reader */
		frame->grabstate = frame_state_done;
		v4l2_get_timestamp(&(frame->timestamp));
		frame->sequence = usbvision->frame_num;

		spin_lock_irqsave(&usbvision->queue_lock, lock_flags);
		list_move_tail(&(frame->frame), &usbvision->outqueue);
		usbvision->cur_frame = NULL;
		spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags);

		usbvision->frame_num++;

		/* This will cause the process to request another frame. */
		if (waitqueue_active(&usbvision->wait_frame)) {
			PDEBUG(DBG_PARSE, "Wake up !");
			wake_up_interruptible(&usbvision->wait_frame);
		}
	} else {
		frame->grabstate = frame_state_grabbing;
	}

	/* Update the frame's uncompressed length.
*/
	frame->scanlength += copylen;
}

/*
 * Make all of the blocks of data contiguous
 */
static int usbvision_compress_isochronous(struct usb_usbvision *usbvision,
					  struct urb *urb)
{
	unsigned char *packet_data;
	int i, totlen = 0;

	for (i = 0; i < urb->number_of_packets; i++) {
		int packet_len = urb->iso_frame_desc[i].actual_length;
		int packet_stat = urb->iso_frame_desc[i].status;

		packet_data = urb->transfer_buffer + urb->iso_frame_desc[i].offset;

		/* Detect and ignore errored packets */
		if (packet_stat) {	/* packet_stat != 0 ????????????? */
			PDEBUG(DBG_ISOC, "data error: [%d] len=%d, status=%X", i, packet_len, packet_stat);
			usbvision->isoc_err_count++;
			continue;
		}

		/* Detect and ignore empty packets */
		if (packet_len < 0) {
			PDEBUG(DBG_ISOC, "error packet [%d]", i);
			usbvision->isoc_skip_count++;
			continue;
		} else if (packet_len == 0) {	/* Frame end ????? */
			PDEBUG(DBG_ISOC, "null packet [%d]", i);
			usbvision->isocstate = isoc_state_no_frame;
			usbvision->isoc_skip_count++;
			continue;
		} else if (packet_len > usbvision->isoc_packet_size) {
			PDEBUG(DBG_ISOC, "packet[%d] > isoc_packet_size", i);
			usbvision->isoc_skip_count++;
			continue;
		}

		PDEBUG(DBG_ISOC, "packet ok [%d] len=%d", i, packet_len);

		if (usbvision->isocstate == isoc_state_no_frame) { /* new frame begins */
			usbvision->isocstate = isoc_state_in_frame;
			scratch_mark_header(usbvision);
			usbvision_measure_bandwidth(usbvision);
			PDEBUG(DBG_ISOC, "packet with header");
		}

		/*
		 * If usbvision continues to feed us with data but there is no
		 * consumption (if, for example, V4L client fell asleep) we
		 * may overflow the buffer. We have to move old data over to
		 * free room for new data. This is bad for old data. If we
		 * just drop new data then it's bad for new data... choose
		 * your favorite evil here.
		 */
		if (scratch_free(usbvision) < packet_len) {
			usbvision->scratch_ovf_count++;
			PDEBUG(DBG_ISOC, "scratch buf overflow! scr_len: %d, n: %d",
			       scratch_len(usbvision), packet_len);
			scratch_rm_old(usbvision, packet_len - scratch_free(usbvision));
		}

		/* Now we know that there is enough room in scratch buffer */
		scratch_put(usbvision, packet_data, packet_len);
		totlen += packet_len;
		usbvision->isoc_data_count += packet_len;
		usbvision->isoc_packet_count++;
	}
#if ENABLE_HEXDUMP
	if (totlen > 0) {
		static int foo;

		if (foo < 1) {
			printk(KERN_DEBUG "+%d.\n", usbvision->scratchlen);
			usbvision_hexdump(data0, (totlen > 64) ? 64 : totlen);
			++foo;
		}
	}
#endif
	return totlen;
}

/* per-URB isochronous completion handler (runs in interrupt context) */
static void usbvision_isoc_irq(struct urb *urb)
{
	int err_code = 0;
	int len;
	struct usb_usbvision *usbvision = urb->context;
	int i;
	unsigned long start_time = jiffies;
	struct usbvision_frame **f;

	/* We don't want to do anything if we are about to be removed! */
	if (!USBVISION_IS_OPERATIONAL(usbvision))
		return;

	/* any urb with wrong status is ignored without acknowledgement */
	if (urb->status == -ENOENT)
		return;

	f = &usbvision->cur_frame;

	/* Manage streaming interruption */
	if (usbvision->streaming == stream_interrupt) {
		usbvision->streaming = stream_idle;
		if ((*f)) {
			(*f)->grabstate = frame_state_ready;
			(*f)->scanstate = scan_state_scanning;
		}
		PDEBUG(DBG_IRQ, "stream interrupted");
		wake_up_interruptible(&usbvision->wait_stream);
	}

	/* Copy the data received into our scratch buffer */
	len = usbvision_compress_isochronous(usbvision, urb);

	usbvision->isoc_urb_count++;
	usbvision->urb_length = len;

	if (usbvision->streaming == stream_on) {
		/* If we collected enough data let's parse!
*/
		if (scratch_len(usbvision) > USBVISION_HEADER_LENGTH &&
		    !list_empty(&(usbvision->inqueue))) {
			if (!(*f)) {
				(*f) = list_entry(usbvision->inqueue.next,
						  struct usbvision_frame, frame);
			}
			usbvision_parse_data(usbvision);
		} else {
			/* If we don't have a frame we're current working on, complain */
			PDEBUG(DBG_IRQ, "received data, but no one needs it");
			scratch_reset(usbvision);
		}
	} else {
		PDEBUG(DBG_IRQ, "received data, but no one needs it");
		scratch_reset(usbvision);
	}

	usbvision->time_in_irq += jiffies - start_time;

	/* re-arm every ISO descriptor and resubmit the URB */
	for (i = 0; i < USBVISION_URB_FRAMES; i++) {
		urb->iso_frame_desc[i].status = 0;
		urb->iso_frame_desc[i].actual_length = 0;
	}

	urb->status = 0;
	urb->dev = usbvision->dev;
	err_code = usb_submit_urb(urb, GFP_ATOMIC);

	if (err_code) {
		dev_err(&usbvision->dev->dev,
			"%s: usb_submit_urb failed: error %d\n",
			__func__, err_code);
	}
	return;
}

/*************************************/
/* Low level usbvision access functions */
/*************************************/

/*
 * usbvision_read_reg()
 *
 * return < 0 -> Error
 *        >= 0 -> Data
 */
int usbvision_read_reg(struct usb_usbvision *usbvision, unsigned char reg)
{
	int err_code = 0;
	unsigned char buffer[1];

	if (!USBVISION_IS_OPERATIONAL(usbvision))
		return -1;

	err_code = usb_control_msg(usbvision->dev, usb_rcvctrlpipe(usbvision->dev, 1),
				USBVISION_OP_CODE,
				USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, 0,
				(__u16) reg, buffer, 1, HZ);

	if (err_code < 0) {
		dev_err(&usbvision->dev->dev,
			"%s: failed: error %d\n", __func__, err_code);
		return err_code;
	}
	return buffer[0];
}

/*
 * usbvision_write_reg()
 *
 * return 1 -> Reg written
 *        0 -> usbvision is not yet ready
 *       -1 -> Something went wrong
 */
int usbvision_write_reg(struct usb_usbvision *usbvision, unsigned char reg,
			unsigned char value)
{
	int err_code = 0;

	if (!USBVISION_IS_OPERATIONAL(usbvision))
		return 0;

	err_code = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
				USBVISION_OP_CODE,
				USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT,
				0, (__u16) reg, &value, 1, HZ);

	if (err_code < 0) {
		dev_err(&usbvision->dev->dev,
			"%s: failed: error %d\n", __func__, err_code);
	}
	return err_code;
}

/* completion handler for the asynchronous register-write URB */
static void usbvision_ctrl_urb_complete(struct urb *urb)
{
	struct usb_usbvision *usbvision = (struct usb_usbvision *)urb->context;

	PDEBUG(DBG_IRQ, "");
	usbvision->ctrl_urb_busy = 0;
	if (waitqueue_active(&usbvision->ctrl_urb_wq))
		wake_up_interruptible(&usbvision->ctrl_urb_wq);
}

/*
 * Write up to 8 bytes to a chip register from interrupt context using
 * an asynchronous control URB.  Returns -EBUSY while a previous write
 * is still pending, -EFAULT for oversized requests.
 */
static int usbvision_write_reg_irq(struct usb_usbvision *usbvision, int address,
				   unsigned char *data, int len)
{
	int err_code = 0;

	PDEBUG(DBG_IRQ, "");
	if (len > 8)
		return -EFAULT;
	if (usbvision->ctrl_urb_busy)
		return -EBUSY;
	usbvision->ctrl_urb_busy = 1;

	usbvision->ctrl_urb_setup.bRequestType = USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT;
	usbvision->ctrl_urb_setup.bRequest = USBVISION_OP_CODE;
	usbvision->ctrl_urb_setup.wValue = 0;
	usbvision->ctrl_urb_setup.wIndex = cpu_to_le16(address);
	usbvision->ctrl_urb_setup.wLength = cpu_to_le16(len);
	usb_fill_control_urb(usbvision->ctrl_urb, usbvision->dev,
			     usb_sndctrlpipe(usbvision->dev, 1),
			     (unsigned char *)&usbvision->ctrl_urb_setup,
			     (void *)usbvision->ctrl_urb_buffer,
			     len, usbvision_ctrl_urb_complete,
			     (void *)usbvision);

	memcpy(usbvision->ctrl_urb_buffer, data, len);

	err_code = usb_submit_urb(usbvision->ctrl_urb, GFP_ATOMIC);
	if (err_code < 0) {
		/* error in usb_submit_urb() */
		usbvision->ctrl_urb_busy = 0;
	}
	PDEBUG(DBG_IRQ, "submit %d byte: error %d", len, err_code);
	return err_code;
}

/* Reset all compression bookkeeping to its power-on defaults */
static int usbvision_init_compression(struct usb_usbvision *usbvision)
{
	int err_code = 0;

	usbvision->last_isoc_frame_num = -1;
	usbvision->isoc_data_count = 0;
	usbvision->isoc_packet_count = 0;
	usbvision->isoc_skip_count = 0;
	usbvision->compr_level = 50;
	usbvision->last_compr_level = -1;
	usbvision->isoc_urb_count = 0;
	usbvision->request_intra = 1;
	usbvision->isoc_measure_bandwidth_count = 0;

	return err_code;
}

/* this function measures the used bandwidth since last call
 * return: 0 : no error
 * sets used_bandwidth to 1-100 : 1-100% of
full bandwidth resp. to isoc_packet_size */ static int usbvision_measure_bandwidth(struct usb_usbvision *usbvision) { int err_code = 0; if (usbvision->isoc_measure_bandwidth_count < 2) { /* this gives an average bandwidth of 3 frames */ usbvision->isoc_measure_bandwidth_count++; return err_code; } if ((usbvision->isoc_packet_size > 0) && (usbvision->isoc_packet_count > 0)) { usbvision->used_bandwidth = usbvision->isoc_data_count / (usbvision->isoc_packet_count + usbvision->isoc_skip_count) * 100 / usbvision->isoc_packet_size; } usbvision->isoc_measure_bandwidth_count = 0; usbvision->isoc_data_count = 0; usbvision->isoc_packet_count = 0; usbvision->isoc_skip_count = 0; return err_code; } static int usbvision_adjust_compression(struct usb_usbvision *usbvision) { int err_code = 0; unsigned char buffer[6]; PDEBUG(DBG_IRQ, ""); if ((adjust_compression) && (usbvision->used_bandwidth > 0)) { usbvision->compr_level += (usbvision->used_bandwidth - 90) / 2; RESTRICT_TO_RANGE(usbvision->compr_level, 0, 100); if (usbvision->compr_level != usbvision->last_compr_level) { int distortion; if (usbvision->bridge_type == BRIDGE_NT1004 || usbvision->bridge_type == BRIDGE_NT1005) { buffer[0] = (unsigned char)(4 + 16 * usbvision->compr_level / 100); /* PCM Threshold 1 */ buffer[1] = (unsigned char)(4 + 8 * usbvision->compr_level / 100); /* PCM Threshold 2 */ distortion = 7 + 248 * usbvision->compr_level / 100; buffer[2] = (unsigned char)(distortion & 0xFF); /* Average distortion Threshold (inter) */ buffer[3] = (unsigned char)(distortion & 0xFF); /* Average distortion Threshold (intra) */ distortion = 1 + 42 * usbvision->compr_level / 100; buffer[4] = (unsigned char)(distortion & 0xFF); /* Maximum distortion Threshold (inter) */ buffer[5] = (unsigned char)(distortion & 0xFF); /* Maximum distortion Threshold (intra) */ } else { /* BRIDGE_NT1003 */ buffer[0] = (unsigned char)(4 + 16 * usbvision->compr_level / 100); /* PCM threshold 1 */ buffer[1] = (unsigned char)(4 + 8 * 
usbvision->compr_level / 100);	/* PCM threshold 2 */
				distortion = 2 + 253 * usbvision->compr_level / 100;
				buffer[2] = (unsigned char)(distortion & 0xFF);	/* distortion threshold bit0-7 */
				buffer[3] = 0;	/* (unsigned char)((distortion >> 8) & 0x0F); distortion threshold bit 8-11 */
				distortion = 0 + 43 * usbvision->compr_level / 100;
				buffer[4] = (unsigned char)(distortion & 0xFF);	/* maximum distortion bit0-7 */
				buffer[5] = 0;	/* (unsigned char)((distortion >> 8) & 0x01); maximum distortion bit 8 */
			}
			err_code = usbvision_write_reg_irq(usbvision, USBVISION_PCM_THR1, buffer, 6);
			if (err_code == 0) {
				PDEBUG(DBG_IRQ, "new compr params %#02x %#02x %#02x %#02x %#02x %#02x",
					buffer[0], buffer[1], buffer[2],
					buffer[3], buffer[4], buffer[5]);
				usbvision->last_compr_level = usbvision->compr_level;
			}
		}
	}
	return err_code;
}

/* Ask the chip to send an intra (key) frame as soon as possible. */
static int usbvision_request_intra(struct usb_usbvision *usbvision)
{
	int err_code = 0;
	unsigned char buffer[1];

	PDEBUG(DBG_IRQ, "");
	usbvision->request_intra = 1;
	buffer[0] = 1;
	usbvision_write_reg_irq(usbvision, USBVISION_FORCE_INTRA, buffer, 1);
	return err_code;
}

/* Cancel a pending intra-frame request. */
static int usbvision_unrequest_intra(struct usb_usbvision *usbvision)
{
	int err_code = 0;
	unsigned char buffer[1];

	PDEBUG(DBG_IRQ, "");
	usbvision->request_intra = 0;
	buffer[0] = 0;
	usbvision_write_reg_irq(usbvision, USBVISION_FORCE_INTRA, buffer, 1);
	return err_code;
}

/*******************************
 * usbvision utility functions
 *******************************/

/*
 * Put the chip into suspend.  NOTE(review): usbvision_write_reg() apparently
 * returns the number of bytes transferred (1) on success — hence the
 * "== 1" success check below; confirm against its definition.
 */
int usbvision_power_off(struct usb_usbvision *usbvision)
{
	int err_code = 0;

	PDEBUG(DBG_FUNC, "");

	err_code = usbvision_write_reg(usbvision, USBVISION_PWR_REG, USBVISION_SSPND_EN);
	if (err_code == 1)
		usbvision->power = 0;
	PDEBUG(DBG_FUNC, "%s: err_code %d", (err_code != 1) ?
"ERROR" : "power is off", err_code); return err_code; } /* configure webcam image sensor using the serial port */ static int usbvision_init_webcam(struct usb_usbvision *usbvision) { int rc; int i; static char init_values[38][3] = { { 0x04, 0x12, 0x08 }, { 0x05, 0xff, 0xc8 }, { 0x06, 0x18, 0x07 }, { 0x07, 0x90, 0x00 }, { 0x09, 0x00, 0x00 }, { 0x0a, 0x00, 0x00 }, { 0x0b, 0x08, 0x00 }, { 0x0d, 0xcc, 0xcc }, { 0x0e, 0x13, 0x14 }, { 0x10, 0x9b, 0x83 }, { 0x11, 0x5a, 0x3f }, { 0x12, 0xe4, 0x73 }, { 0x13, 0x88, 0x84 }, { 0x14, 0x89, 0x80 }, { 0x15, 0x00, 0x20 }, { 0x16, 0x00, 0x00 }, { 0x17, 0xff, 0xa0 }, { 0x18, 0x6b, 0x20 }, { 0x19, 0x22, 0x40 }, { 0x1a, 0x10, 0x07 }, { 0x1b, 0x00, 0x47 }, { 0x1c, 0x03, 0xe0 }, { 0x1d, 0x00, 0x00 }, { 0x1e, 0x00, 0x00 }, { 0x1f, 0x00, 0x00 }, { 0x20, 0x00, 0x00 }, { 0x21, 0x00, 0x00 }, { 0x22, 0x00, 0x00 }, { 0x23, 0x00, 0x00 }, { 0x24, 0x00, 0x00 }, { 0x25, 0x00, 0x00 }, { 0x26, 0x00, 0x00 }, { 0x27, 0x00, 0x00 }, { 0x28, 0x00, 0x00 }, { 0x29, 0x00, 0x00 }, { 0x08, 0x80, 0x60 }, { 0x0f, 0x2d, 0x24 }, { 0x0c, 0x80, 0x80 } }; char value[3]; /* the only difference between PAL and NTSC init_values */ if (usbvision_device_data[usbvision->dev_model].video_norm == V4L2_STD_NTSC) init_values[4][1] = 0x34; for (i = 0; i < sizeof(init_values) / 3; i++) { usbvision_write_reg(usbvision, USBVISION_SER_MODE, USBVISION_SER_MODE_SOFT); memcpy(value, init_values[i], 3); rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1), USBVISION_OP_CODE, USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT, 0, (__u16) USBVISION_SER_DAT1, value, 3, HZ); if (rc < 0) return rc; usbvision_write_reg(usbvision, USBVISION_SER_MODE, USBVISION_SER_MODE_SIO); /* write 3 bytes to the serial port using SIO mode */ usbvision_write_reg(usbvision, USBVISION_SER_CONT, 3 | 0x10); usbvision_write_reg(usbvision, USBVISION_IOPIN_REG, 0); usbvision_write_reg(usbvision, USBVISION_SER_MODE, USBVISION_SER_MODE_SOFT); usbvision_write_reg(usbvision, USBVISION_IOPIN_REG, 
USBVISION_IO_2);
		/* bit-bang a final clock/data sequence on the serial pins */
		usbvision_write_reg(usbvision, USBVISION_SER_MODE,
				USBVISION_SER_MODE_SOFT | USBVISION_CLK_OUT);
		usbvision_write_reg(usbvision, USBVISION_SER_MODE,
				USBVISION_SER_MODE_SOFT | USBVISION_DAT_IO);
		usbvision_write_reg(usbvision, USBVISION_SER_MODE,
				USBVISION_SER_MODE_SOFT | USBVISION_CLK_OUT | USBVISION_DAT_IO);
	}
	return 0;
}

/*
 * usbvision_set_video_format()
 *
 * Select the isochronous payload format (YUV422 / YUV420 / compressed);
 * unknown values fall back to YUV420.
 */
static int usbvision_set_video_format(struct usb_usbvision *usbvision, int format)
{
	static const char proc[] = "usbvision_set_video_format";
	int rc;
	unsigned char value[2];

	if (!USBVISION_IS_OPERATIONAL(usbvision))
		return 0;

	PDEBUG(DBG_FUNC, "isoc_mode %#02x", format);

	if ((format != ISOC_MODE_YUV422) && (format != ISOC_MODE_YUV420) &&
	    (format != ISOC_MODE_COMPRESS)) {
		printk(KERN_ERR "usbvision: unknown video format %02x, using default YUV420",
		       format);
		format = ISOC_MODE_YUV420;
	}
	value[0] = 0x0A;	/* TODO: See the effect of the filter */
	value[1] = format;	/* Sets the VO_MODE register which follows FILT_CONT */
	rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
			USBVISION_OP_CODE,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT,
			0, (__u16) USBVISION_FILT_CONT, value, 2, HZ);
	if (rc < 0) {
		printk(KERN_ERR "%s: ERROR=%d. USBVISION stopped - "
		       "reconnect or reload driver.\n", proc, rc);
	}
	usbvision->isoc_mode = format;
	return rc;
}

/*
 * usbvision_set_output()
 *
 * Program the USB-side output size; frames wider/taller than the USB
 * maximum are halved in hardware and stretched back in software
 * (stretch_width/stretch_height).
 */
int usbvision_set_output(struct usb_usbvision *usbvision, int width, int height)
{
	int err_code = 0;
	int usb_width, usb_height;
	unsigned int frame_rate = 0, frame_drop = 0;
	unsigned char value[4];

	if (!USBVISION_IS_OPERATIONAL(usbvision))
		return 0;

	if (width > MAX_USB_WIDTH) {
		usb_width = width / 2;
		usbvision->stretch_width = 2;
	} else {
		usb_width = width;
		usbvision->stretch_width = 1;
	}

	if (height > MAX_USB_HEIGHT) {
		usb_height = height / 2;
		usbvision->stretch_height = 2;
	} else {
		usb_height = height;
		usbvision->stretch_height = 1;
	}

	RESTRICT_TO_RANGE(usb_width, MIN_FRAME_WIDTH, MAX_USB_WIDTH);
	usb_width &= ~(MIN_FRAME_WIDTH-1);	/* round down to width granularity */
	RESTRICT_TO_RANGE(usb_height, MIN_FRAME_HEIGHT, MAX_USB_HEIGHT);
	usb_height &= ~(1);			/* height must be even */

	PDEBUG(DBG_FUNC, "usb %dx%d; screen %dx%d; stretch %dx%d",
	       usb_width, usb_height, width, height,
	       usbvision->stretch_width, usbvision->stretch_height);

	/* I'll not rewrite the same values */
	if ((usb_width != usbvision->curwidth) || (usb_height != usbvision->curheight)) {
		value[0] = usb_width & 0xff;		/* LSB */
		value[1] = (usb_width >> 8) & 0x03;	/* MSB */
		value[2] = usb_height & 0xff;		/* LSB */
		value[3] = (usb_height >> 8) & 0x03;	/* MSB */
		err_code = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
				USBVISION_OP_CODE,
				USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT,
				0, (__u16) USBVISION_LXSIZE_O, value, 4, HZ);
		if (err_code < 0) {
			dev_err(&usbvision->dev->dev,
				"%s failed: error %d\n", __func__, err_code);
			return err_code;
		}
		usbvision->curwidth = usbvision->stretch_width * usb_width;
		usbvision->curheight = usbvision->stretch_height * usb_height;
	}

	/* estimate the achievable frame rate from the isoc budget */
	if (usbvision->isoc_mode == ISOC_MODE_YUV422)
		frame_rate = (usbvision->isoc_packet_size * 1000) / (usb_width * usb_height * 2);
	else if (usbvision->isoc_mode == ISOC_MODE_YUV420)
		frame_rate = (usbvision->isoc_packet_size * 1000) / ((usb_width *
usb_height * 12) / 8);
	else
		frame_rate = FRAMERATE_MAX;

	if (usbvision->tvnorm_id & V4L2_STD_625_50)
		frame_drop = frame_rate * 32 / 25 - 1;
	else if (usbvision->tvnorm_id & V4L2_STD_525_60)
		frame_drop = frame_rate * 32 / 30 - 1;
	RESTRICT_TO_RANGE(frame_drop, FRAMERATE_MIN, FRAMERATE_MAX);

	PDEBUG(DBG_FUNC, "frame_rate %d fps, frame_drop %d", frame_rate, frame_drop);

	frame_drop = FRAMERATE_MAX;	/* We can allow the maximum here, because dropping is controlled */

	if (usbvision_device_data[usbvision->dev_model].codec == CODEC_WEBCAM) {
		if (usbvision_device_data[usbvision->dev_model].video_norm == V4L2_STD_PAL)
			frame_drop = 25;
		else
			frame_drop = 30;
	}

	/* frame_drop = 7; => frame_phase = 1, 5, 9, 13, 17, 21, 25, 0, 4, 8, ...
		=> frame_skip = 4;
		=> frame_rate = (7 + 1) * 25 / 32 = 200 / 32 = 6.25;

	   frame_drop = 9; => frame_phase = 1, 5, 8, 11, 14, 17, 21, 24, 27, 1, 4, 8, ...
		=> frame_skip = 4, 3, 3, 3, 3, 4, 3, 3, 3, 3, 4, ...
		=> frame_rate = (9 + 1) * 25 / 32 = 250 / 32 = 7.8125;
	*/
	err_code = usbvision_write_reg(usbvision, USBVISION_FRM_RATE, frame_drop);
	return err_code;
}

/*
 * usbvision_frames_alloc
 * allocate the required frames
 */
int usbvision_frames_alloc(struct usb_usbvision *usbvision, int number_of_frames)
{
	int i;

	/* needs to be page aligned cause the buffers can be mapped individually!
*/
	usbvision->max_frame_size = PAGE_ALIGN(usbvision->curwidth *
			usbvision->curheight * usbvision->palette.bytes_per_pixel);

	/* Try to do my best to allocate the frames the user want in the remaining memory */
	usbvision->num_frames = number_of_frames;
	while (usbvision->num_frames > 0) {
		usbvision->fbuf_size = usbvision->num_frames * usbvision->max_frame_size;
		usbvision->fbuf = usbvision_rvmalloc(usbvision->fbuf_size);
		if (usbvision->fbuf)
			break;
		usbvision->num_frames--;	/* shrink the request until it fits */
	}

	spin_lock_init(&usbvision->queue_lock);
	init_waitqueue_head(&usbvision->wait_frame);
	init_waitqueue_head(&usbvision->wait_stream);

	/* Allocate all buffers */
	for (i = 0; i < usbvision->num_frames; i++) {
		usbvision->frame[i].index = i;
		usbvision->frame[i].grabstate = frame_state_unused;
		usbvision->frame[i].data = usbvision->fbuf + i * usbvision->max_frame_size;
		/*
		 * Set default sizes for read operation.
		 */
		usbvision->stretch_width = 1;
		usbvision->stretch_height = 1;
		usbvision->frame[i].width = usbvision->curwidth;
		usbvision->frame[i].height = usbvision->curheight;
		usbvision->frame[i].bytes_read = 0;
	}
	PDEBUG(DBG_FUNC, "allocated %d frames (%d bytes per frame)",
	       usbvision->num_frames, usbvision->max_frame_size);
	return usbvision->num_frames;	/* may be fewer than requested */
}

/*
 * usbvision_frames_free
 * frees memory allocated for the frames
 */
void usbvision_frames_free(struct usb_usbvision *usbvision)
{
	/* Have to free all that memory */
	PDEBUG(DBG_FUNC, "free %d frames", usbvision->num_frames);

	if (usbvision->fbuf != NULL) {
		usbvision_rvfree(usbvision->fbuf, usbvision->fbuf_size);
		usbvision->fbuf = NULL;
		usbvision->num_frames = 0;
	}
}

/*
 * usbvision_empty_framequeues()
 * prepare queues for incoming and outgoing frames
 */
void usbvision_empty_framequeues(struct usb_usbvision *usbvision)
{
	u32 i;

	INIT_LIST_HEAD(&(usbvision->inqueue));
	INIT_LIST_HEAD(&(usbvision->outqueue));

	for (i = 0; i < USBVISION_NUMFRAMES; i++) {
		usbvision->frame[i].grabstate = frame_state_unused;
		usbvision->frame[i].bytes_read = 0;
	}
}

/*
 *
usbvision_stream_interrupt()
 * stops streaming
 */
int usbvision_stream_interrupt(struct usb_usbvision *usbvision)
{
	int ret = 0;

	/* stop reading from the device */
	usbvision->streaming = stream_interrupt;
	/* wait (bounded) for the isoc completion path to reach stream_idle */
	ret = wait_event_timeout(usbvision->wait_stream,
				 (usbvision->streaming == stream_idle),
				 msecs_to_jiffies(USBVISION_NUMSBUF*USBVISION_URB_FRAMES));
	return ret;
}

/*
 * usbvision_set_compress_params()
 *
 * Program the initial compression threshold registers (INTRA_CYC.. and
 * PCM_THR1..), with bridge-specific distortion-threshold layouts.
 */
static int usbvision_set_compress_params(struct usb_usbvision *usbvision)
{
	static const char proc[] = "usbvision_set_compresion_params: ";
	int rc;
	unsigned char value[6];

	value[0] = 0x0F;	/* Intra-Compression cycle */
	value[1] = 0x01;	/* Reg.45 one line per strip */
	value[2] = 0x00;	/* Reg.46 Force intra mode on all new frames */
	value[3] = 0x00;	/* Reg.47 FORCE_UP <- 0 normal operation (not force) */
	value[4] = 0xA2;	/* Reg.48 BUF_THR I'm not sure if this does something in not compressed mode. */
	value[5] = 0x00;	/* Reg.49 DVI_YUV This has nothing to do with compression */

	/* catched values for NT1004 */
	/* value[0] = 0xFF; Never apply intra mode automatically */
	/* value[1] = 0xF1; Use full frame height for virtual strip width; One line per strip */
	/* value[2] = 0x01; Force intra mode on all new frames */
	/* value[3] = 0x00; Strip size 400 Bytes; do not force up */
	/* value[4] = 0xA2; */
	if (!USBVISION_IS_OPERATIONAL(usbvision))
		return 0;

	/* NOTE(review): only 5 of the 6 staged bytes are sent here — looks
	 * intentional (DVI_YUV is set elsewhere) but worth confirming */
	rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
			USBVISION_OP_CODE,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT,
			0, (__u16) USBVISION_INTRA_CYC, value, 5, HZ);
	if (rc < 0) {
		printk(KERN_ERR "%sERROR=%d. USBVISION stopped - "
		       "reconnect or reload driver.\n", proc, rc);
		return rc;
	}

	if (usbvision->bridge_type == BRIDGE_NT1004) {
		value[0] = 20;	/* PCM Threshold 1 */
		value[1] = 12;	/* PCM Threshold 2 */
		value[2] = 255;	/* Distortion Threshold inter */
		value[3] = 255;	/* Distortion Threshold intra */
		value[4] = 43;	/* Max Distortion inter */
		value[5] = 43;	/* Max Distortion intra */
	} else {
		value[0] = 20;	/* PCM Threshold 1 */
		value[1] = 12;	/* PCM Threshold 2 */
		value[2] = 255;	/* Distortion Threshold d7-d0 */
		value[3] = 0;	/* Distortion Threshold d11-d8 */
		value[4] = 43;	/* Max Distortion d7-d0 */
		value[5] = 0;	/* Max Distortion d8 */
	}

	if (!USBVISION_IS_OPERATIONAL(usbvision))
		return 0;

	rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
			USBVISION_OP_CODE,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT,
			0, (__u16) USBVISION_PCM_THR1, value, 6, HZ);
	if (rc < 0) {
		printk(KERN_ERR "%sERROR=%d. USBVISION stopped - "
		       "reconnect or reload driver.\n", proc, rc);
	}
	return rc;
}

/*
 * usbvision_set_input()
 *
 * Set the input (saa711x, ...)
size x y and other misc input params
 * I've no idea if this parameters are right
 *
 */
int usbvision_set_input(struct usb_usbvision *usbvision)
{
	static const char proc[] = "usbvision_set_input: ";
	int rc;
	unsigned char value[8];
	unsigned char dvi_yuv_value;

	if (!USBVISION_IS_OPERATIONAL(usbvision))
		return 0;

	/* Set input format expected from decoder*/
	if (usbvision_device_data[usbvision->dev_model].vin_reg1_override) {
		value[0] = usbvision_device_data[usbvision->dev_model].vin_reg1;
	} else if (usbvision_device_data[usbvision->dev_model].codec == CODEC_SAA7113) {
		/* SAA7113 uses 8 bit output */
		value[0] = USBVISION_8_422_SYNC;
	} else {
		/* I'm sure only about d2-d0 [010] 16 bit 4:2:2 usin sync pulses
		 * as that is how saa7111 is configured */
		value[0] = USBVISION_16_422_SYNC; /* | USBVISION_VSNC_POL | USBVISION_VCLK_POL);*/
	}

	rc = usbvision_write_reg(usbvision, USBVISION_VIN_REG1, value[0]);
	if (rc < 0) {
		printk(KERN_ERR "%sERROR=%d. USBVISION stopped - "
		       "reconnect or reload driver.\n", proc, rc);
		return rc;
	}

	/* per-norm capture window: line length, line count, h/v offsets
	 * (each pair below is a 16-bit little-endian value) */
	if (usbvision->tvnorm_id & V4L2_STD_PAL) {
		value[0] = 0xC0;
		value[1] = 0x02;	/* 0x02C0 -> 704 Input video line length */
		value[2] = 0x20;
		value[3] = 0x01;	/* 0x0120 -> 288 Input video n. of lines */
		value[4] = 0x60;
		value[5] = 0x00;	/* 0x0060 -> 96 Input video h offset */
		value[6] = 0x16;
		value[7] = 0x00;	/* 0x0016 -> 22 Input video v offset */
	} else if (usbvision->tvnorm_id & V4L2_STD_SECAM) {
		value[0] = 0xC0;
		value[1] = 0x02;	/* 0x02C0 -> 704 Input video line length */
		value[2] = 0x20;
		value[3] = 0x01;	/* 0x0120 -> 288 Input video n. of lines */
		value[4] = 0x01;
		value[5] = 0x00;	/* 0x0001 -> 01 Input video h offset */
		value[6] = 0x01;
		value[7] = 0x00;	/* 0x0001 -> 01 Input video v offset */
	} else {	/* V4L2_STD_NTSC */
		value[0] = 0xD0;
		value[1] = 0x02;	/* 0x02D0 -> 720 Input video line length */
		value[2] = 0xF0;
		value[3] = 0x00;	/* 0x00F0 -> 240 Input video number of lines */
		value[4] = 0x50;
		value[5] = 0x00;	/* 0x0050 -> 80 Input video h offset */
		value[6] = 0x10;
		value[7] = 0x00;	/* 0x0010 -> 16 Input video v offset */
	}

	/* webcam is only 480 pixels wide, both PAL and NTSC version */
	if (usbvision_device_data[usbvision->dev_model].codec == CODEC_WEBCAM) {
		value[0] = 0xe0;
		value[1] = 0x01;	/* 0x01E0 -> 480 Input video line length */
	}

	/* board-table and module-parameter overrides for the offsets;
	 * module parameters (adjust_x/y_offset) win over the board table */
	if (usbvision_device_data[usbvision->dev_model].x_offset >= 0) {
		value[4] = usbvision_device_data[usbvision->dev_model].x_offset & 0xff;
		value[5] = (usbvision_device_data[usbvision->dev_model].x_offset & 0x0300) >> 8;
	}

	if (adjust_x_offset != -1) {
		value[4] = adjust_x_offset & 0xff;
		value[5] = (adjust_x_offset & 0x0300) >> 8;
	}

	if (usbvision_device_data[usbvision->dev_model].y_offset >= 0) {
		value[6] = usbvision_device_data[usbvision->dev_model].y_offset & 0xff;
		value[7] = (usbvision_device_data[usbvision->dev_model].y_offset & 0x0300) >> 8;
	}

	if (adjust_y_offset != -1) {
		value[6] = adjust_y_offset & 0xff;
		value[7] = (adjust_y_offset & 0x0300) >> 8;
	}

	rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev, 1),
			USBVISION_OP_CODE,	/* USBVISION specific code */
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT,
			0, (__u16) USBVISION_LXSIZE_I, value, 8, HZ);
	if (rc < 0) {
		printk(KERN_ERR "%sERROR=%d. USBVISION stopped - "
		       "reconnect or reload driver.\n", proc, rc);
		return rc;
	}

	dvi_yuv_value = 0x00;	/* U comes after V, Ya comes after U/V, Yb comes after Yb */

	if (usbvision_device_data[usbvision->dev_model].dvi_yuv_override) {
		dvi_yuv_value = usbvision_device_data[usbvision->dev_model].dvi_yuv;
	} else if (usbvision_device_data[usbvision->dev_model].codec == CODEC_SAA7113) {
		/* This changes as the fine sync control changes. Further investigation necessary */
		dvi_yuv_value = 0x06;
	}

	return usbvision_write_reg(usbvision, USBVISION_DVI_YUV, dvi_yuv_value);
}

/*
 * usbvision_set_dram_settings()
 *
 * Set the buffer address needed by the usbvision dram to operate
 * This values has been taken with usbsnoop.
 *
 */
static int usbvision_set_dram_settings(struct usb_usbvision *usbvision)
{
	int rc;
	unsigned char value[8];

	if (usbvision->isoc_mode == ISOC_MODE_COMPRESS) {
		value[0] = 0x42;
		value[1] = 0x71;
		value[2] = 0xff;
		value[3] = 0x00;
		value[4] = 0x98;
		value[5] = 0xe0;
		value[6] = 0x71;
		value[7] = 0xff;
		/* UR:  0x0E200-0x3FFFF = 204288 Words (1 Word = 2 Byte) */
		/* FDL: 0x00000-0x0E099 =  57498 Words */
		/* VDW: 0x0E3FF-0x3FFFF */
	} else {
		value[0] = 0x42;
		value[1] = 0x00;
		value[2] = 0xff;
		value[3] = 0x00;
		value[4] = 0x00;
		value[5] = 0x00;
		value[6] = 0x00;
		value[7] = 0xff;
	}
	/* These are the values of the address of the video buffer,
	 * they have to be loaded into the USBVISION_DRM_PRM1-8
	 *
	 * Start address of video output buffer for read:	drm_prm1-2 -> 0x00000
	 * End address of video output buffer for read:		drm_prm1-3 -> 0x1ffff
	 * Start address of video frame delay buffer:		drm_prm1-4 -> 0x20000
	 *    Only used in compressed mode
	 * End address of video frame delay buffer:		drm_prm1-5-6 -> 0x3ffff
	 *    Only used in compressed mode
	 * Start address of video output buffer for write:	drm_prm1-7 -> 0x00000
	 * End address of video output buffer for write:	drm_prm1-8 -> 0x1ffff
	 */

	if (!USBVISION_IS_OPERATIONAL(usbvision))
		return 0;

	rc = usb_control_msg(usbvision->dev, usb_sndctrlpipe(usbvision->dev,
1),
			USBVISION_OP_CODE,	/* USBVISION specific code */
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT,
			0, (__u16) USBVISION_DRM_PRM1, value, 8, HZ);
	if (rc < 0) {
		dev_err(&usbvision->dev->dev, "%s: ERROR=%d\n", __func__, rc);
		return rc;
	}

	/* Restart the video buffer logic */
	rc = usbvision_write_reg(usbvision, USBVISION_DRM_CONT,
			USBVISION_RES_UR | USBVISION_RES_FDL | USBVISION_RES_VDW);
	if (rc < 0)
		return rc;
	rc = usbvision_write_reg(usbvision, USBVISION_DRM_CONT, 0x00);

	return rc;
}

/*
 * usbvision_power_on()
 *
 * Power on the device, enables suspend-resume logic
 * & reset the isoc End-Point
 *
 */
int usbvision_power_on(struct usb_usbvision *usbvision)
{
	int err_code = 0;

	PDEBUG(DBG_FUNC, "");

	usbvision_write_reg(usbvision, USBVISION_PWR_REG, USBVISION_SSPND_EN);
	usbvision_write_reg(usbvision, USBVISION_PWR_REG,
			USBVISION_SSPND_EN | USBVISION_RES2);

	if (usbvision_device_data[usbvision->dev_model].codec == CODEC_WEBCAM) {
		usbvision_write_reg(usbvision, USBVISION_VIN_REG1,
				USBVISION_16_422_SYNC | USBVISION_HVALID_PO);
		usbvision_write_reg(usbvision, USBVISION_VIN_REG2,
				USBVISION_NOHVALID | USBVISION_KEEP_BLANK);
	}
	usbvision_write_reg(usbvision, USBVISION_PWR_REG,
			USBVISION_SSPND_EN | USBVISION_PWR_VID);
	mdelay(10);	/* let the video power rail settle before the final write */
	err_code = usbvision_write_reg(usbvision, USBVISION_PWR_REG,
			USBVISION_SSPND_EN | USBVISION_PWR_VID | USBVISION_RES2);
	if (err_code == 1)
		usbvision->power = 1;
	PDEBUG(DBG_FUNC, "%s: err_code %d", (err_code < 0) ?
"ERROR" : "power is on", err_code); return err_code; } /* * usbvision timer stuff */ /* to call usbvision_power_off from task queue */ static void call_usbvision_power_off(struct work_struct *work) { struct usb_usbvision *usbvision = container_of(work, struct usb_usbvision, power_off_work); PDEBUG(DBG_FUNC, ""); if (mutex_lock_interruptible(&usbvision->v4l2_lock)) return; if (usbvision->user == 0) { usbvision_i2c_unregister(usbvision); usbvision_power_off(usbvision); usbvision->initialized = 0; } mutex_unlock(&usbvision->v4l2_lock); } static void usbvision_power_off_timer(unsigned long data) { struct usb_usbvision *usbvision = (void *)data; PDEBUG(DBG_FUNC, ""); del_timer(&usbvision->power_off_timer); INIT_WORK(&usbvision->power_off_work, call_usbvision_power_off); (void) schedule_work(&usbvision->power_off_work); } void usbvision_init_power_off_timer(struct usb_usbvision *usbvision) { init_timer(&usbvision->power_off_timer); usbvision->power_off_timer.data = (long)usbvision; usbvision->power_off_timer.function = usbvision_power_off_timer; } void usbvision_set_power_off_timer(struct usb_usbvision *usbvision) { mod_timer(&usbvision->power_off_timer, jiffies + USBVISION_POWEROFF_TIME); } void usbvision_reset_power_off_timer(struct usb_usbvision *usbvision) { if (timer_pending(&usbvision->power_off_timer)) del_timer(&usbvision->power_off_timer); } /* * usbvision_begin_streaming() * Sure you have to put bit 7 to 0, if not incoming frames are droped, but no * idea about the rest */ int usbvision_begin_streaming(struct usb_usbvision *usbvision) { if (usbvision->isoc_mode == ISOC_MODE_COMPRESS) usbvision_init_compression(usbvision); return usbvision_write_reg(usbvision, USBVISION_VIN_REG2, USBVISION_NOHVALID | usbvision->vin_reg2_preset); } /* * usbvision_restart_isoc() * Not sure yet if touching here PWR_REG make loose the config */ int usbvision_restart_isoc(struct usb_usbvision *usbvision) { int ret; ret = usbvision_write_reg(usbvision, USBVISION_PWR_REG, 
USBVISION_SSPND_EN | USBVISION_PWR_VID); if (ret < 0) return ret; ret = usbvision_write_reg(usbvision, USBVISION_PWR_REG, USBVISION_SSPND_EN | USBVISION_PWR_VID | USBVISION_RES2); if (ret < 0) return ret; ret = usbvision_write_reg(usbvision, USBVISION_VIN_REG2, USBVISION_KEEP_BLANK | USBVISION_NOHVALID | usbvision->vin_reg2_preset); if (ret < 0) return ret; /* TODO: schedule timeout */ while ((usbvision_read_reg(usbvision, USBVISION_STATUS_REG) & 0x01) != 1) ; return 0; } int usbvision_audio_off(struct usb_usbvision *usbvision) { if (usbvision_write_reg(usbvision, USBVISION_IOPIN_REG, USBVISION_AUDIO_MUTE) < 0) { printk(KERN_ERR "usbvision_audio_off: can't write reg\n"); return -1; } usbvision->audio_mute = 0; usbvision->audio_channel = USBVISION_AUDIO_MUTE; return 0; } int usbvision_set_audio(struct usb_usbvision *usbvision, int audio_channel) { if (!usbvision->audio_mute) { if (usbvision_write_reg(usbvision, USBVISION_IOPIN_REG, audio_channel) < 0) { printk(KERN_ERR "usbvision_set_audio: can't write iopin register for audio switching\n"); return -1; } } usbvision->audio_channel = audio_channel; return 0; } int usbvision_setup(struct usb_usbvision *usbvision, int format) { if (usbvision_device_data[usbvision->dev_model].codec == CODEC_WEBCAM) usbvision_init_webcam(usbvision); usbvision_set_video_format(usbvision, format); usbvision_set_dram_settings(usbvision); usbvision_set_compress_params(usbvision); usbvision_set_input(usbvision); usbvision_set_output(usbvision, MAX_USB_WIDTH, MAX_USB_HEIGHT); usbvision_restart_isoc(usbvision); /* cosas del PCM */ return USBVISION_IS_OPERATIONAL(usbvision); } int usbvision_set_alternate(struct usb_usbvision *dev) { int err_code, prev_alt = dev->iface_alt; int i; dev->iface_alt = 0; for (i = 0; i < dev->num_alt; i++) if (dev->alt_max_pkt_size[i] > dev->alt_max_pkt_size[dev->iface_alt]) dev->iface_alt = i; if (dev->iface_alt != prev_alt) { dev->isoc_packet_size = dev->alt_max_pkt_size[dev->iface_alt]; PDEBUG(DBG_FUNC, "setting 
alternate %d with max_packet_size=%u", dev->iface_alt, dev->isoc_packet_size); err_code = usb_set_interface(dev->dev, dev->iface, dev->iface_alt); if (err_code < 0) { dev_err(&dev->dev->dev, "cannot change alternate number to %d (error=%i)\n", dev->iface_alt, err_code); return err_code; } } PDEBUG(DBG_ISOC, "ISO Packet Length:%d", dev->isoc_packet_size); return 0; } /* * usbvision_init_isoc() * */ int usbvision_init_isoc(struct usb_usbvision *usbvision) { struct usb_device *dev = usbvision->dev; int buf_idx, err_code, reg_value; int sb_size; if (!USBVISION_IS_OPERATIONAL(usbvision)) return -EFAULT; usbvision->cur_frame = NULL; scratch_reset(usbvision); /* Alternate interface 1 is is the biggest frame size */ err_code = usbvision_set_alternate(usbvision); if (err_code < 0) { usbvision->last_error = err_code; return -EBUSY; } sb_size = USBVISION_URB_FRAMES * usbvision->isoc_packet_size; reg_value = (16 - usbvision_read_reg(usbvision, USBVISION_ALTER_REG)) & 0x0F; usbvision->usb_bandwidth = reg_value >> 1; PDEBUG(DBG_ISOC, "USB Bandwidth Usage: %dMbit/Sec", usbvision->usb_bandwidth); /* We double buffer the Iso lists */ for (buf_idx = 0; buf_idx < USBVISION_NUMSBUF; buf_idx++) { int j, k; struct urb *urb; urb = usb_alloc_urb(USBVISION_URB_FRAMES, GFP_KERNEL); if (urb == NULL) { dev_err(&usbvision->dev->dev, "%s: usb_alloc_urb() failed\n", __func__); return -ENOMEM; } usbvision->sbuf[buf_idx].urb = urb; usbvision->sbuf[buf_idx].data = usb_alloc_coherent(usbvision->dev, sb_size, GFP_KERNEL, &urb->transfer_dma); urb->dev = dev; urb->context = usbvision; urb->pipe = usb_rcvisocpipe(dev, usbvision->video_endp); urb->transfer_flags = URB_ISO_ASAP | URB_NO_TRANSFER_DMA_MAP; urb->interval = 1; urb->transfer_buffer = usbvision->sbuf[buf_idx].data; urb->complete = usbvision_isoc_irq; urb->number_of_packets = USBVISION_URB_FRAMES; urb->transfer_buffer_length = usbvision->isoc_packet_size * USBVISION_URB_FRAMES; for (j = k = 0; j < USBVISION_URB_FRAMES; j++, k += 
usbvision->isoc_packet_size) {
			urb->iso_frame_desc[j].offset = k;
			urb->iso_frame_desc[j].length = usbvision->isoc_packet_size;
		}
	}

	/* Submit all URBs */
	for (buf_idx = 0; buf_idx < USBVISION_NUMSBUF; buf_idx++) {
		err_code = usb_submit_urb(usbvision->sbuf[buf_idx].urb, GFP_KERNEL);
		if (err_code) {
			dev_err(&usbvision->dev->dev,
				"%s: usb_submit_urb(%d) failed: error %d\n",
				__func__, buf_idx, err_code);
		}
	}

	usbvision->streaming = stream_idle;
	PDEBUG(DBG_ISOC, "%s: streaming=1 usbvision->video_endp=$%02x",
	       __func__, usbvision->video_endp);
	return 0;
}

/*
 * usbvision_stop_isoc()
 *
 * This procedure stops streaming and deallocates URBs. Then it
 * activates zero-bandwidth alt. setting of the video interface.
 *
 */
void usbvision_stop_isoc(struct usb_usbvision *usbvision)
{
	int buf_idx, err_code, reg_value;
	int sb_size = USBVISION_URB_FRAMES * usbvision->isoc_packet_size;

	if ((usbvision->streaming == stream_off) || (usbvision->dev == NULL))
		return;

	/* Unschedule all of the iso td's */
	for (buf_idx = 0; buf_idx < USBVISION_NUMSBUF; buf_idx++) {
		usb_kill_urb(usbvision->sbuf[buf_idx].urb);
		if (usbvision->sbuf[buf_idx].data) {
			usb_free_coherent(usbvision->dev,
					  sb_size,
					  usbvision->sbuf[buf_idx].data,
					  usbvision->sbuf[buf_idx].urb->transfer_dma);
		}
		usb_free_urb(usbvision->sbuf[buf_idx].urb);
		usbvision->sbuf[buf_idx].urb = NULL;
	}

	PDEBUG(DBG_ISOC, "%s: streaming=stream_off\n", __func__);
	usbvision->streaming = stream_off;

	if (!usbvision->remove_pending) {
		/* Set packet size to 0 */
		usbvision->iface_alt = 0;
		err_code = usb_set_interface(usbvision->dev, usbvision->iface,
					     usbvision->iface_alt);
		if (err_code < 0) {
			dev_err(&usbvision->dev->dev,
				"%s: usb_set_interface() failed: error %d\n",
				__func__, err_code);
			usbvision->last_error = err_code;
		}
		reg_value = (16-usbvision_read_reg(usbvision, USBVISION_ALTER_REG)) & 0x0F;
		usbvision->isoc_packet_size = (reg_value == 0) ?
0 : (reg_value * 64) - 1;
		PDEBUG(DBG_ISOC, "ISO Packet Length:%d",
		       usbvision->isoc_packet_size);
		usbvision->usb_bandwidth = reg_value >> 1;
		PDEBUG(DBG_ISOC, "USB Bandwidth Usage: %dMbit/Sec",
		       usbvision->usb_bandwidth);
	}
}

/* Route the selected video input (and the matching audio channel) through
 * the SAA711x decoder. */
int usbvision_muxsel(struct usb_usbvision *usbvision, int channel)
{
	/* inputs #0 and #3 are constant for every SAA711x. */
	/* inputs #1 and #2 are variable for SAA7111 and SAA7113 */
	int mode[4] = { SAA7115_COMPOSITE0, 0, 0, SAA7115_COMPOSITE3 };
	int audio[] = { 1, 0, 0, 0 };
	/* channel 0 is TV with audiochannel 1 (tuner mono) */
	/* channel 1 is Composite with audio channel 0 (line in) */
	/* channel 2 is S-Video with audio channel 0 (line in) */
	/* channel 3 is additional video inputs to the device with audio channel 0 (line in) */

	RESTRICT_TO_RANGE(channel, 0, usbvision->video_inputs);
	usbvision->ctl_input = channel;

	/* set the new channel */
	/* Regular USB TV Tuners -> channel: 0 = Television, 1 = Composite, 2 = S-Video */
	/* Four video input devices -> channel: 0 = Chan White, 1 = Chan Green, 2 = Chan Yellow, 3 = Chan Red */
	switch (usbvision_device_data[usbvision->dev_model].codec) {
	case CODEC_SAA7113:
		mode[1] = SAA7115_COMPOSITE2;
		if (switch_svideo_input) {
			/* To handle problems with S-Video Input for
			 * some devices.  Use switch_svideo_input
			 * parameter when loading the module.*/
			mode[2] = SAA7115_COMPOSITE1;
		} else {
			mode[2] = SAA7115_SVIDEO1;
		}
		break;
	case CODEC_SAA7111:
	default:
		/* modes for saa7111 */
		mode[1] = SAA7115_COMPOSITE1;
		mode[2] = SAA7115_SVIDEO1;
		break;
	}
	call_all(usbvision, video, s_routing, mode[channel], 0, 0);
	usbvision_set_audio(usbvision, audio[channel]);
	return 0;
}

/*
 * Overrides for Emacs so that we follow Linus's tabbing style.
 * ---------------------------------------------------------------------------
 * Local variables:
 * c-basic-offset: 8
 * End:
 */
gpl-2.0
thomaslee/linux-3.14.18
drivers/media/pci/cx23885/cx23885-alsa.c
2747
13554
/* * * Support for CX23885 analog audio capture * * (c) 2008 Mijhail Moreyra <mijhail.moreyra@gmail.com> * Adapted from cx88-alsa.c * (c) 2009 Steven Toth <stoth@kernellabs.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/module.h> #include <linux/init.h> #include <linux/device.h> #include <linux/interrupt.h> #include <linux/vmalloc.h> #include <linux/dma-mapping.h> #include <linux/pci.h> #include <asm/delay.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/control.h> #include <sound/initval.h> #include <sound/tlv.h> #include "cx23885.h" #include "cx23885-reg.h" #define AUDIO_SRAM_CHANNEL SRAM_CH07 #define dprintk(level, fmt, arg...) do { \ if (audio_debug + 1 > level) \ printk(KERN_INFO "%s: " fmt, chip->dev->name , ## arg); \ } while(0) #define dprintk_core(level, fmt, arg...) 
if (audio_debug >= level) \
		printk(KERN_DEBUG "%s: " fmt, chip->dev->name , ## arg)

/****************************************************************************
	Module global static vars
 ****************************************************************************/

static unsigned int disable_analog_audio;
module_param(disable_analog_audio, int, 0644);
MODULE_PARM_DESC(disable_analog_audio, "disable analog audio ALSA driver");

static unsigned int audio_debug;
module_param(audio_debug, int, 0644);
MODULE_PARM_DESC(audio_debug, "enable debug messages [analog audio]");

/****************************************************************************
			Board specific funtions
 ****************************************************************************/

/* Constants taken from cx88-reg.h */
#define AUD_INT_DN_RISCI1	(1 <<  0)
#define AUD_INT_UP_RISCI1	(1 <<  1)
#define AUD_INT_RDS_DN_RISCI1	(1 <<  2)
#define AUD_INT_DN_RISCI2	(1 <<  4) /* yes, 3 is skipped */
#define AUD_INT_UP_RISCI2	(1 <<  5)
#define AUD_INT_RDS_DN_RISCI2	(1 <<  6)
#define AUD_INT_DN_SYNC		(1 << 12)
#define AUD_INT_UP_SYNC		(1 << 13)
#define AUD_INT_RDS_DN_SYNC	(1 << 14)
#define AUD_INT_OPC_ERR		(1 << 16)
#define AUD_INT_BER_IRQ		(1 << 20)
#define AUD_INT_MCHG_IRQ	(1 << 21)
#define GP_COUNT_CONTROL_RESET	0x3

/*
 * BOARD Specific: Sets audio DMA
 */

static int cx23885_start_audio_dma(struct cx23885_audio_dev *chip)
{
	struct cx23885_audio_buffer *buf = chip->buf;
	struct cx23885_dev *dev = chip->dev;
	struct sram_channel *audio_ch =
		&dev->sram_channels[AUDIO_SRAM_CHANNEL];

	dprintk(1, "%s()\n", __func__);

	/* Make sure RISC/FIFO are off before changing FIFO/RISC settings */
	cx_clear(AUD_INT_DMA_CTL, 0x11);

	/* setup fifo + format - out channel */
	cx23885_sram_channel_setup(chip->dev, audio_ch, buf->bpl,
		buf->risc.dma);

	/* sets bpl size */
	cx_write(AUD_INT_A_LNGTH, buf->bpl);

	/* This is required to get good audio (1 seems to be ok) */
	cx_write(AUD_INT_A_MODE, 1);

	/* reset counter */
	cx_write(AUD_INT_A_GPCNT_CTL,
GP_COUNT_CONTROL_RESET); atomic_set(&chip->count, 0); dprintk(1, "Start audio DMA, %d B/line, %d lines/FIFO, %d periods, %d " "byte buffer\n", buf->bpl, cx_read(audio_ch->cmds_start+12)>>1, chip->num_periods, buf->bpl * chip->num_periods); /* Enables corresponding bits at AUD_INT_STAT */ cx_write(AUDIO_INT_INT_MSK, AUD_INT_OPC_ERR | AUD_INT_DN_SYNC | AUD_INT_DN_RISCI1); /* Clean any pending interrupt bits already set */ cx_write(AUDIO_INT_INT_STAT, ~0); /* enable audio irqs */ cx_set(PCI_INT_MSK, chip->dev->pci_irqmask | PCI_MSK_AUD_INT); /* start dma */ cx_set(DEV_CNTRL2, (1<<5)); /* Enables Risc Processor */ cx_set(AUD_INT_DMA_CTL, 0x11); /* audio downstream FIFO and RISC enable */ if (audio_debug) cx23885_sram_channel_dump(chip->dev, audio_ch); return 0; } /* * BOARD Specific: Resets audio DMA */ static int cx23885_stop_audio_dma(struct cx23885_audio_dev *chip) { struct cx23885_dev *dev = chip->dev; dprintk(1, "Stopping audio DMA\n"); /* stop dma */ cx_clear(AUD_INT_DMA_CTL, 0x11); /* disable irqs */ cx_clear(PCI_INT_MSK, PCI_MSK_AUD_INT); cx_clear(AUDIO_INT_INT_MSK, AUD_INT_OPC_ERR | AUD_INT_DN_SYNC | AUD_INT_DN_RISCI1); if (audio_debug) cx23885_sram_channel_dump(chip->dev, &dev->sram_channels[AUDIO_SRAM_CHANNEL]); return 0; } /* * BOARD Specific: Handles audio IRQ */ int cx23885_audio_irq(struct cx23885_dev *dev, u32 status, u32 mask) { struct cx23885_audio_dev *chip = dev->audio_dev; if (0 == (status & mask)) return 0; cx_write(AUDIO_INT_INT_STAT, status); /* risc op code error */ if (status & AUD_INT_OPC_ERR) { printk(KERN_WARNING "%s/1: Audio risc op code error\n", dev->name); cx_clear(AUD_INT_DMA_CTL, 0x11); cx23885_sram_channel_dump(dev, &dev->sram_channels[AUDIO_SRAM_CHANNEL]); } if (status & AUD_INT_DN_SYNC) { dprintk(1, "Downstream sync error\n"); cx_write(AUD_INT_A_GPCNT_CTL, GP_COUNT_CONTROL_RESET); return 1; } /* risc1 downstream */ if (status & AUD_INT_DN_RISCI1) { atomic_set(&chip->count, cx_read(AUD_INT_A_GPCNT)); 
snd_pcm_period_elapsed(chip->substream); } /* FIXME: Any other status should deserve a special handling? */ return 1; } static int dsp_buffer_free(struct cx23885_audio_dev *chip) { BUG_ON(!chip->dma_size); dprintk(2, "Freeing buffer\n"); videobuf_dma_unmap(&chip->pci->dev, chip->dma_risc); videobuf_dma_free(chip->dma_risc); btcx_riscmem_free(chip->pci, &chip->buf->risc); kfree(chip->buf); chip->dma_risc = NULL; chip->dma_size = 0; return 0; } /**************************************************************************** ALSA PCM Interface ****************************************************************************/ /* * Digital hardware definition */ #define DEFAULT_FIFO_SIZE 4096 static struct snd_pcm_hardware snd_cx23885_digital_hw = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_MMAP_VALID, .formats = SNDRV_PCM_FMTBIT_S16_LE, .rates = SNDRV_PCM_RATE_48000, .rate_min = 48000, .rate_max = 48000, .channels_min = 2, .channels_max = 2, /* Analog audio output will be full of clicks and pops if there are not exactly four lines in the SRAM FIFO buffer. */ .period_bytes_min = DEFAULT_FIFO_SIZE/4, .period_bytes_max = DEFAULT_FIFO_SIZE/4, .periods_min = 1, .periods_max = 1024, .buffer_bytes_max = (1024*1024), }; /* * audio pcm capture open callback */ static int snd_cx23885_pcm_open(struct snd_pcm_substream *substream) { struct cx23885_audio_dev *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; int err; if (!chip) { printk(KERN_ERR "BUG: cx23885 can't find device struct." 
" Can't proceed with open\n"); return -ENODEV; } err = snd_pcm_hw_constraint_pow2(runtime, 0, SNDRV_PCM_HW_PARAM_PERIODS); if (err < 0) goto _error; chip->substream = substream; runtime->hw = snd_cx23885_digital_hw; if (chip->dev->sram_channels[AUDIO_SRAM_CHANNEL].fifo_size != DEFAULT_FIFO_SIZE) { unsigned int bpl = chip->dev-> sram_channels[AUDIO_SRAM_CHANNEL].fifo_size / 4; bpl &= ~7; /* must be multiple of 8 */ runtime->hw.period_bytes_min = bpl; runtime->hw.period_bytes_max = bpl; } return 0; _error: dprintk(1, "Error opening PCM!\n"); return err; } /* * audio close callback */ static int snd_cx23885_close(struct snd_pcm_substream *substream) { return 0; } /* * hw_params callback */ static int snd_cx23885_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params) { struct cx23885_audio_dev *chip = snd_pcm_substream_chip(substream); struct videobuf_dmabuf *dma; struct cx23885_audio_buffer *buf; int ret; if (substream->runtime->dma_area) { dsp_buffer_free(chip); substream->runtime->dma_area = NULL; } chip->period_size = params_period_bytes(hw_params); chip->num_periods = params_periods(hw_params); chip->dma_size = chip->period_size * params_periods(hw_params); BUG_ON(!chip->dma_size); BUG_ON(chip->num_periods & (chip->num_periods-1)); buf = kzalloc(sizeof(*buf), GFP_KERNEL); if (NULL == buf) return -ENOMEM; buf->bpl = chip->period_size; dma = &buf->dma; videobuf_dma_init(dma); ret = videobuf_dma_init_kernel(dma, PCI_DMA_FROMDEVICE, (PAGE_ALIGN(chip->dma_size) >> PAGE_SHIFT)); if (ret < 0) goto error; ret = videobuf_dma_map(&chip->pci->dev, dma); if (ret < 0) goto error; ret = cx23885_risc_databuffer(chip->pci, &buf->risc, dma->sglist, chip->period_size, chip->num_periods, 1); if (ret < 0) goto error; /* Loop back to start of program */ buf->risc.jmp[0] = cpu_to_le32(RISC_JUMP|RISC_IRQ1|RISC_CNT_INC); buf->risc.jmp[1] = cpu_to_le32(buf->risc.dma); buf->risc.jmp[2] = cpu_to_le32(0); /* bits 63-32 */ chip->buf = buf; chip->dma_risc = dma; 
substream->runtime->dma_area = chip->dma_risc->vaddr; substream->runtime->dma_bytes = chip->dma_size; substream->runtime->dma_addr = 0; return 0; error: kfree(buf); return ret; } /* * hw free callback */ static int snd_cx23885_hw_free(struct snd_pcm_substream *substream) { struct cx23885_audio_dev *chip = snd_pcm_substream_chip(substream); if (substream->runtime->dma_area) { dsp_buffer_free(chip); substream->runtime->dma_area = NULL; } return 0; } /* * prepare callback */ static int snd_cx23885_prepare(struct snd_pcm_substream *substream) { return 0; } /* * trigger callback */ static int snd_cx23885_card_trigger(struct snd_pcm_substream *substream, int cmd) { struct cx23885_audio_dev *chip = snd_pcm_substream_chip(substream); int err; /* Local interrupts are already disabled by ALSA */ spin_lock(&chip->lock); switch (cmd) { case SNDRV_PCM_TRIGGER_START: err = cx23885_start_audio_dma(chip); break; case SNDRV_PCM_TRIGGER_STOP: err = cx23885_stop_audio_dma(chip); break; default: err = -EINVAL; break; } spin_unlock(&chip->lock); return err; } /* * pointer callback */ static snd_pcm_uframes_t snd_cx23885_pointer( struct snd_pcm_substream *substream) { struct cx23885_audio_dev *chip = snd_pcm_substream_chip(substream); struct snd_pcm_runtime *runtime = substream->runtime; u16 count; count = atomic_read(&chip->count); return runtime->period_size * (count & (runtime->periods-1)); } /* * page callback (needed for mmap) */ static struct page *snd_cx23885_page(struct snd_pcm_substream *substream, unsigned long offset) { void *pageptr = substream->runtime->dma_area + offset; return vmalloc_to_page(pageptr); } /* * operators */ static struct snd_pcm_ops snd_cx23885_pcm_ops = { .open = snd_cx23885_pcm_open, .close = snd_cx23885_close, .ioctl = snd_pcm_lib_ioctl, .hw_params = snd_cx23885_hw_params, .hw_free = snd_cx23885_hw_free, .prepare = snd_cx23885_prepare, .trigger = snd_cx23885_card_trigger, .pointer = snd_cx23885_pointer, .page = snd_cx23885_page, }; /* * create a PCM 
device */ static int snd_cx23885_pcm(struct cx23885_audio_dev *chip, int device, char *name) { int err; struct snd_pcm *pcm; err = snd_pcm_new(chip->card, name, device, 0, 1, &pcm); if (err < 0) return err; pcm->private_data = chip; strcpy(pcm->name, name); snd_pcm_set_ops(pcm, SNDRV_PCM_STREAM_CAPTURE, &snd_cx23885_pcm_ops); return 0; } /**************************************************************************** Basic Flow for Sound Devices ****************************************************************************/ /* * Alsa Constructor - Component probe */ struct cx23885_audio_dev *cx23885_audio_register(struct cx23885_dev *dev) { struct snd_card *card; struct cx23885_audio_dev *chip; int err; if (disable_analog_audio) return NULL; if (dev->sram_channels[AUDIO_SRAM_CHANNEL].cmds_start == 0) { printk(KERN_WARNING "%s(): Missing SRAM channel configuration " "for analog TV Audio\n", __func__); return NULL; } err = snd_card_create(SNDRV_DEFAULT_IDX1, SNDRV_DEFAULT_STR1, THIS_MODULE, sizeof(struct cx23885_audio_dev), &card); if (err < 0) goto error; chip = (struct cx23885_audio_dev *) card->private_data; chip->dev = dev; chip->pci = dev->pci; chip->card = card; spin_lock_init(&chip->lock); snd_card_set_dev(card, &dev->pci->dev); err = snd_cx23885_pcm(chip, 0, "CX23885 Digital"); if (err < 0) goto error; strcpy(card->driver, "CX23885"); sprintf(card->shortname, "Conexant CX23885"); sprintf(card->longname, "%s at %s", card->shortname, dev->name); err = snd_card_register(card); if (err < 0) goto error; dprintk(0, "registered ALSA audio device\n"); return chip; error: snd_card_free(card); printk(KERN_ERR "%s(): Failed to register analog " "audio adapter\n", __func__); return NULL; } /* * ALSA destructor */ void cx23885_audio_unregister(struct cx23885_dev *dev) { struct cx23885_audio_dev *chip = dev->audio_dev; snd_card_free(chip->card); }
gpl-2.0
narantech/linux-rpi
security/tomoyo/condition.c
3515
27738
/* * security/tomoyo/condition.c * * Copyright (C) 2005-2011 NTT DATA CORPORATION */ #include "common.h" #include <linux/slab.h> /* List of "struct tomoyo_condition". */ LIST_HEAD(tomoyo_condition_list); /** * tomoyo_argv - Check argv[] in "struct linux_binbrm". * * @index: Index number of @arg_ptr. * @arg_ptr: Contents of argv[@index]. * @argc: Length of @argv. * @argv: Pointer to "struct tomoyo_argv". * @checked: Set to true if @argv[@index] was found. * * Returns true on success, false otherwise. */ static bool tomoyo_argv(const unsigned int index, const char *arg_ptr, const int argc, const struct tomoyo_argv *argv, u8 *checked) { int i; struct tomoyo_path_info arg; arg.name = arg_ptr; for (i = 0; i < argc; argv++, checked++, i++) { bool result; if (index != argv->index) continue; *checked = 1; tomoyo_fill_path_info(&arg); result = tomoyo_path_matches_pattern(&arg, argv->value); if (argv->is_not) result = !result; if (!result) return false; } return true; } /** * tomoyo_envp - Check envp[] in "struct linux_binbrm". * * @env_name: The name of environment variable. * @env_value: The value of environment variable. * @envc: Length of @envp. * @envp: Pointer to "struct tomoyo_envp". * @checked: Set to true if @envp[@env_name] was found. * * Returns true on success, false otherwise. 
*/ static bool tomoyo_envp(const char *env_name, const char *env_value, const int envc, const struct tomoyo_envp *envp, u8 *checked) { int i; struct tomoyo_path_info name; struct tomoyo_path_info value; name.name = env_name; tomoyo_fill_path_info(&name); value.name = env_value; tomoyo_fill_path_info(&value); for (i = 0; i < envc; envp++, checked++, i++) { bool result; if (!tomoyo_path_matches_pattern(&name, envp->name)) continue; *checked = 1; if (envp->value) { result = tomoyo_path_matches_pattern(&value, envp->value); if (envp->is_not) result = !result; } else { result = true; if (!envp->is_not) result = !result; } if (!result) return false; } return true; } /** * tomoyo_scan_bprm - Scan "struct linux_binprm". * * @ee: Pointer to "struct tomoyo_execve". * @argc: Length of @argc. * @argv: Pointer to "struct tomoyo_argv". * @envc: Length of @envp. * @envp: Poiner to "struct tomoyo_envp". * * Returns true on success, false otherwise. */ static bool tomoyo_scan_bprm(struct tomoyo_execve *ee, const u16 argc, const struct tomoyo_argv *argv, const u16 envc, const struct tomoyo_envp *envp) { struct linux_binprm *bprm = ee->bprm; struct tomoyo_page_dump *dump = &ee->dump; char *arg_ptr = ee->tmp; int arg_len = 0; unsigned long pos = bprm->p; int offset = pos % PAGE_SIZE; int argv_count = bprm->argc; int envp_count = bprm->envc; bool result = true; u8 local_checked[32]; u8 *checked; if (argc + envc <= sizeof(local_checked)) { checked = local_checked; memset(local_checked, 0, sizeof(local_checked)); } else { checked = kzalloc(argc + envc, GFP_NOFS); if (!checked) return false; } while (argv_count || envp_count) { if (!tomoyo_dump_page(bprm, pos, dump)) { result = false; goto out; } pos += PAGE_SIZE - offset; while (offset < PAGE_SIZE) { /* Read. 
*/ const char *kaddr = dump->data; const unsigned char c = kaddr[offset++]; if (c && arg_len < TOMOYO_EXEC_TMPSIZE - 10) { if (c == '\\') { arg_ptr[arg_len++] = '\\'; arg_ptr[arg_len++] = '\\'; } else if (c > ' ' && c < 127) { arg_ptr[arg_len++] = c; } else { arg_ptr[arg_len++] = '\\'; arg_ptr[arg_len++] = (c >> 6) + '0'; arg_ptr[arg_len++] = ((c >> 3) & 7) + '0'; arg_ptr[arg_len++] = (c & 7) + '0'; } } else { arg_ptr[arg_len] = '\0'; } if (c) continue; /* Check. */ if (argv_count) { if (!tomoyo_argv(bprm->argc - argv_count, arg_ptr, argc, argv, checked)) { result = false; break; } argv_count--; } else if (envp_count) { char *cp = strchr(arg_ptr, '='); if (cp) { *cp = '\0'; if (!tomoyo_envp(arg_ptr, cp + 1, envc, envp, checked + argc)) { result = false; break; } } envp_count--; } else { break; } arg_len = 0; } offset = 0; if (!result) break; } out: if (result) { int i; /* Check not-yet-checked entries. */ for (i = 0; i < argc; i++) { if (checked[i]) continue; /* * Return true only if all unchecked indexes in * bprm->argv[] are not matched. */ if (argv[i].is_not) continue; result = false; break; } for (i = 0; i < envc; envp++, i++) { if (checked[argc + i]) continue; /* * Return true only if all unchecked environ variables * in bprm->envp[] are either undefined or not matched. */ if ((!envp->value && !envp->is_not) || (envp->value && envp->is_not)) continue; result = false; break; } } if (checked != local_checked) kfree(checked); return result; } /** * tomoyo_scan_exec_realpath - Check "exec.realpath" parameter of "struct tomoyo_condition". * * @file: Pointer to "struct file". * @ptr: Pointer to "struct tomoyo_name_union". * @match: True if "exec.realpath=", false if "exec.realpath!=". * * Returns true on success, false otherwise. 
*/ static bool tomoyo_scan_exec_realpath(struct file *file, const struct tomoyo_name_union *ptr, const bool match) { bool result; struct tomoyo_path_info exe; if (!file) return false; exe.name = tomoyo_realpath_from_path(&file->f_path); if (!exe.name) return false; tomoyo_fill_path_info(&exe); result = tomoyo_compare_name_union(&exe, ptr); kfree(exe.name); return result == match; } /** * tomoyo_get_dqword - tomoyo_get_name() for a quoted string. * * @start: String to save. * * Returns pointer to "struct tomoyo_path_info" on success, NULL otherwise. */ static const struct tomoyo_path_info *tomoyo_get_dqword(char *start) { char *cp = start + strlen(start) - 1; if (cp == start || *start++ != '"' || *cp != '"') return NULL; *cp = '\0'; if (*start && !tomoyo_correct_word(start)) return NULL; return tomoyo_get_name(start); } /** * tomoyo_parse_name_union_quoted - Parse a quoted word. * * @param: Pointer to "struct tomoyo_acl_param". * @ptr: Pointer to "struct tomoyo_name_union". * * Returns true on success, false otherwise. */ static bool tomoyo_parse_name_union_quoted(struct tomoyo_acl_param *param, struct tomoyo_name_union *ptr) { char *filename = param->data; if (*filename == '@') return tomoyo_parse_name_union(param, ptr); ptr->filename = tomoyo_get_dqword(filename); return ptr->filename != NULL; } /** * tomoyo_parse_argv - Parse an argv[] condition part. * * @left: Lefthand value. * @right: Righthand value. * @argv: Pointer to "struct tomoyo_argv". * * Returns true on success, false otherwise. */ static bool tomoyo_parse_argv(char *left, char *right, struct tomoyo_argv *argv) { if (tomoyo_parse_ulong(&argv->index, &left) != TOMOYO_VALUE_TYPE_DECIMAL || *left++ != ']' || *left) return false; argv->value = tomoyo_get_dqword(right); return argv->value != NULL; } /** * tomoyo_parse_envp - Parse an envp[] condition part. * * @left: Lefthand value. * @right: Righthand value. * @envp: Pointer to "struct tomoyo_envp". * * Returns true on success, false otherwise. 
*/ static bool tomoyo_parse_envp(char *left, char *right, struct tomoyo_envp *envp) { const struct tomoyo_path_info *name; const struct tomoyo_path_info *value; char *cp = left + strlen(left) - 1; if (*cp-- != ']' || *cp != '"') goto out; *cp = '\0'; if (!tomoyo_correct_word(left)) goto out; name = tomoyo_get_name(left); if (!name) goto out; if (!strcmp(right, "NULL")) { value = NULL; } else { value = tomoyo_get_dqword(right); if (!value) { tomoyo_put_name(name); goto out; } } envp->name = name; envp->value = value; return true; out: return false; } /** * tomoyo_same_condition - Check for duplicated "struct tomoyo_condition" entry. * * @a: Pointer to "struct tomoyo_condition". * @b: Pointer to "struct tomoyo_condition". * * Returns true if @a == @b, false otherwise. */ static inline bool tomoyo_same_condition(const struct tomoyo_condition *a, const struct tomoyo_condition *b) { return a->size == b->size && a->condc == b->condc && a->numbers_count == b->numbers_count && a->names_count == b->names_count && a->argc == b->argc && a->envc == b->envc && a->grant_log == b->grant_log && a->transit == b->transit && !memcmp(a + 1, b + 1, a->size - sizeof(*a)); } /** * tomoyo_condition_type - Get condition type. * * @word: Keyword string. * * Returns one of values in "enum tomoyo_conditions_index" on success, * TOMOYO_MAX_CONDITION_KEYWORD otherwise. */ static u8 tomoyo_condition_type(const char *word) { u8 i; for (i = 0; i < TOMOYO_MAX_CONDITION_KEYWORD; i++) { if (!strcmp(word, tomoyo_condition_keyword[i])) break; } return i; } /* Define this to enable debug mode. */ /* #define DEBUG_CONDITION */ #ifdef DEBUG_CONDITION #define dprintk printk #else #define dprintk(...) do { } while (0) #endif /** * tomoyo_commit_condition - Commit "struct tomoyo_condition". * * @entry: Pointer to "struct tomoyo_condition". * * Returns pointer to "struct tomoyo_condition" on success, NULL otherwise. * * This function merges duplicated entries. 
This function returns NULL if * @entry is not duplicated but memory quota for policy has exceeded. */ static struct tomoyo_condition *tomoyo_commit_condition (struct tomoyo_condition *entry) { struct tomoyo_condition *ptr; bool found = false; if (mutex_lock_interruptible(&tomoyo_policy_lock)) { dprintk(KERN_WARNING "%u: %s failed\n", __LINE__, __func__); ptr = NULL; found = true; goto out; } list_for_each_entry(ptr, &tomoyo_condition_list, head.list) { if (!tomoyo_same_condition(ptr, entry) || atomic_read(&ptr->head.users) == TOMOYO_GC_IN_PROGRESS) continue; /* Same entry found. Share this entry. */ atomic_inc(&ptr->head.users); found = true; break; } if (!found) { if (tomoyo_memory_ok(entry)) { atomic_set(&entry->head.users, 1); list_add(&entry->head.list, &tomoyo_condition_list); } else { found = true; ptr = NULL; } } mutex_unlock(&tomoyo_policy_lock); out: if (found) { tomoyo_del_condition(&entry->head.list); kfree(entry); entry = ptr; } return entry; } /** * tomoyo_get_transit_preference - Parse domain transition preference for execve(). * * @param: Pointer to "struct tomoyo_acl_param". * @e: Pointer to "struct tomoyo_condition". * * Returns the condition string part. */ static char *tomoyo_get_transit_preference(struct tomoyo_acl_param *param, struct tomoyo_condition *e) { char * const pos = param->data; bool flag; if (*pos == '<') { e->transit = tomoyo_get_domainname(param); goto done; } { char *cp = strchr(pos, ' '); if (cp) *cp = '\0'; flag = tomoyo_correct_path(pos) || !strcmp(pos, "keep") || !strcmp(pos, "initialize") || !strcmp(pos, "reset") || !strcmp(pos, "child") || !strcmp(pos, "parent"); if (cp) *cp = ' '; } if (!flag) return pos; e->transit = tomoyo_get_name(tomoyo_read_token(param)); done: if (e->transit) return param->data; /* * Return a bad read-only condition string that will let * tomoyo_get_condition() return NULL. */ return "/"; } /** * tomoyo_get_condition - Parse condition part. * * @param: Pointer to "struct tomoyo_acl_param". 
* * Returns pointer to "struct tomoyo_condition" on success, NULL otherwise. */ struct tomoyo_condition *tomoyo_get_condition(struct tomoyo_acl_param *param) { struct tomoyo_condition *entry = NULL; struct tomoyo_condition_element *condp = NULL; struct tomoyo_number_union *numbers_p = NULL; struct tomoyo_name_union *names_p = NULL; struct tomoyo_argv *argv = NULL; struct tomoyo_envp *envp = NULL; struct tomoyo_condition e = { }; char * const start_of_string = tomoyo_get_transit_preference(param, &e); char * const end_of_string = start_of_string + strlen(start_of_string); char *pos; rerun: pos = start_of_string; while (1) { u8 left = -1; u8 right = -1; char *left_word = pos; char *cp; char *right_word; bool is_not; if (!*left_word) break; /* * Since left-hand condition does not allow use of "path_group" * or "number_group" and environment variable's names do not * accept '=', it is guaranteed that the original line consists * of one or more repetition of $left$operator$right blocks * where "$left is free from '=' and ' '" and "$operator is * either '=' or '!='" and "$right is free from ' '". * Therefore, we can reconstruct the original line at the end * of dry run even if we overwrite $operator with '\0'. */ cp = strchr(pos, ' '); if (cp) { *cp = '\0'; /* Will restore later. */ pos = cp + 1; } else { pos = ""; } right_word = strchr(left_word, '='); if (!right_word || right_word == left_word) goto out; is_not = *(right_word - 1) == '!'; if (is_not) *(right_word++ - 1) = '\0'; /* Will restore later. */ else if (*(right_word + 1) != '=') *right_word++ = '\0'; /* Will restore later. */ else goto out; dprintk(KERN_WARNING "%u: <%s>%s=<%s>\n", __LINE__, left_word, is_not ? "!" 
: "", right_word); if (!strcmp(left_word, "grant_log")) { if (entry) { if (is_not || entry->grant_log != TOMOYO_GRANTLOG_AUTO) goto out; else if (!strcmp(right_word, "yes")) entry->grant_log = TOMOYO_GRANTLOG_YES; else if (!strcmp(right_word, "no")) entry->grant_log = TOMOYO_GRANTLOG_NO; else goto out; } continue; } if (!strncmp(left_word, "exec.argv[", 10)) { if (!argv) { e.argc++; e.condc++; } else { e.argc--; e.condc--; left = TOMOYO_ARGV_ENTRY; argv->is_not = is_not; if (!tomoyo_parse_argv(left_word + 10, right_word, argv++)) goto out; } goto store_value; } if (!strncmp(left_word, "exec.envp[\"", 11)) { if (!envp) { e.envc++; e.condc++; } else { e.envc--; e.condc--; left = TOMOYO_ENVP_ENTRY; envp->is_not = is_not; if (!tomoyo_parse_envp(left_word + 11, right_word, envp++)) goto out; } goto store_value; } left = tomoyo_condition_type(left_word); dprintk(KERN_WARNING "%u: <%s> left=%u\n", __LINE__, left_word, left); if (left == TOMOYO_MAX_CONDITION_KEYWORD) { if (!numbers_p) { e.numbers_count++; } else { e.numbers_count--; left = TOMOYO_NUMBER_UNION; param->data = left_word; if (*left_word == '@' || !tomoyo_parse_number_union(param, numbers_p++)) goto out; } } if (!condp) e.condc++; else e.condc--; if (left == TOMOYO_EXEC_REALPATH || left == TOMOYO_SYMLINK_TARGET) { if (!names_p) { e.names_count++; } else { e.names_count--; right = TOMOYO_NAME_UNION; param->data = right_word; if (!tomoyo_parse_name_union_quoted(param, names_p++)) goto out; } goto store_value; } right = tomoyo_condition_type(right_word); if (right == TOMOYO_MAX_CONDITION_KEYWORD) { if (!numbers_p) { e.numbers_count++; } else { e.numbers_count--; right = TOMOYO_NUMBER_UNION; param->data = right_word; if (!tomoyo_parse_number_union(param, numbers_p++)) goto out; } } store_value: if (!condp) { dprintk(KERN_WARNING "%u: dry_run left=%u right=%u " "match=%u\n", __LINE__, left, right, !is_not); continue; } condp->left = left; condp->right = right; condp->equals = !is_not; dprintk(KERN_WARNING "%u: 
left=%u right=%u match=%u\n", __LINE__, condp->left, condp->right, condp->equals); condp++; } dprintk(KERN_INFO "%u: cond=%u numbers=%u names=%u ac=%u ec=%u\n", __LINE__, e.condc, e.numbers_count, e.names_count, e.argc, e.envc); if (entry) { BUG_ON(e.names_count | e.numbers_count | e.argc | e.envc | e.condc); return tomoyo_commit_condition(entry); } e.size = sizeof(*entry) + e.condc * sizeof(struct tomoyo_condition_element) + e.numbers_count * sizeof(struct tomoyo_number_union) + e.names_count * sizeof(struct tomoyo_name_union) + e.argc * sizeof(struct tomoyo_argv) + e.envc * sizeof(struct tomoyo_envp); entry = kzalloc(e.size, GFP_NOFS); if (!entry) goto out2; *entry = e; e.transit = NULL; condp = (struct tomoyo_condition_element *) (entry + 1); numbers_p = (struct tomoyo_number_union *) (condp + e.condc); names_p = (struct tomoyo_name_union *) (numbers_p + e.numbers_count); argv = (struct tomoyo_argv *) (names_p + e.names_count); envp = (struct tomoyo_envp *) (argv + e.argc); { bool flag = false; for (pos = start_of_string; pos < end_of_string; pos++) { if (*pos) continue; if (flag) /* Restore " ". */ *pos = ' '; else if (*(pos + 1) == '=') /* Restore "!=". */ *pos = '!'; else /* Restore "=". */ *pos = '='; flag = !flag; } } goto rerun; out: dprintk(KERN_WARNING "%u: %s failed\n", __LINE__, __func__); if (entry) { tomoyo_del_condition(&entry->head.list); kfree(entry); } out2: tomoyo_put_name(e.transit); return NULL; } /** * tomoyo_get_attributes - Revalidate "struct inode". * * @obj: Pointer to "struct tomoyo_obj_info". * * Returns nothing. 
*/ void tomoyo_get_attributes(struct tomoyo_obj_info *obj) { u8 i; struct dentry *dentry = NULL; for (i = 0; i < TOMOYO_MAX_PATH_STAT; i++) { struct inode *inode; switch (i) { case TOMOYO_PATH1: dentry = obj->path1.dentry; if (!dentry) continue; break; case TOMOYO_PATH2: dentry = obj->path2.dentry; if (!dentry) continue; break; default: if (!dentry) continue; dentry = dget_parent(dentry); break; } inode = dentry->d_inode; if (inode) { struct tomoyo_mini_stat *stat = &obj->stat[i]; stat->uid = inode->i_uid; stat->gid = inode->i_gid; stat->ino = inode->i_ino; stat->mode = inode->i_mode; stat->dev = inode->i_sb->s_dev; stat->rdev = inode->i_rdev; obj->stat_valid[i] = true; } if (i & 1) /* i == TOMOYO_PATH1_PARENT || i == TOMOYO_PATH2_PARENT */ dput(dentry); } } /** * tomoyo_condition - Check condition part. * * @r: Pointer to "struct tomoyo_request_info". * @cond: Pointer to "struct tomoyo_condition". Maybe NULL. * * Returns true on success, false otherwise. * * Caller holds tomoyo_read_lock(). 
*/ bool tomoyo_condition(struct tomoyo_request_info *r, const struct tomoyo_condition *cond) { u32 i; unsigned long min_v[2] = { 0, 0 }; unsigned long max_v[2] = { 0, 0 }; const struct tomoyo_condition_element *condp; const struct tomoyo_number_union *numbers_p; const struct tomoyo_name_union *names_p; const struct tomoyo_argv *argv; const struct tomoyo_envp *envp; struct tomoyo_obj_info *obj; u16 condc; u16 argc; u16 envc; struct linux_binprm *bprm = NULL; if (!cond) return true; condc = cond->condc; argc = cond->argc; envc = cond->envc; obj = r->obj; if (r->ee) bprm = r->ee->bprm; if (!bprm && (argc || envc)) return false; condp = (struct tomoyo_condition_element *) (cond + 1); numbers_p = (const struct tomoyo_number_union *) (condp + condc); names_p = (const struct tomoyo_name_union *) (numbers_p + cond->numbers_count); argv = (const struct tomoyo_argv *) (names_p + cond->names_count); envp = (const struct tomoyo_envp *) (argv + argc); for (i = 0; i < condc; i++) { const bool match = condp->equals; const u8 left = condp->left; const u8 right = condp->right; bool is_bitop[2] = { false, false }; u8 j; condp++; /* Check argv[] and envp[] later. */ if (left == TOMOYO_ARGV_ENTRY || left == TOMOYO_ENVP_ENTRY) continue; /* Check string expressions. */ if (right == TOMOYO_NAME_UNION) { const struct tomoyo_name_union *ptr = names_p++; switch (left) { struct tomoyo_path_info *symlink; struct tomoyo_execve *ee; struct file *file; case TOMOYO_SYMLINK_TARGET: symlink = obj ? obj->symlink_target : NULL; if (!symlink || !tomoyo_compare_name_union(symlink, ptr) == match) goto out; break; case TOMOYO_EXEC_REALPATH: ee = r->ee; file = ee ? ee->bprm->file : NULL; if (!tomoyo_scan_exec_realpath(file, ptr, match)) goto out; break; } continue; } /* Check numeric or bit-op expressions. */ for (j = 0; j < 2; j++) { const u8 index = j ? 
right : left; unsigned long value = 0; switch (index) { case TOMOYO_TASK_UID: value = from_kuid(&init_user_ns, current_uid()); break; case TOMOYO_TASK_EUID: value = from_kuid(&init_user_ns, current_euid()); break; case TOMOYO_TASK_SUID: value = from_kuid(&init_user_ns, current_suid()); break; case TOMOYO_TASK_FSUID: value = from_kuid(&init_user_ns, current_fsuid()); break; case TOMOYO_TASK_GID: value = from_kgid(&init_user_ns, current_gid()); break; case TOMOYO_TASK_EGID: value = from_kgid(&init_user_ns, current_egid()); break; case TOMOYO_TASK_SGID: value = from_kgid(&init_user_ns, current_sgid()); break; case TOMOYO_TASK_FSGID: value = from_kgid(&init_user_ns, current_fsgid()); break; case TOMOYO_TASK_PID: value = tomoyo_sys_getpid(); break; case TOMOYO_TASK_PPID: value = tomoyo_sys_getppid(); break; case TOMOYO_TYPE_IS_SOCKET: value = S_IFSOCK; break; case TOMOYO_TYPE_IS_SYMLINK: value = S_IFLNK; break; case TOMOYO_TYPE_IS_FILE: value = S_IFREG; break; case TOMOYO_TYPE_IS_BLOCK_DEV: value = S_IFBLK; break; case TOMOYO_TYPE_IS_DIRECTORY: value = S_IFDIR; break; case TOMOYO_TYPE_IS_CHAR_DEV: value = S_IFCHR; break; case TOMOYO_TYPE_IS_FIFO: value = S_IFIFO; break; case TOMOYO_MODE_SETUID: value = S_ISUID; break; case TOMOYO_MODE_SETGID: value = S_ISGID; break; case TOMOYO_MODE_STICKY: value = S_ISVTX; break; case TOMOYO_MODE_OWNER_READ: value = S_IRUSR; break; case TOMOYO_MODE_OWNER_WRITE: value = S_IWUSR; break; case TOMOYO_MODE_OWNER_EXECUTE: value = S_IXUSR; break; case TOMOYO_MODE_GROUP_READ: value = S_IRGRP; break; case TOMOYO_MODE_GROUP_WRITE: value = S_IWGRP; break; case TOMOYO_MODE_GROUP_EXECUTE: value = S_IXGRP; break; case TOMOYO_MODE_OTHERS_READ: value = S_IROTH; break; case TOMOYO_MODE_OTHERS_WRITE: value = S_IWOTH; break; case TOMOYO_MODE_OTHERS_EXECUTE: value = S_IXOTH; break; case TOMOYO_EXEC_ARGC: if (!bprm) goto out; value = bprm->argc; break; case TOMOYO_EXEC_ENVC: if (!bprm) goto out; value = bprm->envc; break; case TOMOYO_NUMBER_UNION: /* Fetch 
values later. */ break; default: if (!obj) goto out; if (!obj->validate_done) { tomoyo_get_attributes(obj); obj->validate_done = true; } { u8 stat_index; struct tomoyo_mini_stat *stat; switch (index) { case TOMOYO_PATH1_UID: case TOMOYO_PATH1_GID: case TOMOYO_PATH1_INO: case TOMOYO_PATH1_MAJOR: case TOMOYO_PATH1_MINOR: case TOMOYO_PATH1_TYPE: case TOMOYO_PATH1_DEV_MAJOR: case TOMOYO_PATH1_DEV_MINOR: case TOMOYO_PATH1_PERM: stat_index = TOMOYO_PATH1; break; case TOMOYO_PATH2_UID: case TOMOYO_PATH2_GID: case TOMOYO_PATH2_INO: case TOMOYO_PATH2_MAJOR: case TOMOYO_PATH2_MINOR: case TOMOYO_PATH2_TYPE: case TOMOYO_PATH2_DEV_MAJOR: case TOMOYO_PATH2_DEV_MINOR: case TOMOYO_PATH2_PERM: stat_index = TOMOYO_PATH2; break; case TOMOYO_PATH1_PARENT_UID: case TOMOYO_PATH1_PARENT_GID: case TOMOYO_PATH1_PARENT_INO: case TOMOYO_PATH1_PARENT_PERM: stat_index = TOMOYO_PATH1_PARENT; break; case TOMOYO_PATH2_PARENT_UID: case TOMOYO_PATH2_PARENT_GID: case TOMOYO_PATH2_PARENT_INO: case TOMOYO_PATH2_PARENT_PERM: stat_index = TOMOYO_PATH2_PARENT; break; default: goto out; } if (!obj->stat_valid[stat_index]) goto out; stat = &obj->stat[stat_index]; switch (index) { case TOMOYO_PATH1_UID: case TOMOYO_PATH2_UID: case TOMOYO_PATH1_PARENT_UID: case TOMOYO_PATH2_PARENT_UID: value = from_kuid(&init_user_ns, stat->uid); break; case TOMOYO_PATH1_GID: case TOMOYO_PATH2_GID: case TOMOYO_PATH1_PARENT_GID: case TOMOYO_PATH2_PARENT_GID: value = from_kgid(&init_user_ns, stat->gid); break; case TOMOYO_PATH1_INO: case TOMOYO_PATH2_INO: case TOMOYO_PATH1_PARENT_INO: case TOMOYO_PATH2_PARENT_INO: value = stat->ino; break; case TOMOYO_PATH1_MAJOR: case TOMOYO_PATH2_MAJOR: value = MAJOR(stat->dev); break; case TOMOYO_PATH1_MINOR: case TOMOYO_PATH2_MINOR: value = MINOR(stat->dev); break; case TOMOYO_PATH1_TYPE: case TOMOYO_PATH2_TYPE: value = stat->mode & S_IFMT; break; case TOMOYO_PATH1_DEV_MAJOR: case TOMOYO_PATH2_DEV_MAJOR: value = MAJOR(stat->rdev); break; case TOMOYO_PATH1_DEV_MINOR: case 
TOMOYO_PATH2_DEV_MINOR: value = MINOR(stat->rdev); break; case TOMOYO_PATH1_PERM: case TOMOYO_PATH2_PERM: case TOMOYO_PATH1_PARENT_PERM: case TOMOYO_PATH2_PARENT_PERM: value = stat->mode & S_IALLUGO; break; } } break; } max_v[j] = value; min_v[j] = value; switch (index) { case TOMOYO_MODE_SETUID: case TOMOYO_MODE_SETGID: case TOMOYO_MODE_STICKY: case TOMOYO_MODE_OWNER_READ: case TOMOYO_MODE_OWNER_WRITE: case TOMOYO_MODE_OWNER_EXECUTE: case TOMOYO_MODE_GROUP_READ: case TOMOYO_MODE_GROUP_WRITE: case TOMOYO_MODE_GROUP_EXECUTE: case TOMOYO_MODE_OTHERS_READ: case TOMOYO_MODE_OTHERS_WRITE: case TOMOYO_MODE_OTHERS_EXECUTE: is_bitop[j] = true; } } if (left == TOMOYO_NUMBER_UNION) { /* Fetch values now. */ const struct tomoyo_number_union *ptr = numbers_p++; min_v[0] = ptr->values[0]; max_v[0] = ptr->values[1]; } if (right == TOMOYO_NUMBER_UNION) { /* Fetch values now. */ const struct tomoyo_number_union *ptr = numbers_p++; if (ptr->group) { if (tomoyo_number_matches_group(min_v[0], max_v[0], ptr->group) == match) continue; } else { if ((min_v[0] <= ptr->values[1] && max_v[0] >= ptr->values[0]) == match) continue; } goto out; } /* * Bit operation is valid only when counterpart value * represents permission. */ if (is_bitop[0] && is_bitop[1]) { goto out; } else if (is_bitop[0]) { switch (right) { case TOMOYO_PATH1_PERM: case TOMOYO_PATH1_PARENT_PERM: case TOMOYO_PATH2_PERM: case TOMOYO_PATH2_PARENT_PERM: if (!(max_v[0] & max_v[1]) == !match) continue; } goto out; } else if (is_bitop[1]) { switch (left) { case TOMOYO_PATH1_PERM: case TOMOYO_PATH1_PARENT_PERM: case TOMOYO_PATH2_PERM: case TOMOYO_PATH2_PARENT_PERM: if (!(max_v[0] & max_v[1]) == !match) continue; } goto out; } /* Normal value range comparison. */ if ((min_v[0] <= max_v[1] && max_v[0] >= min_v[1]) == match) continue; out: return false; } /* Check argv[] and envp[] now. */ if (r->ee && (argc || envc)) return tomoyo_scan_bprm(r->ee, argc, argv, envc, envp); return true; }
gpl-2.0
ftteam/kernel
drivers/media/rc/keymaps/rc-total-media-in-hand-02.c
4539
2966
/*
 * Total Media In Hand_02 remote controller keytable for Mygica X8507
 *
 * Copyright (C) 2012 Alfredo J. Delaiti <alfredodelaiti@netscape.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
 */

#include <media/rc-map.h>
#include <linux/module.h>

/*
 * Scancode -> Linux input keycode table for this remote.  The map is
 * registered below with rc_type RC_TYPE_RC5, so these scancodes are the
 * decoded RC-5 command values as delivered by rc-core.  Gaps in the
 * sequence (e.g. 0x15, 0x18-0x1d) are scancodes this remote does not emit
 * or that are intentionally unmapped.
 */
static struct rc_map_table total_media_in_hand_02[] = {
	{ 0x0000, KEY_0 },
	{ 0x0001, KEY_1 },
	{ 0x0002, KEY_2 },
	{ 0x0003, KEY_3 },
	{ 0x0004, KEY_4 },
	{ 0x0005, KEY_5 },
	{ 0x0006, KEY_6 },
	{ 0x0007, KEY_7 },
	{ 0x0008, KEY_8 },
	{ 0x0009, KEY_9 },
	{ 0x000a, KEY_MUTE },
	{ 0x000b, KEY_STOP },                     /* Stop */
	{ 0x000c, KEY_POWER2 },                   /* Turn on/off application */
	{ 0x000d, KEY_OK },                       /* OK */
	{ 0x000e, KEY_CAMERA },                   /* Snapshot */
	{ 0x000f, KEY_ZOOM },                     /* Full Screen/Restore */
	{ 0x0010, KEY_RIGHT },                    /* Right arrow */
	{ 0x0011, KEY_LEFT },                     /* Left arrow */
	{ 0x0012, KEY_CHANNELUP },
	{ 0x0013, KEY_CHANNELDOWN },
	{ 0x0014, KEY_SHUFFLE },
	{ 0x0016, KEY_PAUSE },
	{ 0x0017, KEY_PLAY },                     /* Play */
	{ 0x001e, KEY_TIME },                     /* Time Shift */
	{ 0x001f, KEY_RECORD },
	{ 0x0020, KEY_UP },
	{ 0x0021, KEY_DOWN },
	{ 0x0025, KEY_POWER },                    /* Turn off computer */
	{ 0x0026, KEY_REWIND },                   /* FR << */
	{ 0x0027, KEY_FASTFORWARD },              /* FF >> */
	{ 0x0029, KEY_ESC },
	{ 0x002b, KEY_VOLUMEUP },
	{ 0x002c, KEY_VOLUMEDOWN },
	{ 0x002d, KEY_CHANNEL },                  /* CH Surfing */
	{ 0x0038, KEY_VIDEO },                    /* TV/AV/S-Video/YPbPr */
};

/* rc-core registration descriptor wrapping the table above. */
static struct rc_map_list total_media_in_hand_02_map = {
	.map = {
		.scan    = total_media_in_hand_02,
		.size    = ARRAY_SIZE(total_media_in_hand_02),
		.rc_type = RC_TYPE_RC5,
		.name    = RC_MAP_TOTAL_MEDIA_IN_HAND_02,
	}
};

/* Register the keymap with rc-core at module load. */
static int __init init_rc_map_total_media_in_hand_02(void)
{
	return rc_map_register(&total_media_in_hand_02_map);
}

/* Unregister the keymap at module unload. */
static void __exit exit_rc_map_total_media_in_hand_02(void)
{
	rc_map_unregister(&total_media_in_hand_02_map);
}

module_init(init_rc_map_total_media_in_hand_02)
module_exit(exit_rc_map_total_media_in_hand_02)

MODULE_LICENSE("GPL");
MODULE_AUTHOR(" Alfredo J. Delaiti <alfredodelaiti@netscape.net>");
gpl-2.0
iceblu3710/SUNXI_XENOMAI
arch/x86/kernel/cpu/cyrix.c
9147
12513
#include <linux/init.h>
#include <linux/bitops.h>
#include <linux/delay.h>
#include <linux/pci.h>
#include <asm/dma.h>
#include <linux/io.h>
#include <asm/processor-cyrix.h>
#include <asm/processor-flags.h>
#include <linux/timer.h>
#include <asm/pci-direct.h>
#include <asm/tsc.h>

#include "cpu.h"

/*
 * Read NSC/Cyrix DEVID registers (DIR) to get more detailed info. about the CPU
 *
 * Must be called with interrupts disabled (see do_cyrix_devid); the probe
 * toggles configuration registers and relies on back-to-back I/O accesses.
 */
static void __cpuinit __do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
{
	unsigned char ccr2, ccr3;

	/* we test for DEVID by checking whether CCR3 is writable */
	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, ccr3 ^ 0x80);
	getCx86(0xc0);   /* dummy to change bus */

	if (getCx86(CX86_CCR3) == ccr3) {	/* no DEVID regs. */
		/* Distinguish the DEVID-less parts by probing CCR2 writability. */
		ccr2 = getCx86(CX86_CCR2);
		setCx86(CX86_CCR2, ccr2 ^ 0x04);
		getCx86(0xc0);	/* dummy */

		if (getCx86(CX86_CCR2) == ccr2)	/* old Cx486SLC/DLC */
			*dir0 = 0xfd;
		else {	/* Cx486S A step */
			setCx86(CX86_CCR2, ccr2);
			*dir0 = 0xfe;
		}
	} else {
		setCx86(CX86_CCR3, ccr3);  /* restore CCR3 */

		/* read DIR0 and DIR1 CPU registers */
		*dir0 = getCx86(CX86_DIR0);
		*dir1 = getCx86(CX86_DIR1);
	}
}

/* IRQ-safe wrapper around __do_cyrix_devid(). */
static void __cpuinit do_cyrix_devid(unsigned char *dir0, unsigned char *dir1)
{
	unsigned long flags;

	local_irq_save(flags);
	__do_cyrix_devid(dir0, dir1);
	local_irq_restore(flags);
}

/*
 * Cx86_dir0_msb is a HACK needed by check_cx686_cpuid/slop in bugs.h in
 * order to identify the Cyrix CPU model after we're out of setup.c
 *
 * Actually since bugs.h doesn't even reference this perhaps someone should
 * fix the documentation ???
 */
static unsigned char Cx86_dir0_msb __cpuinitdata = 0;

/* Model-name fragments indexed by fields decoded from DIR0/DIR1. */
static const char __cpuinitconst Cx86_model[][9] = {
	"Cx486", "Cx486", "5x86 ", "6x86", "MediaGX ", "6x86MX ",
	"M II ", "Unknown"
};
static const char __cpuinitconst Cx486_name[][5] = {
	"SLC", "DLC", "SLC2", "DLC2", "SRx", "DRx", "SRx2", "DRx2"
};
static const char __cpuinitconst Cx486S_name[][4] = {
	"S", "S2", "Se", "S2e"
};
static const char __cpuinitconst Cx486D_name[][4] = {
	"DX", "DX2", "?", "?", "?", "DX4"
};
/* Scratch buffer; byte [1]/[2] get patched with the clock multiplier digit. */
static char Cx86_cb[] __cpuinitdata = "?.5x Core/Bus Clock";
static const char __cpuinitconst cyrix_model_mult1[] = "12??43";
static const char __cpuinitconst cyrix_model_mult2[] = "12233445";

/*
 * Reset the slow-loop (SLOP) bit on the 686(L) which is set by some old
 * BIOSes for compatibility with DOS games. This makes the udelay loop
 * work correctly, and improves performance.
 *
 * FIXME: our newer udelay uses the tsc. We don't need to frob with SLOP
 */
static void __cpuinit check_cx686_slop(struct cpuinfo_x86 *c)
{
	unsigned long flags;

	if (Cx86_dir0_msb == 3) {
		unsigned char ccr3, ccr5;

		local_irq_save(flags);
		ccr3 = getCx86(CX86_CCR3);
		setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);       /* enable MAPEN  */
		ccr5 = getCx86(CX86_CCR5);
		if (ccr5 & 2)
			setCx86(CX86_CCR5, ccr5 & 0xfd);  /* reset SLOP */
		setCx86(CX86_CCR3, ccr3);                       /* disable MAPEN */
		local_irq_restore(flags);

		if (ccr5 & 2) { /* possible wrong calibration done */
			printk(KERN_INFO "Recalibrating delay loop with SLOP bit reset\n");
			calibrate_delay();
			c->loops_per_jiffy = loops_per_jiffy;
		}
	}
}

/*
 * Allow memory-access reordering: clear the load/store-serialize bit in
 * PCR0 and widen the serialized window mask.  MAPEN must be enabled while
 * touching PCR0, hence the CCR3 dance.
 */
static void __cpuinit set_cx86_reorder(void)
{
	u8 ccr3;

	printk(KERN_INFO "Enable Memory access reorder on Cyrix/NSC processor.\n");
	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10); /* enable MAPEN */

	/* Load/Store Serialize to mem access disable (=reorder it) */
	setCx86_old(CX86_PCR0, getCx86_old(CX86_PCR0) & ~0x80);
	/* set load/store serialize from 1GB to 4GB */
	ccr3 |= 0xe0;
	setCx86(CX86_CCR3, ccr3);
}

/* Turn on write-back caching via the CCR2 NW lock sequence. */
static void __cpuinit set_cx86_memwb(void)
{
	printk(KERN_INFO "Enable Memory-Write-back mode on Cyrix/NSC processor.\n");

	/* CCR2 bit 2: unlock NW bit */
	setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) & ~0x04);
	/* set 'Not Write-through' */
	write_cr0(read_cr0() | X86_CR0_NW);
	/* CCR2 bit 2: lock NW bit and set WT1 */
	setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x14);
}

/*
 * Configure later MediaGX and/or Geode processor.
 */
static void __cpuinit geode_configure(void)
{
	unsigned long flags;
	u8 ccr3;

	local_irq_save(flags);

	/* Suspend on halt power saving and enable #SUSP pin */
	setCx86_old(CX86_CCR2, getCx86_old(CX86_CCR2) | 0x88);

	ccr3 = getCx86(CX86_CCR3);
	setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);	/* enable MAPEN */

	/* FPU fast, DTE cache, Mem bypass */
	setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x38);
	setCx86(CX86_CCR3, ccr3);			/* disable MAPEN */

	set_cx86_memwb();
	set_cx86_reorder();

	local_irq_restore(flags);
}

/*
 * Early probe: mark CPUs whose MTRR emulation goes through Cyrix ARRs so
 * the generic MTRR code picks the right backend.
 */
static void __cpuinit early_init_cyrix(struct cpuinfo_x86 *c)
{
	unsigned char dir0, dir0_msn, dir1 = 0;

	__do_cyrix_devid(&dir0, &dir1);

	dir0_msn = dir0 >> 4; /* identifies CPU "family"   */

	switch (dir0_msn) {
	case 3: /* 6x86/6x86L */
		/* Emulate MTRRs using Cyrix's ARRs. */
		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
		break;
	case 5: /* 6x86MX/M II */
		/* Emulate MTRRs using Cyrix's ARRs. */
		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
		break;
	}
}

/*
 * Full identify: decode DIR0/DIR1 into family/model/stepping, build the
 * human-readable model name in c->x86_model_id, and apply per-family
 * quirks (ARR-based MTRRs, coma bug, MediaGX DMA workarounds, Geode
 * feature setup).
 */
static void __cpuinit init_cyrix(struct cpuinfo_x86 *c)
{
	unsigned char dir0, dir0_msn, dir0_lsn, dir1 = 0;
	char *buf = c->x86_model_id;
	const char *p = NULL;

	/*
	 * Bit 31 in normal CPUID used for nonstandard 3DNow ID;
	 * 3DNow is IDd by bit 31 in extended CPUID (1*32+31) anyway
	 */
	clear_cpu_cap(c, 0*32+31);

	/* Cyrix used bit 24 in extended (AMD) CPUID for Cyrix MMX extensions */
	if (test_cpu_cap(c, 1*32+24)) {
		clear_cpu_cap(c, 1*32+24);
		set_cpu_cap(c, X86_FEATURE_CXMMX);
	}

	do_cyrix_devid(&dir0, &dir1);

	check_cx686_slop(c);

	Cx86_dir0_msb = dir0_msn = dir0 >> 4; /* identifies CPU "family"   */
	dir0_lsn = dir0 & 0xf;                /* model or clock multiplier */

	/* common case step number/rev -- exceptions handled below */
	c->x86_model = (dir1 >> 4) + 1;
	c->x86_mask = dir1 & 0xf;

	/* Now cook; the original recipe is by Channing Corn, from Cyrix.
	 * We do the same thing for each generation: we work out
	 * the model, multiplier and stepping.  Black magic included,
	 * to make the silicon step/rev numbers match the printed ones.
	 */

	switch (dir0_msn) {
		unsigned char tmp;

	case 0: /* Cx486SLC/DLC/SRx/DRx */
		p = Cx486_name[dir0_lsn & 7];
		break;

	case 1: /* Cx486S/DX/DX2/DX4 */
		p = (dir0_lsn & 8) ? Cx486D_name[dir0_lsn & 5]
			: Cx486S_name[dir0_lsn & 3];
		break;

	case 2: /* 5x86 */
		Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
		p = Cx86_cb+2;
		break;

	case 3: /* 6x86/6x86L */
		Cx86_cb[1] = ' ';
		Cx86_cb[2] = cyrix_model_mult1[dir0_lsn & 5];
		if (dir1 > 0x21) { /* 686L */
			Cx86_cb[0] = 'L';
			p = Cx86_cb;
			(c->x86_model)++;
		} else             /* 686 */
			p = Cx86_cb+1;
		/* Emulate MTRRs using Cyrix's ARRs. */
		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
		/* 6x86's contain this bug */
		c->coma_bug = 1;
		break;

	case 4: /* MediaGX/GXm or Geode GXM/GXLV/GX1 */
#ifdef CONFIG_PCI
	{
		u32 vendor, device;
		/*
		 * It isn't really a PCI quirk directly, but the cure is the
		 * same. The MediaGX has deep magic SMM stuff that handles the
		 * SB emulation. It throws away the fifo on disable_dma() which
		 * is wrong and ruins the audio.
		 *
		 * Bug2: VSA1 has a wrap bug so that using maximum sized DMA
		 * causes bad things. According to NatSemi VSA2 has another
		 * bug to do with 'hlt'. I've not seen any boards using VSA2
		 * and X doesn't seem to support it either so who cares 8).
		 * VSA1 we work around however.
		 */

		printk(KERN_INFO "Working around Cyrix MediaGX virtual DMA bugs.\n");
		isa_dma_bridge_buggy = 2;

		/* We do this before the PCI layer is running. However we
		   are safe here as we know the bridge must be a Cyrix
		   companion and must be present */
		vendor = read_pci_config_16(0, 0, 0x12, PCI_VENDOR_ID);
		device = read_pci_config_16(0, 0, 0x12, PCI_DEVICE_ID);

		/*
		 *  The 5510/5520 companion chips have a funky PIT.
		 */
		if (vendor == PCI_VENDOR_ID_CYRIX &&
			(device == PCI_DEVICE_ID_CYRIX_5510 ||
					device == PCI_DEVICE_ID_CYRIX_5520))
			mark_tsc_unstable("cyrix 5510/5520 detected");
	}
#endif
		c->x86_cache_size = 16;	/* Yep 16K integrated cache thats it */

		/* GXm supports extended cpuid levels 'ala' AMD */
		if (c->cpuid_level == 2) {
			/* Enable cxMMX extensions (GX1 Datasheet 54) */
			setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7) | 1);

			/*
			 * GXm : 0x30 ... 0x5f GXm  datasheet 51
			 * GXlv: 0x6x          GXlv datasheet 54
			 *  ?  : 0x7x
			 * GX1 : 0x8x          GX1  datasheet 56
			 */
			if ((0x30 <= dir1 && dir1 <= 0x6f) ||
					(0x80 <= dir1 && dir1 <= 0x8f))
				geode_configure();
			return;
		} else { /* MediaGX */
			Cx86_cb[2] = (dir0_lsn & 1) ? '3' : '4';
			p = Cx86_cb+2;
			c->x86_model = (dir1 & 0x20) ? 1 : 2;
		}
		break;

	case 5: /* 6x86MX/M II */
		if (dir1 > 7) {
			dir0_msn++;  /* M II */
			/* Enable MMX extensions (App note 108) */
			setCx86_old(CX86_CCR7, getCx86_old(CX86_CCR7)|1);
		} else {
			c->coma_bug = 1;      /* 6x86MX, it has the bug. */
		}
		tmp = (!(dir0_lsn & 7) || dir0_lsn & 1) ? 2 : 0;
		Cx86_cb[tmp] = cyrix_model_mult2[dir0_lsn & 7];
		p = Cx86_cb+tmp;
		if (((dir1 & 0x0f) > 4) || ((dir1 & 0xf0) == 0x20))
			(c->x86_model)++;
		/* Emulate MTRRs using Cyrix's ARRs. */
		set_cpu_cap(c, X86_FEATURE_CYRIX_ARR);
		break;

	case 0xf:		/* Cyrix 486 without DEVID registers */
		switch (dir0_lsn) {
		case 0xd:  /* either a 486SLC or DLC w/o DEVID */
			dir0_msn = 0;
			p = Cx486_name[(c->hard_math) ? 1 : 0];
			break;

		case 0xe:  /* a 486S A step */
			dir0_msn = 0;
			p = Cx486S_name[0];
			break;
		}
		break;

	default:  /* unknown (shouldn't happen, we know everyone ;-) */
		dir0_msn = 7;
		break;
	}
	strcpy(buf, Cx86_model[dir0_msn & 7]);
	if (p)
		strcat(buf, p);
	return;
}

/*
 * Handle National Semiconductor branded processors
 */
static void __cpuinit init_nsc(struct cpuinfo_x86 *c)
{
	/*
	 * There may be GX1 processors in the wild that are branded
	 * NSC and not Cyrix.
	 *
	 * This function only handles the GX processor, and kicks every
	 * thing else to the Cyrix init function above - that should
	 * cover any processors that might have been branded
	 * differently after NSC acquired Cyrix.
	 *
	 * If this breaks your GX1 horribly, please e-mail
	 * info-linux@ldcmail.amd.com to tell us.
	 */

	/* Handle the GX (Formally known as the GX2) */

	if (c->x86 == 5 && c->x86_model == 5)
		cpu_detect_cache_sizes(c);
	else
		init_cyrix(c);
}

/*
 * Cyrix CPUs without cpuid or with cpuid not yet enabled can be detected
 * by the fact that they preserve the flags across the division of 5/2.
 * PII and PPro exhibit this behavior too, but they have cpuid available.
 */

/*
 * Perform the Cyrix 5/2 test. A Cyrix won't change
 * the flags, while other 486 chips will.
 */
static inline int test_cyrix_52div(void)
{
	unsigned int test;

	__asm__ __volatile__(
	     "sahf\n\t"		/* clear flags (%eax = 0x0005) */
	     "div %b2\n\t"	/* divide 5 by 2 */
	     "lahf"		/* store flags into %ah */
	     : "=a" (test)
	     : "0" (5), "q" (2)
	     : "cc");

	/* AH is 0x02 on Cyrix after the divide.. */
	return (unsigned char) (test >> 8) == 0x02;
}

/*
 * Detect Cyrix parts that boot with CPUID disabled; if found, enable
 * CPUID via CCR4 (under MAPEN) and tag the vendor so the normal
 * identification path can proceed.
 */
static void __cpuinit cyrix_identify(struct cpuinfo_x86 *c)
{
	/* Detect Cyrix with disabled CPUID */
	if (c->x86 == 4 && test_cyrix_52div()) {
		unsigned char dir0, dir1;

		strcpy(c->x86_vendor_id, "CyrixInstead");
		c->x86_vendor = X86_VENDOR_CYRIX;

		/* Actually enable cpuid on the older cyrix */

		/* Retrieve CPU revisions */

		do_cyrix_devid(&dir0, &dir1);

		dir0 >>= 4;

		/* Check it is an affected model */

		if (dir0 == 5 || dir0 == 3) {
			unsigned char ccr3;
			unsigned long flags;
			printk(KERN_INFO "Enabling CPUID on Cyrix processor.\n");
			local_irq_save(flags);
			ccr3 = getCx86(CX86_CCR3);
			/* enable MAPEN  */
			setCx86(CX86_CCR3, (ccr3 & 0x0f) | 0x10);
			/* enable cpuid  */
			setCx86_old(CX86_CCR4, getCx86_old(CX86_CCR4) | 0x80);
			/* disable MAPEN */
			setCx86(CX86_CCR3, ccr3);
			local_irq_restore(flags);
		}
	}
}

static const struct cpu_dev __cpuinitconst cyrix_cpu_dev = {
	.c_vendor	= "Cyrix",
	.c_ident	= { "CyrixInstead" },
	.c_early_init	= early_init_cyrix,
	.c_init		= init_cyrix,
	.c_identify	= cyrix_identify,
	.c_x86_vendor	= X86_VENDOR_CYRIX,
};

cpu_dev_register(cyrix_cpu_dev);

static const struct cpu_dev __cpuinitconst nsc_cpu_dev = {
	.c_vendor	= "NSC",
	.c_ident	= { "Geode by NSC" },
	.c_init		= init_nsc,
	.c_x86_vendor	= X86_VENDOR_NSC,
};

cpu_dev_register(nsc_cpu_dev);
gpl-2.0
multirom-op2/android_kernel_oneplus_msm8994
arch/x86/kernel/mmconf-fam10h_64.c
10171
5500
/*
 * AMD Family 10h mmconfig enablement
 */
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/pci.h>
#include <linux/dmi.h>
#include <linux/range.h>

#include <asm/pci-direct.h>
#include <linux/sort.h>
#include <asm/io.h>
#include <asm/msr.h>
#include <asm/acpi.h>
#include <asm/mmconfig.h>
#include <asm/pci_x86.h>

struct pci_hostbridge_probe {
	u32 bus;
	u32 slot;
	u32 vendor;
	u32 device;
};

/* Chosen MMCONF base; 0 means "not determined yet" (set once on the BSP). */
static u64 __cpuinitdata fam10h_pci_mmconf_base;

/* Candidate host-bridge locations to probe for a Fam10h northbridge. */
static struct pci_hostbridge_probe pci_probes[] __cpuinitdata = {
	{ 0, 0x18, PCI_VENDOR_ID_AMD, 0x1200 },
	{ 0xff, 0, PCI_VENDOR_ID_AMD, 0x1200 },
};

/* sort() comparator: order ranges by bits 63:32 of their start address. */
static int __cpuinit cmp_range(const void *x1, const void *x2)
{
	const struct range *r1 = x1;
	const struct range *r2 = x2;
	int start1, start2;

	start1 = r1->start >> 32;
	start2 = r2->start >> 32;

	return start1 - start2;
}

#define MMCONF_UNIT (1ULL << FAM10H_MMIO_CONF_BASE_SHIFT)
#define MMCONF_MASK (~(MMCONF_UNIT - 1))
#define MMCONF_SIZE (MMCONF_UNIT << 8)
/* need to avoid (0xfd<<32), (0xfe<<32), and (0xff<<32), ht used space */
#define FAM10H_PCI_MMCONF_BASE (0xfcULL<<32)
#define BASE_VALID(b) ((b) + MMCONF_SIZE <= (0xfdULL<<32) || (b) >= (1ULL<<40))

/*
 * Find a free MMCONF_SIZE window above TOP_MEM2 that does not collide
 * with any of the northbridge's high (above-4G) MMIO routing ranges, and
 * record it in fam10h_pci_mmconf_base.  Leaves the variable untouched if
 * no suitable window is found or early PCI access is unavailable.
 */
static void __cpuinit get_fam10h_pci_mmconf_base(void)
{
	int i;
	unsigned bus;
	unsigned slot;
	int found;

	u64 val;
	u32 address;
	u64 tom2;
	u64 base = FAM10H_PCI_MMCONF_BASE;

	int hi_mmio_num;
	struct range range[8];

	/* only try to get setting from BSP */
	if (fam10h_pci_mmconf_base)
		return;

	if (!early_pci_allowed())
		return;

	found = 0;
	for (i = 0; i < ARRAY_SIZE(pci_probes); i++) {
		u32 id;
		u16 device;
		u16 vendor;

		bus = pci_probes[i].bus;
		slot = pci_probes[i].slot;
		id = read_pci_config(bus, slot, 0, PCI_VENDOR_ID);

		vendor = id & 0xffff;
		device = (id>>16) & 0xffff;
		if (pci_probes[i].vendor == vendor &&
		    pci_probes[i].device == device) {
			found = 1;
			break;
		}
	}

	if (!found)
		return;

	/* SYS_CFG */
	address = MSR_K8_SYSCFG;
	rdmsrl(address, val);

	/* TOP_MEM2 is not enabled? */
	if (!(val & (1<<21))) {
		tom2 = 1ULL << 32;
	} else {
		/* TOP_MEM2 */
		address = MSR_K8_TOP_MEM2;
		rdmsrl(address, val);
		tom2 = max(val & 0xffffff800000ULL, 1ULL << 32);
	}

	/* Place the default base above TOP_MEM2, aligned to MMCONF_UNIT. */
	if (base <= tom2)
		base = (tom2 + 2 * MMCONF_UNIT - 1) & MMCONF_MASK;

	/*
	 * need to check if the range is in the high mmio range that is
	 * above 4G
	 */
	hi_mmio_num = 0;
	for (i = 0; i < 8; i++) {
		u32 reg;
		u64 start;
		u64 end;

		reg = read_pci_config(bus, slot, 1, 0x80 + (i << 3));
		if (!(reg & 3))
			continue;

		start = (u64)(reg & 0xffffff00) << 8; /* 39:16 on 31:8*/
		reg = read_pci_config(bus, slot, 1, 0x84 + (i << 3));
		end = ((u64)(reg & 0xffffff00) << 8) | 0xffff; /* 39:16 on 31:8*/

		if (end < tom2)
			continue;

		range[hi_mmio_num].start = start;
		range[hi_mmio_num].end = end;
		hi_mmio_num++;
	}

	if (!hi_mmio_num)
		goto out;

	/* sort the range */
	sort(range, hi_mmio_num, sizeof(struct range), cmp_range, NULL);

	/* Default base already clear of all high MMIO ranges? */
	if (range[hi_mmio_num - 1].end < base)
		goto out;
	if (range[0].start > base + MMCONF_SIZE)
		goto out;

	/* need to find one window */
	base = (range[0].start & MMCONF_MASK) - MMCONF_UNIT;
	if ((base > tom2) && BASE_VALID(base))
		goto out;
	base = (range[hi_mmio_num - 1].end + MMCONF_UNIT) & MMCONF_MASK;
	if (BASE_VALID(base))
		goto out;
	/* need to find window between ranges */
	for (i = 1; i < hi_mmio_num; i++) {
		base = (range[i - 1].end + MMCONF_UNIT) & MMCONF_MASK;
		val = range[i].start & MMCONF_MASK;
		if (val >= base + MMCONF_SIZE && BASE_VALID(base))
			goto out;
	}
	return;

out:
	fam10h_pci_mmconf_base = base;
}

/*
 * Per-CPU hook: verify/enable the MMIO config-space MSR.  On APs this
 * adopts the BSP's setting; otherwise it programs a freshly probed base
 * with an 8-bit bus range and the enable bit set.
 */
void __cpuinit fam10h_check_enable_mmcfg(void)
{
	u64 val;
	u32 address;

	if (!(pci_probe & PCI_CHECK_ENABLE_AMD_MMCONF))
		return;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, val);

	/* try to make sure that AP's setting is identical to BSP setting */
	if (val & FAM10H_MMIO_CONF_ENABLE) {
		unsigned busnbits;
		busnbits = (val >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			FAM10H_MMIO_CONF_BUSRANGE_MASK;

		/* only trust the one handle 256 buses, if acpi=off */
		if (!acpi_pci_disabled || busnbits >= 8) {
			u64 base = val & MMCONF_MASK;

			if (!fam10h_pci_mmconf_base) {
				fam10h_pci_mmconf_base = base;
				return;
			} else if (fam10h_pci_mmconf_base ==  base)
				return;
		}
	}

	/*
	 * if it is not enabled, try to enable it and assume only one segment
	 * with 256 buses
	 */
	get_fam10h_pci_mmconf_base();
	if (!fam10h_pci_mmconf_base) {
		pci_probe &= ~PCI_CHECK_ENABLE_AMD_MMCONF;
		return;
	}

	printk(KERN_INFO "Enable MMCONFIG on AMD Family 10h\n");
	val &= ~((FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT) |
	     (FAM10H_MMIO_CONF_BUSRANGE_MASK<<FAM10H_MMIO_CONF_BUSRANGE_SHIFT));
	val |= fam10h_pci_mmconf_base | (8 << FAM10H_MMIO_CONF_BUSRANGE_SHIFT) |
	       FAM10H_MMIO_CONF_ENABLE;
	wrmsrl(address, val);
}

/* DMI callback: request MMCONF enable on matching systems. */
static int __init set_check_enable_amd_mmconf(const struct dmi_system_id *d)
{
	pci_probe |= PCI_CHECK_ENABLE_AMD_MMCONF;
	return 0;
}

static const struct dmi_system_id __initconst mmconf_dmi_table[] = {
	{
		.callback = set_check_enable_amd_mmconf,
		.ident = "Sun Microsystems Machine",
		.matches = {
			DMI_MATCH(DMI_SYS_VENDOR, "Sun Microsystems"),
		},
	},
	{}
};

/* Called from a __cpuinit function, but only on the BSP. */
void __ref check_enable_amd_mmconf_dmi(void)
{
	dmi_check_system(mmconf_dmi_table);
}
gpl-2.0
ChameleonOS/android_kernel_amazon_bowser-common
drivers/scsi/fnic/fnic_isr.c
11707
8328
/*
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "fnic_io.h"
#include "fnic.h"

/*
 * Legacy (INTx) interrupt handler: decode the PBA bits to see which
 * logical source fired (notify, error, or the combined WQ/RQ/copy-WQ
 * source) and service each.
 */
static irqreturn_t fnic_isr_legacy(int irq, void *data)
{
	struct fnic *fnic = data;
	u32 pba;
	unsigned long work_done = 0;

	pba = vnic_intr_legacy_pba(fnic->legacy_pba);
	if (!pba)
		return IRQ_NONE;   /* not ours (shared line) */

	if (pba & (1 << FNIC_INTX_NOTIFY)) {
		vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]);
		fnic_handle_link_event(fnic);
	}

	if (pba & (1 << FNIC_INTX_ERR)) {
		vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_ERR]);
		fnic_log_q_error(fnic);
	}

	if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) {
		work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
		work_done += fnic_wq_cmpl_handler(fnic, -1);
		work_done += fnic_rq_cmpl_handler(fnic, -1);

		vnic_intr_return_credits(&fnic->intr[FNIC_INTX_WQ_RQ_COPYWQ],
					 work_done,
					 1 /* unmask intr */,
					 1 /* reset intr timer */);
	}

	return IRQ_HANDLED;
}

/* MSI handler: single vector services copy-WQ, WQ and RQ completions. */
static irqreturn_t fnic_isr_msi(int irq, void *data)
{
	struct fnic *fnic = data;
	unsigned long work_done = 0;

	work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
	work_done += fnic_wq_cmpl_handler(fnic, -1);
	work_done += fnic_rq_cmpl_handler(fnic, -1);

	vnic_intr_return_credits(&fnic->intr[0],
				 work_done,
				 1 /* unmask intr */,
				 1 /* reset intr timer */);

	return IRQ_HANDLED;
}

/* MSI-X handler: RQ (receive queue) completions only. */
static irqreturn_t fnic_isr_msix_rq(int irq, void *data)
{
	struct fnic *fnic = data;
	unsigned long rq_work_done = 0;

	rq_work_done = fnic_rq_cmpl_handler(fnic, -1);
	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ],
				 rq_work_done,
				 1 /* unmask intr */,
				 1 /* reset intr timer */);

	return IRQ_HANDLED;
}

/* MSI-X handler: raw WQ (FCS work queue) completions only. */
static irqreturn_t fnic_isr_msix_wq(int irq, void *data)
{
	struct fnic *fnic = data;
	unsigned long wq_work_done = 0;

	wq_work_done = fnic_wq_cmpl_handler(fnic, -1);
	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ],
				 wq_work_done,
				 1 /* unmask intr */,
				 1 /* reset intr timer */);
	return IRQ_HANDLED;
}

/* MSI-X handler: SCSI copy-WQ completions only. */
static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)
{
	struct fnic *fnic = data;
	unsigned long wq_copy_work_done = 0;

	wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1);
	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
				 wq_copy_work_done,
				 1 /* unmask intr */,
				 1 /* reset intr timer */);
	return IRQ_HANDLED;
}

/* MSI-X handler: queue errors and link/notify events. */
static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data)
{
	struct fnic *fnic = data;

	vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]);
	fnic_log_q_error(fnic);
	fnic_handle_link_event(fnic);

	return IRQ_HANDLED;
}

/*
 * Release all IRQs previously requested by fnic_request_intr(), honoring
 * the interrupt mode currently programmed in the vnic device.
 */
void fnic_free_intr(struct fnic *fnic)
{
	int i;

	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(fnic->pdev->irq, fnic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(fnic->msix); i++)
			if (fnic->msix[i].requested)
				free_irq(fnic->msix_entry[i].vector,
					 fnic->msix[i].devid);
		break;

	default:
		break;
	}
}

/*
 * Request IRQ lines according to the interrupt mode already selected by
 * fnic_set_intr_mode().  For MSI-X, fills the per-vector handler table
 * first, then requests each vector; on any failure everything requested
 * so far is freed via fnic_free_intr().  Returns 0 or a -errno.
 */
int fnic_request_intr(struct fnic *fnic)
{
	int err = 0;
	int i;

	switch (vnic_dev_get_intr_mode(fnic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:
		err = request_irq(fnic->pdev->irq, &fnic_isr_legacy,
				  IRQF_SHARED, DRV_NAME, fnic);
		break;

	case VNIC_DEV_INTR_MODE_MSI:
		err = request_irq(fnic->pdev->irq, &fnic_isr_msi,
				  0, fnic->name, fnic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		sprintf(fnic->msix[FNIC_MSIX_RQ].devname,
			"%.11s-fcs-rq", fnic->name);
		fnic->msix[FNIC_MSIX_RQ].isr = fnic_isr_msix_rq;
		fnic->msix[FNIC_MSIX_RQ].devid = fnic;

		sprintf(fnic->msix[FNIC_MSIX_WQ].devname,
			"%.11s-fcs-wq", fnic->name);
		fnic->msix[FNIC_MSIX_WQ].isr = fnic_isr_msix_wq;
		fnic->msix[FNIC_MSIX_WQ].devid = fnic;

		sprintf(fnic->msix[FNIC_MSIX_WQ_COPY].devname,
			"%.11s-scsi-wq", fnic->name);
		fnic->msix[FNIC_MSIX_WQ_COPY].isr = fnic_isr_msix_wq_copy;
		fnic->msix[FNIC_MSIX_WQ_COPY].devid = fnic;

		sprintf(fnic->msix[FNIC_MSIX_ERR_NOTIFY].devname,
			"%.11s-err-notify", fnic->name);
		fnic->msix[FNIC_MSIX_ERR_NOTIFY].isr =
			fnic_isr_msix_err_notify;
		fnic->msix[FNIC_MSIX_ERR_NOTIFY].devid = fnic;

		for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) {
			err = request_irq(fnic->msix_entry[i].vector,
					  fnic->msix[i].isr, 0,
					  fnic->msix[i].devname,
					  fnic->msix[i].devid);
			if (err) {
				shost_printk(KERN_ERR, fnic->lport->host,
					     "MSIX: request_irq"
					     " failed %d\n", err);
				fnic_free_intr(fnic);
				break;
			}
			fnic->msix[i].requested = 1;
		}
		break;

	default:
		break;
	}

	return err;
}

/*
 * Negotiate an interrupt mode with the hardware, preferring MSI-X, then
 * MSI, then legacy INTx, and fix up the queue/CQ/interrupt counts in
 * *fnic accordingly.  Returns 0 on success, -EINVAL if no mode fits.
 */
int fnic_set_intr_mode(struct fnic *fnic)
{
	unsigned int n = ARRAY_SIZE(fnic->rq);
	unsigned int m = ARRAY_SIZE(fnic->wq);
	unsigned int o = ARRAY_SIZE(fnic->wq_copy);
	unsigned int i;

	/*
	 * Set interrupt mode (INTx, MSI, MSI-X) depending
	 * system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs
	 * (last INTR is used for WQ/RQ errors and notification area)
	 */

	BUG_ON(ARRAY_SIZE(fnic->msix_entry) < n + m + o + 1);
	for (i = 0; i < n + m + o + 1; i++)
		fnic->msix_entry[i].entry = i;

	if (fnic->rq_count >= n &&
	    fnic->raw_wq_count >= m &&
	    fnic->wq_copy_count >= o &&
	    fnic->cq_count >= n + m + o) {
		if (!pci_enable_msix(fnic->pdev, fnic->msix_entry,
				    n + m + o + 1)) {
			fnic->rq_count = n;
			fnic->raw_wq_count = m;
			fnic->wq_copy_count = o;
			fnic->wq_count = m + o;
			fnic->cq_count = n + m + o;
			fnic->intr_count = n + m + o + 1;
			fnic->err_intr_offset = FNIC_MSIX_ERR_NOTIFY;

			FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
				     "Using MSI-X Interrupts\n");
			vnic_dev_set_intr_mode(fnic->vdev,
					       VNIC_DEV_INTR_MODE_MSIX);
			return 0;
		}
	}

	/*
	 * Next try MSI
	 * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 1 INTR
	 */
	if (fnic->rq_count >= 1 &&
	    fnic->raw_wq_count >= 1 &&
	    fnic->wq_copy_count >= 1 &&
	    fnic->cq_count >= 3 &&
	    fnic->intr_count >= 1 &&
	    !pci_enable_msi(fnic->pdev)) {

		fnic->rq_count = 1;
		fnic->raw_wq_count = 1;
		fnic->wq_copy_count = 1;
		fnic->wq_count = 2;
		fnic->cq_count = 3;
		fnic->intr_count = 1;
		fnic->err_intr_offset = 0;

		FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
			     "Using MSI Interrupts\n");
		vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/*
	 * Next try INTx
	 * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 3 INTRs
	 * 1 INTR is used for all 3 queues, 1 INTR for queue errors
	 * 1 INTR for notification area
	 */

	if (fnic->rq_count >= 1 &&
	    fnic->raw_wq_count >= 1 &&
	    fnic->wq_copy_count >= 1 &&
	    fnic->cq_count >= 3 &&
	    fnic->intr_count >= 3) {

		fnic->rq_count = 1;
		fnic->raw_wq_count = 1;
		fnic->wq_copy_count = 1;
		fnic->cq_count = 3;
		fnic->intr_count = 3;

		FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
			     "Using Legacy Interrupts\n");
		vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}

/*
 * Disable MSI/MSI-X (as appropriate) and fall the device back to the
 * default INTx interrupt mode.
 */
void fnic_clear_intr_mode(struct fnic *fnic)
{
	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(fnic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(fnic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
}
gpl-2.0
zarboz/android_kernel_htc_dlx
drivers/scsi/fnic/fnic_isr.c
11707
8328
/*
 * NOTE(review): this chunk is a second copy of drivers/scsi/fnic/fnic_isr.c
 * (from a different repo in this dataset).  It is truncated: the final
 * declaration at the end of this span continues past the visible chunk.
 *
 * Copyright 2008 Cisco Systems, Inc.  All rights reserved.
 * Copyright 2007 Nuova Systems, Inc.  All rights reserved.
 *
 * This program is free software; you may redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <scsi/libfc.h>
#include <scsi/fc_frame.h>
#include "vnic_dev.h"
#include "vnic_intr.h"
#include "vnic_stats.h"
#include "fnic_io.h"
#include "fnic.h"

/* Legacy (INTx) handler: decode PBA bits and service each source. */
static irqreturn_t fnic_isr_legacy(int irq, void *data)
{
	struct fnic *fnic = data;
	u32 pba;
	unsigned long work_done = 0;

	pba = vnic_intr_legacy_pba(fnic->legacy_pba);
	if (!pba)
		return IRQ_NONE;   /* not ours (shared line) */

	if (pba & (1 << FNIC_INTX_NOTIFY)) {
		vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]);
		fnic_handle_link_event(fnic);
	}

	if (pba & (1 << FNIC_INTX_ERR)) {
		vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_ERR]);
		fnic_log_q_error(fnic);
	}

	if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) {
		work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
		work_done += fnic_wq_cmpl_handler(fnic, -1);
		work_done += fnic_rq_cmpl_handler(fnic, -1);

		vnic_intr_return_credits(&fnic->intr[FNIC_INTX_WQ_RQ_COPYWQ],
					 work_done,
					 1 /* unmask intr */,
					 1 /* reset intr timer */);
	}

	return IRQ_HANDLED;
}

/* MSI handler: single vector services copy-WQ, WQ and RQ completions. */
static irqreturn_t fnic_isr_msi(int irq, void *data)
{
	struct fnic *fnic = data;
	unsigned long work_done = 0;

	work_done += fnic_wq_copy_cmpl_handler(fnic, -1);
	work_done += fnic_wq_cmpl_handler(fnic, -1);
	work_done += fnic_rq_cmpl_handler(fnic, -1);

	vnic_intr_return_credits(&fnic->intr[0],
				 work_done,
				 1 /* unmask intr */,
				 1 /* reset intr timer */);

	return IRQ_HANDLED;
}

/* MSI-X handler: RQ completions only. */
static irqreturn_t fnic_isr_msix_rq(int irq, void *data)
{
	struct fnic *fnic = data;
	unsigned long rq_work_done = 0;

	rq_work_done = fnic_rq_cmpl_handler(fnic, -1);
	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ],
				 rq_work_done,
				 1 /* unmask intr */,
				 1 /* reset intr timer */);

	return IRQ_HANDLED;
}

/* MSI-X handler: raw WQ completions only. */
static irqreturn_t fnic_isr_msix_wq(int irq, void *data)
{
	struct fnic *fnic = data;
	unsigned long wq_work_done = 0;

	wq_work_done = fnic_wq_cmpl_handler(fnic, -1);
	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ],
				 wq_work_done,
				 1 /* unmask intr */,
				 1 /* reset intr timer */);
	return IRQ_HANDLED;
}

/* MSI-X handler: SCSI copy-WQ completions only. */
static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data)
{
	struct fnic *fnic = data;
	unsigned long wq_copy_work_done = 0;

	wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1);
	vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY],
				 wq_copy_work_done,
				 1 /* unmask intr */,
				 1 /* reset intr timer */);
	return IRQ_HANDLED;
}

/* MSI-X handler: queue errors and link/notify events. */
static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data)
{
	struct fnic *fnic = data;

	vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]);
	fnic_log_q_error(fnic);
	fnic_handle_link_event(fnic);

	return IRQ_HANDLED;
}

/* Release all IRQs requested by fnic_request_intr() for the current mode. */
void fnic_free_intr(struct fnic *fnic)
{
	int i;

	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(fnic->pdev->irq, fnic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(fnic->msix); i++)
			if (fnic->msix[i].requested)
				free_irq(fnic->msix_entry[i].vector,
					 fnic->msix[i].devid);
		break;

	default:
		break;
	}
}

/*
 * Request IRQ lines for the selected interrupt mode; on MSI-X failure,
 * frees everything requested so far.  Returns 0 or a -errno.
 */
int fnic_request_intr(struct fnic *fnic)
{
	int err = 0;
	int i;

	switch (vnic_dev_get_intr_mode(fnic->vdev)) {

	case VNIC_DEV_INTR_MODE_INTX:
		err = request_irq(fnic->pdev->irq, &fnic_isr_legacy,
				  IRQF_SHARED, DRV_NAME, fnic);
		break;

	case VNIC_DEV_INTR_MODE_MSI:
		err = request_irq(fnic->pdev->irq, &fnic_isr_msi,
				  0, fnic->name, fnic);
		break;

	case VNIC_DEV_INTR_MODE_MSIX:

		sprintf(fnic->msix[FNIC_MSIX_RQ].devname,
			"%.11s-fcs-rq", fnic->name);
		fnic->msix[FNIC_MSIX_RQ].isr = fnic_isr_msix_rq;
		fnic->msix[FNIC_MSIX_RQ].devid = fnic;

		sprintf(fnic->msix[FNIC_MSIX_WQ].devname,
			"%.11s-fcs-wq", fnic->name);
		fnic->msix[FNIC_MSIX_WQ].isr = fnic_isr_msix_wq;
		fnic->msix[FNIC_MSIX_WQ].devid = fnic;

		sprintf(fnic->msix[FNIC_MSIX_WQ_COPY].devname,
			"%.11s-scsi-wq", fnic->name);
		fnic->msix[FNIC_MSIX_WQ_COPY].isr = fnic_isr_msix_wq_copy;
		fnic->msix[FNIC_MSIX_WQ_COPY].devid = fnic;

		sprintf(fnic->msix[FNIC_MSIX_ERR_NOTIFY].devname,
			"%.11s-err-notify", fnic->name);
		fnic->msix[FNIC_MSIX_ERR_NOTIFY].isr =
			fnic_isr_msix_err_notify;
		fnic->msix[FNIC_MSIX_ERR_NOTIFY].devid = fnic;

		for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) {
			err = request_irq(fnic->msix_entry[i].vector,
					  fnic->msix[i].isr, 0,
					  fnic->msix[i].devname,
					  fnic->msix[i].devid);
			if (err) {
				shost_printk(KERN_ERR, fnic->lport->host,
					     "MSIX: request_irq"
					     " failed %d\n", err);
				fnic_free_intr(fnic);
				break;
			}
			fnic->msix[i].requested = 1;
		}
		break;

	default:
		break;
	}

	return err;
}

/*
 * Negotiate an interrupt mode (MSI-X > MSI > INTx) and fix up the
 * queue/CQ/interrupt counts in *fnic.  Returns 0 or -EINVAL.
 */
int fnic_set_intr_mode(struct fnic *fnic)
{
	unsigned int n = ARRAY_SIZE(fnic->rq);
	unsigned int m = ARRAY_SIZE(fnic->wq);
	unsigned int o = ARRAY_SIZE(fnic->wq_copy);
	unsigned int i;

	/*
	 * Set interrupt mode (INTx, MSI, MSI-X) depending
	 * system capabilities.
	 *
	 * Try MSI-X first
	 *
	 * We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs
	 * (last INTR is used for WQ/RQ errors and notification area)
	 */

	BUG_ON(ARRAY_SIZE(fnic->msix_entry) < n + m + o + 1);
	for (i = 0; i < n + m + o + 1; i++)
		fnic->msix_entry[i].entry = i;

	if (fnic->rq_count >= n &&
	    fnic->raw_wq_count >= m &&
	    fnic->wq_copy_count >= o &&
	    fnic->cq_count >= n + m + o) {
		if (!pci_enable_msix(fnic->pdev, fnic->msix_entry,
				    n + m + o + 1)) {
			fnic->rq_count = n;
			fnic->raw_wq_count = m;
			fnic->wq_copy_count = o;
			fnic->wq_count = m + o;
			fnic->cq_count = n + m + o;
			fnic->intr_count = n + m + o + 1;
			fnic->err_intr_offset = FNIC_MSIX_ERR_NOTIFY;

			FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
				     "Using MSI-X Interrupts\n");
			vnic_dev_set_intr_mode(fnic->vdev,
					       VNIC_DEV_INTR_MODE_MSIX);
			return 0;
		}
	}

	/*
	 * Next try MSI
	 * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 1 INTR
	 */
	if (fnic->rq_count >= 1 &&
	    fnic->raw_wq_count >= 1 &&
	    fnic->wq_copy_count >= 1 &&
	    fnic->cq_count >= 3 &&
	    fnic->intr_count >= 1 &&
	    !pci_enable_msi(fnic->pdev)) {

		fnic->rq_count = 1;
		fnic->raw_wq_count = 1;
		fnic->wq_copy_count = 1;
		fnic->wq_count = 2;
		fnic->cq_count = 3;
		fnic->intr_count = 1;
		fnic->err_intr_offset = 0;

		FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
			     "Using MSI Interrupts\n");
		vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI);

		return 0;
	}

	/*
	 * Next try INTx
	 * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 3 INTRs
	 * 1 INTR is used for all 3 queues, 1 INTR for queue errors
	 * 1 INTR for notification area
	 */

	if (fnic->rq_count >= 1 &&
	    fnic->raw_wq_count >= 1 &&
	    fnic->wq_copy_count >= 1 &&
	    fnic->cq_count >= 3 &&
	    fnic->intr_count >= 3) {

		fnic->rq_count = 1;
		fnic->raw_wq_count = 1;
		fnic->wq_copy_count = 1;
		fnic->cq_count = 3;
		fnic->intr_count = 3;

		FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host,
			     "Using Legacy Interrupts\n");
		vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);

		return 0;
	}

	vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);

	return -EINVAL;
}

/* NOTE(review): declaration below continues past the end of this chunk. */
void
fnic_clear_intr_mode(struct fnic *fnic) { switch (vnic_dev_get_intr_mode(fnic->vdev)) { case VNIC_DEV_INTR_MODE_MSIX: pci_disable_msix(fnic->pdev); break; case VNIC_DEV_INTR_MODE_MSI: pci_disable_msi(fnic->pdev); break; default: break; } vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX); }
gpl-2.0
linux-rockchip/linux-rockchip
drivers/scsi/fnic/fnic_isr.c
11707
8328
/* * Copyright 2008 Cisco Systems, Inc. All rights reserved. * Copyright 2007 Nuova Systems, Inc. All rights reserved. * * This program is free software; you may redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/string.h> #include <linux/errno.h> #include <linux/pci.h> #include <linux/interrupt.h> #include <scsi/libfc.h> #include <scsi/fc_frame.h> #include "vnic_dev.h" #include "vnic_intr.h" #include "vnic_stats.h" #include "fnic_io.h" #include "fnic.h" static irqreturn_t fnic_isr_legacy(int irq, void *data) { struct fnic *fnic = data; u32 pba; unsigned long work_done = 0; pba = vnic_intr_legacy_pba(fnic->legacy_pba); if (!pba) return IRQ_NONE; if (pba & (1 << FNIC_INTX_NOTIFY)) { vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_NOTIFY]); fnic_handle_link_event(fnic); } if (pba & (1 << FNIC_INTX_ERR)) { vnic_intr_return_all_credits(&fnic->intr[FNIC_INTX_ERR]); fnic_log_q_error(fnic); } if (pba & (1 << FNIC_INTX_WQ_RQ_COPYWQ)) { work_done += fnic_wq_copy_cmpl_handler(fnic, -1); work_done += fnic_wq_cmpl_handler(fnic, -1); work_done += fnic_rq_cmpl_handler(fnic, -1); vnic_intr_return_credits(&fnic->intr[FNIC_INTX_WQ_RQ_COPYWQ], work_done, 1 /* unmask intr */, 1 /* reset intr timer */); } return IRQ_HANDLED; } static irqreturn_t fnic_isr_msi(int irq, void *data) { struct fnic *fnic = data; unsigned long work_done = 0; work_done += fnic_wq_copy_cmpl_handler(fnic, -1); 
work_done += fnic_wq_cmpl_handler(fnic, -1); work_done += fnic_rq_cmpl_handler(fnic, -1); vnic_intr_return_credits(&fnic->intr[0], work_done, 1 /* unmask intr */, 1 /* reset intr timer */); return IRQ_HANDLED; } static irqreturn_t fnic_isr_msix_rq(int irq, void *data) { struct fnic *fnic = data; unsigned long rq_work_done = 0; rq_work_done = fnic_rq_cmpl_handler(fnic, -1); vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_RQ], rq_work_done, 1 /* unmask intr */, 1 /* reset intr timer */); return IRQ_HANDLED; } static irqreturn_t fnic_isr_msix_wq(int irq, void *data) { struct fnic *fnic = data; unsigned long wq_work_done = 0; wq_work_done = fnic_wq_cmpl_handler(fnic, -1); vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ], wq_work_done, 1 /* unmask intr */, 1 /* reset intr timer */); return IRQ_HANDLED; } static irqreturn_t fnic_isr_msix_wq_copy(int irq, void *data) { struct fnic *fnic = data; unsigned long wq_copy_work_done = 0; wq_copy_work_done = fnic_wq_copy_cmpl_handler(fnic, -1); vnic_intr_return_credits(&fnic->intr[FNIC_MSIX_WQ_COPY], wq_copy_work_done, 1 /* unmask intr */, 1 /* reset intr timer */); return IRQ_HANDLED; } static irqreturn_t fnic_isr_msix_err_notify(int irq, void *data) { struct fnic *fnic = data; vnic_intr_return_all_credits(&fnic->intr[FNIC_MSIX_ERR_NOTIFY]); fnic_log_q_error(fnic); fnic_handle_link_event(fnic); return IRQ_HANDLED; } void fnic_free_intr(struct fnic *fnic) { int i; switch (vnic_dev_get_intr_mode(fnic->vdev)) { case VNIC_DEV_INTR_MODE_INTX: case VNIC_DEV_INTR_MODE_MSI: free_irq(fnic->pdev->irq, fnic); break; case VNIC_DEV_INTR_MODE_MSIX: for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) if (fnic->msix[i].requested) free_irq(fnic->msix_entry[i].vector, fnic->msix[i].devid); break; default: break; } } int fnic_request_intr(struct fnic *fnic) { int err = 0; int i; switch (vnic_dev_get_intr_mode(fnic->vdev)) { case VNIC_DEV_INTR_MODE_INTX: err = request_irq(fnic->pdev->irq, &fnic_isr_legacy, IRQF_SHARED, DRV_NAME, fnic); break; case 
VNIC_DEV_INTR_MODE_MSI: err = request_irq(fnic->pdev->irq, &fnic_isr_msi, 0, fnic->name, fnic); break; case VNIC_DEV_INTR_MODE_MSIX: sprintf(fnic->msix[FNIC_MSIX_RQ].devname, "%.11s-fcs-rq", fnic->name); fnic->msix[FNIC_MSIX_RQ].isr = fnic_isr_msix_rq; fnic->msix[FNIC_MSIX_RQ].devid = fnic; sprintf(fnic->msix[FNIC_MSIX_WQ].devname, "%.11s-fcs-wq", fnic->name); fnic->msix[FNIC_MSIX_WQ].isr = fnic_isr_msix_wq; fnic->msix[FNIC_MSIX_WQ].devid = fnic; sprintf(fnic->msix[FNIC_MSIX_WQ_COPY].devname, "%.11s-scsi-wq", fnic->name); fnic->msix[FNIC_MSIX_WQ_COPY].isr = fnic_isr_msix_wq_copy; fnic->msix[FNIC_MSIX_WQ_COPY].devid = fnic; sprintf(fnic->msix[FNIC_MSIX_ERR_NOTIFY].devname, "%.11s-err-notify", fnic->name); fnic->msix[FNIC_MSIX_ERR_NOTIFY].isr = fnic_isr_msix_err_notify; fnic->msix[FNIC_MSIX_ERR_NOTIFY].devid = fnic; for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) { err = request_irq(fnic->msix_entry[i].vector, fnic->msix[i].isr, 0, fnic->msix[i].devname, fnic->msix[i].devid); if (err) { shost_printk(KERN_ERR, fnic->lport->host, "MSIX: request_irq" " failed %d\n", err); fnic_free_intr(fnic); break; } fnic->msix[i].requested = 1; } break; default: break; } return err; } int fnic_set_intr_mode(struct fnic *fnic) { unsigned int n = ARRAY_SIZE(fnic->rq); unsigned int m = ARRAY_SIZE(fnic->wq); unsigned int o = ARRAY_SIZE(fnic->wq_copy); unsigned int i; /* * Set interrupt mode (INTx, MSI, MSI-X) depending * system capabilities. 
* * Try MSI-X first * * We need n RQs, m WQs, o Copy WQs, n+m+o CQs, and n+m+o+1 INTRs * (last INTR is used for WQ/RQ errors and notification area) */ BUG_ON(ARRAY_SIZE(fnic->msix_entry) < n + m + o + 1); for (i = 0; i < n + m + o + 1; i++) fnic->msix_entry[i].entry = i; if (fnic->rq_count >= n && fnic->raw_wq_count >= m && fnic->wq_copy_count >= o && fnic->cq_count >= n + m + o) { if (!pci_enable_msix(fnic->pdev, fnic->msix_entry, n + m + o + 1)) { fnic->rq_count = n; fnic->raw_wq_count = m; fnic->wq_copy_count = o; fnic->wq_count = m + o; fnic->cq_count = n + m + o; fnic->intr_count = n + m + o + 1; fnic->err_intr_offset = FNIC_MSIX_ERR_NOTIFY; FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, "Using MSI-X Interrupts\n"); vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSIX); return 0; } } /* * Next try MSI * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 1 INTR */ if (fnic->rq_count >= 1 && fnic->raw_wq_count >= 1 && fnic->wq_copy_count >= 1 && fnic->cq_count >= 3 && fnic->intr_count >= 1 && !pci_enable_msi(fnic->pdev)) { fnic->rq_count = 1; fnic->raw_wq_count = 1; fnic->wq_copy_count = 1; fnic->wq_count = 2; fnic->cq_count = 3; fnic->intr_count = 1; fnic->err_intr_offset = 0; FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, "Using MSI Interrupts\n"); vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_MSI); return 0; } /* * Next try INTx * We need 1 RQ, 1 WQ, 1 WQ_COPY, 3 CQs, and 3 INTRs * 1 INTR is used for all 3 queues, 1 INTR for queue errors * 1 INTR for notification area */ if (fnic->rq_count >= 1 && fnic->raw_wq_count >= 1 && fnic->wq_copy_count >= 1 && fnic->cq_count >= 3 && fnic->intr_count >= 3) { fnic->rq_count = 1; fnic->raw_wq_count = 1; fnic->wq_copy_count = 1; fnic->cq_count = 3; fnic->intr_count = 3; FNIC_ISR_DBG(KERN_DEBUG, fnic->lport->host, "Using Legacy Interrupts\n"); vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX); return 0; } vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN); return -EINVAL; } void 
fnic_clear_intr_mode(struct fnic *fnic) { switch (vnic_dev_get_intr_mode(fnic->vdev)) { case VNIC_DEV_INTR_MODE_MSIX: pci_disable_msix(fnic->pdev); break; case VNIC_DEV_INTR_MODE_MSI: pci_disable_msi(fnic->pdev); break; default: break; } vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX); }
gpl-2.0
SerenityS/android_kernel_allwinner_a31
drivers/uwb/address.c
12987
10383
/* * Ultra Wide Band * Address management * * Copyright (C) 2005-2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * FIXME: docs */ #include <linux/slab.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/device.h> #include <linux/random.h> #include <linux/etherdevice.h> #include "uwb-internal.h" /** Device Address Management command */ struct uwb_rc_cmd_dev_addr_mgmt { struct uwb_rccb rccb; u8 bmOperationType; u8 baAddr[6]; } __attribute__((packed)); /** * Low level command for setting/getting UWB radio's addresses * * @hwarc: HWA Radio Control interface instance * @bmOperationType: * Set/get, MAC/DEV (see WUSB1.0[8.6.2.2]) * @baAddr: address buffer--assumed to have enough data to hold * the address type requested. * @reply: Pointer to reply buffer (can be stack allocated) * @returns: 0 if ok, < 0 errno code on error. * * @cmd has to be allocated because USB cannot grok USB or vmalloc * buffers depending on your combination of host architecture. 
*/ static int uwb_rc_dev_addr_mgmt(struct uwb_rc *rc, u8 bmOperationType, const u8 *baAddr, struct uwb_rc_evt_dev_addr_mgmt *reply) { int result; struct uwb_rc_cmd_dev_addr_mgmt *cmd; result = -ENOMEM; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) goto error_kzalloc; cmd->rccb.bCommandType = UWB_RC_CET_GENERAL; cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_DEV_ADDR_MGMT); cmd->bmOperationType = bmOperationType; if (baAddr) { size_t size = 0; switch (bmOperationType >> 1) { case 0: size = 2; break; case 1: size = 6; break; default: BUG(); } memcpy(cmd->baAddr, baAddr, size); } reply->rceb.bEventType = UWB_RC_CET_GENERAL; reply->rceb.wEvent = UWB_RC_CMD_DEV_ADDR_MGMT; result = uwb_rc_cmd(rc, "DEV-ADDR-MGMT", &cmd->rccb, sizeof(*cmd), &reply->rceb, sizeof(*reply)); if (result < 0) goto error_cmd; if (result < sizeof(*reply)) { dev_err(&rc->uwb_dev.dev, "DEV-ADDR-MGMT: not enough data replied: " "%d vs %zu bytes needed\n", result, sizeof(*reply)); result = -ENOMSG; } else if (reply->bResultCode != UWB_RC_RES_SUCCESS) { dev_err(&rc->uwb_dev.dev, "DEV-ADDR-MGMT: command execution failed: %s (%d)\n", uwb_rc_strerror(reply->bResultCode), reply->bResultCode); result = -EIO; } else result = 0; error_cmd: kfree(cmd); error_kzalloc: return result; } /** * Set the UWB RC MAC or device address. * * @rc: UWB Radio Controller * @_addr: Pointer to address to write [assumed to be either a * 'struct uwb_mac_addr *' or a 'struct uwb_dev_addr *']. * @type: Type of address to set (UWB_ADDR_DEV or UWB_ADDR_MAC). * @returns: 0 if ok, < 0 errno code on error. * * Some anal retentivity here: even if both 'struct * uwb_{dev,mac}_addr' have the actual byte array in the same offset * and I could just pass _addr to hwarc_cmd_dev_addr_mgmt(), I prefer * to use some syntatic sugar in case someday we decide to change the * format of the structs. The compiler will optimize it out anyway. 
*/ static int uwb_rc_addr_set(struct uwb_rc *rc, const void *_addr, enum uwb_addr_type type) { int result; u8 bmOperationType = 0x1; /* Set address */ const struct uwb_dev_addr *dev_addr = _addr; const struct uwb_mac_addr *mac_addr = _addr; struct uwb_rc_evt_dev_addr_mgmt reply; const u8 *baAddr; result = -EINVAL; switch (type) { case UWB_ADDR_DEV: baAddr = dev_addr->data; break; case UWB_ADDR_MAC: baAddr = mac_addr->data; bmOperationType |= 0x2; break; default: return result; } return uwb_rc_dev_addr_mgmt(rc, bmOperationType, baAddr, &reply); } /** * Get the UWB radio's MAC or device address. * * @rc: UWB Radio Controller * @_addr: Where to write the address data [assumed to be either a * 'struct uwb_mac_addr *' or a 'struct uwb_dev_addr *']. * @type: Type of address to get (UWB_ADDR_DEV or UWB_ADDR_MAC). * @returns: 0 if ok (and *_addr set), < 0 errno code on error. * * See comment in uwb_rc_addr_set() about anal retentivity in the * type handling of the address variables. */ static int uwb_rc_addr_get(struct uwb_rc *rc, void *_addr, enum uwb_addr_type type) { int result; u8 bmOperationType = 0x0; /* Get address */ struct uwb_rc_evt_dev_addr_mgmt evt; struct uwb_dev_addr *dev_addr = _addr; struct uwb_mac_addr *mac_addr = _addr; u8 *baAddr; result = -EINVAL; switch (type) { case UWB_ADDR_DEV: baAddr = dev_addr->data; break; case UWB_ADDR_MAC: bmOperationType |= 0x2; baAddr = mac_addr->data; break; default: return result; } result = uwb_rc_dev_addr_mgmt(rc, bmOperationType, baAddr, &evt); if (result == 0) switch (type) { case UWB_ADDR_DEV: memcpy(&dev_addr->data, evt.baAddr, sizeof(dev_addr->data)); break; case UWB_ADDR_MAC: memcpy(&mac_addr->data, evt.baAddr, sizeof(mac_addr->data)); break; default: /* shut gcc up */ BUG(); } return result; } /** Get @rc's MAC address to @addr */ int uwb_rc_mac_addr_get(struct uwb_rc *rc, struct uwb_mac_addr *addr) { return uwb_rc_addr_get(rc, addr, UWB_ADDR_MAC); } EXPORT_SYMBOL_GPL(uwb_rc_mac_addr_get); /** Get @rc's device 
address to @addr */ int uwb_rc_dev_addr_get(struct uwb_rc *rc, struct uwb_dev_addr *addr) { return uwb_rc_addr_get(rc, addr, UWB_ADDR_DEV); } EXPORT_SYMBOL_GPL(uwb_rc_dev_addr_get); /** Set @rc's address to @addr */ int uwb_rc_mac_addr_set(struct uwb_rc *rc, const struct uwb_mac_addr *addr) { int result = -EINVAL; mutex_lock(&rc->uwb_dev.mutex); result = uwb_rc_addr_set(rc, addr, UWB_ADDR_MAC); mutex_unlock(&rc->uwb_dev.mutex); return result; } /** Set @rc's address to @addr */ int uwb_rc_dev_addr_set(struct uwb_rc *rc, const struct uwb_dev_addr *addr) { int result = -EINVAL; mutex_lock(&rc->uwb_dev.mutex); result = uwb_rc_addr_set(rc, addr, UWB_ADDR_DEV); rc->uwb_dev.dev_addr = *addr; mutex_unlock(&rc->uwb_dev.mutex); return result; } /* Returns !0 if given address is already assigned to device. */ int __uwb_mac_addr_assigned_check(struct device *dev, void *_addr) { struct uwb_dev *uwb_dev = to_uwb_dev(dev); struct uwb_mac_addr *addr = _addr; if (!uwb_mac_addr_cmp(addr, &uwb_dev->mac_addr)) return !0; return 0; } /* Returns !0 if given address is already assigned to device. */ int __uwb_dev_addr_assigned_check(struct device *dev, void *_addr) { struct uwb_dev *uwb_dev = to_uwb_dev(dev); struct uwb_dev_addr *addr = _addr; if (!uwb_dev_addr_cmp(addr, &uwb_dev->dev_addr)) return !0; return 0; } /** * uwb_dev_addr_assign - assigned a generated DevAddr to a radio controller * @rc: the (local) radio controller device requiring a new DevAddr * * A new DevAddr is required when: * - first setting up a radio controller * - if the hardware reports a DevAddr conflict * * The DevAddr is randomly generated in the generated DevAddr range * [0x100, 0xfeff]. The number of devices in a beacon group is limited * by mMaxBPLength (96) so this address space will never be exhausted. * * [ECMA-368] 17.1.1, 17.16. 
*/ int uwb_rc_dev_addr_assign(struct uwb_rc *rc) { struct uwb_dev_addr new_addr; do { get_random_bytes(new_addr.data, sizeof(new_addr.data)); } while (new_addr.data[0] == 0x00 || new_addr.data[0] == 0xff || __uwb_dev_addr_assigned(rc, &new_addr)); return uwb_rc_dev_addr_set(rc, &new_addr); } /** * uwbd_evt_handle_rc_dev_addr_conflict - handle a DEV_ADDR_CONFLICT event * @evt: the DEV_ADDR_CONFLICT notification from the radio controller * * A new (non-conflicting) DevAddr is assigned to the radio controller. * * [ECMA-368] 17.1.1.1. */ int uwbd_evt_handle_rc_dev_addr_conflict(struct uwb_event *evt) { struct uwb_rc *rc = evt->rc; return uwb_rc_dev_addr_assign(rc); } /* * Print the 48-bit EUI MAC address of the radio controller when * reading /sys/class/uwb_rc/XX/mac_address */ static ssize_t uwb_rc_mac_addr_show(struct device *dev, struct device_attribute *attr, char *buf) { struct uwb_dev *uwb_dev = to_uwb_dev(dev); struct uwb_rc *rc = uwb_dev->rc; struct uwb_mac_addr addr; ssize_t result; mutex_lock(&rc->uwb_dev.mutex); result = uwb_rc_addr_get(rc, &addr, UWB_ADDR_MAC); mutex_unlock(&rc->uwb_dev.mutex); if (result >= 0) { result = uwb_mac_addr_print(buf, UWB_ADDR_STRSIZE, &addr); buf[result++] = '\n'; } return result; } /* * Parse a 48 bit address written to /sys/class/uwb_rc/XX/mac_address * and if correct, set it. 
*/ static ssize_t uwb_rc_mac_addr_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct uwb_dev *uwb_dev = to_uwb_dev(dev); struct uwb_rc *rc = uwb_dev->rc; struct uwb_mac_addr addr; ssize_t result; result = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx\n", &addr.data[0], &addr.data[1], &addr.data[2], &addr.data[3], &addr.data[4], &addr.data[5]); if (result != 6) { result = -EINVAL; goto out; } if (is_multicast_ether_addr(addr.data)) { dev_err(&rc->uwb_dev.dev, "refusing to set multicast " "MAC address %s\n", buf); result = -EINVAL; goto out; } result = uwb_rc_mac_addr_set(rc, &addr); if (result == 0) rc->uwb_dev.mac_addr = addr; out: return result < 0 ? result : size; } DEVICE_ATTR(mac_address, S_IRUGO | S_IWUSR, uwb_rc_mac_addr_show, uwb_rc_mac_addr_store); /** Print @addr to @buf, @return bytes written */ size_t __uwb_addr_print(char *buf, size_t buf_size, const unsigned char *addr, int type) { size_t result; if (type) result = scnprintf(buf, buf_size, "%pM", addr); else result = scnprintf(buf, buf_size, "%02x:%02x", addr[1], addr[0]); return result; } EXPORT_SYMBOL_GPL(__uwb_addr_print);
gpl-2.0
akuster/linux-yocto-3.14
drivers/uwb/address.c
12987
10383
/* * Ultra Wide Band * Address management * * Copyright (C) 2005-2006 Intel Corporation * Inaky Perez-Gonzalez <inaky.perez-gonzalez@intel.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA * 02110-1301, USA. * * * FIXME: docs */ #include <linux/slab.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/device.h> #include <linux/random.h> #include <linux/etherdevice.h> #include "uwb-internal.h" /** Device Address Management command */ struct uwb_rc_cmd_dev_addr_mgmt { struct uwb_rccb rccb; u8 bmOperationType; u8 baAddr[6]; } __attribute__((packed)); /** * Low level command for setting/getting UWB radio's addresses * * @hwarc: HWA Radio Control interface instance * @bmOperationType: * Set/get, MAC/DEV (see WUSB1.0[8.6.2.2]) * @baAddr: address buffer--assumed to have enough data to hold * the address type requested. * @reply: Pointer to reply buffer (can be stack allocated) * @returns: 0 if ok, < 0 errno code on error. * * @cmd has to be allocated because USB cannot grok USB or vmalloc * buffers depending on your combination of host architecture. 
*/ static int uwb_rc_dev_addr_mgmt(struct uwb_rc *rc, u8 bmOperationType, const u8 *baAddr, struct uwb_rc_evt_dev_addr_mgmt *reply) { int result; struct uwb_rc_cmd_dev_addr_mgmt *cmd; result = -ENOMEM; cmd = kzalloc(sizeof(*cmd), GFP_KERNEL); if (cmd == NULL) goto error_kzalloc; cmd->rccb.bCommandType = UWB_RC_CET_GENERAL; cmd->rccb.wCommand = cpu_to_le16(UWB_RC_CMD_DEV_ADDR_MGMT); cmd->bmOperationType = bmOperationType; if (baAddr) { size_t size = 0; switch (bmOperationType >> 1) { case 0: size = 2; break; case 1: size = 6; break; default: BUG(); } memcpy(cmd->baAddr, baAddr, size); } reply->rceb.bEventType = UWB_RC_CET_GENERAL; reply->rceb.wEvent = UWB_RC_CMD_DEV_ADDR_MGMT; result = uwb_rc_cmd(rc, "DEV-ADDR-MGMT", &cmd->rccb, sizeof(*cmd), &reply->rceb, sizeof(*reply)); if (result < 0) goto error_cmd; if (result < sizeof(*reply)) { dev_err(&rc->uwb_dev.dev, "DEV-ADDR-MGMT: not enough data replied: " "%d vs %zu bytes needed\n", result, sizeof(*reply)); result = -ENOMSG; } else if (reply->bResultCode != UWB_RC_RES_SUCCESS) { dev_err(&rc->uwb_dev.dev, "DEV-ADDR-MGMT: command execution failed: %s (%d)\n", uwb_rc_strerror(reply->bResultCode), reply->bResultCode); result = -EIO; } else result = 0; error_cmd: kfree(cmd); error_kzalloc: return result; } /** * Set the UWB RC MAC or device address. * * @rc: UWB Radio Controller * @_addr: Pointer to address to write [assumed to be either a * 'struct uwb_mac_addr *' or a 'struct uwb_dev_addr *']. * @type: Type of address to set (UWB_ADDR_DEV or UWB_ADDR_MAC). * @returns: 0 if ok, < 0 errno code on error. * * Some anal retentivity here: even if both 'struct * uwb_{dev,mac}_addr' have the actual byte array in the same offset * and I could just pass _addr to hwarc_cmd_dev_addr_mgmt(), I prefer * to use some syntatic sugar in case someday we decide to change the * format of the structs. The compiler will optimize it out anyway. 
*/ static int uwb_rc_addr_set(struct uwb_rc *rc, const void *_addr, enum uwb_addr_type type) { int result; u8 bmOperationType = 0x1; /* Set address */ const struct uwb_dev_addr *dev_addr = _addr; const struct uwb_mac_addr *mac_addr = _addr; struct uwb_rc_evt_dev_addr_mgmt reply; const u8 *baAddr; result = -EINVAL; switch (type) { case UWB_ADDR_DEV: baAddr = dev_addr->data; break; case UWB_ADDR_MAC: baAddr = mac_addr->data; bmOperationType |= 0x2; break; default: return result; } return uwb_rc_dev_addr_mgmt(rc, bmOperationType, baAddr, &reply); } /** * Get the UWB radio's MAC or device address. * * @rc: UWB Radio Controller * @_addr: Where to write the address data [assumed to be either a * 'struct uwb_mac_addr *' or a 'struct uwb_dev_addr *']. * @type: Type of address to get (UWB_ADDR_DEV or UWB_ADDR_MAC). * @returns: 0 if ok (and *_addr set), < 0 errno code on error. * * See comment in uwb_rc_addr_set() about anal retentivity in the * type handling of the address variables. */ static int uwb_rc_addr_get(struct uwb_rc *rc, void *_addr, enum uwb_addr_type type) { int result; u8 bmOperationType = 0x0; /* Get address */ struct uwb_rc_evt_dev_addr_mgmt evt; struct uwb_dev_addr *dev_addr = _addr; struct uwb_mac_addr *mac_addr = _addr; u8 *baAddr; result = -EINVAL; switch (type) { case UWB_ADDR_DEV: baAddr = dev_addr->data; break; case UWB_ADDR_MAC: bmOperationType |= 0x2; baAddr = mac_addr->data; break; default: return result; } result = uwb_rc_dev_addr_mgmt(rc, bmOperationType, baAddr, &evt); if (result == 0) switch (type) { case UWB_ADDR_DEV: memcpy(&dev_addr->data, evt.baAddr, sizeof(dev_addr->data)); break; case UWB_ADDR_MAC: memcpy(&mac_addr->data, evt.baAddr, sizeof(mac_addr->data)); break; default: /* shut gcc up */ BUG(); } return result; } /** Get @rc's MAC address to @addr */ int uwb_rc_mac_addr_get(struct uwb_rc *rc, struct uwb_mac_addr *addr) { return uwb_rc_addr_get(rc, addr, UWB_ADDR_MAC); } EXPORT_SYMBOL_GPL(uwb_rc_mac_addr_get); /** Get @rc's device 
address to @addr */ int uwb_rc_dev_addr_get(struct uwb_rc *rc, struct uwb_dev_addr *addr) { return uwb_rc_addr_get(rc, addr, UWB_ADDR_DEV); } EXPORT_SYMBOL_GPL(uwb_rc_dev_addr_get); /** Set @rc's address to @addr */ int uwb_rc_mac_addr_set(struct uwb_rc *rc, const struct uwb_mac_addr *addr) { int result = -EINVAL; mutex_lock(&rc->uwb_dev.mutex); result = uwb_rc_addr_set(rc, addr, UWB_ADDR_MAC); mutex_unlock(&rc->uwb_dev.mutex); return result; } /** Set @rc's address to @addr */ int uwb_rc_dev_addr_set(struct uwb_rc *rc, const struct uwb_dev_addr *addr) { int result = -EINVAL; mutex_lock(&rc->uwb_dev.mutex); result = uwb_rc_addr_set(rc, addr, UWB_ADDR_DEV); rc->uwb_dev.dev_addr = *addr; mutex_unlock(&rc->uwb_dev.mutex); return result; } /* Returns !0 if given address is already assigned to device. */ int __uwb_mac_addr_assigned_check(struct device *dev, void *_addr) { struct uwb_dev *uwb_dev = to_uwb_dev(dev); struct uwb_mac_addr *addr = _addr; if (!uwb_mac_addr_cmp(addr, &uwb_dev->mac_addr)) return !0; return 0; } /* Returns !0 if given address is already assigned to device. */ int __uwb_dev_addr_assigned_check(struct device *dev, void *_addr) { struct uwb_dev *uwb_dev = to_uwb_dev(dev); struct uwb_dev_addr *addr = _addr; if (!uwb_dev_addr_cmp(addr, &uwb_dev->dev_addr)) return !0; return 0; } /** * uwb_dev_addr_assign - assigned a generated DevAddr to a radio controller * @rc: the (local) radio controller device requiring a new DevAddr * * A new DevAddr is required when: * - first setting up a radio controller * - if the hardware reports a DevAddr conflict * * The DevAddr is randomly generated in the generated DevAddr range * [0x100, 0xfeff]. The number of devices in a beacon group is limited * by mMaxBPLength (96) so this address space will never be exhausted. * * [ECMA-368] 17.1.1, 17.16. 
*/ int uwb_rc_dev_addr_assign(struct uwb_rc *rc) { struct uwb_dev_addr new_addr; do { get_random_bytes(new_addr.data, sizeof(new_addr.data)); } while (new_addr.data[0] == 0x00 || new_addr.data[0] == 0xff || __uwb_dev_addr_assigned(rc, &new_addr)); return uwb_rc_dev_addr_set(rc, &new_addr); } /** * uwbd_evt_handle_rc_dev_addr_conflict - handle a DEV_ADDR_CONFLICT event * @evt: the DEV_ADDR_CONFLICT notification from the radio controller * * A new (non-conflicting) DevAddr is assigned to the radio controller. * * [ECMA-368] 17.1.1.1. */ int uwbd_evt_handle_rc_dev_addr_conflict(struct uwb_event *evt) { struct uwb_rc *rc = evt->rc; return uwb_rc_dev_addr_assign(rc); } /* * Print the 48-bit EUI MAC address of the radio controller when * reading /sys/class/uwb_rc/XX/mac_address */ static ssize_t uwb_rc_mac_addr_show(struct device *dev, struct device_attribute *attr, char *buf) { struct uwb_dev *uwb_dev = to_uwb_dev(dev); struct uwb_rc *rc = uwb_dev->rc; struct uwb_mac_addr addr; ssize_t result; mutex_lock(&rc->uwb_dev.mutex); result = uwb_rc_addr_get(rc, &addr, UWB_ADDR_MAC); mutex_unlock(&rc->uwb_dev.mutex); if (result >= 0) { result = uwb_mac_addr_print(buf, UWB_ADDR_STRSIZE, &addr); buf[result++] = '\n'; } return result; } /* * Parse a 48 bit address written to /sys/class/uwb_rc/XX/mac_address * and if correct, set it. 
*/ static ssize_t uwb_rc_mac_addr_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t size) { struct uwb_dev *uwb_dev = to_uwb_dev(dev); struct uwb_rc *rc = uwb_dev->rc; struct uwb_mac_addr addr; ssize_t result; result = sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx\n", &addr.data[0], &addr.data[1], &addr.data[2], &addr.data[3], &addr.data[4], &addr.data[5]); if (result != 6) { result = -EINVAL; goto out; } if (is_multicast_ether_addr(addr.data)) { dev_err(&rc->uwb_dev.dev, "refusing to set multicast " "MAC address %s\n", buf); result = -EINVAL; goto out; } result = uwb_rc_mac_addr_set(rc, &addr); if (result == 0) rc->uwb_dev.mac_addr = addr; out: return result < 0 ? result : size; } DEVICE_ATTR(mac_address, S_IRUGO | S_IWUSR, uwb_rc_mac_addr_show, uwb_rc_mac_addr_store); /** Print @addr to @buf, @return bytes written */ size_t __uwb_addr_print(char *buf, size_t buf_size, const unsigned char *addr, int type) { size_t result; if (type) result = scnprintf(buf, buf_size, "%pM", addr); else result = scnprintf(buf, buf_size, "%02x:%02x", addr[1], addr[0]); return result; } EXPORT_SYMBOL_GPL(__uwb_addr_print);
gpl-2.0
vic3t3chn0/android_kernel_sony_msm8974_togari_5.0.2
lib/reed_solomon/encode_rs.c
14523
1330
/* * lib/reed_solomon/encode_rs.c * * Overview: * Generic Reed Solomon encoder / decoder library * * Copyright 2002, Phil Karn, KA9Q * May be used under the terms of the GNU General Public License (GPL) * * Adaption to the kernel by Thomas Gleixner (tglx@linutronix.de) * * $Id: encode_rs.c,v 1.5 2005/11/07 11:14:59 gleixner Exp $ * */ /* Generic data width independent code which is included by the * wrappers. * int encode_rsX (struct rs_control *rs, uintX_t *data, int len, uintY_t *par) */ { int i, j, pad; int nn = rs->nn; int nroots = rs->nroots; uint16_t *alpha_to = rs->alpha_to; uint16_t *index_of = rs->index_of; uint16_t *genpoly = rs->genpoly; uint16_t fb; uint16_t msk = (uint16_t) rs->nn; /* Check length parameter for validity */ pad = nn - nroots - len; if (pad < 0 || pad >= nn) return -ERANGE; for (i = 0; i < len; i++) { fb = index_of[((((uint16_t) data[i])^invmsk) & msk) ^ par[0]]; /* feedback term is non-zero */ if (fb != nn) { for (j = 1; j < nroots; j++) { par[j] ^= alpha_to[rs_modnn(rs, fb + genpoly[nroots - j])]; } } /* Shift */ memmove(&par[0], &par[1], sizeof(uint16_t) * (nroots - 1)); if (fb != nn) { par[nroots - 1] = alpha_to[rs_modnn(rs, fb + genpoly[0])]; } else { par[nroots - 1] = 0; } } return 0; }
gpl-2.0
goulderb/sch-i405_kernel
drivers/media/video/tvaudio.c
956
63842
/* * Driver for simple i2c audio chips. * * Copyright (c) 2000 Gerd Knorr * based on code by: * Eric Sandeen (eric_sandeen@bigfoot.com) * Steve VanDeBogart (vandebo@uclink.berkeley.edu) * Greg Alexander (galexand@acm.org) * * Copyright(c) 2005-2008 Mauro Carvalho Chehab * - Some cleanups, code fixes, etc * - Convert it to V4L2 API * * This code is placed under the terms of the GNU General Public License * * OPTIONS: * debug - set to 1 if you'd like to see debug messages * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/videodev2.h> #include <linux/i2c.h> #include <linux/init.h> #include <linux/kthread.h> #include <linux/freezer.h> #include <media/tvaudio.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> #include <media/v4l2-i2c-drv.h> #include <media/i2c-addr.h> /* ---------------------------------------------------------------------- */ /* insmod args */ static int debug; /* insmod parameter */ module_param(debug, int, 0644); MODULE_DESCRIPTION("device driver for various i2c TV sound decoder / audiomux chips"); MODULE_AUTHOR("Eric Sandeen, Steve VanDeBogart, Greg Alexander, Gerd Knorr"); MODULE_LICENSE("GPL"); #define UNSET (-1U) /* ---------------------------------------------------------------------- */ /* our structs */ #define MAXREGS 256 struct CHIPSTATE; typedef int (*getvalue)(int); typedef int (*checkit)(struct CHIPSTATE*); typedef int (*initialize)(struct CHIPSTATE*); typedef int (*getmode)(struct CHIPSTATE*); typedef void (*setmode)(struct CHIPSTATE*, int mode); /* i2c command */ typedef struct AUDIOCMD { int count; /* # of bytes to send */ unsigned char bytes[MAXREGS+1]; /* addr, data, data, ... 
*/ } audiocmd; /* chip description */ struct CHIPDESC { char *name; /* chip name */ int addr_lo, addr_hi; /* i2c address range */ int registers; /* # of registers */ int *insmodopt; checkit checkit; initialize initialize; int flags; #define CHIP_HAS_VOLUME 1 #define CHIP_HAS_BASSTREBLE 2 #define CHIP_HAS_INPUTSEL 4 #define CHIP_NEED_CHECKMODE 8 /* various i2c command sequences */ audiocmd init; /* which register has which value */ int leftreg,rightreg,treblereg,bassreg; /* initialize with (defaults to 65535/65535/32768/32768 */ int leftinit,rightinit,trebleinit,bassinit; /* functions to convert the values (v4l -> chip) */ getvalue volfunc,treblefunc,bassfunc; /* get/set mode */ getmode getmode; setmode setmode; /* input switch register + values for v4l inputs */ int inputreg; int inputmap[4]; int inputmute; int inputmask; }; /* current state of the chip */ struct CHIPSTATE { struct v4l2_subdev sd; /* chip-specific description - should point to an entry at CHIPDESC table */ struct CHIPDESC *desc; /* shadow register set */ audiocmd shadow; /* current settings */ __u16 left,right,treble,bass,muted,mode; int prevmode; int radio; int input; /* thread */ struct task_struct *thread; struct timer_list wt; int watch_stereo; int audmode; }; static inline struct CHIPSTATE *to_state(struct v4l2_subdev *sd) { return container_of(sd, struct CHIPSTATE, sd); } /* ---------------------------------------------------------------------- */ /* i2c I/O functions */ static int chip_write(struct CHIPSTATE *chip, int subaddr, int val) { struct v4l2_subdev *sd = &chip->sd; struct i2c_client *c = v4l2_get_subdevdata(sd); unsigned char buffer[2]; if (subaddr < 0) { v4l2_dbg(1, debug, sd, "chip_write: 0x%x\n", val); chip->shadow.bytes[1] = val; buffer[0] = val; if (1 != i2c_master_send(c, buffer, 1)) { v4l2_warn(sd, "I/O error (write 0x%x)\n", val); return -1; } } else { if (subaddr + 1 >= ARRAY_SIZE(chip->shadow.bytes)) { v4l2_info(sd, "Tried to access a non-existent register: %d\n", 
subaddr); return -EINVAL; } v4l2_dbg(1, debug, sd, "chip_write: reg%d=0x%x\n", subaddr, val); chip->shadow.bytes[subaddr+1] = val; buffer[0] = subaddr; buffer[1] = val; if (2 != i2c_master_send(c, buffer, 2)) { v4l2_warn(sd, "I/O error (write reg%d=0x%x)\n", subaddr, val); return -1; } } return 0; } static int chip_write_masked(struct CHIPSTATE *chip, int subaddr, int val, int mask) { struct v4l2_subdev *sd = &chip->sd; if (mask != 0) { if (subaddr < 0) { val = (chip->shadow.bytes[1] & ~mask) | (val & mask); } else { if (subaddr + 1 >= ARRAY_SIZE(chip->shadow.bytes)) { v4l2_info(sd, "Tried to access a non-existent register: %d\n", subaddr); return -EINVAL; } val = (chip->shadow.bytes[subaddr+1] & ~mask) | (val & mask); } } return chip_write(chip, subaddr, val); } static int chip_read(struct CHIPSTATE *chip) { struct v4l2_subdev *sd = &chip->sd; struct i2c_client *c = v4l2_get_subdevdata(sd); unsigned char buffer; if (1 != i2c_master_recv(c, &buffer, 1)) { v4l2_warn(sd, "I/O error (read)\n"); return -1; } v4l2_dbg(1, debug, sd, "chip_read: 0x%x\n", buffer); return buffer; } static int chip_read2(struct CHIPSTATE *chip, int subaddr) { struct v4l2_subdev *sd = &chip->sd; struct i2c_client *c = v4l2_get_subdevdata(sd); unsigned char write[1]; unsigned char read[1]; struct i2c_msg msgs[2] = { { c->addr, 0, 1, write }, { c->addr, I2C_M_RD, 1, read } }; write[0] = subaddr; if (2 != i2c_transfer(c->adapter, msgs, 2)) { v4l2_warn(sd, "I/O error (read2)\n"); return -1; } v4l2_dbg(1, debug, sd, "chip_read2: reg%d=0x%x\n", subaddr, read[0]); return read[0]; } static int chip_cmd(struct CHIPSTATE *chip, char *name, audiocmd *cmd) { struct v4l2_subdev *sd = &chip->sd; struct i2c_client *c = v4l2_get_subdevdata(sd); int i; if (0 == cmd->count) return 0; if (cmd->count + cmd->bytes[0] - 1 >= ARRAY_SIZE(chip->shadow.bytes)) { v4l2_info(sd, "Tried to access a non-existent register range: %d to %d\n", cmd->bytes[0] + 1, cmd->bytes[0] + cmd->count - 1); return -EINVAL; } /* FIXME: it 
seems that the shadow bytes are wrong bellow !*/ /* update our shadow register set; print bytes if (debug > 0) */ v4l2_dbg(1, debug, sd, "chip_cmd(%s): reg=%d, data:", name, cmd->bytes[0]); for (i = 1; i < cmd->count; i++) { if (debug) printk(KERN_CONT " 0x%x", cmd->bytes[i]); chip->shadow.bytes[i+cmd->bytes[0]] = cmd->bytes[i]; } if (debug) printk(KERN_CONT "\n"); /* send data to the chip */ if (cmd->count != i2c_master_send(c, cmd->bytes, cmd->count)) { v4l2_warn(sd, "I/O error (%s)\n", name); return -1; } return 0; } /* ---------------------------------------------------------------------- */ /* kernel thread for doing i2c stuff asyncronly * right now it is used only to check the audio mode (mono/stereo/whatever) * some time after switching to another TV channel, then turn on stereo * if available, ... */ static void chip_thread_wake(unsigned long data) { struct CHIPSTATE *chip = (struct CHIPSTATE*)data; wake_up_process(chip->thread); } static int chip_thread(void *data) { struct CHIPSTATE *chip = data; struct CHIPDESC *desc = chip->desc; struct v4l2_subdev *sd = &chip->sd; int mode; v4l2_dbg(1, debug, sd, "thread started\n"); set_freezable(); for (;;) { set_current_state(TASK_INTERRUPTIBLE); if (!kthread_should_stop()) schedule(); set_current_state(TASK_RUNNING); try_to_freeze(); if (kthread_should_stop()) break; v4l2_dbg(1, debug, sd, "thread wakeup\n"); /* don't do anything for radio or if mode != auto */ if (chip->radio || chip->mode != 0) continue; /* have a look what's going on */ mode = desc->getmode(chip); if (mode == chip->prevmode) continue; /* chip detected a new audio mode - set it */ v4l2_dbg(1, debug, sd, "thread checkmode\n"); chip->prevmode = mode; if (mode & V4L2_TUNER_MODE_STEREO) desc->setmode(chip, V4L2_TUNER_MODE_STEREO); if (mode & V4L2_TUNER_MODE_LANG1_LANG2) desc->setmode(chip, V4L2_TUNER_MODE_STEREO); else if (mode & V4L2_TUNER_MODE_LANG1) desc->setmode(chip, V4L2_TUNER_MODE_LANG1); else if (mode & V4L2_TUNER_MODE_LANG2) 
desc->setmode(chip, V4L2_TUNER_MODE_LANG2); else desc->setmode(chip, V4L2_TUNER_MODE_MONO); /* schedule next check */ mod_timer(&chip->wt, jiffies+msecs_to_jiffies(2000)); } v4l2_dbg(1, debug, sd, "thread exiting\n"); return 0; } /* ---------------------------------------------------------------------- */ /* audio chip descriptions - defines+functions for tda9840 */ #define TDA9840_SW 0x00 #define TDA9840_LVADJ 0x02 #define TDA9840_STADJ 0x03 #define TDA9840_TEST 0x04 #define TDA9840_MONO 0x10 #define TDA9840_STEREO 0x2a #define TDA9840_DUALA 0x12 #define TDA9840_DUALB 0x1e #define TDA9840_DUALAB 0x1a #define TDA9840_DUALBA 0x16 #define TDA9840_EXTERNAL 0x7a #define TDA9840_DS_DUAL 0x20 /* Dual sound identified */ #define TDA9840_ST_STEREO 0x40 /* Stereo sound identified */ #define TDA9840_PONRES 0x80 /* Power-on reset detected if = 1 */ #define TDA9840_TEST_INT1SN 0x1 /* Integration time 0.5s when set */ #define TDA9840_TEST_INTFU 0x02 /* Disables integrator function */ static int tda9840_getmode(struct CHIPSTATE *chip) { struct v4l2_subdev *sd = &chip->sd; int val, mode; val = chip_read(chip); mode = V4L2_TUNER_MODE_MONO; if (val & TDA9840_DS_DUAL) mode |= V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2; if (val & TDA9840_ST_STEREO) mode |= V4L2_TUNER_MODE_STEREO; v4l2_dbg(1, debug, sd, "tda9840_getmode(): raw chip read: %d, return: %d\n", val, mode); return mode; } static void tda9840_setmode(struct CHIPSTATE *chip, int mode) { int update = 1; int t = chip->shadow.bytes[TDA9840_SW + 1] & ~0x7e; switch (mode) { case V4L2_TUNER_MODE_MONO: t |= TDA9840_MONO; break; case V4L2_TUNER_MODE_STEREO: t |= TDA9840_STEREO; break; case V4L2_TUNER_MODE_LANG1: t |= TDA9840_DUALA; break; case V4L2_TUNER_MODE_LANG2: t |= TDA9840_DUALB; break; default: update = 0; } if (update) chip_write(chip, TDA9840_SW, t); } static int tda9840_checkit(struct CHIPSTATE *chip) { int rc; rc = chip_read(chip); /* lower 5 bits should be 0 */ return ((rc & 0x1f) == 0) ? 
1 : 0; } /* ---------------------------------------------------------------------- */ /* audio chip descriptions - defines+functions for tda985x */ /* subaddresses for TDA9855 */ #define TDA9855_VR 0x00 /* Volume, right */ #define TDA9855_VL 0x01 /* Volume, left */ #define TDA9855_BA 0x02 /* Bass */ #define TDA9855_TR 0x03 /* Treble */ #define TDA9855_SW 0x04 /* Subwoofer - not connected on DTV2000 */ /* subaddresses for TDA9850 */ #define TDA9850_C4 0x04 /* Control 1 for TDA9850 */ /* subaddesses for both chips */ #define TDA985x_C5 0x05 /* Control 2 for TDA9850, Control 1 for TDA9855 */ #define TDA985x_C6 0x06 /* Control 3 for TDA9850, Control 2 for TDA9855 */ #define TDA985x_C7 0x07 /* Control 4 for TDA9850, Control 3 for TDA9855 */ #define TDA985x_A1 0x08 /* Alignment 1 for both chips */ #define TDA985x_A2 0x09 /* Alignment 2 for both chips */ #define TDA985x_A3 0x0a /* Alignment 3 for both chips */ /* Masks for bits in TDA9855 subaddresses */ /* 0x00 - VR in TDA9855 */ /* 0x01 - VL in TDA9855 */ /* lower 7 bits control gain from -71dB (0x28) to 16dB (0x7f) * in 1dB steps - mute is 0x27 */ /* 0x02 - BA in TDA9855 */ /* lower 5 bits control bass gain from -12dB (0x06) to 16.5dB (0x19) * in .5dB steps - 0 is 0x0E */ /* 0x03 - TR in TDA9855 */ /* 4 bits << 1 control treble gain from -12dB (0x3) to 12dB (0xb) * in 3dB steps - 0 is 0x7 */ /* Masks for bits in both chips' subaddresses */ /* 0x04 - SW in TDA9855, C4/Control 1 in TDA9850 */ /* Unique to TDA9855: */ /* 4 bits << 2 control subwoofer/surround gain from -14db (0x1) to 14db (0xf) * in 3dB steps - mute is 0x0 */ /* Unique to TDA9850: */ /* lower 4 bits control stereo noise threshold, over which stereo turns off * set to values of 0x00 through 0x0f for Ster1 through Ster16 */ /* 0x05 - C5 - Control 1 in TDA9855 , Control 2 in TDA9850*/ /* Unique to TDA9855: */ #define TDA9855_MUTE 1<<7 /* GMU, Mute at outputs */ #define TDA9855_AVL 1<<6 /* AVL, Automatic Volume Level */ #define TDA9855_LOUD 1<<5 /* Loudness, 
1==off */ #define TDA9855_SUR 1<<3 /* Surround / Subwoofer 1==.5(L-R) 0==.5(L+R) */ /* Bits 0 to 3 select various combinations * of line in and line out, only the * interesting ones are defined */ #define TDA9855_EXT 1<<2 /* Selects inputs LIR and LIL. Pins 41 & 12 */ #define TDA9855_INT 0 /* Selects inputs LOR and LOL. (internal) */ /* Unique to TDA9850: */ /* lower 4 bits contol SAP noise threshold, over which SAP turns off * set to values of 0x00 through 0x0f for SAP1 through SAP16 */ /* 0x06 - C6 - Control 2 in TDA9855, Control 3 in TDA9850 */ /* Common to TDA9855 and TDA9850: */ #define TDA985x_SAP 3<<6 /* Selects SAP output, mute if not received */ #define TDA985x_STEREO 1<<6 /* Selects Stereo ouput, mono if not received */ #define TDA985x_MONO 0 /* Forces Mono output */ #define TDA985x_LMU 1<<3 /* Mute (LOR/LOL for 9855, OUTL/OUTR for 9850) */ /* Unique to TDA9855: */ #define TDA9855_TZCM 1<<5 /* If set, don't mute till zero crossing */ #define TDA9855_VZCM 1<<4 /* If set, don't change volume till zero crossing*/ #define TDA9855_LINEAR 0 /* Linear Stereo */ #define TDA9855_PSEUDO 1 /* Pseudo Stereo */ #define TDA9855_SPAT_30 2 /* Spatial Stereo, 30% anti-phase crosstalk */ #define TDA9855_SPAT_50 3 /* Spatial Stereo, 52% anti-phase crosstalk */ #define TDA9855_E_MONO 7 /* Forced mono - mono select elseware, so useless*/ /* 0x07 - C7 - Control 3 in TDA9855, Control 4 in TDA9850 */ /* Common to both TDA9855 and TDA9850: */ /* lower 4 bits control input gain from -3.5dB (0x0) to 4dB (0xF) * in .5dB steps - 0dB is 0x7 */ /* 0x08, 0x09 - A1 and A2 (read/write) */ /* Common to both TDA9855 and TDA9850: */ /* lower 5 bites are wideband and spectral expander alignment * from 0x00 to 0x1f - nominal at 0x0f and 0x10 (read/write) */ #define TDA985x_STP 1<<5 /* Stereo Pilot/detect (read-only) */ #define TDA985x_SAPP 1<<6 /* SAP Pilot/detect (read-only) */ #define TDA985x_STS 1<<7 /* Stereo trigger 1= <35mV 0= <30mV (write-only)*/ /* 0x0a - A3 */ /* Common to both 
TDA9855 and TDA9850: */
/* lower 3 bits control timing current for alignment: -30% (0x0), -20% (0x1),
 * -10% (0x2), nominal (0x3), +10% (0x6), +20% (0x5), +30% (0x4) */
#define TDA985x_ADJ 1<<7 /* Stereo adjust on/off (wideband and spectral */

/* Scale a 0..65535 V4L2 volume into the TDA9855 7-bit gain field
 * (0x27 is mute, see register comments above). */
static int tda9855_volume(int val) { return val/0x2e8+0x27; }
/* Scale a 0..65535 V4L2 bass value into the TDA9855 5-bit bass field. */
static int tda9855_bass(int val) { return val/0xccc+0x06; }
/* Scale a 0..65535 V4L2 treble value into the TDA9855 treble field;
 * the <<1 places the 4-bit value at bits 1..4 as the chip expects. */
static int tda9855_treble(int val) { return (val/0x1c71+0x3)<<1; }

/*
 * Translate the chip's stereo/SAP pilot status bits (STP/SAPP, bits 5..6
 * of the status read) into a V4L2 tuner mode bitmask.
 */
static int tda985x_getmode(struct CHIPSTATE *chip)
{
	int mode;

	mode = ((TDA985x_STP | TDA985x_SAPP) & chip_read(chip)) >> 4;
	/* Add mono mode regardless of SAP and stereo */
	/* Allows forced mono */
	return mode | V4L2_TUNER_MODE_MONO;
}

/*
 * Program the requested audio mode into control register C6, preserving
 * the low 6 bits of the shadowed register value.  Unknown modes are
 * silently ignored (no write).
 */
static void tda985x_setmode(struct CHIPSTATE *chip, int mode)
{
	int update = 1;
	int c6 = chip->shadow.bytes[TDA985x_C6+1] & 0x3f;

	switch (mode) {
	case V4L2_TUNER_MODE_MONO:
		c6 |= TDA985x_MONO;
		break;
	case V4L2_TUNER_MODE_STEREO:
		c6 |= TDA985x_STEREO;
		break;
	case V4L2_TUNER_MODE_LANG1:
		c6 |= TDA985x_SAP;
		break;
	default:
		update = 0;
	}
	if (update)
		chip_write(chip,TDA985x_C6,c6);
}

/* ---------------------------------------------------------------------- */
/* audio chip descriptions - defines+functions for tda9873h             */

/* Subaddresses for TDA9873H */

#define TDA9873_SW 0x00 /* Switching */
#define TDA9873_AD 0x01 /* Adjust */
#define TDA9873_PT 0x02 /* Port */

/* Subaddress 0x00: Switching Data
 * B7..B0:
 *
 * B1, B0: Input source selection
 * 0, 0 internal
 * 1, 0 external stereo
 * 0, 1 external mono
 */
#define TDA9873_INP_MASK 3
#define TDA9873_INTERNAL 0
#define TDA9873_EXT_STEREO 2
#define TDA9873_EXT_MONO 1

/* B3, B2: output signal select
 * B4 : transmission mode
 * 0, 0, 1 Mono
 * 1, 0, 0 Stereo
 * 1, 1, 1 Stereo (reversed channel)
 * 0, 0, 0 Dual AB
 * 0, 0, 1 Dual AA
 * 0, 1, 0 Dual BB
 * 0, 1, 1 Dual BA
 */
#define TDA9873_TR_MASK (7 << 2)
#define TDA9873_TR_MONO 4
#define TDA9873_TR_STEREO 1 << 4
/* NOTE(review): (1 << 3) & (1 << 2) evaluates to 0, same as plain mono;
 * '|' looks intended for "reversed stereo" — verify against the TDA9873H
 * datasheet before changing, no user of this macro is visible here. */
#define TDA9873_TR_REVERSE (1 << 3) & (1 << 2)
#define TDA9873_TR_DUALA 1 << 2
#define TDA9873_TR_DUALB 1 << 3
/*
output level controls * B5: output level switch (0 = reduced gain, 1 = normal gain) * B6: mute (1 = muted) * B7: auto-mute (1 = auto-mute enabled) */ #define TDA9873_GAIN_NORMAL 1 << 5 #define TDA9873_MUTE 1 << 6 #define TDA9873_AUTOMUTE 1 << 7 /* Subaddress 0x01: Adjust/standard */ /* Lower 4 bits (C3..C0) control stereo adjustment on R channel (-0.6 - +0.7 dB) * Recommended value is +0 dB */ #define TDA9873_STEREO_ADJ 0x06 /* 0dB gain */ /* Bits C6..C4 control FM stantard * C6, C5, C4 * 0, 0, 0 B/G (PAL FM) * 0, 0, 1 M * 0, 1, 0 D/K(1) * 0, 1, 1 D/K(2) * 1, 0, 0 D/K(3) * 1, 0, 1 I */ #define TDA9873_BG 0 #define TDA9873_M 1 #define TDA9873_DK1 2 #define TDA9873_DK2 3 #define TDA9873_DK3 4 #define TDA9873_I 5 /* C7 controls identification response time (1=fast/0=normal) */ #define TDA9873_IDR_NORM 0 #define TDA9873_IDR_FAST 1 << 7 /* Subaddress 0x02: Port data */ /* E1, E0 free programmable ports P1/P2 0, 0 both ports low 0, 1 P1 high 1, 0 P2 high 1, 1 both ports high */ #define TDA9873_PORTS 3 /* E2: test port */ #define TDA9873_TST_PORT 1 << 2 /* E5..E3 control mono output channel (together with transmission mode bit B4) * * E5 E4 E3 B4 OUTM * 0 0 0 0 mono * 0 0 1 0 DUAL B * 0 1 0 1 mono (from stereo decoder) */ #define TDA9873_MOUT_MONO 0 #define TDA9873_MOUT_FMONO 0 #define TDA9873_MOUT_DUALA 0 #define TDA9873_MOUT_DUALB 1 << 3 #define TDA9873_MOUT_ST 1 << 4 #define TDA9873_MOUT_EXTM (1 << 4 ) & (1 << 3) #define TDA9873_MOUT_EXTL 1 << 5 #define TDA9873_MOUT_EXTR (1 << 5 ) & (1 << 3) #define TDA9873_MOUT_EXTLR (1 << 5 ) & (1 << 4) #define TDA9873_MOUT_MUTE (1 << 5 ) & (1 << 4) & (1 << 3) /* Status bits: (chip read) */ #define TDA9873_PONR 0 /* Power-on reset detected if = 1 */ #define TDA9873_STEREO 2 /* Stereo sound is identified */ #define TDA9873_DUAL 4 /* Dual sound is identified */ static int tda9873_getmode(struct CHIPSTATE *chip) { struct v4l2_subdev *sd = &chip->sd; int val,mode; val = chip_read(chip); mode = V4L2_TUNER_MODE_MONO; if (val & 
TDA9873_STEREO) mode |= V4L2_TUNER_MODE_STEREO; if (val & TDA9873_DUAL) mode |= V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2; v4l2_dbg(1, debug, sd, "tda9873_getmode(): raw chip read: %d, return: %d\n", val, mode); return mode; } static void tda9873_setmode(struct CHIPSTATE *chip, int mode) { struct v4l2_subdev *sd = &chip->sd; int sw_data = chip->shadow.bytes[TDA9873_SW+1] & ~ TDA9873_TR_MASK; /* int adj_data = chip->shadow.bytes[TDA9873_AD+1] ; */ if ((sw_data & TDA9873_INP_MASK) != TDA9873_INTERNAL) { v4l2_dbg(1, debug, sd, "tda9873_setmode(): external input\n"); return; } v4l2_dbg(1, debug, sd, "tda9873_setmode(): chip->shadow.bytes[%d] = %d\n", TDA9873_SW+1, chip->shadow.bytes[TDA9873_SW+1]); v4l2_dbg(1, debug, sd, "tda9873_setmode(): sw_data = %d\n", sw_data); switch (mode) { case V4L2_TUNER_MODE_MONO: sw_data |= TDA9873_TR_MONO; break; case V4L2_TUNER_MODE_STEREO: sw_data |= TDA9873_TR_STEREO; break; case V4L2_TUNER_MODE_LANG1: sw_data |= TDA9873_TR_DUALA; break; case V4L2_TUNER_MODE_LANG2: sw_data |= TDA9873_TR_DUALB; break; default: chip->mode = 0; return; } chip_write(chip, TDA9873_SW, sw_data); v4l2_dbg(1, debug, sd, "tda9873_setmode(): req. mode %d; chip_write: %d\n", mode, sw_data); } static int tda9873_checkit(struct CHIPSTATE *chip) { int rc; if (-1 == (rc = chip_read2(chip,254))) return 0; return (rc & ~0x1f) == 0x80; } /* ---------------------------------------------------------------------- */ /* audio chip description - defines+functions for tda9874h and tda9874a */ /* Dariusz Kowalewski <darekk@automex.pl> */ /* Subaddresses for TDA9874H and TDA9874A (slave rx) */ #define TDA9874A_AGCGR 0x00 /* AGC gain */ #define TDA9874A_GCONR 0x01 /* general config */ #define TDA9874A_MSR 0x02 /* monitor select */ #define TDA9874A_C1FRA 0x03 /* carrier 1 freq. */ #define TDA9874A_C1FRB 0x04 /* carrier 1 freq. */ #define TDA9874A_C1FRC 0x05 /* carrier 1 freq. */ #define TDA9874A_C2FRA 0x06 /* carrier 2 freq. */ #define TDA9874A_C2FRB 0x07 /* carrier 2 freq. 
*/ #define TDA9874A_C2FRC 0x08 /* carrier 2 freq. */ #define TDA9874A_DCR 0x09 /* demodulator config */ #define TDA9874A_FMER 0x0a /* FM de-emphasis */ #define TDA9874A_FMMR 0x0b /* FM dematrix */ #define TDA9874A_C1OLAR 0x0c /* ch.1 output level adj. */ #define TDA9874A_C2OLAR 0x0d /* ch.2 output level adj. */ #define TDA9874A_NCONR 0x0e /* NICAM config */ #define TDA9874A_NOLAR 0x0f /* NICAM output level adj. */ #define TDA9874A_NLELR 0x10 /* NICAM lower error limit */ #define TDA9874A_NUELR 0x11 /* NICAM upper error limit */ #define TDA9874A_AMCONR 0x12 /* audio mute control */ #define TDA9874A_SDACOSR 0x13 /* stereo DAC output select */ #define TDA9874A_AOSR 0x14 /* analog output select */ #define TDA9874A_DAICONR 0x15 /* digital audio interface config */ #define TDA9874A_I2SOSR 0x16 /* I2S-bus output select */ #define TDA9874A_I2SOLAR 0x17 /* I2S-bus output level adj. */ #define TDA9874A_MDACOSR 0x18 /* mono DAC output select (tda9874a) */ #define TDA9874A_ESP 0xFF /* easy standard progr. (tda9874a) */ /* Subaddresses for TDA9874H and TDA9874A (slave tx) */ #define TDA9874A_DSR 0x00 /* device status */ #define TDA9874A_NSR 0x01 /* NICAM status */ #define TDA9874A_NECR 0x02 /* NICAM error count */ #define TDA9874A_DR1 0x03 /* add. data LSB */ #define TDA9874A_DR2 0x04 /* add. data MSB */ #define TDA9874A_LLRA 0x05 /* monitor level read-out LSB */ #define TDA9874A_LLRB 0x06 /* monitor level read-out MSB */ #define TDA9874A_SIFLR 0x07 /* SIF level */ #define TDA9874A_TR2 252 /* test reg. 2 */ #define TDA9874A_TR1 253 /* test reg. 1 */ #define TDA9874A_DIC 254 /* device id. code */ #define TDA9874A_SIC 255 /* software id. code */ static int tda9874a_mode = 1; /* 0: A2, 1: NICAM */ static int tda9874a_GCONR = 0xc0; /* default config. input pin: SIFSEL=0 */ static int tda9874a_NCONR = 0x01; /* default NICAM config.: AMSEL=0,AMUTE=1 */ static int tda9874a_ESP = 0x07; /* default standard: NICAM D/K */ static int tda9874a_dic = -1; /* device id. 
code */ /* insmod options for tda9874a */ static unsigned int tda9874a_SIF = UNSET; static unsigned int tda9874a_AMSEL = UNSET; static unsigned int tda9874a_STD = UNSET; module_param(tda9874a_SIF, int, 0444); module_param(tda9874a_AMSEL, int, 0444); module_param(tda9874a_STD, int, 0444); /* * initialization table for tda9874 decoder: * - carrier 1 freq. registers (3 bytes) * - carrier 2 freq. registers (3 bytes) * - demudulator config register * - FM de-emphasis register (slow identification mode) * Note: frequency registers must be written in single i2c transfer. */ static struct tda9874a_MODES { char *name; audiocmd cmd; } tda9874a_modelist[9] = { { "A2, B/G", /* default */ { 9, { TDA9874A_C1FRA, 0x72,0x95,0x55, 0x77,0xA0,0x00, 0x00,0x00 }} }, { "A2, M (Korea)", { 9, { TDA9874A_C1FRA, 0x5D,0xC0,0x00, 0x62,0x6A,0xAA, 0x20,0x22 }} }, { "A2, D/K (1)", { 9, { TDA9874A_C1FRA, 0x87,0x6A,0xAA, 0x82,0x60,0x00, 0x00,0x00 }} }, { "A2, D/K (2)", { 9, { TDA9874A_C1FRA, 0x87,0x6A,0xAA, 0x8C,0x75,0x55, 0x00,0x00 }} }, { "A2, D/K (3)", { 9, { TDA9874A_C1FRA, 0x87,0x6A,0xAA, 0x77,0xA0,0x00, 0x00,0x00 }} }, { "NICAM, I", { 9, { TDA9874A_C1FRA, 0x7D,0x00,0x00, 0x88,0x8A,0xAA, 0x08,0x33 }} }, { "NICAM, B/G", { 9, { TDA9874A_C1FRA, 0x72,0x95,0x55, 0x79,0xEA,0xAA, 0x08,0x33 }} }, { "NICAM, D/K", { 9, { TDA9874A_C1FRA, 0x87,0x6A,0xAA, 0x79,0xEA,0xAA, 0x08,0x33 }} }, { "NICAM, L", { 9, { TDA9874A_C1FRA, 0x87,0x6A,0xAA, 0x79,0xEA,0xAA, 0x09,0x33 }} } }; static int tda9874a_setup(struct CHIPSTATE *chip) { struct v4l2_subdev *sd = &chip->sd; chip_write(chip, TDA9874A_AGCGR, 0x00); /* 0 dB */ chip_write(chip, TDA9874A_GCONR, tda9874a_GCONR); chip_write(chip, TDA9874A_MSR, (tda9874a_mode) ? 
0x03:0x02); if(tda9874a_dic == 0x11) { chip_write(chip, TDA9874A_FMMR, 0x80); } else { /* dic == 0x07 */ chip_cmd(chip,"tda9874_modelist",&tda9874a_modelist[tda9874a_STD].cmd); chip_write(chip, TDA9874A_FMMR, 0x00); } chip_write(chip, TDA9874A_C1OLAR, 0x00); /* 0 dB */ chip_write(chip, TDA9874A_C2OLAR, 0x00); /* 0 dB */ chip_write(chip, TDA9874A_NCONR, tda9874a_NCONR); chip_write(chip, TDA9874A_NOLAR, 0x00); /* 0 dB */ /* Note: If signal quality is poor you may want to change NICAM */ /* error limit registers (NLELR and NUELR) to some greater values. */ /* Then the sound would remain stereo, but won't be so clear. */ chip_write(chip, TDA9874A_NLELR, 0x14); /* default */ chip_write(chip, TDA9874A_NUELR, 0x50); /* default */ if(tda9874a_dic == 0x11) { chip_write(chip, TDA9874A_AMCONR, 0xf9); chip_write(chip, TDA9874A_SDACOSR, (tda9874a_mode) ? 0x81:0x80); chip_write(chip, TDA9874A_AOSR, 0x80); chip_write(chip, TDA9874A_MDACOSR, (tda9874a_mode) ? 0x82:0x80); chip_write(chip, TDA9874A_ESP, tda9874a_ESP); } else { /* dic == 0x07 */ chip_write(chip, TDA9874A_AMCONR, 0xfb); chip_write(chip, TDA9874A_SDACOSR, (tda9874a_mode) ? 0x81:0x80); chip_write(chip, TDA9874A_AOSR, 0x00); /* or 0x10 */ } v4l2_dbg(1, debug, sd, "tda9874a_setup(): %s [0x%02X].\n", tda9874a_modelist[tda9874a_STD].name,tda9874a_STD); return 1; } static int tda9874a_getmode(struct CHIPSTATE *chip) { struct v4l2_subdev *sd = &chip->sd; int dsr,nsr,mode; int necr; /* just for debugging */ mode = V4L2_TUNER_MODE_MONO; if(-1 == (dsr = chip_read2(chip,TDA9874A_DSR))) return mode; if(-1 == (nsr = chip_read2(chip,TDA9874A_NSR))) return mode; if(-1 == (necr = chip_read2(chip,TDA9874A_NECR))) return mode; /* need to store dsr/nsr somewhere */ chip->shadow.bytes[MAXREGS-2] = dsr; chip->shadow.bytes[MAXREGS-1] = nsr; if(tda9874a_mode) { /* Note: DSR.RSSF and DSR.AMSTAT bits are also checked. 
* If NICAM auto-muting is enabled, DSR.AMSTAT=1 indicates * that sound has (temporarily) switched from NICAM to * mono FM (or AM) on 1st sound carrier due to high NICAM bit * error count. So in fact there is no stereo in this case :-( * But changing the mode to V4L2_TUNER_MODE_MONO would switch * external 4052 multiplexer in audio_hook(). */ if(nsr & 0x02) /* NSR.S/MB=1 */ mode |= V4L2_TUNER_MODE_STEREO; if(nsr & 0x01) /* NSR.D/SB=1 */ mode |= V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2; } else { if(dsr & 0x02) /* DSR.IDSTE=1 */ mode |= V4L2_TUNER_MODE_STEREO; if(dsr & 0x04) /* DSR.IDDUA=1 */ mode |= V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2; } v4l2_dbg(1, debug, sd, "tda9874a_getmode(): DSR=0x%X, NSR=0x%X, NECR=0x%X, return: %d.\n", dsr, nsr, necr, mode); return mode; } static void tda9874a_setmode(struct CHIPSTATE *chip, int mode) { struct v4l2_subdev *sd = &chip->sd; /* Disable/enable NICAM auto-muting (based on DSR.RSSF status bit). */ /* If auto-muting is disabled, we can hear a signal of degrading quality. */ if (tda9874a_mode) { if(chip->shadow.bytes[MAXREGS-2] & 0x20) /* DSR.RSSF=1 */ tda9874a_NCONR &= 0xfe; /* enable */ else tda9874a_NCONR |= 0x01; /* disable */ chip_write(chip, TDA9874A_NCONR, tda9874a_NCONR); } /* Note: TDA9874A supports automatic FM dematrixing (FMMR register) * and has auto-select function for audio output (AOSR register). * Old TDA9874H doesn't support these features. * TDA9874A also has additional mono output pin (OUTM), which * on same (all?) tv-cards is not used, anyway (as well as MONOIN). */ if(tda9874a_dic == 0x11) { int aosr = 0x80; int mdacosr = (tda9874a_mode) ? 0x82:0x80; switch(mode) { case V4L2_TUNER_MODE_MONO: case V4L2_TUNER_MODE_STEREO: break; case V4L2_TUNER_MODE_LANG1: aosr = 0x80; /* auto-select, dual A/A */ mdacosr = (tda9874a_mode) ? 0x82:0x80; break; case V4L2_TUNER_MODE_LANG2: aosr = 0xa0; /* auto-select, dual B/B */ mdacosr = (tda9874a_mode) ? 
0x83:0x81; break; default: chip->mode = 0; return; } chip_write(chip, TDA9874A_AOSR, aosr); chip_write(chip, TDA9874A_MDACOSR, mdacosr); v4l2_dbg(1, debug, sd, "tda9874a_setmode(): req. mode %d; AOSR=0x%X, MDACOSR=0x%X.\n", mode, aosr, mdacosr); } else { /* dic == 0x07 */ int fmmr,aosr; switch(mode) { case V4L2_TUNER_MODE_MONO: fmmr = 0x00; /* mono */ aosr = 0x10; /* A/A */ break; case V4L2_TUNER_MODE_STEREO: if(tda9874a_mode) { fmmr = 0x00; aosr = 0x00; /* handled by NICAM auto-mute */ } else { fmmr = (tda9874a_ESP == 1) ? 0x05 : 0x04; /* stereo */ aosr = 0x00; } break; case V4L2_TUNER_MODE_LANG1: fmmr = 0x02; /* dual */ aosr = 0x10; /* dual A/A */ break; case V4L2_TUNER_MODE_LANG2: fmmr = 0x02; /* dual */ aosr = 0x20; /* dual B/B */ break; default: chip->mode = 0; return; } chip_write(chip, TDA9874A_FMMR, fmmr); chip_write(chip, TDA9874A_AOSR, aosr); v4l2_dbg(1, debug, sd, "tda9874a_setmode(): req. mode %d; FMMR=0x%X, AOSR=0x%X.\n", mode, fmmr, aosr); } } static int tda9874a_checkit(struct CHIPSTATE *chip) { struct v4l2_subdev *sd = &chip->sd; int dic,sic; /* device id. and software id. codes */ if(-1 == (dic = chip_read2(chip,TDA9874A_DIC))) return 0; if(-1 == (sic = chip_read2(chip,TDA9874A_SIC))) return 0; v4l2_dbg(1, debug, sd, "tda9874a_checkit(): DIC=0x%X, SIC=0x%X.\n", dic, sic); if((dic == 0x11)||(dic == 0x07)) { v4l2_info(sd, "found tda9874%s.\n", (dic == 0x11) ? "a" : "h"); tda9874a_dic = dic; /* remember device id. */ return 1; } return 0; /* not found */ } static int tda9874a_initialize(struct CHIPSTATE *chip) { if (tda9874a_SIF > 2) tda9874a_SIF = 1; if (tda9874a_STD >= ARRAY_SIZE(tda9874a_modelist)) tda9874a_STD = 0; if(tda9874a_AMSEL > 1) tda9874a_AMSEL = 0; if(tda9874a_SIF == 1) tda9874a_GCONR = 0xc0; /* sound IF input 1 */ else tda9874a_GCONR = 0xc1; /* sound IF input 2 */ tda9874a_ESP = tda9874a_STD; tda9874a_mode = (tda9874a_STD < 5) ? 
0 : 1; if(tda9874a_AMSEL == 0) tda9874a_NCONR = 0x01; /* auto-mute: analog mono input */ else tda9874a_NCONR = 0x05; /* auto-mute: 1st carrier FM or AM */ tda9874a_setup(chip); return 0; } /* ---------------------------------------------------------------------- */ /* audio chip description - defines+functions for tda9875 */ /* The TDA9875 is made by Philips Semiconductor * http://www.semiconductors.philips.com * TDA9875: I2C-bus controlled DSP audio processor, FM demodulator * */ /* subaddresses for TDA9875 */ #define TDA9875_MUT 0x12 /*General mute (value --> 0b11001100*/ #define TDA9875_CFG 0x01 /* Config register (value --> 0b00000000 */ #define TDA9875_DACOS 0x13 /*DAC i/o select (ADC) 0b0000100*/ #define TDA9875_LOSR 0x16 /*Line output select regirter 0b0100 0001*/ #define TDA9875_CH1V 0x0c /*Channel 1 volume (mute)*/ #define TDA9875_CH2V 0x0d /*Channel 2 volume (mute)*/ #define TDA9875_SC1 0x14 /*SCART 1 in (mono)*/ #define TDA9875_SC2 0x15 /*SCART 2 in (mono)*/ #define TDA9875_ADCIS 0x17 /*ADC input select (mono) 0b0110 000*/ #define TDA9875_AER 0x19 /*Audio effect (AVL+Pseudo) 0b0000 0110*/ #define TDA9875_MCS 0x18 /*Main channel select (DAC) 0b0000100*/ #define TDA9875_MVL 0x1a /* Main volume gauche */ #define TDA9875_MVR 0x1b /* Main volume droite */ #define TDA9875_MBA 0x1d /* Main Basse */ #define TDA9875_MTR 0x1e /* Main treble */ #define TDA9875_ACS 0x1f /* Auxilary channel select (FM) 0b0000000*/ #define TDA9875_AVL 0x20 /* Auxilary volume gauche */ #define TDA9875_AVR 0x21 /* Auxilary volume droite */ #define TDA9875_ABA 0x22 /* Auxilary Basse */ #define TDA9875_ATR 0x23 /* Auxilary treble */ #define TDA9875_MSR 0x02 /* Monitor select register */ #define TDA9875_C1MSB 0x03 /* Carrier 1 (FM) frequency register MSB */ #define TDA9875_C1MIB 0x04 /* Carrier 1 (FM) frequency register (16-8]b */ #define TDA9875_C1LSB 0x05 /* Carrier 1 (FM) frequency register LSB */ #define TDA9875_C2MSB 0x06 /* Carrier 2 (nicam) frequency register MSB */ #define 
TDA9875_C2MIB 0x07 /* Carrier 2 (nicam) frequency register (16-8]b */ #define TDA9875_C2LSB 0x08 /* Carrier 2 (nicam) frequency register LSB */ #define TDA9875_DCR 0x09 /* Demodulateur configuration regirter*/ #define TDA9875_DEEM 0x0a /* FM de-emphasis regirter*/ #define TDA9875_FMAT 0x0b /* FM Matrix regirter*/ /* values */ #define TDA9875_MUTE_ON 0xff /* general mute */ #define TDA9875_MUTE_OFF 0xcc /* general no mute */ static int tda9875_initialize(struct CHIPSTATE *chip) { chip_write(chip, TDA9875_CFG, 0xd0); /*reg de config 0 (reset)*/ chip_write(chip, TDA9875_MSR, 0x03); /* Monitor 0b00000XXX*/ chip_write(chip, TDA9875_C1MSB, 0x00); /*Car1(FM) MSB XMHz*/ chip_write(chip, TDA9875_C1MIB, 0x00); /*Car1(FM) MIB XMHz*/ chip_write(chip, TDA9875_C1LSB, 0x00); /*Car1(FM) LSB XMHz*/ chip_write(chip, TDA9875_C2MSB, 0x00); /*Car2(NICAM) MSB XMHz*/ chip_write(chip, TDA9875_C2MIB, 0x00); /*Car2(NICAM) MIB XMHz*/ chip_write(chip, TDA9875_C2LSB, 0x00); /*Car2(NICAM) LSB XMHz*/ chip_write(chip, TDA9875_DCR, 0x00); /*Demod config 0x00*/ chip_write(chip, TDA9875_DEEM, 0x44); /*DE-Emph 0b0100 0100*/ chip_write(chip, TDA9875_FMAT, 0x00); /*FM Matrix reg 0x00*/ chip_write(chip, TDA9875_SC1, 0x00); /* SCART 1 (SC1)*/ chip_write(chip, TDA9875_SC2, 0x01); /* SCART 2 (sc2)*/ chip_write(chip, TDA9875_CH1V, 0x10); /* Channel volume 1 mute*/ chip_write(chip, TDA9875_CH2V, 0x10); /* Channel volume 2 mute */ chip_write(chip, TDA9875_DACOS, 0x02); /* sig DAC i/o(in:nicam)*/ chip_write(chip, TDA9875_ADCIS, 0x6f); /* sig ADC input(in:mono)*/ chip_write(chip, TDA9875_LOSR, 0x00); /* line out (in:mono)*/ chip_write(chip, TDA9875_AER, 0x00); /*06 Effect (AVL+PSEUDO) */ chip_write(chip, TDA9875_MCS, 0x44); /* Main ch select (DAC) */ chip_write(chip, TDA9875_MVL, 0x03); /* Vol Main left 10dB */ chip_write(chip, TDA9875_MVR, 0x03); /* Vol Main right 10dB*/ chip_write(chip, TDA9875_MBA, 0x00); /* Main Bass Main 0dB*/ chip_write(chip, TDA9875_MTR, 0x00); /* Main Treble Main 0dB*/ chip_write(chip, 
TDA9875_ACS, 0x44); /* Aux chan select (dac)*/ chip_write(chip, TDA9875_AVL, 0x00); /* Vol Aux left 0dB*/ chip_write(chip, TDA9875_AVR, 0x00); /* Vol Aux right 0dB*/ chip_write(chip, TDA9875_ABA, 0x00); /* Aux Bass Main 0dB*/ chip_write(chip, TDA9875_ATR, 0x00); /* Aux Aigus Main 0dB*/ chip_write(chip, TDA9875_MUT, 0xcc); /* General mute */ return 0; } static int tda9875_volume(int val) { return (unsigned char)(val / 602 - 84); } static int tda9875_bass(int val) { return (unsigned char)(max(-12, val / 2115 - 15)); } static int tda9875_treble(int val) { return (unsigned char)(val / 2622 - 12); } /* ----------------------------------------------------------------------- */ /* *********************** * * i2c interface functions * * *********************** */ static int tda9875_checkit(struct CHIPSTATE *chip) { struct v4l2_subdev *sd = &chip->sd; int dic, rev; dic = chip_read2(chip, 254); rev = chip_read2(chip, 255); if (dic == 0 || dic == 2) { /* tda9875 and tda9875A */ v4l2_info(sd, "found tda9875%s rev. %d.\n", dic == 0 ? 
"" : "A", rev); return 1; } return 0; } /* ---------------------------------------------------------------------- */ /* audio chip descriptions - defines+functions for tea6420 */ #define TEA6300_VL 0x00 /* volume left */ #define TEA6300_VR 0x01 /* volume right */ #define TEA6300_BA 0x02 /* bass */ #define TEA6300_TR 0x03 /* treble */ #define TEA6300_FA 0x04 /* fader control */ #define TEA6300_S 0x05 /* switch register */ /* values for those registers: */ #define TEA6300_S_SA 0x01 /* stereo A input */ #define TEA6300_S_SB 0x02 /* stereo B */ #define TEA6300_S_SC 0x04 /* stereo C */ #define TEA6300_S_GMU 0x80 /* general mute */ #define TEA6320_V 0x00 /* volume (0-5)/loudness off (6)/zero crossing mute(7) */ #define TEA6320_FFR 0x01 /* fader front right (0-5) */ #define TEA6320_FFL 0x02 /* fader front left (0-5) */ #define TEA6320_FRR 0x03 /* fader rear right (0-5) */ #define TEA6320_FRL 0x04 /* fader rear left (0-5) */ #define TEA6320_BA 0x05 /* bass (0-4) */ #define TEA6320_TR 0x06 /* treble (0-4) */ #define TEA6320_S 0x07 /* switch register */ /* values for those registers: */ #define TEA6320_S_SA 0x07 /* stereo A input */ #define TEA6320_S_SB 0x06 /* stereo B */ #define TEA6320_S_SC 0x05 /* stereo C */ #define TEA6320_S_SD 0x04 /* stereo D */ #define TEA6320_S_GMU 0x80 /* general mute */ #define TEA6420_S_SA 0x00 /* stereo A input */ #define TEA6420_S_SB 0x01 /* stereo B */ #define TEA6420_S_SC 0x02 /* stereo C */ #define TEA6420_S_SD 0x03 /* stereo D */ #define TEA6420_S_SE 0x04 /* stereo E */ #define TEA6420_S_GMU 0x05 /* general mute */ static int tea6300_shift10(int val) { return val >> 10; } static int tea6300_shift12(int val) { return val >> 12; } /* Assumes 16bit input (values 0x3f to 0x0c are unique, values less than */ /* 0x0c mirror those immediately higher) */ static int tea6320_volume(int val) { return (val / (65535/(63-12)) + 12) & 0x3f; } static int tea6320_shift11(int val) { return val >> 11; } static int tea6320_initialize(struct CHIPSTATE * chip) 
{ chip_write(chip, TEA6320_FFR, 0x3f); chip_write(chip, TEA6320_FFL, 0x3f); chip_write(chip, TEA6320_FRR, 0x3f); chip_write(chip, TEA6320_FRL, 0x3f); return 0; } /* ---------------------------------------------------------------------- */ /* audio chip descriptions - defines+functions for tda8425 */ #define TDA8425_VL 0x00 /* volume left */ #define TDA8425_VR 0x01 /* volume right */ #define TDA8425_BA 0x02 /* bass */ #define TDA8425_TR 0x03 /* treble */ #define TDA8425_S1 0x08 /* switch functions */ /* values for those registers: */ #define TDA8425_S1_OFF 0xEE /* audio off (mute on) */ #define TDA8425_S1_CH1 0xCE /* audio channel 1 (mute off) - "linear stereo" mode */ #define TDA8425_S1_CH2 0xCF /* audio channel 2 (mute off) - "linear stereo" mode */ #define TDA8425_S1_MU 0x20 /* mute bit */ #define TDA8425_S1_STEREO 0x18 /* stereo bits */ #define TDA8425_S1_STEREO_SPATIAL 0x18 /* spatial stereo */ #define TDA8425_S1_STEREO_LINEAR 0x08 /* linear stereo */ #define TDA8425_S1_STEREO_PSEUDO 0x10 /* pseudo stereo */ #define TDA8425_S1_STEREO_MONO 0x00 /* forced mono */ #define TDA8425_S1_ML 0x06 /* language selector */ #define TDA8425_S1_ML_SOUND_A 0x02 /* sound a */ #define TDA8425_S1_ML_SOUND_B 0x04 /* sound b */ #define TDA8425_S1_ML_STEREO 0x06 /* stereo */ #define TDA8425_S1_IS 0x01 /* channel selector */ static int tda8425_shift10(int val) { return (val >> 10) | 0xc0; } static int tda8425_shift12(int val) { return (val >> 12) | 0xf0; } static int tda8425_initialize(struct CHIPSTATE *chip) { struct CHIPDESC *desc = chip->desc; struct i2c_client *c = v4l2_get_subdevdata(&chip->sd); int inputmap[4] = { /* tuner */ TDA8425_S1_CH2, /* radio */ TDA8425_S1_CH1, /* extern */ TDA8425_S1_CH1, /* intern */ TDA8425_S1_OFF}; if (c->adapter->id == I2C_HW_B_RIVA) memcpy(desc->inputmap, inputmap, sizeof(inputmap)); return 0; } static void tda8425_setmode(struct CHIPSTATE *chip, int mode) { int s1 = chip->shadow.bytes[TDA8425_S1+1] & 0xe1; if (mode & V4L2_TUNER_MODE_LANG1) { s1 
|= TDA8425_S1_ML_SOUND_A; s1 |= TDA8425_S1_STEREO_PSEUDO; } else if (mode & V4L2_TUNER_MODE_LANG2) { s1 |= TDA8425_S1_ML_SOUND_B; s1 |= TDA8425_S1_STEREO_PSEUDO; } else { s1 |= TDA8425_S1_ML_STEREO; if (mode & V4L2_TUNER_MODE_MONO) s1 |= TDA8425_S1_STEREO_MONO; if (mode & V4L2_TUNER_MODE_STEREO) s1 |= TDA8425_S1_STEREO_SPATIAL; } chip_write(chip,TDA8425_S1,s1); } /* ---------------------------------------------------------------------- */ /* audio chip descriptions - defines+functions for pic16c54 (PV951) */ /* the registers of 16C54, I2C sub address. */ #define PIC16C54_REG_KEY_CODE 0x01 /* Not use. */ #define PIC16C54_REG_MISC 0x02 /* bit definition of the RESET register, I2C data. */ #define PIC16C54_MISC_RESET_REMOTE_CTL 0x01 /* bit 0, Reset to receive the key */ /* code of remote controller */ #define PIC16C54_MISC_MTS_MAIN 0x02 /* bit 1 */ #define PIC16C54_MISC_MTS_SAP 0x04 /* bit 2 */ #define PIC16C54_MISC_MTS_BOTH 0x08 /* bit 3 */ #define PIC16C54_MISC_SND_MUTE 0x10 /* bit 4, Mute Audio(Line-in and Tuner) */ #define PIC16C54_MISC_SND_NOTMUTE 0x20 /* bit 5 */ #define PIC16C54_MISC_SWITCH_TUNER 0x40 /* bit 6 , Switch to Line-in */ #define PIC16C54_MISC_SWITCH_LINE 0x80 /* bit 7 , Switch to Tuner */ /* ---------------------------------------------------------------------- */ /* audio chip descriptions - defines+functions for TA8874Z */ /* write 1st byte */ #define TA8874Z_LED_STE 0x80 #define TA8874Z_LED_BIL 0x40 #define TA8874Z_LED_EXT 0x20 #define TA8874Z_MONO_SET 0x10 #define TA8874Z_MUTE 0x08 #define TA8874Z_F_MONO 0x04 #define TA8874Z_MODE_SUB 0x02 #define TA8874Z_MODE_MAIN 0x01 /* write 2nd byte */ /*#define TA8874Z_TI 0x80 */ /* test mode */ #define TA8874Z_SEPARATION 0x3f #define TA8874Z_SEPARATION_DEFAULT 0x10 /* read */ #define TA8874Z_B1 0x80 #define TA8874Z_B0 0x40 #define TA8874Z_CHAG_FLAG 0x20 /* * B1 B0 * mono L H * stereo L L * BIL H L */ static int ta8874z_getmode(struct CHIPSTATE *chip) { int val, mode; val = chip_read(chip); mode = 
V4L2_TUNER_MODE_MONO; if (val & TA8874Z_B1){ mode |= V4L2_TUNER_MODE_LANG1 | V4L2_TUNER_MODE_LANG2; }else if (!(val & TA8874Z_B0)){ mode |= V4L2_TUNER_MODE_STEREO; } /* v4l_dbg(1, debug, chip->c, "ta8874z_getmode(): raw chip read: 0x%02x, return: 0x%02x\n", val, mode); */ return mode; } static audiocmd ta8874z_stereo = { 2, {0, TA8874Z_SEPARATION_DEFAULT}}; static audiocmd ta8874z_mono = {2, { TA8874Z_MONO_SET, TA8874Z_SEPARATION_DEFAULT}}; static audiocmd ta8874z_main = {2, { 0, TA8874Z_SEPARATION_DEFAULT}}; static audiocmd ta8874z_sub = {2, { TA8874Z_MODE_SUB, TA8874Z_SEPARATION_DEFAULT}}; static void ta8874z_setmode(struct CHIPSTATE *chip, int mode) { struct v4l2_subdev *sd = &chip->sd; int update = 1; audiocmd *t = NULL; v4l2_dbg(1, debug, sd, "ta8874z_setmode(): mode: 0x%02x\n", mode); switch(mode){ case V4L2_TUNER_MODE_MONO: t = &ta8874z_mono; break; case V4L2_TUNER_MODE_STEREO: t = &ta8874z_stereo; break; case V4L2_TUNER_MODE_LANG1: t = &ta8874z_main; break; case V4L2_TUNER_MODE_LANG2: t = &ta8874z_sub; break; default: update = 0; } if(update) chip_cmd(chip, "TA8874Z", t); } static int ta8874z_checkit(struct CHIPSTATE *chip) { int rc; rc = chip_read(chip); return ((rc & 0x1f) == 0x1f) ? 
1 : 0; } /* ---------------------------------------------------------------------- */ /* audio chip descriptions - struct CHIPDESC */ /* insmod options to enable/disable individual audio chips */ static int tda8425 = 1; static int tda9840 = 1; static int tda9850 = 1; static int tda9855 = 1; static int tda9873 = 1; static int tda9874a = 1; static int tda9875 = 1; static int tea6300; /* default 0 - address clash with msp34xx */ static int tea6320; /* default 0 - address clash with msp34xx */ static int tea6420 = 1; static int pic16c54 = 1; static int ta8874z; /* default 0 - address clash with tda9840 */ module_param(tda8425, int, 0444); module_param(tda9840, int, 0444); module_param(tda9850, int, 0444); module_param(tda9855, int, 0444); module_param(tda9873, int, 0444); module_param(tda9874a, int, 0444); module_param(tda9875, int, 0444); module_param(tea6300, int, 0444); module_param(tea6320, int, 0444); module_param(tea6420, int, 0444); module_param(pic16c54, int, 0444); module_param(ta8874z, int, 0444); static struct CHIPDESC chiplist[] = { { .name = "tda9840", .insmodopt = &tda9840, .addr_lo = I2C_ADDR_TDA9840 >> 1, .addr_hi = I2C_ADDR_TDA9840 >> 1, .registers = 5, .flags = CHIP_NEED_CHECKMODE, /* callbacks */ .checkit = tda9840_checkit, .getmode = tda9840_getmode, .setmode = tda9840_setmode, .init = { 2, { TDA9840_TEST, TDA9840_TEST_INT1SN /* ,TDA9840_SW, TDA9840_MONO */} } }, { .name = "tda9873h", .insmodopt = &tda9873, .addr_lo = I2C_ADDR_TDA985x_L >> 1, .addr_hi = I2C_ADDR_TDA985x_H >> 1, .registers = 3, .flags = CHIP_HAS_INPUTSEL | CHIP_NEED_CHECKMODE, /* callbacks */ .checkit = tda9873_checkit, .getmode = tda9873_getmode, .setmode = tda9873_setmode, .init = { 4, { TDA9873_SW, 0xa4, 0x06, 0x03 } }, .inputreg = TDA9873_SW, .inputmute = TDA9873_MUTE | TDA9873_AUTOMUTE, .inputmap = {0xa0, 0xa2, 0xa0, 0xa0}, .inputmask = TDA9873_INP_MASK|TDA9873_MUTE|TDA9873_AUTOMUTE, }, { .name = "tda9874h/a", .insmodopt = &tda9874a, .addr_lo = I2C_ADDR_TDA9874 >> 1, .addr_hi = 
I2C_ADDR_TDA9874 >> 1, .flags = CHIP_NEED_CHECKMODE, /* callbacks */ .initialize = tda9874a_initialize, .checkit = tda9874a_checkit, .getmode = tda9874a_getmode, .setmode = tda9874a_setmode, }, { .name = "tda9875", .insmodopt = &tda9875, .addr_lo = I2C_ADDR_TDA9875 >> 1, .addr_hi = I2C_ADDR_TDA9875 >> 1, .flags = CHIP_HAS_VOLUME | CHIP_HAS_BASSTREBLE, /* callbacks */ .initialize = tda9875_initialize, .checkit = tda9875_checkit, .volfunc = tda9875_volume, .bassfunc = tda9875_bass, .treblefunc = tda9875_treble, .leftreg = TDA9875_MVL, .rightreg = TDA9875_MVR, .bassreg = TDA9875_MBA, .treblereg = TDA9875_MTR, .leftinit = 58880, .rightinit = 58880, }, { .name = "tda9850", .insmodopt = &tda9850, .addr_lo = I2C_ADDR_TDA985x_L >> 1, .addr_hi = I2C_ADDR_TDA985x_H >> 1, .registers = 11, .getmode = tda985x_getmode, .setmode = tda985x_setmode, .init = { 8, { TDA9850_C4, 0x08, 0x08, TDA985x_STEREO, 0x07, 0x10, 0x10, 0x03 } } }, { .name = "tda9855", .insmodopt = &tda9855, .addr_lo = I2C_ADDR_TDA985x_L >> 1, .addr_hi = I2C_ADDR_TDA985x_H >> 1, .registers = 11, .flags = CHIP_HAS_VOLUME | CHIP_HAS_BASSTREBLE, .leftreg = TDA9855_VL, .rightreg = TDA9855_VR, .bassreg = TDA9855_BA, .treblereg = TDA9855_TR, /* callbacks */ .volfunc = tda9855_volume, .bassfunc = tda9855_bass, .treblefunc = tda9855_treble, .getmode = tda985x_getmode, .setmode = tda985x_setmode, .init = { 12, { 0, 0x6f, 0x6f, 0x0e, 0x07<<1, 0x8<<2, TDA9855_MUTE | TDA9855_AVL | TDA9855_LOUD | TDA9855_INT, TDA985x_STEREO | TDA9855_LINEAR | TDA9855_TZCM | TDA9855_VZCM, 0x07, 0x10, 0x10, 0x03 }} }, { .name = "tea6300", .insmodopt = &tea6300, .addr_lo = I2C_ADDR_TEA6300 >> 1, .addr_hi = I2C_ADDR_TEA6300 >> 1, .registers = 6, .flags = CHIP_HAS_VOLUME | CHIP_HAS_BASSTREBLE | CHIP_HAS_INPUTSEL, .leftreg = TEA6300_VR, .rightreg = TEA6300_VL, .bassreg = TEA6300_BA, .treblereg = TEA6300_TR, /* callbacks */ .volfunc = tea6300_shift10, .bassfunc = tea6300_shift12, .treblefunc = tea6300_shift12, .inputreg = TEA6300_S, .inputmap = { 
TEA6300_S_SA, TEA6300_S_SB, TEA6300_S_SC }, .inputmute = TEA6300_S_GMU, }, { .name = "tea6320", .insmodopt = &tea6320, .addr_lo = I2C_ADDR_TEA6300 >> 1, .addr_hi = I2C_ADDR_TEA6300 >> 1, .registers = 8, .flags = CHIP_HAS_VOLUME | CHIP_HAS_BASSTREBLE | CHIP_HAS_INPUTSEL, .leftreg = TEA6320_V, .rightreg = TEA6320_V, .bassreg = TEA6320_BA, .treblereg = TEA6320_TR, /* callbacks */ .initialize = tea6320_initialize, .volfunc = tea6320_volume, .bassfunc = tea6320_shift11, .treblefunc = tea6320_shift11, .inputreg = TEA6320_S, .inputmap = { TEA6320_S_SA, TEA6420_S_SB, TEA6300_S_SC, TEA6320_S_SD }, .inputmute = TEA6300_S_GMU, }, { .name = "tea6420", .insmodopt = &tea6420, .addr_lo = I2C_ADDR_TEA6420 >> 1, .addr_hi = I2C_ADDR_TEA6420 >> 1, .registers = 1, .flags = CHIP_HAS_INPUTSEL, .inputreg = -1, .inputmap = { TEA6420_S_SA, TEA6420_S_SB, TEA6420_S_SC }, .inputmute = TEA6300_S_GMU, }, { .name = "tda8425", .insmodopt = &tda8425, .addr_lo = I2C_ADDR_TDA8425 >> 1, .addr_hi = I2C_ADDR_TDA8425 >> 1, .registers = 9, .flags = CHIP_HAS_VOLUME | CHIP_HAS_BASSTREBLE | CHIP_HAS_INPUTSEL, .leftreg = TDA8425_VL, .rightreg = TDA8425_VR, .bassreg = TDA8425_BA, .treblereg = TDA8425_TR, /* callbacks */ .initialize = tda8425_initialize, .volfunc = tda8425_shift10, .bassfunc = tda8425_shift12, .treblefunc = tda8425_shift12, .setmode = tda8425_setmode, .inputreg = TDA8425_S1, .inputmap = { TDA8425_S1_CH1, TDA8425_S1_CH1, TDA8425_S1_CH1 }, .inputmute = TDA8425_S1_OFF, }, { .name = "pic16c54 (PV951)", .insmodopt = &pic16c54, .addr_lo = I2C_ADDR_PIC16C54 >> 1, .addr_hi = I2C_ADDR_PIC16C54>> 1, .registers = 2, .flags = CHIP_HAS_INPUTSEL, .inputreg = PIC16C54_REG_MISC, .inputmap = {PIC16C54_MISC_SND_NOTMUTE|PIC16C54_MISC_SWITCH_TUNER, PIC16C54_MISC_SND_NOTMUTE|PIC16C54_MISC_SWITCH_LINE, PIC16C54_MISC_SND_NOTMUTE|PIC16C54_MISC_SWITCH_LINE, PIC16C54_MISC_SND_MUTE}, .inputmute = PIC16C54_MISC_SND_MUTE, }, { .name = "ta8874z", .checkit = ta8874z_checkit, .insmodopt = &ta8874z, .addr_lo = 
I2C_ADDR_TDA9840 >> 1, .addr_hi = I2C_ADDR_TDA9840 >> 1, .registers = 2, .flags = CHIP_NEED_CHECKMODE, /* callbacks */ .getmode = ta8874z_getmode, .setmode = ta8874z_setmode, .init = {2, { TA8874Z_MONO_SET, TA8874Z_SEPARATION_DEFAULT}}, }, { .name = NULL } /* EOF */ }; /* ---------------------------------------------------------------------- */ static int tvaudio_g_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { struct CHIPSTATE *chip = to_state(sd); struct CHIPDESC *desc = chip->desc; switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: if (!(desc->flags & CHIP_HAS_INPUTSEL)) break; ctrl->value=chip->muted; return 0; case V4L2_CID_AUDIO_VOLUME: if (!(desc->flags & CHIP_HAS_VOLUME)) break; ctrl->value = max(chip->left,chip->right); return 0; case V4L2_CID_AUDIO_BALANCE: { int volume; if (!(desc->flags & CHIP_HAS_VOLUME)) break; volume = max(chip->left,chip->right); if (volume) ctrl->value=(32768*min(chip->left,chip->right))/volume; else ctrl->value=32768; return 0; } case V4L2_CID_AUDIO_BASS: if (!(desc->flags & CHIP_HAS_BASSTREBLE)) break; ctrl->value = chip->bass; return 0; case V4L2_CID_AUDIO_TREBLE: if (!(desc->flags & CHIP_HAS_BASSTREBLE)) break; ctrl->value = chip->treble; return 0; } return -EINVAL; } static int tvaudio_s_ctrl(struct v4l2_subdev *sd, struct v4l2_control *ctrl) { struct CHIPSTATE *chip = to_state(sd); struct CHIPDESC *desc = chip->desc; switch (ctrl->id) { case V4L2_CID_AUDIO_MUTE: if (!(desc->flags & CHIP_HAS_INPUTSEL)) break; if (ctrl->value < 0 || ctrl->value >= 2) return -ERANGE; chip->muted = ctrl->value; if (chip->muted) chip_write_masked(chip,desc->inputreg,desc->inputmute,desc->inputmask); else chip_write_masked(chip,desc->inputreg, desc->inputmap[chip->input],desc->inputmask); return 0; case V4L2_CID_AUDIO_VOLUME: { int volume,balance; if (!(desc->flags & CHIP_HAS_VOLUME)) break; volume = max(chip->left,chip->right); if (volume) balance=(32768*min(chip->left,chip->right))/volume; else balance=32768; volume=ctrl->value; chip->left 
= (min(65536 - balance,32768) * volume) / 32768; chip->right = (min(balance,volume *(__u16)32768)) / 32768; chip_write(chip,desc->leftreg,desc->volfunc(chip->left)); chip_write(chip,desc->rightreg,desc->volfunc(chip->right)); return 0; } case V4L2_CID_AUDIO_BALANCE: { int volume, balance; if (!(desc->flags & CHIP_HAS_VOLUME)) break; volume = max(chip->left,chip->right); balance = ctrl->value; chip_write(chip,desc->leftreg,desc->volfunc(chip->left)); chip_write(chip,desc->rightreg,desc->volfunc(chip->right)); return 0; } case V4L2_CID_AUDIO_BASS: if (!(desc->flags & CHIP_HAS_BASSTREBLE)) break; chip->bass = ctrl->value; chip_write(chip,desc->bassreg,desc->bassfunc(chip->bass)); return 0; case V4L2_CID_AUDIO_TREBLE: if (!(desc->flags & CHIP_HAS_BASSTREBLE)) break; chip->treble = ctrl->value; chip_write(chip,desc->treblereg,desc->treblefunc(chip->treble)); return 0; } return -EINVAL; } /* ---------------------------------------------------------------------- */ /* video4linux interface */ static int tvaudio_s_radio(struct v4l2_subdev *sd) { struct CHIPSTATE *chip = to_state(sd); chip->radio = 1; chip->watch_stereo = 0; /* del_timer(&chip->wt); */ return 0; } static int tvaudio_queryctrl(struct v4l2_subdev *sd, struct v4l2_queryctrl *qc) { struct CHIPSTATE *chip = to_state(sd); struct CHIPDESC *desc = chip->desc; switch (qc->id) { case V4L2_CID_AUDIO_MUTE: if (desc->flags & CHIP_HAS_INPUTSEL) return v4l2_ctrl_query_fill(qc, 0, 1, 1, 0); break; case V4L2_CID_AUDIO_VOLUME: if (desc->flags & CHIP_HAS_VOLUME) return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 58880); break; case V4L2_CID_AUDIO_BALANCE: if (desc->flags & CHIP_HAS_VOLUME) return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 32768); break; case V4L2_CID_AUDIO_BASS: case V4L2_CID_AUDIO_TREBLE: if (desc->flags & CHIP_HAS_BASSTREBLE) return v4l2_ctrl_query_fill(qc, 0, 65535, 65535 / 100, 32768); break; default: break; } return -EINVAL; } static int tvaudio_s_routing(struct v4l2_subdev *sd, u32 input, u32 
output, u32 config) { struct CHIPSTATE *chip = to_state(sd); struct CHIPDESC *desc = chip->desc; if (!(desc->flags & CHIP_HAS_INPUTSEL)) return 0; if (input >= 4) return -EINVAL; /* There are four inputs: tuner, radio, extern and intern. */ chip->input = input; if (chip->muted) return 0; chip_write_masked(chip, desc->inputreg, desc->inputmap[chip->input], desc->inputmask); return 0; } static int tvaudio_s_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct CHIPSTATE *chip = to_state(sd); struct CHIPDESC *desc = chip->desc; int mode = 0; if (!desc->setmode) return 0; if (chip->radio) return 0; switch (vt->audmode) { case V4L2_TUNER_MODE_MONO: case V4L2_TUNER_MODE_STEREO: case V4L2_TUNER_MODE_LANG1: case V4L2_TUNER_MODE_LANG2: mode = vt->audmode; break; case V4L2_TUNER_MODE_LANG1_LANG2: mode = V4L2_TUNER_MODE_STEREO; break; default: return -EINVAL; } chip->audmode = vt->audmode; if (mode) { chip->watch_stereo = 0; /* del_timer(&chip->wt); */ chip->mode = mode; desc->setmode(chip, mode); } return 0; } static int tvaudio_g_tuner(struct v4l2_subdev *sd, struct v4l2_tuner *vt) { struct CHIPSTATE *chip = to_state(sd); struct CHIPDESC *desc = chip->desc; int mode = V4L2_TUNER_MODE_MONO; if (!desc->getmode) return 0; if (chip->radio) return 0; vt->audmode = chip->audmode; vt->rxsubchans = 0; vt->capability = V4L2_TUNER_CAP_STEREO | V4L2_TUNER_CAP_LANG1 | V4L2_TUNER_CAP_LANG2; mode = desc->getmode(chip); if (mode & V4L2_TUNER_MODE_MONO) vt->rxsubchans |= V4L2_TUNER_SUB_MONO; if (mode & V4L2_TUNER_MODE_STEREO) vt->rxsubchans |= V4L2_TUNER_SUB_STEREO; /* Note: for SAP it should be mono/lang2 or stereo/lang2. When this module is converted fully to v4l2, then this should change for those chips that can detect SAP. 
*/ if (mode & V4L2_TUNER_MODE_LANG1) vt->rxsubchans = V4L2_TUNER_SUB_LANG1 | V4L2_TUNER_SUB_LANG2; return 0; } static int tvaudio_s_std(struct v4l2_subdev *sd, v4l2_std_id std) { struct CHIPSTATE *chip = to_state(sd); chip->radio = 0; return 0; } static int tvaudio_s_frequency(struct v4l2_subdev *sd, struct v4l2_frequency *freq) { struct CHIPSTATE *chip = to_state(sd); struct CHIPDESC *desc = chip->desc; chip->mode = 0; /* automatic */ /* For chips that provide getmode and setmode, and doesn't automatically follows the stereo carrier, a kthread is created to set the audio standard. In this case, when then the video channel is changed, tvaudio starts on MONO mode. After waiting for 2 seconds, the kernel thread is called, to follow whatever audio standard is pointed by the audio carrier. */ if (chip->thread) { desc->setmode(chip, V4L2_TUNER_MODE_MONO); if (chip->prevmode != V4L2_TUNER_MODE_MONO) chip->prevmode = -1; /* reset previous mode */ mod_timer(&chip->wt, jiffies+msecs_to_jiffies(2000)); } return 0; } static int tvaudio_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); return v4l2_chip_ident_i2c_client(client, chip, V4L2_IDENT_TVAUDIO, 0); } /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops tvaudio_core_ops = { .g_chip_ident = tvaudio_g_chip_ident, .queryctrl = tvaudio_queryctrl, .g_ctrl = tvaudio_g_ctrl, .s_ctrl = tvaudio_s_ctrl, .s_std = tvaudio_s_std, }; static const struct v4l2_subdev_tuner_ops tvaudio_tuner_ops = { .s_radio = tvaudio_s_radio, .s_frequency = tvaudio_s_frequency, .s_tuner = tvaudio_s_tuner, .g_tuner = tvaudio_g_tuner, }; static const struct v4l2_subdev_audio_ops tvaudio_audio_ops = { .s_routing = tvaudio_s_routing, }; static const struct v4l2_subdev_ops tvaudio_ops = { .core = &tvaudio_core_ops, .tuner = &tvaudio_tuner_ops, .audio = &tvaudio_audio_ops, }; /* 
----------------------------------------------------------------------- */ /* i2c registration */ static int tvaudio_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct CHIPSTATE *chip; struct CHIPDESC *desc; struct v4l2_subdev *sd; if (debug) { printk(KERN_INFO "tvaudio: TV audio decoder + audio/video mux driver\n"); printk(KERN_INFO "tvaudio: known chips: "); for (desc = chiplist; desc->name != NULL; desc++) printk("%s%s", (desc == chiplist) ? "" : ", ", desc->name); printk("\n"); } chip = kzalloc(sizeof(*chip), GFP_KERNEL); if (!chip) return -ENOMEM; sd = &chip->sd; v4l2_i2c_subdev_init(sd, client, &tvaudio_ops); /* find description for the chip */ v4l2_dbg(1, debug, sd, "chip found @ 0x%x\n", client->addr<<1); for (desc = chiplist; desc->name != NULL; desc++) { if (0 == *(desc->insmodopt)) continue; if (client->addr < desc->addr_lo || client->addr > desc->addr_hi) continue; if (desc->checkit && !desc->checkit(chip)) continue; break; } if (desc->name == NULL) { v4l2_dbg(1, debug, sd, "no matching chip description found\n"); kfree(chip); return -EIO; } v4l2_info(sd, "%s found @ 0x%x (%s)\n", desc->name, client->addr<<1, client->adapter->name); if (desc->flags) { v4l2_dbg(1, debug, sd, "matches:%s%s%s.\n", (desc->flags & CHIP_HAS_VOLUME) ? " volume" : "", (desc->flags & CHIP_HAS_BASSTREBLE) ? " bass/treble" : "", (desc->flags & CHIP_HAS_INPUTSEL) ? " audiomux" : ""); } /* fill required data structures */ if (!id) strlcpy(client->name, desc->name, I2C_NAME_SIZE); chip->desc = desc; chip->shadow.count = desc->registers+1; chip->prevmode = -1; chip->audmode = V4L2_TUNER_MODE_LANG1; /* initialization */ if (desc->initialize != NULL) desc->initialize(chip); else chip_cmd(chip, "init", &desc->init); if (desc->flags & CHIP_HAS_VOLUME) { if (!desc->volfunc) { /* This shouldn't be happen. 
Warn user, but keep working without volume controls */ v4l2_info(sd, "volume callback undefined!\n"); desc->flags &= ~CHIP_HAS_VOLUME; } else { chip->left = desc->leftinit ? desc->leftinit : 65535; chip->right = desc->rightinit ? desc->rightinit : 65535; chip_write(chip, desc->leftreg, desc->volfunc(chip->left)); chip_write(chip, desc->rightreg, desc->volfunc(chip->right)); } } if (desc->flags & CHIP_HAS_BASSTREBLE) { if (!desc->bassfunc || !desc->treblefunc) { /* This shouldn't be happen. Warn user, but keep working without bass/treble controls */ v4l2_info(sd, "bass/treble callbacks undefined!\n"); desc->flags &= ~CHIP_HAS_BASSTREBLE; } else { chip->treble = desc->trebleinit ? desc->trebleinit : 32768; chip->bass = desc->bassinit ? desc->bassinit : 32768; chip_write(chip, desc->bassreg, desc->bassfunc(chip->bass)); chip_write(chip, desc->treblereg, desc->treblefunc(chip->treble)); } } chip->thread = NULL; init_timer(&chip->wt); if (desc->flags & CHIP_NEED_CHECKMODE) { if (!desc->getmode || !desc->setmode) { /* This shouldn't be happen. Warn user, but keep working without kthread */ v4l2_info(sd, "set/get mode callbacks undefined!\n"); return 0; } /* start async thread */ chip->wt.function = chip_thread_wake; chip->wt.data = (unsigned long)chip; chip->thread = kthread_run(chip_thread, chip, client->name); if (IS_ERR(chip->thread)) { v4l2_warn(sd, "failed to create kthread\n"); chip->thread = NULL; } } return 0; } static int tvaudio_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); struct CHIPSTATE *chip = to_state(sd); del_timer_sync(&chip->wt); if (chip->thread) { /* shutdown async thread */ kthread_stop(chip->thread); chip->thread = NULL; } v4l2_device_unregister_subdev(sd); kfree(chip); return 0; } /* This driver supports many devices and the idea is to let the driver detect which device is present. So rather than listing all supported devices here, we pretend to support a single, fake device type. 
*/ static const struct i2c_device_id tvaudio_id[] = { { "tvaudio", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, tvaudio_id); static struct v4l2_i2c_driver_data v4l2_i2c_data = { .name = "tvaudio", .probe = tvaudio_probe, .remove = tvaudio_remove, .id_table = tvaudio_id, };
gpl-2.0
netarchy/Supersonic-2.6.35-Gingersense
drivers/staging/rtl8192su/ieee80211/ieee80211_softmac_wx.c
1468
14645
/* IEEE 802.11 SoftMAC layer * Copyright (c) 2005 Andrea Merello <andreamrl@tiscali.it> * * Mostly extracted from the rtl8180-sa2400 driver for the * in-kernel generic ieee802.11 stack. * * Some pieces of code might be stolen from ipw2100 driver * copyright of who own it's copyright ;-) * * PS wx handler mostly stolen from hostap, copyright who * own it's copyright ;-) * * released under the GPL */ #include "ieee80211.h" #include "dot11d.h" /* FIXME: add A freqs */ const long ieee80211_wlan_frequencies[] = { 2412, 2417, 2422, 2427, 2432, 2437, 2442, 2447, 2452, 2457, 2462, 2467, 2472, 2484 }; int ieee80211_wx_set_freq(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { int ret; struct iw_freq *fwrq = & wrqu->freq; down(&ieee->wx_sem); if(ieee->iw_mode == IW_MODE_INFRA){ ret = -EOPNOTSUPP; goto out; } /* if setting by freq convert to channel */ if (fwrq->e == 1) { if ((fwrq->m >= (int) 2.412e8 && fwrq->m <= (int) 2.487e8)) { int f = fwrq->m / 100000; int c = 0; while ((c < 14) && (f != ieee80211_wlan_frequencies[c])) c++; /* hack to fall through */ fwrq->e = 0; fwrq->m = c + 1; } } if (fwrq->e > 0 || fwrq->m > 14 || fwrq->m < 1 ){ ret = -EOPNOTSUPP; goto out; }else { /* Set the channel */ if (!(GET_DOT11D_INFO(ieee)->channel_map)[fwrq->m]) { ret = -EINVAL; goto out; } ieee->current_network.channel = fwrq->m; ieee->set_chan(ieee->dev, ieee->current_network.channel); if(ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER) if(ieee->state == IEEE80211_LINKED){ ieee80211_stop_send_beacons(ieee); ieee80211_start_send_beacons(ieee); } } ret = 0; out: up(&ieee->wx_sem); return ret; } int ieee80211_wx_get_freq(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { struct iw_freq *fwrq = & wrqu->freq; if (ieee->current_network.channel == 0) return -1; //NM 0.7.0 will not accept channel any more. 
fwrq->m = ieee80211_wlan_frequencies[ieee->current_network.channel-1] * 100000; fwrq->e = 1; // fwrq->m = ieee->current_network.channel; // fwrq->e = 0; return 0; } int ieee80211_wx_get_wap(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { unsigned long flags; wrqu->ap_addr.sa_family = ARPHRD_ETHER; if (ieee->iw_mode == IW_MODE_MONITOR) return -1; /* We want avoid to give to the user inconsistent infos*/ spin_lock_irqsave(&ieee->lock, flags); if (ieee->state != IEEE80211_LINKED && ieee->state != IEEE80211_LINKED_SCANNING && ieee->wap_set == 0) memset(wrqu->ap_addr.sa_data, 0, ETH_ALEN); else memcpy(wrqu->ap_addr.sa_data, ieee->current_network.bssid, ETH_ALEN); spin_unlock_irqrestore(&ieee->lock, flags); return 0; } int ieee80211_wx_set_wap(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *awrq, char *extra) { int ret = 0; u8 zero[] = {0,0,0,0,0,0}; unsigned long flags; short ifup = ieee->proto_started;//dev->flags & IFF_UP; struct sockaddr *temp = (struct sockaddr *)awrq; ieee->sync_scan_hurryup = 1; down(&ieee->wx_sem); /* use ifconfig hw ether */ if (ieee->iw_mode == IW_MODE_MASTER){ ret = -1; goto out; } if (temp->sa_family != ARPHRD_ETHER){ ret = -EINVAL; goto out; } if (ifup) ieee80211_stop_protocol(ieee); /* just to avoid to give inconsistent infos in the * get wx method. 
not really needed otherwise */ spin_lock_irqsave(&ieee->lock, flags); memcpy(ieee->current_network.bssid, temp->sa_data, ETH_ALEN); ieee->wap_set = memcmp(temp->sa_data, zero,ETH_ALEN)!=0; spin_unlock_irqrestore(&ieee->lock, flags); if (ifup) ieee80211_start_protocol(ieee); out: up(&ieee->wx_sem); return ret; } int ieee80211_wx_get_essid(struct ieee80211_device *ieee, struct iw_request_info *a,union iwreq_data *wrqu,char *b) { int len,ret = 0; unsigned long flags; if (ieee->iw_mode == IW_MODE_MONITOR) return -1; /* We want avoid to give to the user inconsistent infos*/ spin_lock_irqsave(&ieee->lock, flags); if (ieee->current_network.ssid[0] == '\0' || ieee->current_network.ssid_len == 0){ ret = -1; goto out; } if (ieee->state != IEEE80211_LINKED && ieee->state != IEEE80211_LINKED_SCANNING && ieee->ssid_set == 0){ ret = -1; goto out; } len = ieee->current_network.ssid_len; wrqu->essid.length = len; strncpy(b,ieee->current_network.ssid,len); wrqu->essid.flags = 1; out: spin_unlock_irqrestore(&ieee->lock, flags); return ret; } int ieee80211_wx_set_rate(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { u32 target_rate = wrqu->bitrate.value; ieee->rate = target_rate/100000; //FIXME: we might want to limit rate also in management protocols. 
return 0; } int ieee80211_wx_get_rate(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { u32 tmp_rate = 0; //printk("===>mode:%d, halfNmode:%d\n", ieee->mode, ieee->bHalfWirelessN24GMode); if (ieee->mode & (IEEE_A | IEEE_B | IEEE_G)) tmp_rate = ieee->rate; else if (ieee->mode & IEEE_N_5G) tmp_rate = 580; else if (ieee->mode & IEEE_N_24G) { if (ieee->GetHalfNmodeSupportByAPsHandler(ieee->dev)) tmp_rate = HTHalfMcsToDataRate(ieee, 15); else tmp_rate = HTMcsToDataRate(ieee, 15); } wrqu->bitrate.value = tmp_rate * 500000; return 0; } int ieee80211_wx_set_rts(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { if (wrqu->rts.disabled || !wrqu->rts.fixed) ieee->rts = DEFAULT_RTS_THRESHOLD; else { if (wrqu->rts.value < MIN_RTS_THRESHOLD || wrqu->rts.value > MAX_RTS_THRESHOLD) return -EINVAL; ieee->rts = wrqu->rts.value; } return 0; } int ieee80211_wx_get_rts(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { wrqu->rts.value = ieee->rts; wrqu->rts.fixed = 0; /* no auto select */ wrqu->rts.disabled = (wrqu->rts.value == DEFAULT_RTS_THRESHOLD); return 0; } int ieee80211_wx_set_mode(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { ieee->sync_scan_hurryup = 1; down(&ieee->wx_sem); if (wrqu->mode == ieee->iw_mode) goto out; if (wrqu->mode == IW_MODE_MONITOR){ ieee->dev->type = ARPHRD_IEEE80211; }else{ ieee->dev->type = ARPHRD_ETHER; } if (!ieee->proto_started){ ieee->iw_mode = wrqu->mode; }else{ ieee80211_stop_protocol(ieee); ieee->iw_mode = wrqu->mode; ieee80211_start_protocol(ieee); } out: up(&ieee->wx_sem); return 0; } void ieee80211_wx_sync_scan_wq(struct work_struct *work) { struct ieee80211_device *ieee = container_of(work, struct ieee80211_device, wx_sync_scan_wq); short chan; HT_EXTCHNL_OFFSET chan_offset=0; HT_CHANNEL_WIDTH bandwidth=0; int b40M = 0; static int count = 0; chan = 
ieee->current_network.channel; netif_carrier_off(ieee->dev); if (ieee->data_hard_stop) ieee->data_hard_stop(ieee->dev); ieee80211_stop_send_beacons(ieee); ieee->state = IEEE80211_LINKED_SCANNING; ieee->link_change(ieee->dev); ieee->InitialGainHandler(ieee->dev,IG_Backup); if (ieee->SetFwCmdHandler) { ieee->SetFwCmdHandler(ieee->dev, FW_CMD_DIG_HALT); ieee->SetFwCmdHandler(ieee->dev, FW_CMD_HIGH_PWR_DISABLE); } if (ieee->pHTInfo->bCurrentHTSupport && ieee->pHTInfo->bEnableHT && ieee->pHTInfo->bCurBW40MHz) { b40M = 1; chan_offset = ieee->pHTInfo->CurSTAExtChnlOffset; bandwidth = (HT_CHANNEL_WIDTH)ieee->pHTInfo->bCurBW40MHz; printk("Scan in 40M, force to 20M first:%d, %d\n", chan_offset, bandwidth); ieee->SetBWModeHandler(ieee->dev, HT_CHANNEL_WIDTH_20, HT_EXTCHNL_OFFSET_NO_EXT); } ieee80211_start_scan_syncro(ieee); if (b40M) { printk("Scan in 20M, back to 40M\n"); if (chan_offset == HT_EXTCHNL_OFFSET_UPPER) ieee->set_chan(ieee->dev, chan + 2); else if (chan_offset == HT_EXTCHNL_OFFSET_LOWER) ieee->set_chan(ieee->dev, chan - 2); else ieee->set_chan(ieee->dev, chan); ieee->SetBWModeHandler(ieee->dev, bandwidth, chan_offset); } else { ieee->set_chan(ieee->dev, chan); } ieee->InitialGainHandler(ieee->dev,IG_Restore); if (ieee->SetFwCmdHandler) { ieee->SetFwCmdHandler(ieee->dev, FW_CMD_DIG_RESUME); ieee->SetFwCmdHandler(ieee->dev, FW_CMD_HIGH_PWR_ENABLE); } ieee->state = IEEE80211_LINKED; ieee->link_change(ieee->dev); // To prevent the immediately calling watch_dog after scan. 
if(ieee->LinkDetectInfo.NumRecvBcnInPeriod==0||ieee->LinkDetectInfo.NumRecvDataInPeriod==0 ) { ieee->LinkDetectInfo.NumRecvBcnInPeriod = 1; ieee->LinkDetectInfo.NumRecvDataInPeriod= 1; } if (ieee->data_hard_resume) ieee->data_hard_resume(ieee->dev); if(ieee->iw_mode == IW_MODE_ADHOC || ieee->iw_mode == IW_MODE_MASTER) ieee80211_start_send_beacons(ieee); netif_carrier_on(ieee->dev); count = 0; up(&ieee->wx_sem); } int ieee80211_wx_set_scan(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { int ret = 0; down(&ieee->wx_sem); if (ieee->iw_mode == IW_MODE_MONITOR || !(ieee->proto_started)){ ret = -1; goto out; } if ( ieee->state == IEEE80211_LINKED){ queue_work(ieee->wq, &ieee->wx_sync_scan_wq); /* intentionally forget to up sem */ return 0; } out: up(&ieee->wx_sem); return ret; } int ieee80211_wx_set_essid(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *extra) { int ret=0,len; short proto_started; unsigned long flags; ieee->sync_scan_hurryup = 1; down(&ieee->wx_sem); proto_started = ieee->proto_started; if (wrqu->essid.length > IW_ESSID_MAX_SIZE){ ret= -E2BIG; goto out; } if (ieee->iw_mode == IW_MODE_MONITOR){ ret= -1; goto out; } if(proto_started) ieee80211_stop_protocol(ieee); /* this is just to be sure that the GET wx callback * has consisten infos. not needed otherwise */ spin_lock_irqsave(&ieee->lock, flags); if (wrqu->essid.flags && wrqu->essid.length) { //first flush current network.ssid len = ((wrqu->essid.length-1) < IW_ESSID_MAX_SIZE) ? 
(wrqu->essid.length-1) : IW_ESSID_MAX_SIZE; strncpy(ieee->current_network.ssid, extra, len+1); ieee->current_network.ssid_len = len+1; ieee->ssid_set = 1; } else{ ieee->ssid_set = 0; ieee->current_network.ssid[0] = '\0'; ieee->current_network.ssid_len = 0; } spin_unlock_irqrestore(&ieee->lock, flags); if (proto_started) ieee80211_start_protocol(ieee); out: up(&ieee->wx_sem); return ret; } int ieee80211_wx_get_mode(struct ieee80211_device *ieee, struct iw_request_info *a, union iwreq_data *wrqu, char *b) { wrqu->mode = ieee->iw_mode; return 0; } int ieee80211_wx_set_rawtx(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int *parms = (int *)extra; int enable = (parms[0] > 0); short prev = ieee->raw_tx; down(&ieee->wx_sem); if(enable) ieee->raw_tx = 1; else ieee->raw_tx = 0; printk(KERN_INFO"raw TX is %s\n", ieee->raw_tx ? "enabled" : "disabled"); if(ieee->iw_mode == IW_MODE_MONITOR) { if(prev == 0 && ieee->raw_tx){ if (ieee->data_hard_resume) ieee->data_hard_resume(ieee->dev); netif_carrier_on(ieee->dev); } if(prev && ieee->raw_tx == 1) netif_carrier_off(ieee->dev); } up(&ieee->wx_sem); return 0; } int ieee80211_wx_get_name(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { strlcpy(wrqu->name, "802.11", IFNAMSIZ); if(ieee->modulation & IEEE80211_CCK_MODULATION){ strlcat(wrqu->name, "b", IFNAMSIZ); if(ieee->modulation & IEEE80211_OFDM_MODULATION) strlcat(wrqu->name, "/g", IFNAMSIZ); }else if(ieee->modulation & IEEE80211_OFDM_MODULATION) strlcat(wrqu->name, "g", IFNAMSIZ); if (ieee->mode & (IEEE_N_24G | IEEE_N_5G)) strlcat(wrqu->name, "/n", IFNAMSIZ); if((ieee->state == IEEE80211_LINKED) || (ieee->state == IEEE80211_LINKED_SCANNING)) strlcat(wrqu->name, " link", IFNAMSIZ); else if(ieee->state != IEEE80211_NOLINK) strlcat(wrqu->name, " .....", IFNAMSIZ); return 0; } /* this is mostly stolen from hostap */ int ieee80211_wx_set_power(struct ieee80211_device *ieee, struct 
iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret = 0; #if 1 if( (!ieee->sta_wake_up) || // (!ieee->ps_request_tx_ack) || (!ieee->enter_sleep_state) || (!ieee->ps_is_queue_empty)){ // printk("ERROR. PS mode is tryied to be use but driver missed a callback\n\n"); return -1; } #endif down(&ieee->wx_sem); if (wrqu->power.disabled){ ieee->ps = IEEE80211_PS_DISABLED; goto exit; } if (wrqu->power.flags & IW_POWER_TIMEOUT) { //ieee->ps_period = wrqu->power.value / 1000; ieee->ps_timeout = wrqu->power.value / 1000; } if (wrqu->power.flags & IW_POWER_PERIOD) { //ieee->ps_timeout = wrqu->power.value / 1000; ieee->ps_period = wrqu->power.value / 1000; //wrq->value / 1024; } switch (wrqu->power.flags & IW_POWER_MODE) { case IW_POWER_UNICAST_R: ieee->ps = IEEE80211_PS_UNICAST; break; case IW_POWER_MULTICAST_R: ieee->ps = IEEE80211_PS_MBCAST; break; case IW_POWER_ALL_R: ieee->ps = IEEE80211_PS_UNICAST | IEEE80211_PS_MBCAST; break; case IW_POWER_ON: // ieee->ps = IEEE80211_PS_DISABLED; break; default: ret = -EINVAL; goto exit; } exit: up(&ieee->wx_sem); return ret; } /* this is stolen from hostap */ int ieee80211_wx_get_power(struct ieee80211_device *ieee, struct iw_request_info *info, union iwreq_data *wrqu, char *extra) { int ret =0; down(&ieee->wx_sem); if(ieee->ps == IEEE80211_PS_DISABLED){ wrqu->power.disabled = 1; goto exit; } wrqu->power.disabled = 0; if ((wrqu->power.flags & IW_POWER_TYPE) == IW_POWER_TIMEOUT) { wrqu->power.flags = IW_POWER_TIMEOUT; wrqu->power.value = ieee->ps_timeout * 1000; } else { // ret = -EOPNOTSUPP; // goto exit; wrqu->power.flags = IW_POWER_PERIOD; wrqu->power.value = ieee->ps_period * 1000; //ieee->current_network.dtim_period * ieee->current_network.beacon_interval * 1024; } if ((ieee->ps & (IEEE80211_PS_MBCAST | IEEE80211_PS_UNICAST)) == (IEEE80211_PS_MBCAST | IEEE80211_PS_UNICAST)) wrqu->power.flags |= IW_POWER_ALL_R; else if (ieee->ps & IEEE80211_PS_MBCAST) wrqu->power.flags |= IW_POWER_MULTICAST_R; else wrqu->power.flags 
|= IW_POWER_UNICAST_R; exit: up(&ieee->wx_sem); return ret; }
gpl-2.0
mrwargod/boeffla-kernel-slimversion-bacon
arch/arm/mach-msm/io.c
1468
17584
/* arch/arm/mach-msm/io.c * * MSM7K, QSD io support * * Copyright (C) 2007 Google, Inc. * Copyright (c) 2008-2013, The Linux Foundation. All rights reserved. * Author: Brian Swetland <swetland@google.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/export.h> #include <mach/hardware.h> #include <asm/page.h> #include <mach/msm_iomap.h> #include <mach/memory.h> #include <asm/mach/map.h> #include <linux/dma-mapping.h> #include <linux/of_fdt.h> #include <mach/board.h> #include "board-dt.h" #define MSM_CHIP_DEVICE(name, chip) { \ .virtual = (unsigned long) MSM_##name##_BASE, \ .pfn = __phys_to_pfn(chip##_##name##_PHYS), \ .length = chip##_##name##_SIZE, \ .type = MT_DEVICE, \ } #define MSM_DEVICE(name) MSM_CHIP_DEVICE(name, MSM) /* msm_shared_ram_phys default value of 0x00100000 is the most common value * and should work as-is for any target without stacked memory. 
*/ phys_addr_t msm_shared_ram_phys = 0x00100000; static void __init msm_map_io(struct map_desc *io_desc, int size) { int i; BUG_ON(!size); for (i = 0; i < size; i++) if (io_desc[i].virtual == (unsigned long)MSM_SHARED_RAM_BASE) io_desc[i].pfn = __phys_to_pfn(msm_shared_ram_phys); iotable_init(io_desc, size); } #if defined(CONFIG_ARCH_MSM7X01A) || defined(CONFIG_ARCH_MSM7X27) \ || defined(CONFIG_ARCH_MSM7X25) static struct map_desc msm_io_desc[] __initdata = { MSM_CHIP_DEVICE(VIC, MSM7XXX), MSM_CHIP_DEVICE(CSR, MSM7XXX), MSM_CHIP_DEVICE(TMR, MSM7XXX), MSM_CHIP_DEVICE(GPIO1, MSM7XXX), MSM_CHIP_DEVICE(GPIO2, MSM7XXX), MSM_CHIP_DEVICE(CLK_CTL, MSM7XXX), MSM_CHIP_DEVICE(AD5, MSM7XXX), #if defined(CONFIG_DEBUG_MSM_UART1) || defined(CONFIG_DEBUG_MSM_UART2) || \ defined(CONFIG_DEBUG_MSM_UART3) MSM_DEVICE(DEBUG_UART), #endif #ifdef CONFIG_CACHE_L2X0 { .virtual = (unsigned long) MSM_L2CC_BASE, .pfn = __phys_to_pfn(MSM7XXX_L2CC_PHYS), .length = MSM7XXX_L2CC_SIZE, .type = MT_DEVICE, }, #endif { .virtual = (unsigned long) MSM_SHARED_RAM_BASE, .length = MSM_SHARED_RAM_SIZE, .type = MT_DEVICE, }, }; void __init msm_map_common_io(void) { /*Peripheral port memory remap, nothing looks to be there for * cortex a5. */ #ifndef CONFIG_ARCH_MSM_CORTEX_A5 /* Make sure the peripheral register window is closed, since * we will use PTE flags (TEX[1]=1,B=0,C=1) to determine which * pages are peripheral interface or not. 
*/ asm("mcr p15, 0, %0, c15, c2, 4" : : "r" (0)); #endif msm_map_io(msm_io_desc, ARRAY_SIZE(msm_io_desc)); map_page_strongly_ordered(); } #endif #ifdef CONFIG_ARCH_QSD8X50 static struct map_desc qsd8x50_io_desc[] __initdata = { MSM_DEVICE(VIC), MSM_DEVICE(CSR), MSM_DEVICE(TMR), MSM_DEVICE(GPIO1), MSM_DEVICE(GPIO2), MSM_DEVICE(CLK_CTL), MSM_DEVICE(SIRC), MSM_DEVICE(SCPLL), MSM_DEVICE(AD5), MSM_DEVICE(TCSR), #if defined(CONFIG_DEBUG_MSM_UART1) || defined(CONFIG_DEBUG_MSM_UART2) || \ defined(CONFIG_DEBUG_MSM_UART3) MSM_DEVICE(DEBUG_UART), #endif { .virtual = (unsigned long) MSM_SHARED_RAM_BASE, .length = MSM_SHARED_RAM_SIZE, .type = MT_DEVICE, }, }; void __init msm_map_qsd8x50_io(void) { msm_map_io(qsd8x50_io_desc, ARRAY_SIZE(qsd8x50_io_desc)); } #endif /* CONFIG_ARCH_QSD8X50 */ #ifdef CONFIG_ARCH_MSM8X60 static struct map_desc msm8x60_io_desc[] __initdata = { MSM_DEVICE(QGIC_DIST), MSM_DEVICE(QGIC_CPU), MSM_DEVICE(TMR), MSM_DEVICE(TMR0), MSM_DEVICE(RPM_MPM), MSM_DEVICE(ACC), MSM_DEVICE(ACC0), MSM_DEVICE(ACC1), MSM_DEVICE(SAW0), MSM_DEVICE(SAW1), MSM_DEVICE(GCC), MSM_DEVICE(TLMM), MSM_DEVICE(SCPLL), MSM_DEVICE(RPM), MSM_DEVICE(CLK_CTL), MSM_DEVICE(MMSS_CLK_CTL), MSM_DEVICE(LPASS_CLK_CTL), MSM_DEVICE(TCSR), MSM_DEVICE(IMEM), MSM_DEVICE(HDMI), #ifdef CONFIG_DEBUG_MSM8660_UART MSM_DEVICE(DEBUG_UART), #endif MSM_DEVICE(SIC_NON_SECURE), { .virtual = (unsigned long) MSM_SHARED_RAM_BASE, .length = MSM_SHARED_RAM_SIZE, .type = MT_DEVICE, }, MSM_DEVICE(QFPROM), }; void __init msm_map_msm8x60_io(void) { msm_map_io(msm8x60_io_desc, ARRAY_SIZE(msm8x60_io_desc)); init_consistent_dma_size(14*SZ_1M); } #endif /* CONFIG_ARCH_MSM8X60 */ #ifdef CONFIG_ARCH_MSM8960 static struct map_desc msm8960_io_desc[] __initdata = { MSM_CHIP_DEVICE(QGIC_DIST, MSM8960), MSM_CHIP_DEVICE(QGIC_CPU, MSM8960), MSM_CHIP_DEVICE(ACC0, MSM8960), MSM_CHIP_DEVICE(ACC1, MSM8960), MSM_CHIP_DEVICE(TMR, MSM8960), MSM_CHIP_DEVICE(TMR0, MSM8960), MSM_CHIP_DEVICE(RPM_MPM, MSM8960), MSM_CHIP_DEVICE(CLK_CTL, MSM8960), 
MSM_CHIP_DEVICE(MMSS_CLK_CTL, MSM8960), MSM_CHIP_DEVICE(LPASS_CLK_CTL, MSM8960), MSM_CHIP_DEVICE(RPM, MSM8960), MSM_CHIP_DEVICE(TLMM, MSM8960), MSM_CHIP_DEVICE(HFPLL, MSM8960), MSM_CHIP_DEVICE(SAW0, MSM8960), MSM_CHIP_DEVICE(SAW1, MSM8960), MSM_CHIP_DEVICE(SAW_L2, MSM8960), MSM_CHIP_DEVICE(SIC_NON_SECURE, MSM8960), MSM_CHIP_DEVICE(APCS_GCC, MSM8960), MSM_CHIP_DEVICE(IMEM, MSM8960), MSM_CHIP_DEVICE(HDMI, MSM8960), { .virtual = (unsigned long) MSM_SHARED_RAM_BASE, .length = MSM_SHARED_RAM_SIZE, .type = MT_DEVICE, }, #ifdef CONFIG_DEBUG_MSM8960_UART MSM_DEVICE(DEBUG_UART), #endif MSM_CHIP_DEVICE(QFPROM, MSM8960), }; void __init msm_map_msm8960_io(void) { msm_map_io(msm8960_io_desc, ARRAY_SIZE(msm8960_io_desc)); } #endif /* CONFIG_ARCH_MSM8960 */ #ifdef CONFIG_ARCH_MSM8930 static struct map_desc msm8930_io_desc[] __initdata = { MSM_CHIP_DEVICE(QGIC_DIST, MSM8930), MSM_CHIP_DEVICE(QGIC_CPU, MSM8930), MSM_CHIP_DEVICE(ACC0, MSM8930), MSM_CHIP_DEVICE(ACC1, MSM8930), MSM_CHIP_DEVICE(TMR, MSM8930), MSM_CHIP_DEVICE(TMR0, MSM8930), MSM_CHIP_DEVICE(RPM_MPM, MSM8930), MSM_CHIP_DEVICE(CLK_CTL, MSM8930), MSM_CHIP_DEVICE(MMSS_CLK_CTL, MSM8930), MSM_CHIP_DEVICE(LPASS_CLK_CTL, MSM8930), MSM_CHIP_DEVICE(RPM, MSM8930), MSM_CHIP_DEVICE(TLMM, MSM8930), MSM_CHIP_DEVICE(HFPLL, MSM8930), MSM_CHIP_DEVICE(SAW0, MSM8930), MSM_CHIP_DEVICE(SAW1, MSM8930), MSM_CHIP_DEVICE(SAW_L2, MSM8930), MSM_CHIP_DEVICE(SIC_NON_SECURE, MSM8930), MSM_CHIP_DEVICE(APCS_GCC, MSM8930), MSM_CHIP_DEVICE(IMEM, MSM8930), MSM_CHIP_DEVICE(HDMI, MSM8930), { .virtual = (unsigned long) MSM_SHARED_RAM_BASE, .length = MSM_SHARED_RAM_SIZE, .type = MT_DEVICE, }, #ifdef CONFIG_DEBUG_MSM8930_UART MSM_DEVICE(DEBUG_UART), #endif MSM_CHIP_DEVICE(QFPROM, MSM8930), }; void __init msm_map_msm8930_io(void) { msm_map_io(msm8930_io_desc, ARRAY_SIZE(msm8930_io_desc)); } #endif /* CONFIG_ARCH_MSM8930 */ #ifdef CONFIG_ARCH_APQ8064 static struct map_desc apq8064_io_desc[] __initdata = { MSM_CHIP_DEVICE(QGIC_DIST, APQ8064), 
MSM_CHIP_DEVICE(QGIC_CPU, APQ8064), MSM_CHIP_DEVICE(TMR, APQ8064), MSM_CHIP_DEVICE(TMR0, APQ8064), MSM_CHIP_DEVICE(TLMM, APQ8064), MSM_CHIP_DEVICE(ACC0, APQ8064), MSM_CHIP_DEVICE(ACC1, APQ8064), MSM_CHIP_DEVICE(ACC2, APQ8064), MSM_CHIP_DEVICE(ACC3, APQ8064), MSM_CHIP_DEVICE(HFPLL, APQ8064), MSM_CHIP_DEVICE(CLK_CTL, APQ8064), MSM_CHIP_DEVICE(MMSS_CLK_CTL, APQ8064), MSM_CHIP_DEVICE(LPASS_CLK_CTL, APQ8064), MSM_CHIP_DEVICE(APCS_GCC, APQ8064), MSM_CHIP_DEVICE(RPM, APQ8064), MSM_CHIP_DEVICE(RPM_MPM, APQ8064), MSM_CHIP_DEVICE(SAW0, APQ8064), MSM_CHIP_DEVICE(SAW1, APQ8064), MSM_CHIP_DEVICE(SAW2, APQ8064), MSM_CHIP_DEVICE(SAW3, APQ8064), MSM_CHIP_DEVICE(SAW_L2, APQ8064), MSM_CHIP_DEVICE(IMEM, APQ8064), MSM_CHIP_DEVICE(HDMI, APQ8064), { .virtual = (unsigned long) MSM_SHARED_RAM_BASE, .length = MSM_SHARED_RAM_SIZE, .type = MT_DEVICE, }, MSM_CHIP_DEVICE(QFPROM, APQ8064), MSM_CHIP_DEVICE(SIC_NON_SECURE, APQ8064), #ifdef CONFIG_DEBUG_APQ8064_UART MSM_DEVICE(DEBUG_UART), #endif }; void __init msm_map_apq8064_io(void) { msm_map_io(apq8064_io_desc, ARRAY_SIZE(apq8064_io_desc)); } #endif /* CONFIG_ARCH_APQ8064 */ #ifdef CONFIG_ARCH_MSM8974 static struct map_desc msm_8974_io_desc[] __initdata = { MSM_CHIP_DEVICE(QGIC_DIST, MSM8974), MSM_CHIP_DEVICE(QGIC_CPU, MSM8974), MSM_CHIP_DEVICE(TLMM, MSM8974), MSM_CHIP_DEVICE(MPM2_PSHOLD, MSM8974), { .virtual = (unsigned long) MSM_SHARED_RAM_BASE, .length = MSM_SHARED_RAM_SIZE, .type = MT_DEVICE, }, #ifdef CONFIG_DEBUG_MSM8974_UART MSM_DEVICE(DEBUG_UART), #endif }; void __init msm_map_8974_io(void) { msm_shared_ram_phys = MSM8974_MSM_SHARED_RAM_PHYS; msm_map_io(msm_8974_io_desc, ARRAY_SIZE(msm_8974_io_desc)); of_scan_flat_dt(msm_scan_dt_map_imem, NULL); } #endif /* CONFIG_ARCH_MSM8974 */ #ifdef CONFIG_ARCH_APQ8084 static struct map_desc msm_8084_io_desc[] __initdata = { MSM_CHIP_DEVICE(QGIC_DIST, APQ8084), MSM_CHIP_DEVICE(QGIC_CPU, APQ8084), MSM_CHIP_DEVICE(TLMM, APQ8084), { .virtual = (unsigned long) MSM_SHARED_RAM_BASE, .length = 
MSM_SHARED_RAM_SIZE, .type = MT_DEVICE, }, #ifdef CONFIG_DEBUG_APQ8084_UART MSM_DEVICE(DEBUG_UART), #endif }; void __init msm_map_8084_io(void) { msm_shared_ram_phys = APQ8084_SHARED_RAM_PHYS; msm_map_io(msm_8084_io_desc, ARRAY_SIZE(msm_8084_io_desc)); of_scan_flat_dt(msm_scan_dt_map_imem, NULL); } #endif /* CONFIG_ARCH_APQ8084 */ #ifdef CONFIG_ARCH_MSM7X30 static struct map_desc msm7x30_io_desc[] __initdata = { MSM_CHIP_DEVICE(VIC, MSM7X30), MSM_CHIP_DEVICE(CSR, MSM7X30), MSM_CHIP_DEVICE(TMR, MSM7X30), MSM_CHIP_DEVICE(GPIO1, MSM7X30), MSM_CHIP_DEVICE(GPIO2, MSM7X30), MSM_CHIP_DEVICE(CLK_CTL, MSM7X30), MSM_CHIP_DEVICE(CLK_CTL_SH2, MSM7X30), MSM_CHIP_DEVICE(AD5, MSM7X30), MSM_CHIP_DEVICE(ACC0, MSM7X30), MSM_CHIP_DEVICE(SAW0, MSM7X30), MSM_CHIP_DEVICE(APCS_GCC, MSM7X30), MSM_CHIP_DEVICE(TCSR, MSM7X30), #if defined(CONFIG_DEBUG_MSM_UART1) || defined(CONFIG_DEBUG_MSM_UART2) || \ defined(CONFIG_DEBUG_MSM_UART3) MSM_DEVICE(DEBUG_UART), #endif { .virtual = (unsigned long) MSM_SHARED_RAM_BASE, .length = MSM_SHARED_RAM_SIZE, .type = MT_DEVICE, }, }; void __init msm_map_msm7x30_io(void) { msm_map_io(msm7x30_io_desc, ARRAY_SIZE(msm7x30_io_desc)); } #endif /* CONFIG_ARCH_MSM7X30 */ #ifdef CONFIG_ARCH_FSM9XXX static struct map_desc fsm9xxx_io_desc[] __initdata = { MSM_DEVICE(VIC), MSM_DEVICE(SIRC), MSM_DEVICE(CSR), MSM_DEVICE(TLMM), MSM_DEVICE(TCSR), MSM_DEVICE(CLK_CTL), MSM_DEVICE(ACC), MSM_DEVICE(SAW), MSM_DEVICE(GCC), MSM_DEVICE(GRFC), MSM_DEVICE(QFP_FUSE), MSM_DEVICE(HH), #if defined(CONFIG_DEBUG_MSM_UART1) || defined(CONFIG_DEBUG_MSM_UART2) || \ defined(CONFIG_DEBUG_MSM_UART3) MSM_DEVICE(DEBUG_UART), #endif { .virtual = (unsigned long) MSM_SHARED_RAM_BASE, .length = MSM_SHARED_RAM_SIZE, .type = MT_DEVICE, }, }; void __init msm_map_fsm9xxx_io(void) { msm_map_io(fsm9xxx_io_desc, ARRAY_SIZE(fsm9xxx_io_desc)); } #endif /* CONFIG_ARCH_FSM9XXX */ #ifdef CONFIG_ARCH_FSM9900 static struct map_desc fsm9900_io_desc[] __initdata = { MSM_CHIP_DEVICE(QGIC_DIST, FSM9900), 
MSM_CHIP_DEVICE(TLMM, FSM9900), { .virtual = (unsigned long) MSM_SHARED_RAM_BASE, .length = MSM_SHARED_RAM_SIZE, .type = MT_DEVICE, }, #ifdef CONFIG_DEBUG_FSM9900_UART MSM_DEVICE(DEBUG_UART), #endif }; void __init msm_map_fsm9900_io(void) { msm_shared_ram_phys = FSM9900_SHARED_RAM_PHYS; msm_map_io(fsm9900_io_desc, ARRAY_SIZE(fsm9900_io_desc)); of_scan_flat_dt(msm_scan_dt_map_imem, NULL); } #endif /* CONFIG_ARCH_FSM9900 */ #ifdef CONFIG_ARCH_MSM9615 static struct map_desc msm9615_io_desc[] __initdata = { MSM_CHIP_DEVICE(QGIC_DIST, MSM9615), MSM_CHIP_DEVICE(QGIC_CPU, MSM9615), MSM_CHIP_DEVICE(ACC0, MSM9615), MSM_CHIP_DEVICE(TMR, MSM9615), MSM_CHIP_DEVICE(TLMM, MSM9615), MSM_CHIP_DEVICE(SAW0, MSM9615), MSM_CHIP_DEVICE(APCS_GCC, MSM9615), MSM_CHIP_DEVICE(TCSR, MSM9615), MSM_CHIP_DEVICE(L2CC, MSM9615), MSM_CHIP_DEVICE(CLK_CTL, MSM9615), MSM_CHIP_DEVICE(LPASS_CLK_CTL, MSM9615), MSM_CHIP_DEVICE(RPM, MSM9615), MSM_CHIP_DEVICE(RPM_MPM, MSM9615), MSM_CHIP_DEVICE(APCS_GLB, MSM9615), MSM_CHIP_DEVICE(IMEM, MSM9615), { .virtual = (unsigned long) MSM_SHARED_RAM_BASE, .length = MSM_SHARED_RAM_SIZE, .type = MT_DEVICE, }, MSM_CHIP_DEVICE(QFPROM, MSM9615), }; void __init msm_map_msm9615_io(void) { msm_map_io(msm9615_io_desc, ARRAY_SIZE(msm9615_io_desc)); } #endif /* CONFIG_ARCH_MSM9615 */ #ifdef CONFIG_ARCH_MSM8625 static struct map_desc msm8625_io_desc[] __initdata = { MSM_CHIP_DEVICE(CSR, MSM7XXX), MSM_CHIP_DEVICE(GPIO1, MSM7XXX), MSM_CHIP_DEVICE(GPIO2, MSM7XXX), MSM_CHIP_DEVICE(QGIC_DIST, MSM8625), MSM_CHIP_DEVICE(QGIC_CPU, MSM8625), MSM_CHIP_DEVICE(TMR, MSM8625), MSM_CHIP_DEVICE(TMR0, MSM8625), MSM_CHIP_DEVICE(SCU, MSM8625), MSM_CHIP_DEVICE(CFG_CTL, MSM8625), MSM_CHIP_DEVICE(CLK_CTL, MSM8625), MSM_CHIP_DEVICE(SAW0, MSM8625), MSM_CHIP_DEVICE(SAW1, MSM8625), MSM_CHIP_DEVICE(SAW2, MSM8625), MSM_CHIP_DEVICE(SAW3, MSM8625), MSM_CHIP_DEVICE(AD5, MSM7XXX), #if defined(CONFIG_DEBUG_MSM_UART1) || defined(CONFIG_DEBUG_MSM_UART2) || \ defined(CONFIG_DEBUG_MSM_UART3) MSM_DEVICE(DEBUG_UART), 
#endif #ifdef CONFIG_CACHE_L2X0 { .virtual = (unsigned long) MSM_L2CC_BASE, .pfn = __phys_to_pfn(MSM7XXX_L2CC_PHYS), .length = MSM7XXX_L2CC_SIZE, .type = MT_DEVICE, }, #endif { .virtual = (unsigned long) MSM_SHARED_RAM_BASE, .length = MSM_SHARED_RAM_SIZE, .type = MT_DEVICE, }, }; void __init msm_map_msm8625_io(void) { msm_map_io(msm8625_io_desc, ARRAY_SIZE(msm8625_io_desc)); map_page_strongly_ordered(); } #else void __init msm_map_msm8625_io(void) { return; } #endif /* CONFIG_ARCH_MSM8625 */ #ifdef CONFIG_ARCH_MSM9625 static struct map_desc msm9625_io_desc[] __initdata = { MSM_CHIP_DEVICE(QGIC_DIST, MSM9625), MSM_CHIP_DEVICE(QGIC_CPU, MSM9625), MSM_CHIP_DEVICE(TLMM, MSM9625), MSM_CHIP_DEVICE(MPM2_PSHOLD, MSM9625), MSM_CHIP_DEVICE(TMR, MSM9625), { .virtual = (unsigned long) MSM_SHARED_RAM_BASE, .length = MSM_SHARED_RAM_SIZE, .type = MT_DEVICE, }, #ifdef CONFIG_DEBUG_MSM9625_UART MSM_DEVICE(DEBUG_UART), #endif }; void __init msm_map_msm9625_io(void) { msm_shared_ram_phys = MSM9625_SHARED_RAM_PHYS; msm_map_io(msm9625_io_desc, ARRAY_SIZE(msm9625_io_desc)); of_scan_flat_dt(msm_scan_dt_map_imem, NULL); } #endif /* CONFIG_ARCH_MSM9625 */ #ifdef CONFIG_ARCH_MSMKRYPTON static struct map_desc msmkrypton_io_desc[] __initdata = { MSM_CHIP_DEVICE(TLMM, MSMKRYPTON), MSM_CHIP_DEVICE(MPM2_PSHOLD, MSMKRYPTON), { .virtual = (unsigned long) MSM_SHARED_RAM_BASE, .length = MSM_SHARED_RAM_SIZE, .type = MT_DEVICE, }, }; void __init msm_map_msmkrypton_io(void) { msm_shared_ram_phys = MSMKRYPTON_SHARED_RAM_PHYS; msm_map_io(msmkrypton_io_desc, ARRAY_SIZE(msmkrypton_io_desc)); of_scan_flat_dt(msm_scan_dt_map_imem, NULL); } #endif /* CONFIG_ARCH_MSMKRYPTON */ #ifdef CONFIG_ARCH_MPQ8092 static struct map_desc mpq8092_io_desc[] __initdata = { MSM_CHIP_DEVICE(QGIC_DIST, MPQ8092), MSM_CHIP_DEVICE(QGIC_CPU, MPQ8092), MSM_CHIP_DEVICE(TLMM, MPQ8092), { .virtual = (unsigned long) MSM_SHARED_RAM_BASE, .length = MSM_SHARED_RAM_SIZE, .type = MT_DEVICE, }, #ifdef CONFIG_DEBUG_MPQ8092_UART 
MSM_DEVICE(DEBUG_UART), #endif }; void __init msm_map_mpq8092_io(void) { msm_shared_ram_phys = MPQ8092_MSM_SHARED_RAM_PHYS; msm_map_io(mpq8092_io_desc, ARRAY_SIZE(mpq8092_io_desc)); of_scan_flat_dt(msm_scan_dt_map_imem, NULL); } #endif /* CONFIG_ARCH_MPQ8092 */ #ifdef CONFIG_ARCH_MSM8226 static struct map_desc msm_8226_io_desc[] __initdata = { MSM_CHIP_DEVICE(QGIC_DIST, MSM8226), MSM_CHIP_DEVICE(QGIC_CPU, MSM8226), MSM_CHIP_DEVICE(APCS_GCC, MSM8226), MSM_CHIP_DEVICE(TLMM, MSM8226), MSM_CHIP_DEVICE(MPM2_PSHOLD, MSM8226), { .virtual = (unsigned long) MSM_SHARED_RAM_BASE, .length = MSM_SHARED_RAM_SIZE, .type = MT_DEVICE, }, #ifdef CONFIG_DEBUG_MSM8226_UART MSM_DEVICE(DEBUG_UART), #endif }; void __init msm_map_msm8226_io(void) { msm_shared_ram_phys = MSM8226_MSM_SHARED_RAM_PHYS; msm_map_io(msm_8226_io_desc, ARRAY_SIZE(msm_8226_io_desc)); of_scan_flat_dt(msm_scan_dt_map_imem, NULL); } #endif /* CONFIG_ARCH_MSM8226 */ #ifdef CONFIG_ARCH_MSM8610 static struct map_desc msm8610_io_desc[] __initdata = { MSM_CHIP_DEVICE(QGIC_DIST, MSM8610), MSM_CHIP_DEVICE(QGIC_CPU, MSM8610), MSM_CHIP_DEVICE(APCS_GCC, MSM8610), MSM_CHIP_DEVICE(TLMM, MSM8610), MSM_CHIP_DEVICE(MPM2_PSHOLD, MSM8610), { .virtual = (unsigned long) MSM_SHARED_RAM_BASE, .length = MSM_SHARED_RAM_SIZE, .type = MT_DEVICE, }, }; void __init msm_map_msm8610_io(void) { msm_shared_ram_phys = MSM8610_MSM_SHARED_RAM_PHYS; msm_map_io(msm8610_io_desc, ARRAY_SIZE(msm8610_io_desc)); of_scan_flat_dt(msm_scan_dt_map_imem, NULL); } #endif /* CONFIG_ARCH_MSM8610 */ #ifdef CONFIG_ARCH_MSMSAMARIUM static struct map_desc msmsamarium_io_desc[] __initdata = { MSM_CHIP_DEVICE(QGIC_DIST, MSMSAMARIUM), MSM_CHIP_DEVICE(TLMM, MSMSAMARIUM), MSM_CHIP_DEVICE(MPM2_PSHOLD, MSMSAMARIUM), { .virtual = (unsigned long) MSM_SHARED_RAM_BASE, .length = MSM_SHARED_RAM_SIZE, .type = MT_DEVICE, }, #if defined(CONFIG_DEBUG_MSMSAMARIUM_UART) || defined(CONFIG_DEBUG_MSM8974_UART) MSM_DEVICE(DEBUG_UART), #endif }; void __init msm_map_msmsamarium_io(void) { 
msm_shared_ram_phys = MSMSAMARIUM_SHARED_RAM_PHYS; msm_map_io(msmsamarium_io_desc, ARRAY_SIZE(msmsamarium_io_desc)); of_scan_flat_dt(msm_scan_dt_map_imem, NULL); } #endif /* CONFIG_ARCH_MSMSAMARIUM */
gpl-2.0
gchild320/shamu
net/netfilter/xt_set.c
2236
14170
/* Copyright (C) 2000-2002 Joakim Axelsson <gozem@linux.nu> * Patrick Schaaf <bof@bof.de> * Martin Josefsson <gandalf@wlug.westbo.se> * Copyright (C) 2003-2013 Jozsef Kadlecsik <kadlec@blackhole.kfki.hu> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ /* Kernel module which implements the set match and SET target * for netfilter/iptables. */ #include <linux/module.h> #include <linux/skbuff.h> #include <linux/netfilter/x_tables.h> #include <linux/netfilter/xt_set.h> #include <linux/netfilter/ipset/ip_set_timeout.h> MODULE_LICENSE("GPL"); MODULE_AUTHOR("Jozsef Kadlecsik <kadlec@blackhole.kfki.hu>"); MODULE_DESCRIPTION("Xtables: IP set match and target module"); MODULE_ALIAS("xt_SET"); MODULE_ALIAS("ipt_set"); MODULE_ALIAS("ip6t_set"); MODULE_ALIAS("ipt_SET"); MODULE_ALIAS("ip6t_SET"); static inline int match_set(ip_set_id_t index, const struct sk_buff *skb, const struct xt_action_param *par, struct ip_set_adt_opt *opt, int inv) { if (ip_set_test(index, skb, par, opt)) inv = !inv; return inv; } #define ADT_OPT(n, f, d, fs, cfs, t) \ struct ip_set_adt_opt n = { \ .family = f, \ .dim = d, \ .flags = fs, \ .cmdflags = cfs, \ .ext.timeout = t, \ } /* Revision 0 interface: backward compatible with netfilter/iptables */ static bool set_match_v0(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_set_info_match_v0 *info = par->matchinfo; ADT_OPT(opt, par->family, info->match_set.u.compat.dim, info->match_set.u.compat.flags, 0, UINT_MAX); return match_set(info->match_set.index, skb, par, &opt, info->match_set.u.compat.flags & IPSET_INV_MATCH); } static void compat_flags(struct xt_set_info_v0 *info) { u_int8_t i; /* Fill out compatibility data according to enum ip_set_kopt */ info->u.compat.dim = IPSET_DIM_ZERO; if (info->u.flags[0] & IPSET_MATCH_INV) info->u.compat.flags |= IPSET_INV_MATCH; for (i = 0; i < 
IPSET_DIM_MAX-1 && info->u.flags[i]; i++) { info->u.compat.dim++; if (info->u.flags[i] & IPSET_SRC) info->u.compat.flags |= (1<<info->u.compat.dim); } } static int set_match_v0_checkentry(const struct xt_mtchk_param *par) { struct xt_set_info_match_v0 *info = par->matchinfo; ip_set_id_t index; index = ip_set_nfnl_get_byindex(info->match_set.index); if (index == IPSET_INVALID_ID) { pr_warning("Cannot find set indentified by id %u to match\n", info->match_set.index); return -ENOENT; } if (info->match_set.u.flags[IPSET_DIM_MAX-1] != 0) { pr_warning("Protocol error: set match dimension " "is over the limit!\n"); ip_set_nfnl_put(info->match_set.index); return -ERANGE; } /* Fill out compatibility data */ compat_flags(&info->match_set); return 0; } static void set_match_v0_destroy(const struct xt_mtdtor_param *par) { struct xt_set_info_match_v0 *info = par->matchinfo; ip_set_nfnl_put(info->match_set.index); } static unsigned int set_target_v0(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_set_info_target_v0 *info = par->targinfo; ADT_OPT(add_opt, par->family, info->add_set.u.compat.dim, info->add_set.u.compat.flags, 0, UINT_MAX); ADT_OPT(del_opt, par->family, info->del_set.u.compat.dim, info->del_set.u.compat.flags, 0, UINT_MAX); if (info->add_set.index != IPSET_INVALID_ID) ip_set_add(info->add_set.index, skb, par, &add_opt); if (info->del_set.index != IPSET_INVALID_ID) ip_set_del(info->del_set.index, skb, par, &del_opt); return XT_CONTINUE; } static int set_target_v0_checkentry(const struct xt_tgchk_param *par) { struct xt_set_info_target_v0 *info = par->targinfo; ip_set_id_t index; if (info->add_set.index != IPSET_INVALID_ID) { index = ip_set_nfnl_get_byindex(info->add_set.index); if (index == IPSET_INVALID_ID) { pr_warning("Cannot find add_set index %u as target\n", info->add_set.index); return -ENOENT; } } if (info->del_set.index != IPSET_INVALID_ID) { index = ip_set_nfnl_get_byindex(info->del_set.index); if (index == IPSET_INVALID_ID) { 
pr_warning("Cannot find del_set index %u as target\n", info->del_set.index); if (info->add_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(info->add_set.index); return -ENOENT; } } if (info->add_set.u.flags[IPSET_DIM_MAX-1] != 0 || info->del_set.u.flags[IPSET_DIM_MAX-1] != 0) { pr_warning("Protocol error: SET target dimension " "is over the limit!\n"); if (info->add_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(info->add_set.index); if (info->del_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(info->del_set.index); return -ERANGE; } /* Fill out compatibility data */ compat_flags(&info->add_set); compat_flags(&info->del_set); return 0; } static void set_target_v0_destroy(const struct xt_tgdtor_param *par) { const struct xt_set_info_target_v0 *info = par->targinfo; if (info->add_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(info->add_set.index); if (info->del_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(info->del_set.index); } /* Revision 1 match and target */ static bool set_match_v1(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_set_info_match_v1 *info = par->matchinfo; ADT_OPT(opt, par->family, info->match_set.dim, info->match_set.flags, 0, UINT_MAX); if (opt.flags & IPSET_RETURN_NOMATCH) opt.cmdflags |= IPSET_FLAG_RETURN_NOMATCH; return match_set(info->match_set.index, skb, par, &opt, info->match_set.flags & IPSET_INV_MATCH); } static int set_match_v1_checkentry(const struct xt_mtchk_param *par) { struct xt_set_info_match_v1 *info = par->matchinfo; ip_set_id_t index; index = ip_set_nfnl_get_byindex(info->match_set.index); if (index == IPSET_INVALID_ID) { pr_warning("Cannot find set indentified by id %u to match\n", info->match_set.index); return -ENOENT; } if (info->match_set.dim > IPSET_DIM_MAX) { pr_warning("Protocol error: set match dimension " "is over the limit!\n"); ip_set_nfnl_put(info->match_set.index); return -ERANGE; } return 0; } static void set_match_v1_destroy(const struct xt_mtdtor_param *par) { struct 
xt_set_info_match_v1 *info = par->matchinfo; ip_set_nfnl_put(info->match_set.index); } static unsigned int set_target_v1(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_set_info_target_v1 *info = par->targinfo; ADT_OPT(add_opt, par->family, info->add_set.dim, info->add_set.flags, 0, UINT_MAX); ADT_OPT(del_opt, par->family, info->del_set.dim, info->del_set.flags, 0, UINT_MAX); if (info->add_set.index != IPSET_INVALID_ID) ip_set_add(info->add_set.index, skb, par, &add_opt); if (info->del_set.index != IPSET_INVALID_ID) ip_set_del(info->del_set.index, skb, par, &del_opt); return XT_CONTINUE; } static int set_target_v1_checkentry(const struct xt_tgchk_param *par) { const struct xt_set_info_target_v1 *info = par->targinfo; ip_set_id_t index; if (info->add_set.index != IPSET_INVALID_ID) { index = ip_set_nfnl_get_byindex(info->add_set.index); if (index == IPSET_INVALID_ID) { pr_warning("Cannot find add_set index %u as target\n", info->add_set.index); return -ENOENT; } } if (info->del_set.index != IPSET_INVALID_ID) { index = ip_set_nfnl_get_byindex(info->del_set.index); if (index == IPSET_INVALID_ID) { pr_warning("Cannot find del_set index %u as target\n", info->del_set.index); if (info->add_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(info->add_set.index); return -ENOENT; } } if (info->add_set.dim > IPSET_DIM_MAX || info->del_set.dim > IPSET_DIM_MAX) { pr_warning("Protocol error: SET target dimension " "is over the limit!\n"); if (info->add_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(info->add_set.index); if (info->del_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(info->del_set.index); return -ERANGE; } return 0; } static void set_target_v1_destroy(const struct xt_tgdtor_param *par) { const struct xt_set_info_target_v1 *info = par->targinfo; if (info->add_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(info->add_set.index); if (info->del_set.index != IPSET_INVALID_ID) ip_set_nfnl_put(info->del_set.index); } /* Revision 2 target */ static 
unsigned int set_target_v2(struct sk_buff *skb, const struct xt_action_param *par) { const struct xt_set_info_target_v2 *info = par->targinfo; ADT_OPT(add_opt, par->family, info->add_set.dim, info->add_set.flags, info->flags, info->timeout); ADT_OPT(del_opt, par->family, info->del_set.dim, info->del_set.flags, 0, UINT_MAX); /* Normalize to fit into jiffies */ if (add_opt.ext.timeout != IPSET_NO_TIMEOUT && add_opt.ext.timeout > UINT_MAX/MSEC_PER_SEC) add_opt.ext.timeout = UINT_MAX/MSEC_PER_SEC; if (info->add_set.index != IPSET_INVALID_ID) ip_set_add(info->add_set.index, skb, par, &add_opt); if (info->del_set.index != IPSET_INVALID_ID) ip_set_del(info->del_set.index, skb, par, &del_opt); return XT_CONTINUE; } #define set_target_v2_checkentry set_target_v1_checkentry #define set_target_v2_destroy set_target_v1_destroy /* Revision 3 match */ static bool match_counter(u64 counter, const struct ip_set_counter_match *info) { switch (info->op) { case IPSET_COUNTER_NONE: return true; case IPSET_COUNTER_EQ: return counter == info->value; case IPSET_COUNTER_NE: return counter != info->value; case IPSET_COUNTER_LT: return counter < info->value; case IPSET_COUNTER_GT: return counter > info->value; } return false; } static bool set_match_v3(const struct sk_buff *skb, struct xt_action_param *par) { const struct xt_set_info_match_v3 *info = par->matchinfo; ADT_OPT(opt, par->family, info->match_set.dim, info->match_set.flags, info->flags, UINT_MAX); int ret; if (info->packets.op != IPSET_COUNTER_NONE || info->bytes.op != IPSET_COUNTER_NONE) opt.cmdflags |= IPSET_FLAG_MATCH_COUNTERS; ret = match_set(info->match_set.index, skb, par, &opt, info->match_set.flags & IPSET_INV_MATCH); if (!(ret && opt.cmdflags & IPSET_FLAG_MATCH_COUNTERS)) return ret; if (!match_counter(opt.ext.packets, &info->packets)) return 0; return match_counter(opt.ext.bytes, &info->bytes); } #define set_match_v3_checkentry set_match_v1_checkentry #define set_match_v3_destroy set_match_v1_destroy static struct 
xt_match set_matches[] __read_mostly = { { .name = "set", .family = NFPROTO_IPV4, .revision = 0, .match = set_match_v0, .matchsize = sizeof(struct xt_set_info_match_v0), .checkentry = set_match_v0_checkentry, .destroy = set_match_v0_destroy, .me = THIS_MODULE }, { .name = "set", .family = NFPROTO_IPV4, .revision = 1, .match = set_match_v1, .matchsize = sizeof(struct xt_set_info_match_v1), .checkentry = set_match_v1_checkentry, .destroy = set_match_v1_destroy, .me = THIS_MODULE }, { .name = "set", .family = NFPROTO_IPV6, .revision = 1, .match = set_match_v1, .matchsize = sizeof(struct xt_set_info_match_v1), .checkentry = set_match_v1_checkentry, .destroy = set_match_v1_destroy, .me = THIS_MODULE }, /* --return-nomatch flag support */ { .name = "set", .family = NFPROTO_IPV4, .revision = 2, .match = set_match_v1, .matchsize = sizeof(struct xt_set_info_match_v1), .checkentry = set_match_v1_checkentry, .destroy = set_match_v1_destroy, .me = THIS_MODULE }, { .name = "set", .family = NFPROTO_IPV6, .revision = 2, .match = set_match_v1, .matchsize = sizeof(struct xt_set_info_match_v1), .checkentry = set_match_v1_checkentry, .destroy = set_match_v1_destroy, .me = THIS_MODULE }, /* counters support: update, match */ { .name = "set", .family = NFPROTO_IPV4, .revision = 3, .match = set_match_v3, .matchsize = sizeof(struct xt_set_info_match_v3), .checkentry = set_match_v3_checkentry, .destroy = set_match_v3_destroy, .me = THIS_MODULE }, { .name = "set", .family = NFPROTO_IPV6, .revision = 3, .match = set_match_v3, .matchsize = sizeof(struct xt_set_info_match_v3), .checkentry = set_match_v3_checkentry, .destroy = set_match_v3_destroy, .me = THIS_MODULE }, }; static struct xt_target set_targets[] __read_mostly = { { .name = "SET", .revision = 0, .family = NFPROTO_IPV4, .target = set_target_v0, .targetsize = sizeof(struct xt_set_info_target_v0), .checkentry = set_target_v0_checkentry, .destroy = set_target_v0_destroy, .me = THIS_MODULE }, { .name = "SET", .revision = 1, .family = 
NFPROTO_IPV4, .target = set_target_v1, .targetsize = sizeof(struct xt_set_info_target_v1), .checkentry = set_target_v1_checkentry, .destroy = set_target_v1_destroy, .me = THIS_MODULE }, { .name = "SET", .revision = 1, .family = NFPROTO_IPV6, .target = set_target_v1, .targetsize = sizeof(struct xt_set_info_target_v1), .checkentry = set_target_v1_checkentry, .destroy = set_target_v1_destroy, .me = THIS_MODULE }, /* --timeout and --exist flags support */ { .name = "SET", .revision = 2, .family = NFPROTO_IPV4, .target = set_target_v2, .targetsize = sizeof(struct xt_set_info_target_v2), .checkentry = set_target_v2_checkentry, .destroy = set_target_v2_destroy, .me = THIS_MODULE }, { .name = "SET", .revision = 2, .family = NFPROTO_IPV6, .target = set_target_v2, .targetsize = sizeof(struct xt_set_info_target_v2), .checkentry = set_target_v2_checkentry, .destroy = set_target_v2_destroy, .me = THIS_MODULE }, }; static int __init xt_set_init(void) { int ret = xt_register_matches(set_matches, ARRAY_SIZE(set_matches)); if (!ret) { ret = xt_register_targets(set_targets, ARRAY_SIZE(set_targets)); if (ret) xt_unregister_matches(set_matches, ARRAY_SIZE(set_matches)); } return ret; } static void __exit xt_set_fini(void) { xt_unregister_matches(set_matches, ARRAY_SIZE(set_matches)); xt_unregister_targets(set_targets, ARRAY_SIZE(set_targets)); } module_init(xt_set_init); module_exit(xt_set_fini);
gpl-2.0
renaudallard/kernel-GB-GTI9000
sound/isa/sb/jazz16.c
4028
10746
/* * jazz16.c - driver for Media Vision Jazz16 based soundcards. * Copyright (C) 2009 Krzysztof Helt <krzysztof.h1@wp.pl> * Based on patches posted by Rask Ingemann Lambertsen and Rene Herman. * Based on OSS Sound Blaster driver. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/io.h> #include <linux/delay.h> #include <asm/dma.h> #include <linux/isa.h> #include <sound/core.h> #include <sound/mpu401.h> #include <sound/opl3.h> #include <sound/sb.h> #define SNDRV_LEGACY_FIND_FREE_IRQ #define SNDRV_LEGACY_FIND_FREE_DMA #include <sound/initval.h> #define PFX "jazz16: " MODULE_DESCRIPTION("Media Vision Jazz16"); MODULE_SUPPORTED_DEVICE("{{Media Vision ??? }," "{RTL,RTL3000}}"); MODULE_AUTHOR("Krzysztof Helt <krzysztof.h1@wp.pl>"); MODULE_LICENSE("GPL"); static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; /* Index 0-MAX */ static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; /* ID for this card */ static int enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE; /* Enable this card */ static unsigned long port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; static unsigned long mpu_port[SNDRV_CARDS] = SNDRV_DEFAULT_PORT; static int irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; static int mpu_irq[SNDRV_CARDS] = SNDRV_DEFAULT_IRQ; static int dma8[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; static int dma16[SNDRV_CARDS] = SNDRV_DEFAULT_DMA; module_param_array(index, int, NULL, 0444); MODULE_PARM_DESC(index, "Index value for Media Vision Jazz16 based soundcard."); module_param_array(id, charp, NULL, 0444); MODULE_PARM_DESC(id, "ID string for Media Vision Jazz16 based soundcard."); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(enable, "Enable Media Vision Jazz16 based soundcard."); module_param_array(port, long, NULL, 0444); MODULE_PARM_DESC(port, "Port # for jazz16 driver."); module_param_array(mpu_port, long, NULL, 
0444); MODULE_PARM_DESC(mpu_port, "MPU-401 port # for jazz16 driver."); module_param_array(irq, int, NULL, 0444); MODULE_PARM_DESC(irq, "IRQ # for jazz16 driver."); module_param_array(mpu_irq, int, NULL, 0444); MODULE_PARM_DESC(mpu_irq, "MPU-401 IRQ # for jazz16 driver."); module_param_array(dma8, int, NULL, 0444); MODULE_PARM_DESC(dma8, "DMA8 # for jazz16 driver."); module_param_array(dma16, int, NULL, 0444); MODULE_PARM_DESC(dma16, "DMA16 # for jazz16 driver."); #define SB_JAZZ16_WAKEUP 0xaf #define SB_JAZZ16_SET_PORTS 0x50 #define SB_DSP_GET_JAZZ_BRD_REV 0xfa #define SB_JAZZ16_SET_DMAINTR 0xfb #define SB_DSP_GET_JAZZ_MODEL 0xfe struct snd_card_jazz16 { struct snd_sb *chip; }; static irqreturn_t jazz16_interrupt(int irq, void *chip) { return snd_sb8dsp_interrupt(chip); } static int __devinit jazz16_configure_ports(unsigned long port, unsigned long mpu_port, int idx) { unsigned char val; if (!request_region(0x201, 1, "jazz16 config")) { snd_printk(KERN_ERR "config port region is already in use.\n"); return -EBUSY; } outb(SB_JAZZ16_WAKEUP - idx, 0x201); udelay(100); outb(SB_JAZZ16_SET_PORTS + idx, 0x201); udelay(100); val = port & 0x70; val |= (mpu_port & 0x30) >> 4; outb(val, 0x201); release_region(0x201, 1); return 0; } static int __devinit jazz16_detect_board(unsigned long port, unsigned long mpu_port) { int err; int val; struct snd_sb chip; if (!request_region(port, 0x10, "jazz16")) { snd_printk(KERN_ERR "I/O port region is already in use.\n"); return -EBUSY; } /* just to call snd_sbdsp_command/reset/get_byte() */ chip.port = port; err = snd_sbdsp_reset(&chip); if (err < 0) for (val = 0; val < 4; val++) { err = jazz16_configure_ports(port, mpu_port, val); if (err < 0) break; err = snd_sbdsp_reset(&chip); if (!err) break; } if (err < 0) { err = -ENODEV; goto err_unmap; } if (!snd_sbdsp_command(&chip, SB_DSP_GET_JAZZ_BRD_REV)) { err = -EBUSY; goto err_unmap; } val = snd_sbdsp_get_byte(&chip); if (val >= 0x30) snd_sbdsp_get_byte(&chip); if ((val & 0xf0) != 0x10) { 
err = -ENODEV; goto err_unmap; } if (!snd_sbdsp_command(&chip, SB_DSP_GET_JAZZ_MODEL)) { err = -EBUSY; goto err_unmap; } snd_sbdsp_get_byte(&chip); err = snd_sbdsp_get_byte(&chip); snd_printd("Media Vision Jazz16 board detected: rev 0x%x, model 0x%x\n", val, err); err = 0; err_unmap: release_region(port, 0x10); return err; } static int __devinit jazz16_configure_board(struct snd_sb *chip, int mpu_irq) { static unsigned char jazz_irq_bits[] = { 0, 0, 2, 3, 0, 1, 0, 4, 0, 2, 5, 0, 0, 0, 0, 6 }; static unsigned char jazz_dma_bits[] = { 0, 1, 0, 2, 0, 3, 0, 4 }; if (jazz_dma_bits[chip->dma8] == 0 || jazz_dma_bits[chip->dma16] == 0 || jazz_irq_bits[chip->irq] == 0) return -EINVAL; if (!snd_sbdsp_command(chip, SB_JAZZ16_SET_DMAINTR)) return -EBUSY; if (!snd_sbdsp_command(chip, jazz_dma_bits[chip->dma8] | (jazz_dma_bits[chip->dma16] << 4))) return -EBUSY; if (!snd_sbdsp_command(chip, jazz_irq_bits[chip->irq] | (jazz_irq_bits[mpu_irq] << 4))) return -EBUSY; return 0; } static int __devinit snd_jazz16_match(struct device *devptr, unsigned int dev) { if (!enable[dev]) return 0; if (port[dev] == SNDRV_AUTO_PORT) { snd_printk(KERN_ERR "please specify port\n"); return 0; } else if (port[dev] == 0x200 || (port[dev] & ~0x270)) { snd_printk(KERN_ERR "incorrect port specified\n"); return 0; } if (dma8[dev] != SNDRV_AUTO_DMA && dma8[dev] != 1 && dma8[dev] != 3) { snd_printk(KERN_ERR "dma8 must be 1 or 3\n"); return 0; } if (dma16[dev] != SNDRV_AUTO_DMA && dma16[dev] != 5 && dma16[dev] != 7) { snd_printk(KERN_ERR "dma16 must be 5 or 7\n"); return 0; } if (mpu_port[dev] != SNDRV_AUTO_PORT && (mpu_port[dev] & ~0x030) != 0x300) { snd_printk(KERN_ERR "incorrect mpu_port specified\n"); return 0; } if (mpu_irq[dev] != SNDRV_AUTO_DMA && mpu_irq[dev] != 2 && mpu_irq[dev] != 3 && mpu_irq[dev] != 5 && mpu_irq[dev] != 7) { snd_printk(KERN_ERR "mpu_irq must be 2, 3, 5 or 7\n"); return 0; } return 1; } static int __devinit snd_jazz16_probe(struct device *devptr, unsigned int dev) { struct 
snd_card *card; struct snd_card_jazz16 *jazz16; struct snd_sb *chip; struct snd_opl3 *opl3; static int possible_irqs[] = {2, 3, 5, 7, 9, 10, 15, -1}; static int possible_dmas8[] = {1, 3, -1}; static int possible_dmas16[] = {5, 7, -1}; int err, xirq, xdma8, xdma16, xmpu_port, xmpu_irq; err = snd_card_create(index[dev], id[dev], THIS_MODULE, sizeof(struct snd_card_jazz16), &card); if (err < 0) return err; jazz16 = card->private_data; xirq = irq[dev]; if (xirq == SNDRV_AUTO_IRQ) { xirq = snd_legacy_find_free_irq(possible_irqs); if (xirq < 0) { snd_printk(KERN_ERR "unable to find a free IRQ\n"); err = -EBUSY; goto err_free; } } xdma8 = dma8[dev]; if (xdma8 == SNDRV_AUTO_DMA) { xdma8 = snd_legacy_find_free_dma(possible_dmas8); if (xdma8 < 0) { snd_printk(KERN_ERR "unable to find a free DMA8\n"); err = -EBUSY; goto err_free; } } xdma16 = dma16[dev]; if (xdma16 == SNDRV_AUTO_DMA) { xdma16 = snd_legacy_find_free_dma(possible_dmas16); if (xdma16 < 0) { snd_printk(KERN_ERR "unable to find a free DMA16\n"); err = -EBUSY; goto err_free; } } xmpu_port = mpu_port[dev]; if (xmpu_port == SNDRV_AUTO_PORT) xmpu_port = 0; err = jazz16_detect_board(port[dev], xmpu_port); if (err < 0) { printk(KERN_ERR "Media Vision Jazz16 board not detected\n"); goto err_free; } err = snd_sbdsp_create(card, port[dev], irq[dev], jazz16_interrupt, dma8[dev], dma16[dev], SB_HW_JAZZ16, &chip); if (err < 0) goto err_free; xmpu_irq = mpu_irq[dev]; if (xmpu_irq == SNDRV_AUTO_IRQ || mpu_port[dev] == SNDRV_AUTO_PORT) xmpu_irq = 0; err = jazz16_configure_board(chip, xmpu_irq); if (err < 0) { printk(KERN_ERR "Media Vision Jazz16 configuration failed\n"); goto err_free; } jazz16->chip = chip; strcpy(card->driver, "jazz16"); strcpy(card->shortname, "Media Vision Jazz16"); sprintf(card->longname, "Media Vision Jazz16 at 0x%lx, irq %d, dma8 %d, dma16 %d", port[dev], xirq, xdma8, xdma16); err = snd_sb8dsp_pcm(chip, 0, NULL); if (err < 0) goto err_free; err = snd_sbmixer_new(chip); if (err < 0) goto err_free; err = 
snd_opl3_create(card, chip->port, chip->port + 2, OPL3_HW_AUTO, 1, &opl3); if (err < 0) snd_printk(KERN_WARNING "no OPL device at 0x%lx-0x%lx\n", chip->port, chip->port + 2); else { err = snd_opl3_hwdep_new(opl3, 0, 1, NULL); if (err < 0) goto err_free; } if (mpu_port[dev] > 0 && mpu_port[dev] != SNDRV_AUTO_PORT) { if (mpu_irq[dev] == SNDRV_AUTO_IRQ) mpu_irq[dev] = -1; if (snd_mpu401_uart_new(card, 0, MPU401_HW_MPU401, mpu_port[dev], 0, mpu_irq[dev], mpu_irq[dev] >= 0 ? IRQF_DISABLED : 0, NULL) < 0) snd_printk(KERN_ERR "no MPU-401 device at 0x%lx\n", mpu_port[dev]); } snd_card_set_dev(card, devptr); err = snd_card_register(card); if (err < 0) goto err_free; dev_set_drvdata(devptr, card); return 0; err_free: snd_card_free(card); return err; } static int __devexit snd_jazz16_remove(struct device *devptr, unsigned int dev) { struct snd_card *card = dev_get_drvdata(devptr); dev_set_drvdata(devptr, NULL); snd_card_free(card); return 0; } #ifdef CONFIG_PM static int snd_jazz16_suspend(struct device *pdev, unsigned int n, pm_message_t state) { struct snd_card *card = dev_get_drvdata(pdev); struct snd_card_jazz16 *acard = card->private_data; struct snd_sb *chip = acard->chip; snd_power_change_state(card, SNDRV_CTL_POWER_D3hot); snd_pcm_suspend_all(chip->pcm); snd_sbmixer_suspend(chip); return 0; } static int snd_jazz16_resume(struct device *pdev, unsigned int n) { struct snd_card *card = dev_get_drvdata(pdev); struct snd_card_jazz16 *acard = card->private_data; struct snd_sb *chip = acard->chip; snd_sbdsp_reset(chip); snd_sbmixer_resume(chip); snd_power_change_state(card, SNDRV_CTL_POWER_D0); return 0; } #endif static struct isa_driver snd_jazz16_driver = { .match = snd_jazz16_match, .probe = snd_jazz16_probe, .remove = __devexit_p(snd_jazz16_remove), #ifdef CONFIG_PM .suspend = snd_jazz16_suspend, .resume = snd_jazz16_resume, #endif .driver = { .name = "jazz16" }, }; static int __init alsa_card_jazz16_init(void) { return isa_register_driver(&snd_jazz16_driver, 
SNDRV_CARDS); } static void __exit alsa_card_jazz16_exit(void) { isa_unregister_driver(&snd_jazz16_driver); } module_init(alsa_card_jazz16_init) module_exit(alsa_card_jazz16_exit)
gpl-2.0
bio4554/ker.nl
drivers/clk/mxs/clk-ref.c
4028
3381
/*
 * Copyright 2012 Freescale Semiconductor, Inc.
 *
 * The code contained herein is licensed under the GNU General Public
 * License. You may obtain a copy of the GNU General Public License
 * Version 2 or later at the following locations:
 *
 * http://www.opensource.org/licenses/gpl-license.html
 * http://www.gnu.org/copyleft/gpl.html
 */

#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/err.h>
#include <linux/io.h>
#include <linux/slab.h>
#include "clk.h"

/**
 * struct clk_ref - mxs reference clock
 * @hw: clk_hw for the reference clock
 * @reg: register address
 * @idx: the index of the reference clock within the same register
 *
 * The mxs reference clock sources from pll.  Every 4 reference clocks
 * share one register space, and @idx is used to identify them.  Each
 * reference clock has a gate control and a fractional divider.  The
 * rate is calculated as pll rate * (18 / FRAC), where FRAC = 18 ~ 35.
 */
struct clk_ref {
	struct clk_hw hw;
	void __iomem *reg;	/* shared register; this clock's byte is at idx * 8 */
	u8 idx;			/* 0..3: which of the four clocks in @reg */
};

#define to_clk_ref(_hw) container_of(_hw, struct clk_ref, hw)

/*
 * Gate control: the top bit of this clock's byte (bit (idx+1)*8 - 1).
 * NOTE(review): writing it via reg + CLR / reg + SET presumably uses the
 * mxs self-clearing set/clear shadow registers declared in "clk.h" —
 * confirm against the SoC reference manual.
 */
static int clk_ref_enable(struct clk_hw *hw)
{
	struct clk_ref *ref = to_clk_ref(hw);

	/* Clearing the gate bit enables the clock. */
	writel_relaxed(1 << ((ref->idx + 1) * 8 - 1), ref->reg + CLR);

	return 0;
}

static void clk_ref_disable(struct clk_hw *hw)
{
	struct clk_ref *ref = to_clk_ref(hw);

	/* Setting the gate bit disables the clock. */
	writel_relaxed(1 << ((ref->idx + 1) * 8 - 1), ref->reg + SET);
}

/*
 * rate = parent_rate * 18 / FRAC, with FRAC read from the low 6 bits of
 * this clock's byte in the shared register.
 * NOTE(review): a raw FRAC field of 0 would make do_div() divide by
 * zero; the hardware reset value is presumably within 18..35 — confirm.
 */
static unsigned long clk_ref_recalc_rate(struct clk_hw *hw,
					 unsigned long parent_rate)
{
	struct clk_ref *ref = to_clk_ref(hw);
	u64 tmp = parent_rate;
	u8 frac = (readl_relaxed(ref->reg) >> (ref->idx * 8)) & 0x3f;

	tmp *= 18;
	do_div(tmp, frac);

	return tmp;
}

/*
 * Pick the achievable rate closest to @rate: compute
 * FRAC = round(parent * 18 / rate), clamp to the legal 18..35 range,
 * then return parent * 18 / FRAC.
 */
static long clk_ref_round_rate(struct clk_hw *hw, unsigned long rate,
			       unsigned long *prate)
{
	unsigned long parent_rate = *prate;
	u64 tmp = parent_rate;
	u8 frac;

	tmp = tmp * 18 + rate / 2;	/* + rate/2 for rounding to nearest */
	do_div(tmp, rate);
	frac = tmp;

	if (frac < 18)
		frac = 18;
	else if (frac > 35)
		frac = 35;

	tmp = parent_rate;
	tmp *= 18;
	do_div(tmp, frac);

	return tmp;
}

/*
 * Program the 6-bit FRAC field for this clock (same FRAC derivation as
 * clk_ref_round_rate).  The read-modify-write of the shared register is
 * serialized with the driver-wide mxs_lock spinlock.
 */
static int clk_ref_set_rate(struct clk_hw *hw, unsigned long rate,
			    unsigned long parent_rate)
{
	struct clk_ref *ref = to_clk_ref(hw);
	unsigned long flags;
	u64 tmp = parent_rate;
	u32 val;
	u8 frac, shift = ref->idx * 8;

	tmp = tmp * 18 + rate / 2;
	do_div(tmp, rate);
	frac = tmp;

	if (frac < 18)
		frac = 18;
	else if (frac > 35)
		frac = 35;

	spin_lock_irqsave(&mxs_lock, flags);

	val = readl_relaxed(ref->reg);
	val &= ~(0x3f << shift);
	val |= frac << shift;
	writel_relaxed(val, ref->reg);

	spin_unlock_irqrestore(&mxs_lock, flags);

	return 0;
}

static const struct clk_ops clk_ref_ops = {
	.enable		= clk_ref_enable,
	.disable	= clk_ref_disable,
	.recalc_rate	= clk_ref_recalc_rate,
	.round_rate	= clk_ref_round_rate,
	.set_rate	= clk_ref_set_rate,
};

/*
 * Allocate and register one mxs reference clock.
 * Returns the registered struct clk, or an ERR_PTR on failure; the
 * private clk_ref is freed if clk_register() fails.
 */
struct clk *mxs_clk_ref(const char *name, const char *parent_name,
			void __iomem *reg, u8 idx)
{
	struct clk_ref *ref;
	struct clk *clk;
	struct clk_init_data init;

	ref = kzalloc(sizeof(*ref), GFP_KERNEL);
	if (!ref)
		return ERR_PTR(-ENOMEM);

	init.name = name;
	init.ops = &clk_ref_ops;
	init.flags = 0;
	init.parent_names = (parent_name ? &parent_name : NULL);
	init.num_parents = (parent_name ? 1 : 0);

	ref->reg = reg;
	ref->idx = idx;
	ref->hw.init = &init;

	clk = clk_register(NULL, &ref->hw);
	if (IS_ERR(clk))
		kfree(ref);

	return clk;
}
gpl-2.0
CandyDevices/kernel_htc_msm8974
arch/um/kernel/trap.c
4284
6689
/* * Copyright (C) 2000 - 2007 Jeff Dike (jdike@{addtoit,linux.intel}.com) * Licensed under the GPL */ #include <linux/mm.h> #include <linux/sched.h> #include <linux/hardirq.h> #include <linux/module.h> #include <asm/current.h> #include <asm/pgtable.h> #include <asm/tlbflush.h> #include "arch.h" #include "as-layout.h" #include "kern_util.h" #include "os.h" #include "skas.h" /* * Note this is constrained to return 0, -EFAULT, -EACCESS, -ENOMEM by * segv(). */ int handle_page_fault(unsigned long address, unsigned long ip, int is_write, int is_user, int *code_out) { struct mm_struct *mm = current->mm; struct vm_area_struct *vma; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; int err = -EFAULT; *code_out = SEGV_MAPERR; /* * If the fault was during atomic operation, don't take the fault, just * fail. */ if (in_atomic()) goto out_nosemaphore; down_read(&mm->mmap_sem); vma = find_vma(mm, address); if (!vma) goto out; else if (vma->vm_start <= address) goto good_area; else if (!(vma->vm_flags & VM_GROWSDOWN)) goto out; else if (is_user && !ARCH_IS_STACKGROW(address)) goto out; else if (expand_stack(vma, address)) goto out; good_area: *code_out = SEGV_ACCERR; if (is_write && !(vma->vm_flags & VM_WRITE)) goto out; /* Don't require VM_READ|VM_EXEC for write faults! */ if (!is_write && !(vma->vm_flags & (VM_READ | VM_EXEC))) goto out; do { int fault; fault = handle_mm_fault(mm, vma, address, is_write ? 
FAULT_FLAG_WRITE : 0); if (unlikely(fault & VM_FAULT_ERROR)) { if (fault & VM_FAULT_OOM) { goto out_of_memory; } else if (fault & VM_FAULT_SIGBUS) { err = -EACCES; goto out; } BUG(); } if (fault & VM_FAULT_MAJOR) current->maj_flt++; else current->min_flt++; pgd = pgd_offset(mm, address); pud = pud_offset(pgd, address); pmd = pmd_offset(pud, address); pte = pte_offset_kernel(pmd, address); } while (!pte_present(*pte)); err = 0; /* * The below warning was added in place of * pte_mkyoung(); if (is_write) pte_mkdirty(); * If it's triggered, we'd see normally a hang here (a clean pte is * marked read-only to emulate the dirty bit). * However, the generic code can mark a PTE writable but clean on a * concurrent read fault, triggering this harmlessly. So comment it out. */ #if 0 WARN_ON(!pte_young(*pte) || (is_write && !pte_dirty(*pte))); #endif flush_tlb_page(vma, address); out: up_read(&mm->mmap_sem); out_nosemaphore: return err; out_of_memory: /* * We ran out of memory, call the OOM killer, and return the userspace * (which will retry the fault, or kill us if we got oom-killed). */ up_read(&mm->mmap_sem); pagefault_out_of_memory(); return 0; } EXPORT_SYMBOL(handle_page_fault); static void show_segv_info(struct uml_pt_regs *regs) { struct task_struct *tsk = current; struct faultinfo *fi = UPT_FAULTINFO(regs); if (!unhandled_signal(tsk, SIGSEGV)) return; if (!printk_ratelimit()) return; printk("%s%s[%d]: segfault at %lx ip %p sp %p error %x", task_pid_nr(tsk) > 1 ? 
KERN_INFO : KERN_EMERG, tsk->comm, task_pid_nr(tsk), FAULT_ADDRESS(*fi), (void *)UPT_IP(regs), (void *)UPT_SP(regs), fi->error_code); print_vma_addr(KERN_CONT " in ", UPT_IP(regs)); printk(KERN_CONT "\n"); } static void bad_segv(struct faultinfo fi, unsigned long ip) { struct siginfo si; si.si_signo = SIGSEGV; si.si_code = SEGV_ACCERR; si.si_addr = (void __user *) FAULT_ADDRESS(fi); current->thread.arch.faultinfo = fi; force_sig_info(SIGSEGV, &si, current); } void fatal_sigsegv(void) { force_sigsegv(SIGSEGV, current); do_signal(); /* * This is to tell gcc that we're not returning - do_signal * can, in general, return, but in this case, it's not, since * we just got a fatal SIGSEGV queued. */ os_dump_core(); } void segv_handler(int sig, struct uml_pt_regs *regs) { struct faultinfo * fi = UPT_FAULTINFO(regs); if (UPT_IS_USER(regs) && !SEGV_IS_FIXABLE(fi)) { show_segv_info(regs); bad_segv(*fi, UPT_IP(regs)); return; } segv(*fi, UPT_IP(regs), UPT_IS_USER(regs), regs); } /* * We give a *copy* of the faultinfo in the regs to segv. * This must be done, since nesting SEGVs could overwrite * the info in the regs. A pointer to the info then would * give us bad data! */ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user, struct uml_pt_regs *regs) { struct siginfo si; jmp_buf *catcher; int err; int is_write = FAULT_WRITE(fi); unsigned long address = FAULT_ADDRESS(fi); if (!is_user && (address >= start_vm) && (address < end_vm)) { flush_tlb_kernel_vm(); return 0; } else if (current->mm == NULL) { show_regs(container_of(regs, struct pt_regs, regs)); panic("Segfault with no mm"); } if (SEGV_IS_FIXABLE(&fi) || SEGV_MAYBE_FIXABLE(&fi)) err = handle_page_fault(address, ip, is_write, is_user, &si.si_code); else { err = -EFAULT; /* * A thread accessed NULL, we get a fault, but CR2 is invalid. * This code is used in __do_copy_from_user() of TT mode. 
* XXX tt mode is gone, so maybe this isn't needed any more */ address = 0; } catcher = current->thread.fault_catcher; if (!err) return 0; else if (catcher != NULL) { current->thread.fault_addr = (void *) address; UML_LONGJMP(catcher, 1); } else if (current->thread.fault_addr != NULL) panic("fault_addr set but no fault catcher"); else if (!is_user && arch_fixup(ip, regs)) return 0; if (!is_user) { show_regs(container_of(regs, struct pt_regs, regs)); panic("Kernel mode fault at addr 0x%lx, ip 0x%lx", address, ip); } show_segv_info(regs); if (err == -EACCES) { si.si_signo = SIGBUS; si.si_errno = 0; si.si_code = BUS_ADRERR; si.si_addr = (void __user *)address; current->thread.arch.faultinfo = fi; force_sig_info(SIGBUS, &si, current); } else { BUG_ON(err != -EFAULT); si.si_signo = SIGSEGV; si.si_addr = (void __user *) address; current->thread.arch.faultinfo = fi; force_sig_info(SIGSEGV, &si, current); } return 0; } void relay_signal(int sig, struct uml_pt_regs *regs) { if (!UPT_IS_USER(regs)) { if (sig == SIGBUS) printk(KERN_ERR "Bus error - the host /dev/shm or /tmp " "mount likely just ran out of space\n"); panic("Kernel mode signal %d", sig); } arch_examine_signal(sig, regs); current->thread.arch.faultinfo = *UPT_FAULTINFO(regs); force_sig(sig, current); } void bus_handler(int sig, struct uml_pt_regs *regs) { if (current->thread.fault_catcher != NULL) UML_LONGJMP(current->thread.fault_catcher, 1); else relay_signal(sig, regs); } void winch(int sig, struct uml_pt_regs *regs) { do_IRQ(WINCH_IRQ, regs); } void trap_init(void) { }
gpl-2.0
EmbeddedAndroid/linaro-android-3.1
drivers/hid/hid-speedlink.c
4540
2336
/*
 * HID driver for Speedlink Vicious and Divine Cezanne (USB mouse).
 * Fixes "jumpy" cursor and removes nonexistent keyboard LEDS from
 * the HID descriptor.
 *
 * Copyright (c) 2011 Stefan Kriwanek <mail@stefankriwanek.de>
 */

/*
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the Free
 * Software Foundation; either version 2 of the License, or (at your option)
 * any later version.
 */

#include <linux/device.h>
#include <linux/hid.h>
#include <linux/module.h>
#include <linux/usb.h>

#include "hid-ids.h"
#include "usbhid/usbhid.h"

/* The single device this driver binds to. */
static const struct hid_device_id speedlink_devices[] = {
	{ HID_USB_DEVICE(USB_VENDOR_ID_X_TENSIONS,
		USB_DEVICE_ID_SPEEDLINK_VAD_CEZANNE)},
	{ }
};

/*
 * Suppress input mappings for the LED usage page; returning -1 here
 * presumably tells hid-core to skip the usage entirely — confirm
 * against the input_mapping contract in hid-core.
 */
static int speedlink_input_mapping(struct hid_device *hdev,
		struct hid_input *hi,
		struct hid_field *field, struct hid_usage *usage,
		unsigned long **bit, int *max)
{
	/*
	 * The Cezanne mouse has a second "keyboard" USB endpoint for it is
	 * able to map keyboard events to the button presses.
	 * It sends a standard keyboard report descriptor, though, whose
	 * LEDs we ignore.
	 */
	switch (usage->hid & HID_USAGE_PAGE) {
	case HID_UP_LED:
		return -1;
	}
	return 0;
}

/*
 * Filter bogus relative-axis events (only X/Y reach here, per the
 * usage_table below).  Returning 1 drops the event.
 */
static int speedlink_event(struct hid_device *hdev, struct hid_field *field,
		struct hid_usage *usage, __s32 value)
{
	/* No other conditions due to usage_table. */

	/* Fix "jumpy" cursor (invalid events sent by device). */
	if (value == 256)
		return 1;
	/* Drop useless distance 0 events (on button clicks etc.) as well */
	if (value == 0)
		return 1;

	return 0;
}

MODULE_DEVICE_TABLE(hid, speedlink_devices);

/* Only relative X and Y are routed through speedlink_event(). */
static const struct hid_usage_id speedlink_grabbed_usages[] = {
	{ HID_GD_X, EV_REL, 0 },
	{ HID_GD_Y, EV_REL, 1 },
	{ HID_ANY_ID - 1, HID_ANY_ID - 1, HID_ANY_ID - 1}
};

static struct hid_driver speedlink_driver = {
	.name = "speedlink",
	.id_table = speedlink_devices,
	.usage_table = speedlink_grabbed_usages,
	.input_mapping = speedlink_input_mapping,
	.event = speedlink_event,
};

static int __init speedlink_init(void)
{
	return hid_register_driver(&speedlink_driver);
}

static void __exit speedlink_exit(void)
{
	hid_unregister_driver(&speedlink_driver);
}

module_init(speedlink_init);
module_exit(speedlink_exit);
MODULE_LICENSE("GPL");
gpl-2.0
CyanogenMod/android_kernel_sony_msm8x60
drivers/block/xsysace.c
5052
33494
/* * Xilinx SystemACE device driver * * Copyright 2007 Secret Lab Technologies Ltd. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation. */ /* * The SystemACE chip is designed to configure FPGAs by loading an FPGA * bitstream from a file on a CF card and squirting it into FPGAs connected * to the SystemACE JTAG chain. It also has the advantage of providing an * MPU interface which can be used to control the FPGA configuration process * and to use the attached CF card for general purpose storage. * * This driver is a block device driver for the SystemACE. * * Initialization: * The driver registers itself as a platform_device driver at module * load time. The platform bus will take care of calling the * ace_probe() method for all SystemACE instances in the system. Any * number of SystemACE instances are supported. ace_probe() calls * ace_setup() which initialized all data structures, reads the CF * id structure and registers the device. * * Processing: * Just about all of the heavy lifting in this driver is performed by * a Finite State Machine (FSM). The driver needs to wait on a number * of events; some raised by interrupts, some which need to be polled * for. Describing all of the behaviour in a FSM seems to be the * easiest way to keep the complexity low and make it easy to * understand what the driver is doing. If the block ops or the * request function need to interact with the hardware, then they * simply need to flag the request and kick of FSM processing. * * The FSM itself is atomic-safe code which can be run from any * context. The general process flow is: * 1. obtain the ace->lock spinlock. * 2. loop on ace_fsm_dostate() until the ace->fsm_continue flag is * cleared. * 3. release the lock. * * Individual states do not sleep in any way. 
If a condition needs to * be waited for then the state much clear the fsm_continue flag and * either schedule the FSM to be run again at a later time, or expect * an interrupt to call the FSM when the desired condition is met. * * In normal operation, the FSM is processed at interrupt context * either when the driver's tasklet is scheduled, or when an irq is * raised by the hardware. The tasklet can be scheduled at any time. * The request method in particular schedules the tasklet when a new * request has been indicated by the block layer. Once started, the * FSM proceeds as far as it can processing the request until it * needs on a hardware event. At this point, it must yield execution. * * A state has two options when yielding execution: * 1. ace_fsm_yield() * - Call if need to poll for event. * - clears the fsm_continue flag to exit the processing loop * - reschedules the tasklet to run again as soon as possible * 2. ace_fsm_yieldirq() * - Call if an irq is expected from the HW * - clears the fsm_continue flag to exit the processing loop * - does not reschedule the tasklet so the FSM will not be processed * again until an irq is received. * After calling a yield function, the state must return control back * to the FSM main loop. * * Additionally, the driver maintains a kernel timer which can process * the FSM. If the FSM gets stalled, typically due to a missed * interrupt, then the kernel timer will expire and the driver can * continue where it left off. * * To Do: * - Add FPGA configuration control interface. 
* - Request major number from lanana */ #undef DEBUG #include <linux/module.h> #include <linux/ctype.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/blkdev.h> #include <linux/mutex.h> #include <linux/ata.h> #include <linux/hdreg.h> #include <linux/platform_device.h> #if defined(CONFIG_OF) #include <linux/of_address.h> #include <linux/of_device.h> #include <linux/of_platform.h> #endif MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); MODULE_DESCRIPTION("Xilinx SystemACE device driver"); MODULE_LICENSE("GPL"); /* SystemACE register definitions */ #define ACE_BUSMODE (0x00) #define ACE_STATUS (0x04) #define ACE_STATUS_CFGLOCK (0x00000001) #define ACE_STATUS_MPULOCK (0x00000002) #define ACE_STATUS_CFGERROR (0x00000004) /* config controller error */ #define ACE_STATUS_CFCERROR (0x00000008) /* CF controller error */ #define ACE_STATUS_CFDETECT (0x00000010) #define ACE_STATUS_DATABUFRDY (0x00000020) #define ACE_STATUS_DATABUFMODE (0x00000040) #define ACE_STATUS_CFGDONE (0x00000080) #define ACE_STATUS_RDYFORCFCMD (0x00000100) #define ACE_STATUS_CFGMODEPIN (0x00000200) #define ACE_STATUS_CFGADDR_MASK (0x0000e000) #define ACE_STATUS_CFBSY (0x00020000) #define ACE_STATUS_CFRDY (0x00040000) #define ACE_STATUS_CFDWF (0x00080000) #define ACE_STATUS_CFDSC (0x00100000) #define ACE_STATUS_CFDRQ (0x00200000) #define ACE_STATUS_CFCORR (0x00400000) #define ACE_STATUS_CFERR (0x00800000) #define ACE_ERROR (0x08) #define ACE_CFGLBA (0x0c) #define ACE_MPULBA (0x10) #define ACE_SECCNTCMD (0x14) #define ACE_SECCNTCMD_RESET (0x0100) #define ACE_SECCNTCMD_IDENTIFY (0x0200) #define ACE_SECCNTCMD_READ_DATA (0x0300) #define ACE_SECCNTCMD_WRITE_DATA (0x0400) #define ACE_SECCNTCMD_ABORT (0x0600) #define ACE_VERSION (0x16) #define ACE_VERSION_REVISION_MASK (0x00FF) #define ACE_VERSION_MINOR_MASK (0x0F00) #define ACE_VERSION_MAJOR_MASK (0xF000) #define ACE_CTRL (0x18) 
/* Bit definitions for the ACE_CTRL control register */
#define ACE_CTRL_FORCELOCKREQ   (0x0001)
#define ACE_CTRL_LOCKREQ        (0x0002)
#define ACE_CTRL_FORCECFGADDR   (0x0004)
#define ACE_CTRL_FORCECFGMODE   (0x0008)
#define ACE_CTRL_CFGMODE        (0x0010)
#define ACE_CTRL_CFGSTART       (0x0020)
#define ACE_CTRL_CFGSEL         (0x0040)
#define ACE_CTRL_CFGRESET       (0x0080)
#define ACE_CTRL_DATABUFRDYIRQ  (0x0100)
#define ACE_CTRL_ERRORIRQ       (0x0200)
#define ACE_CTRL_CFGDONEIRQ     (0x0400)
#define ACE_CTRL_RESETIRQ       (0x0800)
#define ACE_CTRL_CFGPROG        (0x1000)
#define ACE_CTRL_CFGADDR_MASK   (0xe000)

#define ACE_FATSTAT (0x1c)

#define ACE_NUM_MINORS 16
#define ACE_SECTOR_SIZE (512)
#define ACE_FIFO_SIZE (32)
/* number of FIFO-sized chunks per 512-byte sector (512/32 = 16) */
#define ACE_BUF_PER_SECTOR (ACE_SECTOR_SIZE / ACE_FIFO_SIZE)

#define ACE_BUS_WIDTH_8  0
#define ACE_BUS_WIDTH_16 1

struct ace_reg_ops;

/*
 * Per-device driver state for one SystemACE controller.
 * Protected by @lock except where noted; the FSM fields are only
 * touched with @lock held (tasklet, timer and irq paths all take it).
 */
struct ace_device {
	/* driver state data */
	int id;
	int media_change;	/* nonzero => CF card was removed/changed; forces re-identify */
	int users;		/* open count; lock released when it drops to 0 */
	struct list_head list;

	/* finite state machine data */
	struct tasklet_struct fsm_tasklet;
	uint fsm_task;		/* Current activity (ACE_TASK_*) */
	uint fsm_state;		/* Current state (ACE_FSM_STATE_*) */
	uint fsm_continue_flag;	/* cleared to exit FSM mainloop */
	uint fsm_iter_num;
	struct timer_list stall_timer;	/* kicks a stalled FSM back to life */

	/* Transfer state/result, use for both id and block request */
	struct request *req;	/* request being processed */
	void *data_ptr;		/* pointer to I/O buffer */
	int data_count;		/* number of buffers remaining */
	int data_result;	/* Result of transfer; 0 := success */

	int id_req_count;	/* count of id requests */
	int id_result;
	struct completion id_completion;	/* used when id req finishes */
	int in_irq;

	/* Details of hardware device */
	resource_size_t physaddr;
	void __iomem *baseaddr;
	int irq;		/* 0 => no IRQ; driver falls back to polling */
	int bus_width;		/* 0 := 8 bit; 1 := 16 bit */
	struct ace_reg_ops *reg_ops;	/* accessors matching bus width/endianness */
	int lock_count;

	/* Block device data structures */
	spinlock_t lock;
	struct device *dev;
	struct request_queue *queue;
	struct gendisk *gd;

	/* Inserted CF card parameters */
	u16 cf_id[ATA_ID_WORDS];
};

static DEFINE_MUTEX(xsysace_mutex);
static int ace_major;

/*
--------------------------------------------------------------------- * Low level register access */ struct ace_reg_ops { u16(*in) (struct ace_device * ace, int reg); void (*out) (struct ace_device * ace, int reg, u16 val); void (*datain) (struct ace_device * ace); void (*dataout) (struct ace_device * ace); }; /* 8 Bit bus width */ static u16 ace_in_8(struct ace_device *ace, int reg) { void __iomem *r = ace->baseaddr + reg; return in_8(r) | (in_8(r + 1) << 8); } static void ace_out_8(struct ace_device *ace, int reg, u16 val) { void __iomem *r = ace->baseaddr + reg; out_8(r, val); out_8(r + 1, val >> 8); } static void ace_datain_8(struct ace_device *ace) { void __iomem *r = ace->baseaddr + 0x40; u8 *dst = ace->data_ptr; int i = ACE_FIFO_SIZE; while (i--) *dst++ = in_8(r++); ace->data_ptr = dst; } static void ace_dataout_8(struct ace_device *ace) { void __iomem *r = ace->baseaddr + 0x40; u8 *src = ace->data_ptr; int i = ACE_FIFO_SIZE; while (i--) out_8(r++, *src++); ace->data_ptr = src; } static struct ace_reg_ops ace_reg_8_ops = { .in = ace_in_8, .out = ace_out_8, .datain = ace_datain_8, .dataout = ace_dataout_8, }; /* 16 bit big endian bus attachment */ static u16 ace_in_be16(struct ace_device *ace, int reg) { return in_be16(ace->baseaddr + reg); } static void ace_out_be16(struct ace_device *ace, int reg, u16 val) { out_be16(ace->baseaddr + reg, val); } static void ace_datain_be16(struct ace_device *ace) { int i = ACE_FIFO_SIZE / 2; u16 *dst = ace->data_ptr; while (i--) *dst++ = in_le16(ace->baseaddr + 0x40); ace->data_ptr = dst; } static void ace_dataout_be16(struct ace_device *ace) { int i = ACE_FIFO_SIZE / 2; u16 *src = ace->data_ptr; while (i--) out_le16(ace->baseaddr + 0x40, *src++); ace->data_ptr = src; } /* 16 bit little endian bus attachment */ static u16 ace_in_le16(struct ace_device *ace, int reg) { return in_le16(ace->baseaddr + reg); } static void ace_out_le16(struct ace_device *ace, int reg, u16 val) { out_le16(ace->baseaddr + reg, val); } static void 
ace_datain_le16(struct ace_device *ace) { int i = ACE_FIFO_SIZE / 2; u16 *dst = ace->data_ptr; while (i--) *dst++ = in_be16(ace->baseaddr + 0x40); ace->data_ptr = dst; } static void ace_dataout_le16(struct ace_device *ace) { int i = ACE_FIFO_SIZE / 2; u16 *src = ace->data_ptr; while (i--) out_be16(ace->baseaddr + 0x40, *src++); ace->data_ptr = src; } static struct ace_reg_ops ace_reg_be16_ops = { .in = ace_in_be16, .out = ace_out_be16, .datain = ace_datain_be16, .dataout = ace_dataout_be16, }; static struct ace_reg_ops ace_reg_le16_ops = { .in = ace_in_le16, .out = ace_out_le16, .datain = ace_datain_le16, .dataout = ace_dataout_le16, }; static inline u16 ace_in(struct ace_device *ace, int reg) { return ace->reg_ops->in(ace, reg); } static inline u32 ace_in32(struct ace_device *ace, int reg) { return ace_in(ace, reg) | (ace_in(ace, reg + 2) << 16); } static inline void ace_out(struct ace_device *ace, int reg, u16 val) { ace->reg_ops->out(ace, reg, val); } static inline void ace_out32(struct ace_device *ace, int reg, u32 val) { ace_out(ace, reg, val); ace_out(ace, reg + 2, val >> 16); } /* --------------------------------------------------------------------- * Debug support functions */ #if defined(DEBUG) static void ace_dump_mem(void *base, int len) { const char *ptr = base; int i, j; for (i = 0; i < len; i += 16) { printk(KERN_INFO "%.8x:", i); for (j = 0; j < 16; j++) { if (!(j % 4)) printk(" "); printk("%.2x", ptr[i + j]); } printk(" "); for (j = 0; j < 16; j++) printk("%c", isprint(ptr[i + j]) ? 
ptr[i + j] : '.'); printk("\n"); } } #else static inline void ace_dump_mem(void *base, int len) { } #endif static void ace_dump_regs(struct ace_device *ace) { dev_info(ace->dev, " ctrl: %.8x seccnt/cmd: %.4x ver:%.4x\n" " status:%.8x mpu_lba:%.8x busmode:%4x\n" " error: %.8x cfg_lba:%.8x fatstat:%.4x\n", ace_in32(ace, ACE_CTRL), ace_in(ace, ACE_SECCNTCMD), ace_in(ace, ACE_VERSION), ace_in32(ace, ACE_STATUS), ace_in32(ace, ACE_MPULBA), ace_in(ace, ACE_BUSMODE), ace_in32(ace, ACE_ERROR), ace_in32(ace, ACE_CFGLBA), ace_in(ace, ACE_FATSTAT)); } void ace_fix_driveid(u16 *id) { #if defined(__BIG_ENDIAN) int i; /* All half words have wrong byte order; swap the bytes */ for (i = 0; i < ATA_ID_WORDS; i++, id++) *id = le16_to_cpu(*id); #endif } /* --------------------------------------------------------------------- * Finite State Machine (FSM) implementation */ /* FSM tasks; used to direct state transitions */ #define ACE_TASK_IDLE 0 #define ACE_TASK_IDENTIFY 1 #define ACE_TASK_READ 2 #define ACE_TASK_WRITE 3 #define ACE_FSM_NUM_TASKS 4 /* FSM state definitions */ #define ACE_FSM_STATE_IDLE 0 #define ACE_FSM_STATE_REQ_LOCK 1 #define ACE_FSM_STATE_WAIT_LOCK 2 #define ACE_FSM_STATE_WAIT_CFREADY 3 #define ACE_FSM_STATE_IDENTIFY_PREPARE 4 #define ACE_FSM_STATE_IDENTIFY_TRANSFER 5 #define ACE_FSM_STATE_IDENTIFY_COMPLETE 6 #define ACE_FSM_STATE_REQ_PREPARE 7 #define ACE_FSM_STATE_REQ_TRANSFER 8 #define ACE_FSM_STATE_REQ_COMPLETE 9 #define ACE_FSM_STATE_ERROR 10 #define ACE_FSM_NUM_STATES 11 /* Set flag to exit FSM loop and reschedule tasklet */ static inline void ace_fsm_yield(struct ace_device *ace) { dev_dbg(ace->dev, "ace_fsm_yield()\n"); tasklet_schedule(&ace->fsm_tasklet); ace->fsm_continue_flag = 0; } /* Set flag to exit FSM loop and wait for IRQ to reschedule tasklet */ static inline void ace_fsm_yieldirq(struct ace_device *ace) { dev_dbg(ace->dev, "ace_fsm_yieldirq()\n"); if (!ace->irq) /* No IRQ assigned, so need to poll */ tasklet_schedule(&ace->fsm_tasklet); 
ace->fsm_continue_flag = 0; } /* Get the next read/write request; ending requests that we don't handle */ struct request *ace_get_next_request(struct request_queue * q) { struct request *req; while ((req = blk_peek_request(q)) != NULL) { if (req->cmd_type == REQ_TYPE_FS) break; blk_start_request(req); __blk_end_request_all(req, -EIO); } return req; } static void ace_fsm_dostate(struct ace_device *ace) { struct request *req; u32 status; u16 val; int count; #if defined(DEBUG) dev_dbg(ace->dev, "fsm_state=%i, id_req_count=%i\n", ace->fsm_state, ace->id_req_count); #endif /* Verify that there is actually a CF in the slot. If not, then * bail out back to the idle state and wake up all the waiters */ status = ace_in32(ace, ACE_STATUS); if ((status & ACE_STATUS_CFDETECT) == 0) { ace->fsm_state = ACE_FSM_STATE_IDLE; ace->media_change = 1; set_capacity(ace->gd, 0); dev_info(ace->dev, "No CF in slot\n"); /* Drop all in-flight and pending requests */ if (ace->req) { __blk_end_request_all(ace->req, -EIO); ace->req = NULL; } while ((req = blk_fetch_request(ace->queue)) != NULL) __blk_end_request_all(req, -EIO); /* Drop back to IDLE state and notify waiters */ ace->fsm_state = ACE_FSM_STATE_IDLE; ace->id_result = -EIO; while (ace->id_req_count) { complete(&ace->id_completion); ace->id_req_count--; } } switch (ace->fsm_state) { case ACE_FSM_STATE_IDLE: /* See if there is anything to do */ if (ace->id_req_count || ace_get_next_request(ace->queue)) { ace->fsm_iter_num++; ace->fsm_state = ACE_FSM_STATE_REQ_LOCK; mod_timer(&ace->stall_timer, jiffies + HZ); if (!timer_pending(&ace->stall_timer)) add_timer(&ace->stall_timer); break; } del_timer(&ace->stall_timer); ace->fsm_continue_flag = 0; break; case ACE_FSM_STATE_REQ_LOCK: if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) { /* Already have the lock, jump to next state */ ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY; break; } /* Request the lock */ val = ace_in(ace, ACE_CTRL); ace_out(ace, ACE_CTRL, val | ACE_CTRL_LOCKREQ); 
ace->fsm_state = ACE_FSM_STATE_WAIT_LOCK; break; case ACE_FSM_STATE_WAIT_LOCK: if (ace_in(ace, ACE_STATUS) & ACE_STATUS_MPULOCK) { /* got the lock; move to next state */ ace->fsm_state = ACE_FSM_STATE_WAIT_CFREADY; break; } /* wait a bit for the lock */ ace_fsm_yield(ace); break; case ACE_FSM_STATE_WAIT_CFREADY: status = ace_in32(ace, ACE_STATUS); if (!(status & ACE_STATUS_RDYFORCFCMD) || (status & ACE_STATUS_CFBSY)) { /* CF card isn't ready; it needs to be polled */ ace_fsm_yield(ace); break; } /* Device is ready for command; determine what to do next */ if (ace->id_req_count) ace->fsm_state = ACE_FSM_STATE_IDENTIFY_PREPARE; else ace->fsm_state = ACE_FSM_STATE_REQ_PREPARE; break; case ACE_FSM_STATE_IDENTIFY_PREPARE: /* Send identify command */ ace->fsm_task = ACE_TASK_IDENTIFY; ace->data_ptr = ace->cf_id; ace->data_count = ACE_BUF_PER_SECTOR; ace_out(ace, ACE_SECCNTCMD, ACE_SECCNTCMD_IDENTIFY); /* As per datasheet, put config controller in reset */ val = ace_in(ace, ACE_CTRL); ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET); /* irq handler takes over from this point; wait for the * transfer to complete */ ace->fsm_state = ACE_FSM_STATE_IDENTIFY_TRANSFER; ace_fsm_yieldirq(ace); break; case ACE_FSM_STATE_IDENTIFY_TRANSFER: /* Check that the sysace is ready to receive data */ status = ace_in32(ace, ACE_STATUS); if (status & ACE_STATUS_CFBSY) { dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i dc=%i\n", ace->fsm_task, ace->fsm_iter_num, ace->data_count); ace_fsm_yield(ace); break; } if (!(status & ACE_STATUS_DATABUFRDY)) { ace_fsm_yield(ace); break; } /* Transfer the next buffer */ ace->reg_ops->datain(ace); ace->data_count--; /* If there are still buffers to be transfers; jump out here */ if (ace->data_count != 0) { ace_fsm_yieldirq(ace); break; } /* transfer finished; kick state machine */ dev_dbg(ace->dev, "identify finished\n"); ace->fsm_state = ACE_FSM_STATE_IDENTIFY_COMPLETE; break; case ACE_FSM_STATE_IDENTIFY_COMPLETE: ace_fix_driveid(ace->cf_id); 
ace_dump_mem(ace->cf_id, 512); /* Debug: Dump out disk ID */ if (ace->data_result) { /* Error occurred, disable the disk */ ace->media_change = 1; set_capacity(ace->gd, 0); dev_err(ace->dev, "error fetching CF id (%i)\n", ace->data_result); } else { ace->media_change = 0; /* Record disk parameters */ set_capacity(ace->gd, ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY)); dev_info(ace->dev, "capacity: %i sectors\n", ata_id_u32(ace->cf_id, ATA_ID_LBA_CAPACITY)); } /* We're done, drop to IDLE state and notify waiters */ ace->fsm_state = ACE_FSM_STATE_IDLE; ace->id_result = ace->data_result; while (ace->id_req_count) { complete(&ace->id_completion); ace->id_req_count--; } break; case ACE_FSM_STATE_REQ_PREPARE: req = ace_get_next_request(ace->queue); if (!req) { ace->fsm_state = ACE_FSM_STATE_IDLE; break; } blk_start_request(req); /* Okay, it's a data request, set it up for transfer */ dev_dbg(ace->dev, "request: sec=%llx hcnt=%x, ccnt=%x, dir=%i\n", (unsigned long long)blk_rq_pos(req), blk_rq_sectors(req), blk_rq_cur_sectors(req), rq_data_dir(req)); ace->req = req; ace->data_ptr = req->buffer; ace->data_count = blk_rq_cur_sectors(req) * ACE_BUF_PER_SECTOR; ace_out32(ace, ACE_MPULBA, blk_rq_pos(req) & 0x0FFFFFFF); count = blk_rq_sectors(req); if (rq_data_dir(req)) { /* Kick off write request */ dev_dbg(ace->dev, "write data\n"); ace->fsm_task = ACE_TASK_WRITE; ace_out(ace, ACE_SECCNTCMD, count | ACE_SECCNTCMD_WRITE_DATA); } else { /* Kick off read request */ dev_dbg(ace->dev, "read data\n"); ace->fsm_task = ACE_TASK_READ; ace_out(ace, ACE_SECCNTCMD, count | ACE_SECCNTCMD_READ_DATA); } /* As per datasheet, put config controller in reset */ val = ace_in(ace, ACE_CTRL); ace_out(ace, ACE_CTRL, val | ACE_CTRL_CFGRESET); /* Move to the transfer state. 
The systemace will raise * an interrupt once there is something to do */ ace->fsm_state = ACE_FSM_STATE_REQ_TRANSFER; if (ace->fsm_task == ACE_TASK_READ) ace_fsm_yieldirq(ace); /* wait for data ready */ break; case ACE_FSM_STATE_REQ_TRANSFER: /* Check that the sysace is ready to receive data */ status = ace_in32(ace, ACE_STATUS); if (status & ACE_STATUS_CFBSY) { dev_dbg(ace->dev, "CFBSY set; t=%i iter=%i c=%i dc=%i irq=%i\n", ace->fsm_task, ace->fsm_iter_num, blk_rq_cur_sectors(ace->req) * 16, ace->data_count, ace->in_irq); ace_fsm_yield(ace); /* need to poll CFBSY bit */ break; } if (!(status & ACE_STATUS_DATABUFRDY)) { dev_dbg(ace->dev, "DATABUF not set; t=%i iter=%i c=%i dc=%i irq=%i\n", ace->fsm_task, ace->fsm_iter_num, blk_rq_cur_sectors(ace->req) * 16, ace->data_count, ace->in_irq); ace_fsm_yieldirq(ace); break; } /* Transfer the next buffer */ if (ace->fsm_task == ACE_TASK_WRITE) ace->reg_ops->dataout(ace); else ace->reg_ops->datain(ace); ace->data_count--; /* If there are still buffers to be transfers; jump out here */ if (ace->data_count != 0) { ace_fsm_yieldirq(ace); break; } /* bio finished; is there another one? 
*/ if (__blk_end_request_cur(ace->req, 0)) { /* dev_dbg(ace->dev, "next block; h=%u c=%u\n", * blk_rq_sectors(ace->req), * blk_rq_cur_sectors(ace->req)); */ ace->data_ptr = ace->req->buffer; ace->data_count = blk_rq_cur_sectors(ace->req) * 16; ace_fsm_yieldirq(ace); break; } ace->fsm_state = ACE_FSM_STATE_REQ_COMPLETE; break; case ACE_FSM_STATE_REQ_COMPLETE: ace->req = NULL; /* Finished request; go to idle state */ ace->fsm_state = ACE_FSM_STATE_IDLE; break; default: ace->fsm_state = ACE_FSM_STATE_IDLE; break; } } static void ace_fsm_tasklet(unsigned long data) { struct ace_device *ace = (void *)data; unsigned long flags; spin_lock_irqsave(&ace->lock, flags); /* Loop over state machine until told to stop */ ace->fsm_continue_flag = 1; while (ace->fsm_continue_flag) ace_fsm_dostate(ace); spin_unlock_irqrestore(&ace->lock, flags); } static void ace_stall_timer(unsigned long data) { struct ace_device *ace = (void *)data; unsigned long flags; dev_warn(ace->dev, "kicking stalled fsm; state=%i task=%i iter=%i dc=%i\n", ace->fsm_state, ace->fsm_task, ace->fsm_iter_num, ace->data_count); spin_lock_irqsave(&ace->lock, flags); /* Rearm the stall timer *before* entering FSM (which may then * delete the timer) */ mod_timer(&ace->stall_timer, jiffies + HZ); /* Loop over state machine until told to stop */ ace->fsm_continue_flag = 1; while (ace->fsm_continue_flag) ace_fsm_dostate(ace); spin_unlock_irqrestore(&ace->lock, flags); } /* --------------------------------------------------------------------- * Interrupt handling routines */ static int ace_interrupt_checkstate(struct ace_device *ace) { u32 sreg = ace_in32(ace, ACE_STATUS); u16 creg = ace_in(ace, ACE_CTRL); /* Check for error occurrence */ if ((sreg & (ACE_STATUS_CFGERROR | ACE_STATUS_CFCERROR)) && (creg & ACE_CTRL_ERRORIRQ)) { dev_err(ace->dev, "transfer failure\n"); ace_dump_regs(ace); return -EIO; } return 0; } static irqreturn_t ace_interrupt(int irq, void *dev_id) { u16 creg; struct ace_device *ace = dev_id; /* be 
safe and get the lock */ spin_lock(&ace->lock); ace->in_irq = 1; /* clear the interrupt */ creg = ace_in(ace, ACE_CTRL); ace_out(ace, ACE_CTRL, creg | ACE_CTRL_RESETIRQ); ace_out(ace, ACE_CTRL, creg); /* check for IO failures */ if (ace_interrupt_checkstate(ace)) ace->data_result = -EIO; if (ace->fsm_task == 0) { dev_err(ace->dev, "spurious irq; stat=%.8x ctrl=%.8x cmd=%.4x\n", ace_in32(ace, ACE_STATUS), ace_in32(ace, ACE_CTRL), ace_in(ace, ACE_SECCNTCMD)); dev_err(ace->dev, "fsm_task=%i fsm_state=%i data_count=%i\n", ace->fsm_task, ace->fsm_state, ace->data_count); } /* Loop over state machine until told to stop */ ace->fsm_continue_flag = 1; while (ace->fsm_continue_flag) ace_fsm_dostate(ace); /* done with interrupt; drop the lock */ ace->in_irq = 0; spin_unlock(&ace->lock); return IRQ_HANDLED; } /* --------------------------------------------------------------------- * Block ops */ static void ace_request(struct request_queue * q) { struct request *req; struct ace_device *ace; req = ace_get_next_request(q); if (req) { ace = req->rq_disk->private_data; tasklet_schedule(&ace->fsm_tasklet); } } static unsigned int ace_check_events(struct gendisk *gd, unsigned int clearing) { struct ace_device *ace = gd->private_data; dev_dbg(ace->dev, "ace_check_events(): %i\n", ace->media_change); return ace->media_change ? 
DISK_EVENT_MEDIA_CHANGE : 0; } static int ace_revalidate_disk(struct gendisk *gd) { struct ace_device *ace = gd->private_data; unsigned long flags; dev_dbg(ace->dev, "ace_revalidate_disk()\n"); if (ace->media_change) { dev_dbg(ace->dev, "requesting cf id and scheduling tasklet\n"); spin_lock_irqsave(&ace->lock, flags); ace->id_req_count++; spin_unlock_irqrestore(&ace->lock, flags); tasklet_schedule(&ace->fsm_tasklet); wait_for_completion(&ace->id_completion); } dev_dbg(ace->dev, "revalidate complete\n"); return ace->id_result; } static int ace_open(struct block_device *bdev, fmode_t mode) { struct ace_device *ace = bdev->bd_disk->private_data; unsigned long flags; dev_dbg(ace->dev, "ace_open() users=%i\n", ace->users + 1); mutex_lock(&xsysace_mutex); spin_lock_irqsave(&ace->lock, flags); ace->users++; spin_unlock_irqrestore(&ace->lock, flags); check_disk_change(bdev); mutex_unlock(&xsysace_mutex); return 0; } static int ace_release(struct gendisk *disk, fmode_t mode) { struct ace_device *ace = disk->private_data; unsigned long flags; u16 val; dev_dbg(ace->dev, "ace_release() users=%i\n", ace->users - 1); mutex_lock(&xsysace_mutex); spin_lock_irqsave(&ace->lock, flags); ace->users--; if (ace->users == 0) { val = ace_in(ace, ACE_CTRL); ace_out(ace, ACE_CTRL, val & ~ACE_CTRL_LOCKREQ); } spin_unlock_irqrestore(&ace->lock, flags); mutex_unlock(&xsysace_mutex); return 0; } static int ace_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct ace_device *ace = bdev->bd_disk->private_data; u16 *cf_id = ace->cf_id; dev_dbg(ace->dev, "ace_getgeo()\n"); geo->heads = cf_id[ATA_ID_HEADS]; geo->sectors = cf_id[ATA_ID_SECTORS]; geo->cylinders = cf_id[ATA_ID_CYLS]; return 0; } static const struct block_device_operations ace_fops = { .owner = THIS_MODULE, .open = ace_open, .release = ace_release, .check_events = ace_check_events, .revalidate_disk = ace_revalidate_disk, .getgeo = ace_getgeo, }; /* -------------------------------------------------------------------- * 
SystemACE device setup/teardown code */ static int __devinit ace_setup(struct ace_device *ace) { u16 version; u16 val; int rc; dev_dbg(ace->dev, "ace_setup(ace=0x%p)\n", ace); dev_dbg(ace->dev, "physaddr=0x%llx irq=%i\n", (unsigned long long)ace->physaddr, ace->irq); spin_lock_init(&ace->lock); init_completion(&ace->id_completion); /* * Map the device */ ace->baseaddr = ioremap(ace->physaddr, 0x80); if (!ace->baseaddr) goto err_ioremap; /* * Initialize the state machine tasklet and stall timer */ tasklet_init(&ace->fsm_tasklet, ace_fsm_tasklet, (unsigned long)ace); setup_timer(&ace->stall_timer, ace_stall_timer, (unsigned long)ace); /* * Initialize the request queue */ ace->queue = blk_init_queue(ace_request, &ace->lock); if (ace->queue == NULL) goto err_blk_initq; blk_queue_logical_block_size(ace->queue, 512); /* * Allocate and initialize GD structure */ ace->gd = alloc_disk(ACE_NUM_MINORS); if (!ace->gd) goto err_alloc_disk; ace->gd->major = ace_major; ace->gd->first_minor = ace->id * ACE_NUM_MINORS; ace->gd->fops = &ace_fops; ace->gd->queue = ace->queue; ace->gd->private_data = ace; snprintf(ace->gd->disk_name, 32, "xs%c", ace->id + 'a'); /* set bus width */ if (ace->bus_width == ACE_BUS_WIDTH_16) { /* 0x0101 should work regardless of endianess */ ace_out_le16(ace, ACE_BUSMODE, 0x0101); /* read it back to determine endianess */ if (ace_in_le16(ace, ACE_BUSMODE) == 0x0001) ace->reg_ops = &ace_reg_le16_ops; else ace->reg_ops = &ace_reg_be16_ops; } else { ace_out_8(ace, ACE_BUSMODE, 0x00); ace->reg_ops = &ace_reg_8_ops; } /* Make sure version register is sane */ version = ace_in(ace, ACE_VERSION); if ((version == 0) || (version == 0xFFFF)) goto err_read; /* Put sysace in a sane state by clearing most control reg bits */ ace_out(ace, ACE_CTRL, ACE_CTRL_FORCECFGMODE | ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ); /* Now we can hook up the irq handler */ if (ace->irq) { rc = request_irq(ace->irq, ace_interrupt, 0, "systemace", ace); if (rc) { /* Failure - fall back to 
polled mode */ dev_err(ace->dev, "request_irq failed\n"); ace->irq = 0; } } /* Enable interrupts */ val = ace_in(ace, ACE_CTRL); val |= ACE_CTRL_DATABUFRDYIRQ | ACE_CTRL_ERRORIRQ; ace_out(ace, ACE_CTRL, val); /* Print the identification */ dev_info(ace->dev, "Xilinx SystemACE revision %i.%i.%i\n", (version >> 12) & 0xf, (version >> 8) & 0x0f, version & 0xff); dev_dbg(ace->dev, "physaddr 0x%llx, mapped to 0x%p, irq=%i\n", (unsigned long long) ace->physaddr, ace->baseaddr, ace->irq); ace->media_change = 1; ace_revalidate_disk(ace->gd); /* Make the sysace device 'live' */ add_disk(ace->gd); return 0; err_read: put_disk(ace->gd); err_alloc_disk: blk_cleanup_queue(ace->queue); err_blk_initq: iounmap(ace->baseaddr); err_ioremap: dev_info(ace->dev, "xsysace: error initializing device at 0x%llx\n", (unsigned long long) ace->physaddr); return -ENOMEM; } static void __devexit ace_teardown(struct ace_device *ace) { if (ace->gd) { del_gendisk(ace->gd); put_disk(ace->gd); } if (ace->queue) blk_cleanup_queue(ace->queue); tasklet_kill(&ace->fsm_tasklet); if (ace->irq) free_irq(ace->irq, ace); iounmap(ace->baseaddr); } static int __devinit ace_alloc(struct device *dev, int id, resource_size_t physaddr, int irq, int bus_width) { struct ace_device *ace; int rc; dev_dbg(dev, "ace_alloc(%p)\n", dev); if (!physaddr) { rc = -ENODEV; goto err_noreg; } /* Allocate and initialize the ace device structure */ ace = kzalloc(sizeof(struct ace_device), GFP_KERNEL); if (!ace) { rc = -ENOMEM; goto err_alloc; } ace->dev = dev; ace->id = id; ace->physaddr = physaddr; ace->irq = irq; ace->bus_width = bus_width; /* Call the setup code */ rc = ace_setup(ace); if (rc) goto err_setup; dev_set_drvdata(dev, ace); return 0; err_setup: dev_set_drvdata(dev, NULL); kfree(ace); err_alloc: err_noreg: dev_err(dev, "could not initialize device, err=%i\n", rc); return rc; } static void __devexit ace_free(struct device *dev) { struct ace_device *ace = dev_get_drvdata(dev); dev_dbg(dev, "ace_free(%p)\n", dev); if 
(ace) { ace_teardown(ace); dev_set_drvdata(dev, NULL); kfree(ace); } } /* --------------------------------------------------------------------- * Platform Bus Support */ static int __devinit ace_probe(struct platform_device *dev) { resource_size_t physaddr = 0; int bus_width = ACE_BUS_WIDTH_16; /* FIXME: should not be hard coded */ u32 id = dev->id; int irq = 0; int i; dev_dbg(&dev->dev, "ace_probe(%p)\n", dev); /* device id and bus width */ of_property_read_u32(dev->dev.of_node, "port-number", &id); if (id < 0) id = 0; if (of_find_property(dev->dev.of_node, "8-bit", NULL)) bus_width = ACE_BUS_WIDTH_8; for (i = 0; i < dev->num_resources; i++) { if (dev->resource[i].flags & IORESOURCE_MEM) physaddr = dev->resource[i].start; if (dev->resource[i].flags & IORESOURCE_IRQ) irq = dev->resource[i].start; } /* Call the bus-independent setup code */ return ace_alloc(&dev->dev, id, physaddr, irq, bus_width); } /* * Platform bus remove() method */ static int __devexit ace_remove(struct platform_device *dev) { ace_free(&dev->dev); return 0; } #if defined(CONFIG_OF) /* Match table for of_platform binding */ static const struct of_device_id ace_of_match[] __devinitconst = { { .compatible = "xlnx,opb-sysace-1.00.b", }, { .compatible = "xlnx,opb-sysace-1.00.c", }, { .compatible = "xlnx,xps-sysace-1.00.a", }, { .compatible = "xlnx,sysace", }, {}, }; MODULE_DEVICE_TABLE(of, ace_of_match); #else /* CONFIG_OF */ #define ace_of_match NULL #endif /* CONFIG_OF */ static struct platform_driver ace_platform_driver = { .probe = ace_probe, .remove = __devexit_p(ace_remove), .driver = { .owner = THIS_MODULE, .name = "xsysace", .of_match_table = ace_of_match, }, }; /* --------------------------------------------------------------------- * Module init/exit routines */ static int __init ace_init(void) { int rc; ace_major = register_blkdev(ace_major, "xsysace"); if (ace_major <= 0) { rc = -ENOMEM; goto err_blk; } rc = platform_driver_register(&ace_platform_driver); if (rc) goto err_plat; 
pr_info("Xilinx SystemACE device driver, major=%i\n", ace_major); return 0; err_plat: unregister_blkdev(ace_major, "xsysace"); err_blk: printk(KERN_ERR "xsysace: registration failed; err=%i\n", rc); return rc; } module_init(ace_init); static void __exit ace_exit(void) { pr_debug("Unregistering Xilinx SystemACE driver\n"); platform_driver_unregister(&ace_platform_driver); unregister_blkdev(ace_major, "xsysace"); } module_exit(ace_exit);
gpl-2.0
jxxhwy/A850S_JB_KERNEL
drivers/mfd/ab3100-otp.c
8636
6986
/*
 * drivers/mfd/ab3100_otp.c
 *
 * Copyright (C) 2007-2009 ST-Ericsson AB
 * License terms: GNU General Public License (GPL) version 2
 * Driver to read out OTP from the AB3100 Mixed-signal circuit
 * Author: Linus Walleij <linus.walleij@stericsson.com>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/platform_device.h>
#include <linux/mfd/abx500.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

/* The OTP registers */
#define AB3100_OTP0		0xb0
#define AB3100_OTP1		0xb1
#define AB3100_OTP2		0xb2
#define AB3100_OTP3		0xb3
#define AB3100_OTP4		0xb4
#define AB3100_OTP5		0xb5
#define AB3100_OTP6		0xb6
#define AB3100_OTP7		0xb7
#define AB3100_OTPP		0xbf

/**
 * struct ab3100_otp
 * @dev: containing device
 * @locked: whether the OTP is locked, after locking, no more bits
 *       can be changed but before locking it is still possible
 *       to change bits from 1->0.
 * @freq: clocking frequency for the OTP, this frequency is either
 *       32768Hz or 1MHz/30
 * @paf: product activation flag, indicates whether this is a real
 *       product (paf true) or a lab board etc (paf false)
 * @imeich: if this is set it is possible to override the
 *       IMEI number found in the tac, fac and svn fields with
 *       (secured) software
 * @cid: customer ID
 * @tac: type allocation code of the IMEI
 * @fac: final assembly code of the IMEI
 * @svn: software version number of the IMEI
 * @debugfs: a debugfs file used when dumping to file
 */
struct ab3100_otp {
	struct device *dev;
	bool locked;
	u32 freq;
	bool paf;
	bool imeich;
	u16 cid:14;
	u32 tac:20;
	u8 fac;
	u32 svn:20;
	struct dentry *debugfs;
};

/*
 * Read the OTPP register and the eight OTP data registers over the
 * abx500 register interface and cache the decoded fields in *otp.
 * Returns 0 on success or the (negative) register-access error.
 */
static int __init ab3100_otp_read(struct ab3100_otp *otp)
{
	u8 otpval[8];
	u8 otpp;
	int err;

	err = abx500_get_register_interruptible(otp->dev, 0,
		AB3100_OTPP, &otpp);
	if (err) {
		dev_err(otp->dev, "unable to read OTPP register\n");
		return err;
	}

	err = abx500_get_register_page_interruptible(otp->dev, 0,
		AB3100_OTP0, otpval, 8);
	if (err) {
		dev_err(otp->dev, "unable to read OTP register page\n");
		return err;
	}

	/* Cache OTP properties, they never change by nature */
	otp->locked = (otpp & 0x80);
	otp->freq = (otpp & 0x40) ? 32768 : 34100;
	otp->paf = (otpval[1] & 0x80);
	otp->imeich = (otpval[1] & 0x40);
	otp->cid = ((otpval[1] << 8) | otpval[0]) & 0x3fff;
	/* IMEI fields are packed across byte boundaries in the OTP bytes */
	otp->tac = ((otpval[4] & 0x0f) << 16) | (otpval[3] << 8) | otpval[2];
	otp->fac = ((otpval[5] & 0x0f) << 4) | (otpval[4] >> 4);
	otp->svn = (otpval[7] << 12) | (otpval[6] << 4) | (otpval[5] >> 4);
	return 0;
}

/*
 * This is a simple debugfs human-readable file that dumps out
 * the contents of the OTP.
 */
#ifdef CONFIG_DEBUG_FS
static int ab3100_show_otp(struct seq_file *s, void *v)
{
	struct ab3100_otp *otp = s->private;

	seq_printf(s, "OTP is %s\n",
		   otp->locked ? "LOCKED" : "UNLOCKED");
	seq_printf(s, "OTP clock switch startup is %uHz\n", otp->freq);
	seq_printf(s, "PAF is %s\n",
		   otp->paf ? "SET" : "NOT SET");
	seq_printf(s, "IMEI is %s\n",
		   otp->imeich ? "CHANGEABLE" : "NOT CHANGEABLE");
	seq_printf(s, "CID: 0x%04x (decimal: %d)\n", otp->cid, otp->cid);
	seq_printf(s, "IMEI: %u-%u-%u\n", otp->tac, otp->fac, otp->svn);
	return 0;
}

static int ab3100_otp_open(struct inode *inode, struct file *file)
{
	return single_open(file, ab3100_show_otp, inode->i_private);
}

static const struct file_operations ab3100_otp_operations = {
	.open		= ab3100_otp_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Create the "ab3100_otp" debugfs file; -ENOENT if registration fails. */
static int __init ab3100_otp_init_debugfs(struct device *dev,
					  struct ab3100_otp *otp)
{
	otp->debugfs = debugfs_create_file("ab3100_otp", S_IFREG | S_IRUGO,
					   NULL, otp,
					   &ab3100_otp_operations);
	if (!otp->debugfs) {
		dev_err(dev, "AB3100 debugfs OTP file registration failed!\n");
		return -ENOENT;
	}
	return 0;
}

static void __exit ab3100_otp_exit_debugfs(struct ab3100_otp *otp)
{
	debugfs_remove(otp->debugfs);
}
#else
/* Compile this out if debugfs not selected */
static inline int __init ab3100_otp_init_debugfs(struct device *dev,
						 struct ab3100_otp *otp)
{
	return 0;
}

static inline void __exit ab3100_otp_exit_debugfs(struct ab3100_otp *otp)
{
}
#endif

/*
 * Generates one read-only sysfs "show" function per cached OTP field.
 * All fields are printed with %u after integer promotion.
 */
#define SHOW_AB3100_ATTR(name) \
static ssize_t ab3100_otp_##name##_show(struct device *dev, \
			    struct device_attribute *attr, \
			    char *buf) \
{\
	struct ab3100_otp *otp = dev_get_drvdata(dev); \
	return sprintf(buf, "%u\n", otp->name); \
}

SHOW_AB3100_ATTR(locked)
SHOW_AB3100_ATTR(freq)
SHOW_AB3100_ATTR(paf)
SHOW_AB3100_ATTR(imeich)
SHOW_AB3100_ATTR(cid)
SHOW_AB3100_ATTR(fac)
SHOW_AB3100_ATTR(tac)
SHOW_AB3100_ATTR(svn)

static struct device_attribute ab3100_otp_attrs[] = {
	__ATTR(locked, S_IRUGO, ab3100_otp_locked_show, NULL),
	__ATTR(freq, S_IRUGO, ab3100_otp_freq_show, NULL),
	__ATTR(paf, S_IRUGO, ab3100_otp_paf_show, NULL),
	__ATTR(imeich, S_IRUGO, ab3100_otp_imeich_show, NULL),
	__ATTR(cid, S_IRUGO, ab3100_otp_cid_show, NULL),
	__ATTR(fac, S_IRUGO, ab3100_otp_fac_show, NULL),
	__ATTR(tac, S_IRUGO, ab3100_otp_tac_show, NULL),
	__ATTR(svn, S_IRUGO, ab3100_otp_svn_show, NULL),
};

/*
 * Probe: allocate state, read and cache the OTP once, then expose it
 * through sysfs attributes and (optionally) a debugfs dump file.
 * __init is valid here because the driver is registered with
 * platform_driver_probe() below.
 */
static int __init ab3100_otp_probe(struct platform_device *pdev)
{
	struct ab3100_otp *otp;
	int err = 0;
	int i;

	otp = kzalloc(sizeof(struct ab3100_otp), GFP_KERNEL);
	if (!otp) {
		dev_err(&pdev->dev, "could not allocate AB3100 OTP device\n");
		return -ENOMEM;
	}
	otp->dev = &pdev->dev;

	/* Replace platform data coming in with a local struct */
	platform_set_drvdata(pdev, otp);

	err = ab3100_otp_read(otp);
	if (err)
		goto err_otp_read;

	dev_info(&pdev->dev, "AB3100 OTP readout registered\n");

	/* sysfs entries */
	for (i = 0; i < ARRAY_SIZE(ab3100_otp_attrs); i++) {
		err = device_create_file(&pdev->dev,
					 &ab3100_otp_attrs[i]);
		if (err)
			goto err_create_file;
	}

	/* debugfs entries */
	err = ab3100_otp_init_debugfs(&pdev->dev, otp);
	if (err)
		goto err_init_debugfs;

	return 0;

err_init_debugfs:
err_create_file:
	/* removes only the attrs created before the failure (or all of
	 * them when debugfs init failed, since i == ARRAY_SIZE then) */
	while (--i >= 0)
		device_remove_file(&pdev->dev, &ab3100_otp_attrs[i]);
err_otp_read:
	kfree(otp);
	return err;
}

static int __exit ab3100_otp_remove(struct platform_device *pdev)
{
	struct ab3100_otp *otp = platform_get_drvdata(pdev);
	int i;

	for (i = 0; i < ARRAY_SIZE(ab3100_otp_attrs); i++)
		device_remove_file(&pdev->dev, &ab3100_otp_attrs[i]);
	ab3100_otp_exit_debugfs(otp);
	kfree(otp);
	return 0;
}

static struct platform_driver ab3100_otp_driver = {
	.driver = {
		.name = "ab3100-otp",
		.owner = THIS_MODULE,
	},
	.remove	 = __exit_p(ab3100_otp_remove),
};

static int __init ab3100_otp_init(void)
{
	return platform_driver_probe(&ab3100_otp_driver,
				     ab3100_otp_probe);
}

static void __exit ab3100_otp_exit(void)
{
	platform_driver_unregister(&ab3100_otp_driver);
}

module_init(ab3100_otp_init);
module_exit(ab3100_otp_exit);

MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>");
MODULE_DESCRIPTION("AB3100 OTP Readout Driver");
MODULE_LICENSE("GPL");
gpl-2.0
jrior001/evitaul-3.4.100-HTC
arch/powerpc/platforms/83xx/mpc837x_mds.c
8892
2705
/* * arch/powerpc/platforms/83xx/mpc837x_mds.c * * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved. * * MPC837x MDS board specific routines * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/pci.h> #include <linux/of.h> #include <linux/of_platform.h> #include <asm/time.h> #include <asm/ipic.h> #include <asm/udbg.h> #include <asm/prom.h> #include <sysdev/fsl_pci.h> #include "mpc83xx.h" #define BCSR12_USB_SER_MASK 0x8a #define BCSR12_USB_SER_PIN 0x80 #define BCSR12_USB_SER_DEVICE 0x02 static int mpc837xmds_usb_cfg(void) { struct device_node *np; const void *phy_type, *mode; void __iomem *bcsr_regs = NULL; u8 bcsr12; int ret; ret = mpc837x_usb_cfg(); if (ret) return ret; /* Map BCSR area */ np = of_find_compatible_node(NULL, NULL, "fsl,mpc837xmds-bcsr"); if (np) { bcsr_regs = of_iomap(np, 0); of_node_put(np); } if (!bcsr_regs) return -1; np = of_find_node_by_name(NULL, "usb"); if (!np) { ret = -ENODEV; goto out; } phy_type = of_get_property(np, "phy_type", NULL); if (phy_type && !strcmp(phy_type, "ulpi")) { clrbits8(bcsr_regs + 12, BCSR12_USB_SER_PIN); } else if (phy_type && !strcmp(phy_type, "serial")) { mode = of_get_property(np, "dr_mode", NULL); bcsr12 = in_8(bcsr_regs + 12) & ~BCSR12_USB_SER_MASK; bcsr12 |= BCSR12_USB_SER_PIN; if (mode && !strcmp(mode, "peripheral")) bcsr12 |= BCSR12_USB_SER_DEVICE; out_8(bcsr_regs + 12, bcsr12); } else { printk(KERN_ERR "USB DR: unsupported PHY\n"); } of_node_put(np); out: iounmap(bcsr_regs); return ret; } /* ************************************************************************ * * Setup the architecture * */ static void __init mpc837x_mds_setup_arch(void) { if (ppc_md.progress) ppc_md.progress("mpc837x_mds_setup_arch()", 0); mpc83xx_setup_pci(); mpc837xmds_usb_cfg(); } 
machine_device_initcall(mpc837x_mds, mpc83xx_declare_of_platform_devices); /* * Called very early, MMU is off, device-tree isn't unflattened */ static int __init mpc837x_mds_probe(void) { unsigned long root = of_get_flat_dt_root(); return of_flat_dt_is_compatible(root, "fsl,mpc837xmds"); } define_machine(mpc837x_mds) { .name = "MPC837x MDS", .probe = mpc837x_mds_probe, .setup_arch = mpc837x_mds_setup_arch, .init_IRQ = mpc83xx_ipic_init_IRQ, .get_irq = ipic_get_irq, .restart = mpc83xx_restart, .time_init = mpc83xx_time_init, .calibrate_decr = generic_calibrate_decr, .progress = udbg_progress, };
gpl-2.0
Stane1983/android_kernel_xiaomi_dior_DEPRECATED
drivers/staging/rtl8192u/r8190_rtl8256.c
9404
10482
/* This is part of the rtl8192 driver released under the GPL (See file COPYING for details). This files contains programming code for the rtl8256 radio frontend. *Many* thanks to Realtek Corp. for their great support! */ #include "r8192U.h" #include "r8192U_hw.h" #include "r819xU_phyreg.h" #include "r819xU_phy.h" #include "r8190_rtl8256.h" /*-------------------------------------------------------------------------- * Overview: set RF band width (20M or 40M) * Input: struct net_device* dev * WIRELESS_BANDWIDTH_E Bandwidth //20M or 40M * Output: NONE * Return: NONE * Note: 8226 support both 20M and 40 MHz *---------------------------------------------------------------------------*/ void PHY_SetRF8256Bandwidth(struct net_device* dev , HT_CHANNEL_WIDTH Bandwidth) //20M or 40M { u8 eRFPath; struct r8192_priv *priv = ieee80211_priv(dev); //for(eRFPath = RF90_PATH_A; eRFPath <pHalData->NumTotalRFPath; eRFPath++) for(eRFPath = 0; eRFPath <RF90_PATH_MAX; eRFPath++) { if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath)) continue; switch(Bandwidth) { case HT_CHANNEL_WIDTH_20: if(priv->card_8192_version == VERSION_819xU_A || priv->card_8192_version == VERSION_819xU_B)// 8256 D-cut, E-cut, xiong: consider it later! { rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x0b, bMask12Bits, 0x100); //phy para:1ba rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x2c, bMask12Bits, 0x3d7); rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x0e, bMask12Bits, 0x021); //cosa add for sd3's request 01/23/2008 rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x14, bMask12Bits, 0x5ab); } else { RT_TRACE(COMP_ERR, "PHY_SetRF8256Bandwidth(): unknown hardware version\n"); } break; case HT_CHANNEL_WIDTH_20_40: if(priv->card_8192_version == VERSION_819xU_A ||priv->card_8192_version == VERSION_819xU_B)// 8256 D-cut, E-cut, xiong: consider it later! 
{ rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x0b, bMask12Bits, 0x300); //phy para:3ba rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x2c, bMask12Bits, 0x3df); rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x0e, bMask12Bits, 0x0a1); //cosa add for sd3's request 01/23/2008 if(priv->chan == 3 || priv->chan == 9) //I need to set priv->chan whenever current channel changes rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x14, bMask12Bits, 0x59b); else rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x14, bMask12Bits, 0x5ab); } else { RT_TRACE(COMP_ERR, "PHY_SetRF8256Bandwidth(): unknown hardware version\n"); } break; default: RT_TRACE(COMP_ERR, "PHY_SetRF8256Bandwidth(): unknown Bandwidth: %#X\n",Bandwidth ); break; } } return; } /*-------------------------------------------------------------------------- * Overview: Interface to config 8256 * Input: struct net_device* dev * Output: NONE * Return: NONE *---------------------------------------------------------------------------*/ void PHY_RF8256_Config(struct net_device* dev) { struct r8192_priv *priv = ieee80211_priv(dev); // Initialize general global value // // TODO: Extend RF_PATH_C and RF_PATH_D in the future priv->NumTotalRFPath = RTL819X_TOTAL_RF_PATH; // Config BB and RF phy_RF8256_Config_ParaFile(dev); return; } /*-------------------------------------------------------------------------- * Overview: Interface to config 8256 * Input: struct net_device* dev * Output: NONE * Return: NONE *---------------------------------------------------------------------------*/ void phy_RF8256_Config_ParaFile(struct net_device* dev) { u32 u4RegValue = 0; //static s1Byte szRadioAFile[] = RTL819X_PHY_RADIO_A; //static s1Byte szRadioBFile[] = RTL819X_PHY_RADIO_B; //static s1Byte szRadioCFile[] = RTL819X_PHY_RADIO_C; //static s1Byte szRadioDFile[] = RTL819X_PHY_RADIO_D; u8 eRFPath; BB_REGISTER_DEFINITION_T *pPhyReg; struct r8192_priv *priv = ieee80211_priv(dev); u32 RegOffSetToBeCheck = 0x3; 
u32 RegValueToBeCheck = 0x7f1; u32 RF3_Final_Value = 0; u8 ConstRetryTimes = 5, RetryTimes = 5; u8 ret = 0; //3//----------------------------------------------------------------- //3// <2> Initialize RF //3//----------------------------------------------------------------- for(eRFPath = (RF90_RADIO_PATH_E)RF90_PATH_A; eRFPath <priv->NumTotalRFPath; eRFPath++) { if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath)) continue; pPhyReg = &priv->PHYRegDef[eRFPath]; // Joseph test for shorten RF config // pHalData->RfReg0Value[eRFPath] = rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, rGlobalCtrl, bMaskDWord); /*----Store original RFENV control type----*/ switch(eRFPath) { case RF90_PATH_A: case RF90_PATH_C: u4RegValue = rtl8192_QueryBBReg(dev, pPhyReg->rfintfs, bRFSI_RFENV); break; case RF90_PATH_B : case RF90_PATH_D: u4RegValue = rtl8192_QueryBBReg(dev, pPhyReg->rfintfs, bRFSI_RFENV<<16); break; } /*----Set RF_ENV enable----*/ rtl8192_setBBreg(dev, pPhyReg->rfintfe, bRFSI_RFENV<<16, 0x1); /*----Set RF_ENV output high----*/ rtl8192_setBBreg(dev, pPhyReg->rfintfo, bRFSI_RFENV, 0x1); /* Set bit number of Address and Data for RF register */ rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); // Set 0 to 4 bits for Z-serial and set 1 to 6 bits for 8258 rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); // Set 0 to 12 bits for Z-serial and 8258, and set 1 to 14 bits for ??? 
rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E) eRFPath, 0x0, bMask12Bits, 0xbf); /*----Check RF block (for FPGA platform only)----*/ // TODO: this function should be removed on ASIC , Emily 2007.2.2 if (rtl8192_phy_checkBBAndRF(dev, HW90_BLOCK_RF, (RF90_RADIO_PATH_E)eRFPath)) { RT_TRACE(COMP_ERR, "PHY_RF8256_Config():Check Radio[%d] Fail!!\n", eRFPath); goto phy_RF8256_Config_ParaFile_Fail; } RetryTimes = ConstRetryTimes; RF3_Final_Value = 0; /*----Initialize RF fom connfiguration file----*/ switch(eRFPath) { case RF90_PATH_A: while(RF3_Final_Value!=RegValueToBeCheck && RetryTimes!=0) { ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,(RF90_RADIO_PATH_E)eRFPath); RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, RegOffSetToBeCheck, bMask12Bits); RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", eRFPath, RegOffSetToBeCheck, RF3_Final_Value); RetryTimes--; } break; case RF90_PATH_B: while(RF3_Final_Value!=RegValueToBeCheck && RetryTimes!=0) { ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,(RF90_RADIO_PATH_E)eRFPath); RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, RegOffSetToBeCheck, bMask12Bits); RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", eRFPath, RegOffSetToBeCheck, RF3_Final_Value); RetryTimes--; } break; case RF90_PATH_C: while(RF3_Final_Value!=RegValueToBeCheck && RetryTimes!=0) { ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,(RF90_RADIO_PATH_E)eRFPath); RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, RegOffSetToBeCheck, bMask12Bits); RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", eRFPath, RegOffSetToBeCheck, RF3_Final_Value); RetryTimes--; } break; case RF90_PATH_D: while(RF3_Final_Value!=RegValueToBeCheck && RetryTimes!=0) { ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,(RF90_RADIO_PATH_E)eRFPath); RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, RegOffSetToBeCheck, bMask12Bits); RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", 
eRFPath, RegOffSetToBeCheck, RF3_Final_Value); RetryTimes--; } break; } /*----Restore RFENV control type----*/; switch(eRFPath) { case RF90_PATH_A: case RF90_PATH_C: rtl8192_setBBreg(dev, pPhyReg->rfintfs, bRFSI_RFENV, u4RegValue); break; case RF90_PATH_B : case RF90_PATH_D: rtl8192_setBBreg(dev, pPhyReg->rfintfs, bRFSI_RFENV<<16, u4RegValue); break; } if(ret){ RT_TRACE(COMP_ERR, "phy_RF8256_Config_ParaFile():Radio[%d] Fail!!", eRFPath); goto phy_RF8256_Config_ParaFile_Fail; } } RT_TRACE(COMP_PHY, "PHY Initialization Success\n") ; return ; phy_RF8256_Config_ParaFile_Fail: RT_TRACE(COMP_ERR, "PHY Initialization failed\n") ; return ; } void PHY_SetRF8256CCKTxPower(struct net_device* dev, u8 powerlevel) { u32 TxAGC=0; struct r8192_priv *priv = ieee80211_priv(dev); //modified by vivi, 20080109 TxAGC = powerlevel; if(priv->bDynamicTxLowPower == TRUE ) //cosa 05/22/2008 for scan { if(priv->CustomerID == RT_CID_819x_Netcore) TxAGC = 0x22; else TxAGC += priv->CckPwEnl; } if(TxAGC > 0x24) TxAGC = 0x24; rtl8192_setBBreg(dev, rTxAGC_CCK_Mcs32, bTxAGCRateCCK, TxAGC); } void PHY_SetRF8256OFDMTxPower(struct net_device* dev, u8 powerlevel) { struct r8192_priv *priv = ieee80211_priv(dev); //Joseph TxPower for 8192 testing u32 writeVal, powerBase0, powerBase1, writeVal_tmp; u8 index = 0; u16 RegOffset[6] = {0xe00, 0xe04, 0xe10, 0xe14, 0xe18, 0xe1c}; u8 byte0, byte1, byte2, byte3; powerBase0 = powerlevel + priv->TxPowerDiff; //OFDM rates powerBase0 = (powerBase0<<24) | (powerBase0<<16) |(powerBase0<<8) |powerBase0; powerBase1 = powerlevel; //MCS rates powerBase1 = (powerBase1<<24) | (powerBase1<<16) |(powerBase1<<8) |powerBase1; for(index=0; index<6; index++) { writeVal = priv->MCSTxPowerLevelOriginalOffset[index] + ((index<2)?powerBase0:powerBase1); byte0 = (u8)(writeVal & 0x7f); byte1 = (u8)((writeVal & 0x7f00)>>8); byte2 = (u8)((writeVal & 0x7f0000)>>16); byte3 = (u8)((writeVal & 0x7f000000)>>24); if(byte0 > 0x24) // Max power index = 0x24 byte0 = 0x24; if(byte1 > 0x24) byte1 = 
0x24; if(byte2 > 0x24) byte2 = 0x24; if(byte3 > 0x24) byte3 = 0x24; //for tx power track if(index == 3) { writeVal_tmp = (byte3<<24) | (byte2<<16) |(byte1<<8) |byte0; priv->Pwr_Track = writeVal_tmp; } if(priv->bDynamicTxHighPower == TRUE) //Add by Jacken 2008/03/06 { // Emily, 20080613. Set low tx power for both MCS and legacy OFDM writeVal = 0x03030303; } else { writeVal = (byte3<<24) | (byte2<<16) |(byte1<<8) |byte0; } rtl8192_setBBreg(dev, RegOffset[index], 0x7f7f7f7f, writeVal); } return; }
gpl-2.0
sleekmason/cyanogenmod12
drivers/staging/rtl8192u/r8190_rtl8256.c
9404
10482
/* This is part of the rtl8192 driver released under the GPL (See file COPYING for details). This files contains programming code for the rtl8256 radio frontend. *Many* thanks to Realtek Corp. for their great support! */ #include "r8192U.h" #include "r8192U_hw.h" #include "r819xU_phyreg.h" #include "r819xU_phy.h" #include "r8190_rtl8256.h" /*-------------------------------------------------------------------------- * Overview: set RF band width (20M or 40M) * Input: struct net_device* dev * WIRELESS_BANDWIDTH_E Bandwidth //20M or 40M * Output: NONE * Return: NONE * Note: 8226 support both 20M and 40 MHz *---------------------------------------------------------------------------*/ void PHY_SetRF8256Bandwidth(struct net_device* dev , HT_CHANNEL_WIDTH Bandwidth) //20M or 40M { u8 eRFPath; struct r8192_priv *priv = ieee80211_priv(dev); //for(eRFPath = RF90_PATH_A; eRFPath <pHalData->NumTotalRFPath; eRFPath++) for(eRFPath = 0; eRFPath <RF90_PATH_MAX; eRFPath++) { if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath)) continue; switch(Bandwidth) { case HT_CHANNEL_WIDTH_20: if(priv->card_8192_version == VERSION_819xU_A || priv->card_8192_version == VERSION_819xU_B)// 8256 D-cut, E-cut, xiong: consider it later! { rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x0b, bMask12Bits, 0x100); //phy para:1ba rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x2c, bMask12Bits, 0x3d7); rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x0e, bMask12Bits, 0x021); //cosa add for sd3's request 01/23/2008 rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x14, bMask12Bits, 0x5ab); } else { RT_TRACE(COMP_ERR, "PHY_SetRF8256Bandwidth(): unknown hardware version\n"); } break; case HT_CHANNEL_WIDTH_20_40: if(priv->card_8192_version == VERSION_819xU_A ||priv->card_8192_version == VERSION_819xU_B)// 8256 D-cut, E-cut, xiong: consider it later! 
{ rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x0b, bMask12Bits, 0x300); //phy para:3ba rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x2c, bMask12Bits, 0x3df); rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x0e, bMask12Bits, 0x0a1); //cosa add for sd3's request 01/23/2008 if(priv->chan == 3 || priv->chan == 9) //I need to set priv->chan whenever current channel changes rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x14, bMask12Bits, 0x59b); else rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, 0x14, bMask12Bits, 0x5ab); } else { RT_TRACE(COMP_ERR, "PHY_SetRF8256Bandwidth(): unknown hardware version\n"); } break; default: RT_TRACE(COMP_ERR, "PHY_SetRF8256Bandwidth(): unknown Bandwidth: %#X\n",Bandwidth ); break; } } return; } /*-------------------------------------------------------------------------- * Overview: Interface to config 8256 * Input: struct net_device* dev * Output: NONE * Return: NONE *---------------------------------------------------------------------------*/ void PHY_RF8256_Config(struct net_device* dev) { struct r8192_priv *priv = ieee80211_priv(dev); // Initialize general global value // // TODO: Extend RF_PATH_C and RF_PATH_D in the future priv->NumTotalRFPath = RTL819X_TOTAL_RF_PATH; // Config BB and RF phy_RF8256_Config_ParaFile(dev); return; } /*-------------------------------------------------------------------------- * Overview: Interface to config 8256 * Input: struct net_device* dev * Output: NONE * Return: NONE *---------------------------------------------------------------------------*/ void phy_RF8256_Config_ParaFile(struct net_device* dev) { u32 u4RegValue = 0; //static s1Byte szRadioAFile[] = RTL819X_PHY_RADIO_A; //static s1Byte szRadioBFile[] = RTL819X_PHY_RADIO_B; //static s1Byte szRadioCFile[] = RTL819X_PHY_RADIO_C; //static s1Byte szRadioDFile[] = RTL819X_PHY_RADIO_D; u8 eRFPath; BB_REGISTER_DEFINITION_T *pPhyReg; struct r8192_priv *priv = ieee80211_priv(dev); u32 RegOffSetToBeCheck = 0x3; 
u32 RegValueToBeCheck = 0x7f1; u32 RF3_Final_Value = 0; u8 ConstRetryTimes = 5, RetryTimes = 5; u8 ret = 0; //3//----------------------------------------------------------------- //3// <2> Initialize RF //3//----------------------------------------------------------------- for(eRFPath = (RF90_RADIO_PATH_E)RF90_PATH_A; eRFPath <priv->NumTotalRFPath; eRFPath++) { if (!rtl8192_phy_CheckIsLegalRFPath(dev, eRFPath)) continue; pPhyReg = &priv->PHYRegDef[eRFPath]; // Joseph test for shorten RF config // pHalData->RfReg0Value[eRFPath] = rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, rGlobalCtrl, bMaskDWord); /*----Store original RFENV control type----*/ switch(eRFPath) { case RF90_PATH_A: case RF90_PATH_C: u4RegValue = rtl8192_QueryBBReg(dev, pPhyReg->rfintfs, bRFSI_RFENV); break; case RF90_PATH_B : case RF90_PATH_D: u4RegValue = rtl8192_QueryBBReg(dev, pPhyReg->rfintfs, bRFSI_RFENV<<16); break; } /*----Set RF_ENV enable----*/ rtl8192_setBBreg(dev, pPhyReg->rfintfe, bRFSI_RFENV<<16, 0x1); /*----Set RF_ENV output high----*/ rtl8192_setBBreg(dev, pPhyReg->rfintfo, bRFSI_RFENV, 0x1); /* Set bit number of Address and Data for RF register */ rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, b3WireAddressLength, 0x0); // Set 0 to 4 bits for Z-serial and set 1 to 6 bits for 8258 rtl8192_setBBreg(dev, pPhyReg->rfHSSIPara2, b3WireDataLength, 0x0); // Set 0 to 12 bits for Z-serial and 8258, and set 1 to 14 bits for ??? 
rtl8192_phy_SetRFReg(dev, (RF90_RADIO_PATH_E) eRFPath, 0x0, bMask12Bits, 0xbf); /*----Check RF block (for FPGA platform only)----*/ // TODO: this function should be removed on ASIC , Emily 2007.2.2 if (rtl8192_phy_checkBBAndRF(dev, HW90_BLOCK_RF, (RF90_RADIO_PATH_E)eRFPath)) { RT_TRACE(COMP_ERR, "PHY_RF8256_Config():Check Radio[%d] Fail!!\n", eRFPath); goto phy_RF8256_Config_ParaFile_Fail; } RetryTimes = ConstRetryTimes; RF3_Final_Value = 0; /*----Initialize RF fom connfiguration file----*/ switch(eRFPath) { case RF90_PATH_A: while(RF3_Final_Value!=RegValueToBeCheck && RetryTimes!=0) { ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,(RF90_RADIO_PATH_E)eRFPath); RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, RegOffSetToBeCheck, bMask12Bits); RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", eRFPath, RegOffSetToBeCheck, RF3_Final_Value); RetryTimes--; } break; case RF90_PATH_B: while(RF3_Final_Value!=RegValueToBeCheck && RetryTimes!=0) { ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,(RF90_RADIO_PATH_E)eRFPath); RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, RegOffSetToBeCheck, bMask12Bits); RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", eRFPath, RegOffSetToBeCheck, RF3_Final_Value); RetryTimes--; } break; case RF90_PATH_C: while(RF3_Final_Value!=RegValueToBeCheck && RetryTimes!=0) { ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,(RF90_RADIO_PATH_E)eRFPath); RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, RegOffSetToBeCheck, bMask12Bits); RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", eRFPath, RegOffSetToBeCheck, RF3_Final_Value); RetryTimes--; } break; case RF90_PATH_D: while(RF3_Final_Value!=RegValueToBeCheck && RetryTimes!=0) { ret = rtl8192_phy_ConfigRFWithHeaderFile(dev,(RF90_RADIO_PATH_E)eRFPath); RF3_Final_Value = rtl8192_phy_QueryRFReg(dev, (RF90_RADIO_PATH_E)eRFPath, RegOffSetToBeCheck, bMask12Bits); RT_TRACE(COMP_RF, "RF %d %d register final value: %x\n", 
eRFPath, RegOffSetToBeCheck, RF3_Final_Value); RetryTimes--; } break; } /*----Restore RFENV control type----*/; switch(eRFPath) { case RF90_PATH_A: case RF90_PATH_C: rtl8192_setBBreg(dev, pPhyReg->rfintfs, bRFSI_RFENV, u4RegValue); break; case RF90_PATH_B : case RF90_PATH_D: rtl8192_setBBreg(dev, pPhyReg->rfintfs, bRFSI_RFENV<<16, u4RegValue); break; } if(ret){ RT_TRACE(COMP_ERR, "phy_RF8256_Config_ParaFile():Radio[%d] Fail!!", eRFPath); goto phy_RF8256_Config_ParaFile_Fail; } } RT_TRACE(COMP_PHY, "PHY Initialization Success\n") ; return ; phy_RF8256_Config_ParaFile_Fail: RT_TRACE(COMP_ERR, "PHY Initialization failed\n") ; return ; } void PHY_SetRF8256CCKTxPower(struct net_device* dev, u8 powerlevel) { u32 TxAGC=0; struct r8192_priv *priv = ieee80211_priv(dev); //modified by vivi, 20080109 TxAGC = powerlevel; if(priv->bDynamicTxLowPower == TRUE ) //cosa 05/22/2008 for scan { if(priv->CustomerID == RT_CID_819x_Netcore) TxAGC = 0x22; else TxAGC += priv->CckPwEnl; } if(TxAGC > 0x24) TxAGC = 0x24; rtl8192_setBBreg(dev, rTxAGC_CCK_Mcs32, bTxAGCRateCCK, TxAGC); } void PHY_SetRF8256OFDMTxPower(struct net_device* dev, u8 powerlevel) { struct r8192_priv *priv = ieee80211_priv(dev); //Joseph TxPower for 8192 testing u32 writeVal, powerBase0, powerBase1, writeVal_tmp; u8 index = 0; u16 RegOffset[6] = {0xe00, 0xe04, 0xe10, 0xe14, 0xe18, 0xe1c}; u8 byte0, byte1, byte2, byte3; powerBase0 = powerlevel + priv->TxPowerDiff; //OFDM rates powerBase0 = (powerBase0<<24) | (powerBase0<<16) |(powerBase0<<8) |powerBase0; powerBase1 = powerlevel; //MCS rates powerBase1 = (powerBase1<<24) | (powerBase1<<16) |(powerBase1<<8) |powerBase1; for(index=0; index<6; index++) { writeVal = priv->MCSTxPowerLevelOriginalOffset[index] + ((index<2)?powerBase0:powerBase1); byte0 = (u8)(writeVal & 0x7f); byte1 = (u8)((writeVal & 0x7f00)>>8); byte2 = (u8)((writeVal & 0x7f0000)>>16); byte3 = (u8)((writeVal & 0x7f000000)>>24); if(byte0 > 0x24) // Max power index = 0x24 byte0 = 0x24; if(byte1 > 0x24) byte1 = 
0x24; if(byte2 > 0x24) byte2 = 0x24; if(byte3 > 0x24) byte3 = 0x24; //for tx power track if(index == 3) { writeVal_tmp = (byte3<<24) | (byte2<<16) |(byte1<<8) |byte0; priv->Pwr_Track = writeVal_tmp; } if(priv->bDynamicTxHighPower == TRUE) //Add by Jacken 2008/03/06 { // Emily, 20080613. Set low tx power for both MCS and legacy OFDM writeVal = 0x03030303; } else { writeVal = (byte3<<24) | (byte2<<16) |(byte1<<8) |byte0; } rtl8192_setBBreg(dev, RegOffset[index], 0x7f7f7f7f, writeVal); } return; }
gpl-2.0
buglabs/android-froyo-kernel
arch/ia64/kernel/ftrace.c
11708
5592
/* * Dynamic function tracing support. * * Copyright (C) 2008 Shaohua Li <shaohua.li@intel.com> * * For licencing details, see COPYING. * * Defines low-level handling of mcount calls when the kernel * is compiled with the -pg flag. When using dynamic ftrace, the * mcount call-sites get patched lazily with NOP till they are * enabled. All code mutation routines here take effect atomically. */ #include <linux/uaccess.h> #include <linux/ftrace.h> #include <asm/cacheflush.h> #include <asm/patch.h> /* In IA64, each function will be added below two bundles with -pg option */ static unsigned char __attribute__((aligned(8))) ftrace_orig_code[MCOUNT_INSN_SIZE] = { 0x02, 0x40, 0x31, 0x10, 0x80, 0x05, /* alloc r40=ar.pfs,12,8,0 */ 0xb0, 0x02, 0x00, 0x00, 0x42, 0x40, /* mov r43=r0;; */ 0x05, 0x00, 0xc4, 0x00, /* mov r42=b0 */ 0x11, 0x48, 0x01, 0x02, 0x00, 0x21, /* mov r41=r1 */ 0x00, 0x00, 0x00, 0x02, 0x00, 0x00, /* nop.i 0x0 */ 0x08, 0x00, 0x00, 0x50 /* br.call.sptk.many b0 = _mcount;; */ }; struct ftrace_orig_insn { u64 dummy1, dummy2, dummy3; u64 dummy4:64-41+13; u64 imm20:20; u64 dummy5:3; u64 sign:1; u64 dummy6:4; }; /* mcount stub will be converted below for nop */ static unsigned char ftrace_nop_code[MCOUNT_INSN_SIZE] = { 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MII] nop.m 0x0 */ 0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */ 0x00, 0x00, 0x04, 0x00, /* nop.i 0x0 */ 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] nop.m 0x0 */ 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* nop.x 0x0;; */ 0x00, 0x00, 0x04, 0x00 }; static unsigned char *ftrace_nop_replace(void) { return ftrace_nop_code; } /* * mcount stub will be converted below for call * Note: Just the last instruction is changed against nop * */ static unsigned char __attribute__((aligned(8))) ftrace_call_code[MCOUNT_INSN_SIZE] = { 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MII] nop.m 0x0 */ 0x30, 0x00, 0x00, 0x60, 0x00, 0x00, /* mov r3=ip */ 0x00, 0x00, 0x04, 0x00, /* nop.i 0x0 */ 0x05, 0x00, 0x00, 0x00, 0x01, 0x00, /* [MLX] 
nop.m 0x0 */ 0xff, 0xff, 0xff, 0xff, 0x7f, 0x00, /* brl.many .;;*/ 0xf8, 0xff, 0xff, 0xc8 }; struct ftrace_call_insn { u64 dummy1, dummy2; u64 dummy3:48; u64 imm39_l:16; u64 imm39_h:23; u64 dummy4:13; u64 imm20:20; u64 dummy5:3; u64 i:1; u64 dummy6:4; }; static unsigned char *ftrace_call_replace(unsigned long ip, unsigned long addr) { struct ftrace_call_insn *code = (void *)ftrace_call_code; unsigned long offset = addr - (ip + 0x10); code->imm39_l = offset >> 24; code->imm39_h = offset >> 40; code->imm20 = offset >> 4; code->i = offset >> 63; return ftrace_call_code; } static int ftrace_modify_code(unsigned long ip, unsigned char *old_code, unsigned char *new_code, int do_check) { unsigned char replaced[MCOUNT_INSN_SIZE]; /* * Note: Due to modules and __init, code can * disappear and change, we need to protect against faulting * as well as code changing. We do this by using the * probe_kernel_* functions. * * No real locking needed, this code is run through * kstop_machine, or before SMP starts. 
*/ if (!do_check) goto skip_check; /* read the text we want to modify */ if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE)) return -EFAULT; /* Make sure it is what we expect it to be */ if (memcmp(replaced, old_code, MCOUNT_INSN_SIZE) != 0) return -EINVAL; skip_check: /* replace the text with the new text */ if (probe_kernel_write(((void *)ip), new_code, MCOUNT_INSN_SIZE)) return -EPERM; flush_icache_range(ip, ip + MCOUNT_INSN_SIZE); return 0; } static int ftrace_make_nop_check(struct dyn_ftrace *rec, unsigned long addr) { unsigned char __attribute__((aligned(8))) replaced[MCOUNT_INSN_SIZE]; unsigned long ip = rec->ip; if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE)) return -EFAULT; if (rec->flags & FTRACE_FL_CONVERTED) { struct ftrace_call_insn *call_insn, *tmp_call; call_insn = (void *)ftrace_call_code; tmp_call = (void *)replaced; call_insn->imm39_l = tmp_call->imm39_l; call_insn->imm39_h = tmp_call->imm39_h; call_insn->imm20 = tmp_call->imm20; call_insn->i = tmp_call->i; if (memcmp(replaced, ftrace_call_code, MCOUNT_INSN_SIZE) != 0) return -EINVAL; return 0; } else { struct ftrace_orig_insn *call_insn, *tmp_call; call_insn = (void *)ftrace_orig_code; tmp_call = (void *)replaced; call_insn->sign = tmp_call->sign; call_insn->imm20 = tmp_call->imm20; if (memcmp(replaced, ftrace_orig_code, MCOUNT_INSN_SIZE) != 0) return -EINVAL; return 0; } } int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr) { int ret; char *new; ret = ftrace_make_nop_check(rec, addr); if (ret) return ret; new = ftrace_nop_replace(); return ftrace_modify_code(rec->ip, NULL, new, 0); } int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr) { unsigned long ip = rec->ip; unsigned char *old, *new; old= ftrace_nop_replace(); new = ftrace_call_replace(ip, addr); return ftrace_modify_code(ip, old, new, 1); } /* in IA64, _mcount can't directly call ftrace_stub. 
Only jump is ok */ int ftrace_update_ftrace_func(ftrace_func_t func) { unsigned long ip; unsigned long addr = ((struct fnptr *)ftrace_call)->ip; if (func == ftrace_stub) return 0; ip = ((struct fnptr *)func)->ip; ia64_patch_imm64(addr + 2, ip); flush_icache_range(addr, addr + 16); return 0; } /* run from kstop_machine */ int __init ftrace_dyn_arch_init(void *data) { *(unsigned long *)data = 0; return 0; }
gpl-2.0
InfinitiveOS-Devices/android_kernel_motorola_msm8226
fs/ufs/symlink.c
12732
1200
/* * linux/fs/ufs/symlink.c * * Only fast symlinks left here - the rest is done by generic code. AV, 1999 * * Copyright (C) 1998 * Daniel Pirkl <daniel.pirkl@emai.cz> * Charles University, Faculty of Mathematics and Physics * * from * * linux/fs/ext2/symlink.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/symlink.c * * Copyright (C) 1991, 1992 Linus Torvalds * * ext2 symlink handling code */ #include <linux/fs.h> #include <linux/namei.h> #include "ufs_fs.h" #include "ufs.h" static void *ufs_follow_link(struct dentry *dentry, struct nameidata *nd) { struct ufs_inode_info *p = UFS_I(dentry->d_inode); nd_set_link(nd, (char*)p->i_u1.i_symlink); return NULL; } const struct inode_operations ufs_fast_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = ufs_follow_link, .setattr = ufs_setattr, }; const struct inode_operations ufs_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, .setattr = ufs_setattr, };
gpl-2.0
aldanopolis/android_kernel_motorola_msm8226
arch/arm/mach-msm/mmi_soc_info.c
189
3995
/* * Copyright (C) 2013 Motorola Mobility LLC * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/proc_fs.h> #include <linux/apanic_mmc.h> #include <linux/persistent_ram.h> #include <mach/socinfo.h> struct mmi_msm_bin { int set; int speed; int pvs; int ver; }; #define MMI_MSM_BIN_INVAL INT_MAX #define ACPU_BIN_SET BIT(0) static struct mmi_msm_bin mmi_msm_bin_info; static DEFINE_SPINLOCK(mmi_msm_bin_lock); static inline void mmi_panic_annotate(const char *str) { apanic_mmc_annotate(str); persistent_ram_ext_oldbuf_print(str); } static void __init mmi_msm_annotate_socinfo(void) { char socinfo[32]; snprintf(socinfo, sizeof(socinfo), "socinfo: id=%u, ", socinfo_get_id()); mmi_panic_annotate(socinfo); snprintf(socinfo, sizeof(socinfo), "ver=%u.%u, ", SOCINFO_VERSION_MAJOR(socinfo_get_version()), SOCINFO_VERSION_MINOR(socinfo_get_version())); mmi_panic_annotate(socinfo); snprintf(socinfo, sizeof(socinfo), "raw_id=%u, ", socinfo_get_raw_id()); mmi_panic_annotate(socinfo); snprintf(socinfo, sizeof(socinfo), "raw_ver=%u, ", socinfo_get_raw_version()); mmi_panic_annotate(socinfo); snprintf(socinfo, sizeof(socinfo), "hw_plat=%u, ", socinfo_get_platform_type()); mmi_panic_annotate(socinfo); snprintf(socinfo, sizeof(socinfo), "hw_plat_ver=%u, ", socinfo_get_platform_version()); mmi_panic_annotate(socinfo); snprintf(socinfo, sizeof(socinfo), "hw_plat_subtype=%u\n", socinfo_get_platform_subtype()); mmi_panic_annotate(socinfo); } static int mmi_acpu_proc_read(char *buf, char **start, off_t off, int count, int *eof, void *data) { int len = 
snprintf(buf, 2, "%1x", (int)data); *eof = 1; return len; } void mmi_acpu_bin_set(int *speed, int *pvs, int *ver) { unsigned long flags; spin_lock_irqsave(&mmi_msm_bin_lock, flags); if (mmi_msm_bin_info.set & ACPU_BIN_SET) { spin_unlock_irqrestore(&mmi_msm_bin_lock, flags); return; } mmi_msm_bin_info.speed = speed ? *speed : MMI_MSM_BIN_INVAL; mmi_msm_bin_info.pvs = pvs ? *pvs : MMI_MSM_BIN_INVAL; mmi_msm_bin_info.ver = ver ? *ver : MMI_MSM_BIN_INVAL; mmi_msm_bin_info.set |= ACPU_BIN_SET; spin_unlock_irqrestore(&mmi_msm_bin_lock, flags); } static void __init mmi_msm_acpu_bin_export(void) { struct proc_dir_entry *proc; unsigned long flags; char acpu[64]; spin_lock_irqsave(&mmi_msm_bin_lock, flags); if (!(mmi_msm_bin_info.set & ACPU_BIN_SET)) { spin_unlock_irqrestore(&mmi_msm_bin_lock, flags); pr_err("ACPU Bin is not available.\n"); return; } spin_unlock_irqrestore(&mmi_msm_bin_lock, flags); mmi_panic_annotate("ACPU: "); if (mmi_msm_bin_info.speed != MMI_MSM_BIN_INVAL) { snprintf(acpu, sizeof(acpu), "Speed bin %d ", mmi_msm_bin_info.speed); mmi_panic_annotate(acpu); } if (mmi_msm_bin_info.pvs != MMI_MSM_BIN_INVAL) { proc = create_proc_read_entry("cpu/msm_acpu_pvs", (S_IFREG | S_IRUGO), NULL, mmi_acpu_proc_read, (void *)mmi_msm_bin_info.pvs); if (!proc) pr_err("Failed to create /proc/cpu/msm_acpu_pvs.\n"); else proc->size = 1; snprintf(acpu, sizeof(acpu), "PVS bin %d ", mmi_msm_bin_info.pvs); mmi_panic_annotate(acpu); } if (mmi_msm_bin_info.ver != MMI_MSM_BIN_INVAL) { snprintf(acpu, sizeof(acpu), "PVS version %d ", mmi_msm_bin_info.ver); mmi_panic_annotate(acpu); } mmi_panic_annotate("\n"); } static int __init init_mmi_soc_info(void) { mmi_msm_annotate_socinfo(); mmi_msm_acpu_bin_export(); return 0; } module_init(init_mmi_soc_info); MODULE_DESCRIPTION("Motorola Mobility LLC. SOC Info"); MODULE_LICENSE("GPL v2");
gpl-2.0
SunliyMonkey/linux
net/ipv4/gre_demux.c
445
2909
/*
 * GRE over IPv4 demultiplexer driver
 *
 * Authors: Dmitry Kozlov (xeb@mail.ru)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/if.h>
#include <linux/icmp.h>
#include <linux/kernel.h>
#include <linux/kmod.h>
#include <linux/skbuff.h>
#include <linux/in.h>
#include <linux/ip.h>
#include <linux/netdevice.h>
#include <linux/if_tunnel.h>
#include <linux/spinlock.h>
#include <net/protocol.h>
#include <net/gre.h>
#include <net/icmp.h>
#include <net/route.h>
#include <net/xfrm.h>

/*
 * Per-version GRE sub-protocol handlers, indexed by the GRE version
 * field (0..GREPROTO_MAX-1).  Readers use RCU; writers publish/retire
 * entries with cmpxchg so no lock is needed.
 */
static const struct gre_protocol __rcu *gre_proto[GREPROTO_MAX] __read_mostly;

/*
 * Register @proto as the handler for GRE @version.
 * Returns 0 on success, -EINVAL for an out-of-range version, or -EBUSY
 * if another handler is already installed (cmpxchg publishes only into
 * an empty slot).
 */
int gre_add_protocol(const struct gre_protocol *proto, u8 version)
{
	if (version >= GREPROTO_MAX)
		return -EINVAL;

	return (cmpxchg((const struct gre_protocol **)&gre_proto[version], NULL, proto) == NULL) ?
		0 : -EBUSY;
}
EXPORT_SYMBOL_GPL(gre_add_protocol);

/*
 * Unregister @proto from GRE @version.  The cmpxchg only clears the
 * slot if it still holds @proto (-EBUSY otherwise), and the
 * synchronize_rcu() guarantees no RCU reader is still running the
 * handler when this returns — safe for the caller to free/unload.
 */
int gre_del_protocol(const struct gre_protocol *proto, u8 version)
{
	int ret;

	if (version >= GREPROTO_MAX)
		return -EINVAL;

	ret = (cmpxchg((const struct gre_protocol **)&gre_proto[version], proto, NULL) == proto) ?
		0 : -EBUSY;

	if (ret)
		return ret;

	synchronize_rcu();
	return 0;
}
EXPORT_SYMBOL_GPL(gre_del_protocol);

/*
 * Receive path: extract the GRE version from the header and dispatch to
 * the registered sub-protocol handler under rcu_read_lock.
 * Drops the packet (NET_RX_DROP) if the header is too short, the
 * version is unknown, or no handler is installed.
 * NOTE(review): 12 bytes covers the largest fixed GRE header this
 * demux needs before handing off — the handler re-pulls what it needs.
 */
static int gre_rcv(struct sk_buff *skb)
{
	const struct gre_protocol *proto;
	u8 ver;
	int ret;

	if (!pskb_may_pull(skb, 12))
		goto drop;

	ver = skb->data[1]&0x7f;	/* version lives in the low 3 bits; 0x7f mirrors err path */
	if (ver >= GREPROTO_MAX)
		goto drop;

	rcu_read_lock();
	proto = rcu_dereference(gre_proto[ver]);
	if (!proto || !proto->handler)
		goto drop_unlock;
	ret = proto->handler(skb);
	rcu_read_unlock();
	return ret;

drop_unlock:
	rcu_read_unlock();
drop:
	kfree_skb(skb);
	return NET_RX_DROP;
}

/*
 * ICMP error callback: locate the GRE version byte just past the inner
 * IP header of the offending packet and forward the error to the
 * matching sub-protocol's err_handler, if any.
 */
static void gre_err(struct sk_buff *skb, u32 info)
{
	const struct gre_protocol *proto;
	const struct iphdr *iph = (const struct iphdr *)skb->data;
	u8 ver = skb->data[(iph->ihl<<2) + 1]&0x7f;

	if (ver >= GREPROTO_MAX)
		return;

	rcu_read_lock();
	proto = rcu_dereference(gre_proto[ver]);
	if (proto && proto->err_handler)
		proto->err_handler(skb, info);
	rcu_read_unlock();
}

static const struct net_protocol net_gre_protocol = {
	.handler     = gre_rcv,
	.err_handler = gre_err,
	.netns_ok    = 1,
};

/* Hook the GRE demux into the IPv4 protocol table. */
static int __init gre_init(void)
{
	pr_info("GRE over IPv4 demultiplexor driver\n");

	if (inet_add_protocol(&net_gre_protocol, IPPROTO_GRE) < 0) {
		pr_err("can't add protocol\n");
		return -EAGAIN;
	}
	return 0;
}

static void __exit gre_exit(void)
{
	inet_del_protocol(&net_gre_protocol, IPPROTO_GRE);
}

module_init(gre_init);
module_exit(gre_exit);

MODULE_DESCRIPTION("GRE over IPv4 demultiplexer driver");
MODULE_AUTHOR("D. Kozlov (xeb@mail.ru)");
MODULE_LICENSE("GPL");
gpl-2.0
viaembedded/arm-soc
drivers/bcma/driver_chipcommon_b.c
1469
1338
/*
 * Broadcom specific AMBA
 * ChipCommon B Unit driver
 *
 * Copyright 2014, Hauke Mehrtens <hauke@hauke-m.de>
 *
 * Licensed under the GNU/GPL. See COPYING for details.
 */

#include "bcma_private.h"
#include <linux/export.h>
#include <linux/bcma/bcma.h>

/*
 * Poll the MMIO register at @addr until (value & @mask) == @value or
 * @timeout jiffies elapse.  Returns true on match, false (with an error
 * logged) on timeout.  Busy-waits with a 10us delay per iteration, so
 * only suitable for short waits from sleepable-or-not context alike.
 */
static bool bcma_wait_reg(struct bcma_bus *bus, void __iomem *addr, u32 mask,
			  u32 value, int timeout)
{
	unsigned long deadline = jiffies + timeout;
	u32 val;

	do {
		val = readl(addr);
		if ((val & mask) == value)
			return true;
		cpu_relax();
		udelay(10);
	} while (!time_after_eq(jiffies, deadline));

	bcma_err(bus, "Timeout waiting for register %p\n", addr);

	return false;
}

/*
 * Write @value to MII register @offset through the ChipCommon B MII
 * interface: the address goes to the control register (mii + 0x00),
 * the data to mii + 0x04, waiting for the busy bit (0x0100) to clear
 * before and after the data write.
 */
void bcma_chipco_b_mii_write(struct bcma_drv_cc_b *ccb, u32 offset, u32 value)
{
	struct bcma_bus *bus = ccb->core->bus;

	writel(offset, ccb->mii + 0x00);
	bcma_wait_reg(bus, ccb->mii + 0x00, 0x0100, 0x0000, 100);
	writel(value, ccb->mii + 0x04);
	bcma_wait_reg(bus, ccb->mii + 0x00, 0x0100, 0x0000, 100);
}
EXPORT_SYMBOL_GPL(bcma_chipco_b_mii_write);

/*
 * One-time init: map the MII register window (second address space of
 * the core).  Idempotent via the setup_done flag.  Returns 0 or -ENOMEM
 * if the ioremap fails.
 */
int bcma_core_chipcommon_b_init(struct bcma_drv_cc_b *ccb)
{
	if (ccb->setup_done)
		return 0;

	ccb->setup_done = 1;
	ccb->mii = ioremap_nocache(ccb->core->addr_s[1], BCMA_CORE_SIZE);
	if (!ccb->mii)
		return -ENOMEM;

	return 0;
}

/* Tear down: release the MII mapping if it was established. */
void bcma_core_chipcommon_b_free(struct bcma_drv_cc_b *ccb)
{
	if (ccb->mii)
		iounmap(ccb->mii);
}
gpl-2.0
erdoukki/linux-amlogic
drivers/net/wireless/mwifiex/sta_ioctl.c
1725
37664
/* * Marvell Wireless LAN device driver: functions for station ioctl * * Copyright (C) 2011, Marvell International Ltd. * * This software file (the "File") is distributed by Marvell International * Ltd. under the terms of the GNU General Public License Version 2, June 1991 * (the "License"). You may use, redistribute and/or modify this File in * accordance with the terms and conditions of the License, a copy of which * is available by writing to the Free Software Foundation, Inc., * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA or on the * worldwide web at http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt. * * THE FILE IS DISTRIBUTED AS-IS, WITHOUT WARRANTY OF ANY KIND, AND THE * IMPLIED WARRANTIES OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE * ARE EXPRESSLY DISCLAIMED. The License provides additional details about * this warranty disclaimer. */ #include "decl.h" #include "ioctl.h" #include "util.h" #include "fw.h" #include "main.h" #include "wmm.h" #include "11n.h" #include "cfg80211.h" static int disconnect_on_suspend = 1; module_param(disconnect_on_suspend, int, 0644); /* * Copies the multicast address list from device to driver. * * This function does not validate the destination memory for * size, and the calling function must ensure enough memory is * available. */ int mwifiex_copy_mcast_addr(struct mwifiex_multicast_list *mlist, struct net_device *dev) { int i = 0; struct netdev_hw_addr *ha; netdev_for_each_mc_addr(ha, dev) memcpy(&mlist->mac_list[i++], ha->addr, ETH_ALEN); return i; } /* * Wait queue completion handler. * * This function waits on a cmd wait queue. It also cancels the pending * request after waking up, in case of errors. 
*/ int mwifiex_wait_queue_complete(struct mwifiex_adapter *adapter, struct cmd_ctrl_node *cmd_queued) { int status; /* Wait for completion */ status = wait_event_interruptible(adapter->cmd_wait_q.wait, *(cmd_queued->condition)); if (status) { dev_err(adapter->dev, "cmd_wait_q terminated: %d\n", status); return status; } status = adapter->cmd_wait_q.status; adapter->cmd_wait_q.status = 0; return status; } /* * This function prepares the correct firmware command and * issues it to set the multicast list. * * This function can be used to enable promiscuous mode, or enable all * multicast packets, or to enable selective multicast. */ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, struct mwifiex_multicast_list *mcast_list) { int ret = 0; u16 old_pkt_filter; old_pkt_filter = priv->curr_pkt_filter; if (mcast_list->mode == MWIFIEX_PROMISC_MODE) { dev_dbg(priv->adapter->dev, "info: Enable Promiscuous mode\n"); priv->curr_pkt_filter |= HostCmd_ACT_MAC_PROMISCUOUS_ENABLE; priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE; } else { /* Multicast */ priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE; if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) { dev_dbg(priv->adapter->dev, "info: Enabling All Multicast!\n"); priv->curr_pkt_filter |= HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE; } else { priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_ALL_MULTICAST_ENABLE; dev_dbg(priv->adapter->dev, "info: Set multicast list=%d\n", mcast_list->num_multicast_addr); /* Send multicast addresses to firmware */ ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_MULTICAST_ADR, HostCmd_ACT_GEN_SET, 0, mcast_list); } } dev_dbg(priv->adapter->dev, "info: old_pkt_filter=%#x, curr_pkt_filter=%#x\n", old_pkt_filter, priv->curr_pkt_filter); if (old_pkt_filter != priv->curr_pkt_filter) { ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_MAC_CONTROL, HostCmd_ACT_GEN_SET, 0, &priv->curr_pkt_filter); } return ret; } /* * This function fills bss descriptor structure using provided 
* information. * beacon_ie buffer is allocated in this function. It is caller's * responsibility to free the memory. */ int mwifiex_fill_new_bss_desc(struct mwifiex_private *priv, struct cfg80211_bss *bss, struct mwifiex_bssdescriptor *bss_desc) { u8 *beacon_ie; size_t beacon_ie_len; struct mwifiex_bss_priv *bss_priv = (void *)bss->priv; const struct cfg80211_bss_ies *ies; rcu_read_lock(); ies = rcu_dereference(bss->ies); beacon_ie = kmemdup(ies->data, ies->len, GFP_ATOMIC); beacon_ie_len = ies->len; bss_desc->timestamp = ies->tsf; rcu_read_unlock(); if (!beacon_ie) { dev_err(priv->adapter->dev, " failed to alloc beacon_ie\n"); return -ENOMEM; } memcpy(bss_desc->mac_address, bss->bssid, ETH_ALEN); bss_desc->rssi = bss->signal; /* The caller of this function will free beacon_ie */ bss_desc->beacon_buf = beacon_ie; bss_desc->beacon_buf_size = beacon_ie_len; bss_desc->beacon_period = bss->beacon_interval; bss_desc->cap_info_bitmap = bss->capability; bss_desc->bss_band = bss_priv->band; bss_desc->fw_tsf = bss_priv->fw_tsf; if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_PRIVACY) { dev_dbg(priv->adapter->dev, "info: InterpretIE: AP WEP enabled\n"); bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_8021X_WEP; } else { bss_desc->privacy = MWIFIEX_802_11_PRIV_FILTER_ACCEPT_ALL; } if (bss_desc->cap_info_bitmap & WLAN_CAPABILITY_IBSS) bss_desc->bss_mode = NL80211_IFTYPE_ADHOC; else bss_desc->bss_mode = NL80211_IFTYPE_STATION; /* Disable 11ac by default. 
Enable it only where there * exist VHT_CAP IE in AP beacon */ bss_desc->disable_11ac = true; return mwifiex_update_bss_desc_with_ie(priv->adapter, bss_desc); } static int mwifiex_process_country_ie(struct mwifiex_private *priv, struct cfg80211_bss *bss) { const u8 *country_ie; u8 country_ie_len; struct mwifiex_802_11d_domain_reg *domain_info = &priv->adapter->domain_reg; rcu_read_lock(); country_ie = ieee80211_bss_get_ie(bss, WLAN_EID_COUNTRY); if (!country_ie) { rcu_read_unlock(); return 0; } country_ie_len = country_ie[1]; if (country_ie_len < IEEE80211_COUNTRY_IE_MIN_LEN) { rcu_read_unlock(); return 0; } domain_info->country_code[0] = country_ie[2]; domain_info->country_code[1] = country_ie[3]; domain_info->country_code[2] = ' '; country_ie_len -= IEEE80211_COUNTRY_STRING_LEN; domain_info->no_of_triplet = country_ie_len / sizeof(struct ieee80211_country_ie_triplet); memcpy((u8 *)domain_info->triplet, &country_ie[2] + IEEE80211_COUNTRY_STRING_LEN, country_ie_len); rcu_read_unlock(); if (mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11D_DOMAIN_INFO, HostCmd_ACT_GEN_SET, 0, NULL)) { wiphy_err(priv->adapter->wiphy, "11D: setting domain info in FW\n"); return -1; } return 0; } /* * In Ad-Hoc mode, the IBSS is created if not found in scan list. * In both Ad-Hoc and infra mode, an deauthentication is performed * first. 
*/ int mwifiex_bss_start(struct mwifiex_private *priv, struct cfg80211_bss *bss, struct cfg80211_ssid *req_ssid) { int ret; struct mwifiex_adapter *adapter = priv->adapter; struct mwifiex_bssdescriptor *bss_desc = NULL; priv->scan_block = false; if (bss) { mwifiex_process_country_ie(priv, bss); /* Allocate and fill new bss descriptor */ bss_desc = kzalloc(sizeof(struct mwifiex_bssdescriptor), GFP_KERNEL); if (!bss_desc) return -ENOMEM; ret = mwifiex_fill_new_bss_desc(priv, bss, bss_desc); if (ret) goto done; } if (priv->bss_mode == NL80211_IFTYPE_STATION) { /* Infra mode */ ret = mwifiex_deauthenticate(priv, NULL); if (ret) goto done; if (bss_desc) { u8 config_bands = 0; if (mwifiex_band_to_radio_type((u8) bss_desc->bss_band) == HostCmd_SCAN_RADIO_TYPE_BG) config_bands = BAND_B | BAND_G | BAND_GN | BAND_GAC; else config_bands = BAND_A | BAND_AN | BAND_AAC; if (!((config_bands | adapter->fw_bands) & ~adapter->fw_bands)) adapter->config_bands = config_bands; } ret = mwifiex_check_network_compatibility(priv, bss_desc); if (ret) goto done; dev_dbg(adapter->dev, "info: SSID found in scan list ... " "associating...\n"); mwifiex_stop_net_dev_queue(priv->netdev, adapter); if (netif_carrier_ok(priv->netdev)) netif_carrier_off(priv->netdev); /* Clear any past association response stored for * application retrieval */ priv->assoc_rsp_size = 0; ret = mwifiex_associate(priv, bss_desc); /* If auth type is auto and association fails using open mode, * try to connect using shared mode */ if (ret == WLAN_STATUS_NOT_SUPPORTED_AUTH_ALG && priv->sec_info.is_authtype_auto && priv->sec_info.wep_enabled) { priv->sec_info.authentication_mode = NL80211_AUTHTYPE_SHARED_KEY; ret = mwifiex_associate(priv, bss_desc); } if (bss) cfg80211_put_bss(priv->adapter->wiphy, bss); } else { /* Adhoc mode */ /* If the requested SSID matches current SSID, return */ if (bss_desc && bss_desc->ssid.ssid_len && (!mwifiex_ssid_cmp(&priv->curr_bss_params.bss_descriptor. 
ssid, &bss_desc->ssid))) { ret = 0; goto done; } /* Exit Adhoc mode first */ dev_dbg(adapter->dev, "info: Sending Adhoc Stop\n"); ret = mwifiex_deauthenticate(priv, NULL); if (ret) goto done; priv->adhoc_is_link_sensed = false; ret = mwifiex_check_network_compatibility(priv, bss_desc); mwifiex_stop_net_dev_queue(priv->netdev, adapter); if (netif_carrier_ok(priv->netdev)) netif_carrier_off(priv->netdev); if (!ret) { dev_dbg(adapter->dev, "info: network found in scan" " list. Joining...\n"); ret = mwifiex_adhoc_join(priv, bss_desc); if (bss) cfg80211_put_bss(priv->adapter->wiphy, bss); } else { dev_dbg(adapter->dev, "info: Network not found in " "the list, creating adhoc with ssid = %s\n", req_ssid->ssid); ret = mwifiex_adhoc_start(priv, req_ssid); } } done: /* beacon_ie buffer was allocated in function * mwifiex_fill_new_bss_desc(). Free it now. */ if (bss_desc) kfree(bss_desc->beacon_buf); kfree(bss_desc); return ret; } /* * IOCTL request handler to set host sleep configuration. * * This function prepares the correct firmware command and * issues it. 
*/ static int mwifiex_set_hs_params(struct mwifiex_private *priv, u16 action, int cmd_type, struct mwifiex_ds_hs_cfg *hs_cfg) { struct mwifiex_adapter *adapter = priv->adapter; int status = 0; u32 prev_cond = 0; if (!hs_cfg) return -ENOMEM; switch (action) { case HostCmd_ACT_GEN_SET: if (adapter->pps_uapsd_mode) { dev_dbg(adapter->dev, "info: Host Sleep IOCTL" " is blocked in UAPSD/PPS mode\n"); status = -1; break; } if (hs_cfg->is_invoke_hostcmd) { if (hs_cfg->conditions == HS_CFG_CANCEL) { if (!adapter->is_hs_configured) /* Already cancelled */ break; /* Save previous condition */ prev_cond = le32_to_cpu(adapter->hs_cfg .conditions); adapter->hs_cfg.conditions = cpu_to_le32(hs_cfg->conditions); } else if (hs_cfg->conditions) { adapter->hs_cfg.conditions = cpu_to_le32(hs_cfg->conditions); adapter->hs_cfg.gpio = (u8)hs_cfg->gpio; if (hs_cfg->gap) adapter->hs_cfg.gap = (u8)hs_cfg->gap; } else if (adapter->hs_cfg.conditions == cpu_to_le32(HS_CFG_CANCEL)) { /* Return failure if no parameters for HS enable */ status = -1; break; } if (cmd_type == MWIFIEX_SYNC_CMD) status = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_HS_CFG_ENH, HostCmd_ACT_GEN_SET, 0, &adapter->hs_cfg); else status = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_HS_CFG_ENH, HostCmd_ACT_GEN_SET, 0, &adapter->hs_cfg); if (hs_cfg->conditions == HS_CFG_CANCEL) /* Restore previous condition */ adapter->hs_cfg.conditions = cpu_to_le32(prev_cond); } else { adapter->hs_cfg.conditions = cpu_to_le32(hs_cfg->conditions); adapter->hs_cfg.gpio = (u8)hs_cfg->gpio; adapter->hs_cfg.gap = (u8)hs_cfg->gap; } break; case HostCmd_ACT_GEN_GET: hs_cfg->conditions = le32_to_cpu(adapter->hs_cfg.conditions); hs_cfg->gpio = adapter->hs_cfg.gpio; hs_cfg->gap = adapter->hs_cfg.gap; break; default: status = -1; break; } return status; } /* * Sends IOCTL request to cancel the existing Host Sleep configuration. 
* * This function allocates the IOCTL request buffer, fills it * with requisite parameters and calls the IOCTL handler. */ int mwifiex_cancel_hs(struct mwifiex_private *priv, int cmd_type) { struct mwifiex_ds_hs_cfg hscfg; hscfg.conditions = HS_CFG_CANCEL; hscfg.is_invoke_hostcmd = true; return mwifiex_set_hs_params(priv, HostCmd_ACT_GEN_SET, cmd_type, &hscfg); } EXPORT_SYMBOL_GPL(mwifiex_cancel_hs); /* * Sends IOCTL request to cancel the existing Host Sleep configuration. * * This function allocates the IOCTL request buffer, fills it * with requisite parameters and calls the IOCTL handler. */ int mwifiex_enable_hs(struct mwifiex_adapter *adapter) { struct mwifiex_ds_hs_cfg hscfg; struct mwifiex_private *priv; int i; if (disconnect_on_suspend) { for (i = 0; i < adapter->priv_num; i++) { priv = adapter->priv[i]; if (priv) mwifiex_deauthenticate(priv, NULL); } } if (adapter->hs_activated) { dev_dbg(adapter->dev, "cmd: HS Already activated\n"); return true; } adapter->hs_activate_wait_q_woken = false; memset(&hscfg, 0, sizeof(struct mwifiex_ds_hs_cfg)); hscfg.is_invoke_hostcmd = true; if (mwifiex_set_hs_params(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_STA), HostCmd_ACT_GEN_SET, MWIFIEX_SYNC_CMD, &hscfg)) { dev_err(adapter->dev, "IOCTL request HS enable failed\n"); return false; } if (wait_event_interruptible(adapter->hs_activate_wait_q, adapter->hs_activate_wait_q_woken)) { dev_err(adapter->dev, "hs_activate_wait_q terminated\n"); return false; } return true; } EXPORT_SYMBOL_GPL(mwifiex_enable_hs); /* * IOCTL request handler to get BSS information. * * This function collates the information from different driver structures * to send to the user. 
*/ int mwifiex_get_bss_info(struct mwifiex_private *priv, struct mwifiex_bss_info *info) { struct mwifiex_adapter *adapter = priv->adapter; struct mwifiex_bssdescriptor *bss_desc; if (!info) return -1; bss_desc = &priv->curr_bss_params.bss_descriptor; info->bss_mode = priv->bss_mode; memcpy(&info->ssid, &bss_desc->ssid, sizeof(struct cfg80211_ssid)); memcpy(&info->bssid, &bss_desc->mac_address, ETH_ALEN); info->bss_chan = bss_desc->channel; memcpy(info->country_code, adapter->country_code, IEEE80211_COUNTRY_STRING_LEN); info->media_connected = priv->media_connected; info->max_power_level = priv->max_tx_power_level; info->min_power_level = priv->min_tx_power_level; info->adhoc_state = priv->adhoc_state; info->bcn_nf_last = priv->bcn_nf_last; if (priv->sec_info.wep_enabled) info->wep_status = true; else info->wep_status = false; info->is_hs_configured = adapter->is_hs_configured; info->is_deep_sleep = adapter->is_deep_sleep; return 0; } /* * The function disables auto deep sleep mode. */ int mwifiex_disable_auto_ds(struct mwifiex_private *priv) { struct mwifiex_ds_auto_ds auto_ds; auto_ds.auto_ds = DEEP_SLEEP_OFF; return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_PS_MODE_ENH, DIS_AUTO_PS, BITMAP_AUTO_DS, &auto_ds); } EXPORT_SYMBOL_GPL(mwifiex_disable_auto_ds); /* * Sends IOCTL request to get the data rate. * * This function allocates the IOCTL request buffer, fills it * with requisite parameters and calls the IOCTL handler. */ int mwifiex_drv_get_data_rate(struct mwifiex_private *priv, u32 *rate) { int ret; ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_TX_RATE_QUERY, HostCmd_ACT_GEN_GET, 0, NULL); if (!ret) { if (priv->is_data_rate_auto) *rate = mwifiex_index_to_data_rate(priv, priv->tx_rate, priv->tx_htinfo); else *rate = priv->data_rate; } return ret; } /* * IOCTL request handler to set tx power configuration. * * This function prepares the correct firmware command and * issues it. 
* * For non-auto power mode, all the following power groups are set - * - Modulation class HR/DSSS * - Modulation class OFDM * - Modulation class HTBW20 * - Modulation class HTBW40 */ int mwifiex_set_tx_power(struct mwifiex_private *priv, struct mwifiex_power_cfg *power_cfg) { int ret; struct host_cmd_ds_txpwr_cfg *txp_cfg; struct mwifiex_types_power_group *pg_tlv; struct mwifiex_power_group *pg; u8 *buf; u16 dbm = 0; if (!power_cfg->is_power_auto) { dbm = (u16) power_cfg->power_level; if ((dbm < priv->min_tx_power_level) || (dbm > priv->max_tx_power_level)) { dev_err(priv->adapter->dev, "txpower value %d dBm" " is out of range (%d dBm-%d dBm)\n", dbm, priv->min_tx_power_level, priv->max_tx_power_level); return -1; } } buf = kzalloc(MWIFIEX_SIZE_OF_CMD_BUFFER, GFP_KERNEL); if (!buf) return -ENOMEM; txp_cfg = (struct host_cmd_ds_txpwr_cfg *) buf; txp_cfg->action = cpu_to_le16(HostCmd_ACT_GEN_SET); if (!power_cfg->is_power_auto) { txp_cfg->mode = cpu_to_le32(1); pg_tlv = (struct mwifiex_types_power_group *) (buf + sizeof(struct host_cmd_ds_txpwr_cfg)); pg_tlv->type = TLV_TYPE_POWER_GROUP; pg_tlv->length = 4 * sizeof(struct mwifiex_power_group); pg = (struct mwifiex_power_group *) (buf + sizeof(struct host_cmd_ds_txpwr_cfg) + sizeof(struct mwifiex_types_power_group)); /* Power group for modulation class HR/DSSS */ pg->first_rate_code = 0x00; pg->last_rate_code = 0x03; pg->modulation_class = MOD_CLASS_HR_DSSS; pg->power_step = 0; pg->power_min = (s8) dbm; pg->power_max = (s8) dbm; pg++; /* Power group for modulation class OFDM */ pg->first_rate_code = 0x00; pg->last_rate_code = 0x07; pg->modulation_class = MOD_CLASS_OFDM; pg->power_step = 0; pg->power_min = (s8) dbm; pg->power_max = (s8) dbm; pg++; /* Power group for modulation class HTBW20 */ pg->first_rate_code = 0x00; pg->last_rate_code = 0x20; pg->modulation_class = MOD_CLASS_HT; pg->power_step = 0; pg->power_min = (s8) dbm; pg->power_max = (s8) dbm; pg->ht_bandwidth = HT_BW_20; pg++; /* Power group for modulation 
class HTBW40 */ pg->first_rate_code = 0x00; pg->last_rate_code = 0x20; pg->modulation_class = MOD_CLASS_HT; pg->power_step = 0; pg->power_min = (s8) dbm; pg->power_max = (s8) dbm; pg->ht_bandwidth = HT_BW_40; } ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_TXPWR_CFG, HostCmd_ACT_GEN_SET, 0, buf); kfree(buf); return ret; } /* * IOCTL request handler to get power save mode. * * This function prepares the correct firmware command and * issues it. */ int mwifiex_drv_set_power(struct mwifiex_private *priv, u32 *ps_mode) { int ret; struct mwifiex_adapter *adapter = priv->adapter; u16 sub_cmd; if (*ps_mode) adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_PSP; else adapter->ps_mode = MWIFIEX_802_11_POWER_MODE_CAM; sub_cmd = (*ps_mode) ? EN_AUTO_PS : DIS_AUTO_PS; ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_PS_MODE_ENH, sub_cmd, BITMAP_STA_PS, NULL); if ((!ret) && (sub_cmd == DIS_AUTO_PS)) ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_PS_MODE_ENH, GET_PS, 0, NULL); return ret; } /* * IOCTL request handler to set/reset WPA IE. * * The supplied WPA IE is treated as a opaque buffer. Only the first field * is checked to determine WPA version. If buffer length is zero, the existing * WPA IE is reset. 
*/ static int mwifiex_set_wpa_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr, u16 ie_len) { if (ie_len) { if (ie_len > sizeof(priv->wpa_ie)) { dev_err(priv->adapter->dev, "failed to copy WPA IE, too big\n"); return -1; } memcpy(priv->wpa_ie, ie_data_ptr, ie_len); priv->wpa_ie_len = (u8) ie_len; dev_dbg(priv->adapter->dev, "cmd: Set Wpa_ie_len=%d IE=%#x\n", priv->wpa_ie_len, priv->wpa_ie[0]); if (priv->wpa_ie[0] == WLAN_EID_VENDOR_SPECIFIC) { priv->sec_info.wpa_enabled = true; } else if (priv->wpa_ie[0] == WLAN_EID_RSN) { priv->sec_info.wpa2_enabled = true; } else { priv->sec_info.wpa_enabled = false; priv->sec_info.wpa2_enabled = false; } } else { memset(priv->wpa_ie, 0, sizeof(priv->wpa_ie)); priv->wpa_ie_len = 0; dev_dbg(priv->adapter->dev, "info: reset wpa_ie_len=%d IE=%#x\n", priv->wpa_ie_len, priv->wpa_ie[0]); priv->sec_info.wpa_enabled = false; priv->sec_info.wpa2_enabled = false; } return 0; } /* * IOCTL request handler to set/reset WAPI IE. * * The supplied WAPI IE is treated as a opaque buffer. Only the first field * is checked to internally enable WAPI. If buffer length is zero, the existing * WAPI IE is reset. */ static int mwifiex_set_wapi_ie(struct mwifiex_private *priv, u8 *ie_data_ptr, u16 ie_len) { if (ie_len) { if (ie_len > sizeof(priv->wapi_ie)) { dev_dbg(priv->adapter->dev, "info: failed to copy WAPI IE, too big\n"); return -1; } memcpy(priv->wapi_ie, ie_data_ptr, ie_len); priv->wapi_ie_len = ie_len; dev_dbg(priv->adapter->dev, "cmd: Set wapi_ie_len=%d IE=%#x\n", priv->wapi_ie_len, priv->wapi_ie[0]); if (priv->wapi_ie[0] == WLAN_EID_BSS_AC_ACCESS_DELAY) priv->sec_info.wapi_enabled = true; } else { memset(priv->wapi_ie, 0, sizeof(priv->wapi_ie)); priv->wapi_ie_len = ie_len; dev_dbg(priv->adapter->dev, "info: Reset wapi_ie_len=%d IE=%#x\n", priv->wapi_ie_len, priv->wapi_ie[0]); priv->sec_info.wapi_enabled = false; } return 0; } /* * IOCTL request handler to set/reset WPS IE. * * The supplied WPS IE is treated as a opaque buffer. 
Only the first field * is checked to internally enable WPS. If buffer length is zero, the existing * WPS IE is reset. */ static int mwifiex_set_wps_ie(struct mwifiex_private *priv, u8 *ie_data_ptr, u16 ie_len) { if (ie_len) { priv->wps_ie = kzalloc(MWIFIEX_MAX_VSIE_LEN, GFP_KERNEL); if (!priv->wps_ie) return -ENOMEM; if (ie_len > sizeof(priv->wps_ie)) { dev_dbg(priv->adapter->dev, "info: failed to copy WPS IE, too big\n"); kfree(priv->wps_ie); return -1; } memcpy(priv->wps_ie, ie_data_ptr, ie_len); priv->wps_ie_len = ie_len; dev_dbg(priv->adapter->dev, "cmd: Set wps_ie_len=%d IE=%#x\n", priv->wps_ie_len, priv->wps_ie[0]); } else { kfree(priv->wps_ie); priv->wps_ie_len = ie_len; dev_dbg(priv->adapter->dev, "info: Reset wps_ie_len=%d\n", priv->wps_ie_len); } return 0; } /* * IOCTL request handler to set WAPI key. * * This function prepares the correct firmware command and * issues it. */ static int mwifiex_sec_ioctl_set_wapi_key(struct mwifiex_private *priv, struct mwifiex_ds_encrypt_key *encrypt_key) { return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_KEY_MATERIAL, HostCmd_ACT_GEN_SET, KEY_INFO_ENABLED, encrypt_key); } /* * IOCTL request handler to set WEP network key. * * This function prepares the correct firmware command and * issues it, after validation checks. 
*/ static int mwifiex_sec_ioctl_set_wep_key(struct mwifiex_private *priv, struct mwifiex_ds_encrypt_key *encrypt_key) { int ret; struct mwifiex_wep_key *wep_key; int index; if (priv->wep_key_curr_index >= NUM_WEP_KEYS) priv->wep_key_curr_index = 0; wep_key = &priv->wep_key[priv->wep_key_curr_index]; index = encrypt_key->key_index; if (encrypt_key->key_disable) { priv->sec_info.wep_enabled = 0; } else if (!encrypt_key->key_len) { /* Copy the required key as the current key */ wep_key = &priv->wep_key[index]; if (!wep_key->key_length) { dev_err(priv->adapter->dev, "key not set, so cannot enable it\n"); return -1; } priv->wep_key_curr_index = (u16) index; priv->sec_info.wep_enabled = 1; } else { wep_key = &priv->wep_key[index]; memset(wep_key, 0, sizeof(struct mwifiex_wep_key)); /* Copy the key in the driver */ memcpy(wep_key->key_material, encrypt_key->key_material, encrypt_key->key_len); wep_key->key_index = index; wep_key->key_length = encrypt_key->key_len; priv->sec_info.wep_enabled = 1; } if (wep_key->key_length) { /* Send request to firmware */ ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_KEY_MATERIAL, HostCmd_ACT_GEN_SET, 0, NULL); if (ret) return ret; } if (priv->sec_info.wep_enabled) priv->curr_pkt_filter |= HostCmd_ACT_MAC_WEP_ENABLE; else priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_WEP_ENABLE; ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_MAC_CONTROL, HostCmd_ACT_GEN_SET, 0, &priv->curr_pkt_filter); return ret; } /* * IOCTL request handler to set WPA key. * * This function prepares the correct firmware command and * issues it, after validation checks. * * Current driver only supports key length of up to 32 bytes. * * This function can also be used to disable a currently set key. 
*/ static int mwifiex_sec_ioctl_set_wpa_key(struct mwifiex_private *priv, struct mwifiex_ds_encrypt_key *encrypt_key) { int ret; u8 remove_key = false; struct host_cmd_ds_802_11_key_material *ibss_key; /* Current driver only supports key length of up to 32 bytes */ if (encrypt_key->key_len > WLAN_MAX_KEY_LEN) { dev_err(priv->adapter->dev, "key length too long\n"); return -1; } if (priv->bss_mode == NL80211_IFTYPE_ADHOC) { /* * IBSS/WPA-None uses only one key (Group) for both receiving * and sending unicast and multicast packets. */ /* Send the key as PTK to firmware */ encrypt_key->key_index = MWIFIEX_KEY_INDEX_UNICAST; ret = mwifiex_send_cmd_async(priv, HostCmd_CMD_802_11_KEY_MATERIAL, HostCmd_ACT_GEN_SET, KEY_INFO_ENABLED, encrypt_key); if (ret) return ret; ibss_key = &priv->aes_key; memset(ibss_key, 0, sizeof(struct host_cmd_ds_802_11_key_material)); /* Copy the key in the driver */ memcpy(ibss_key->key_param_set.key, encrypt_key->key_material, encrypt_key->key_len); memcpy(&ibss_key->key_param_set.key_len, &encrypt_key->key_len, sizeof(ibss_key->key_param_set.key_len)); ibss_key->key_param_set.key_type_id = cpu_to_le16(KEY_TYPE_ID_TKIP); ibss_key->key_param_set.key_info = cpu_to_le16(KEY_ENABLED); /* Send the key as GTK to firmware */ encrypt_key->key_index = ~MWIFIEX_KEY_INDEX_UNICAST; } if (!encrypt_key->key_index) encrypt_key->key_index = MWIFIEX_KEY_INDEX_UNICAST; if (remove_key) ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_KEY_MATERIAL, HostCmd_ACT_GEN_SET, !KEY_INFO_ENABLED, encrypt_key); else ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_KEY_MATERIAL, HostCmd_ACT_GEN_SET, KEY_INFO_ENABLED, encrypt_key); return ret; } /* * IOCTL request handler to set/get network keys. * * This is a generic key handling function which supports WEP, WPA * and WAPI. 
*/ static int mwifiex_sec_ioctl_encrypt_key(struct mwifiex_private *priv, struct mwifiex_ds_encrypt_key *encrypt_key) { int status; if (encrypt_key->is_wapi_key) status = mwifiex_sec_ioctl_set_wapi_key(priv, encrypt_key); else if (encrypt_key->key_len > WLAN_KEY_LEN_WEP104) status = mwifiex_sec_ioctl_set_wpa_key(priv, encrypt_key); else status = mwifiex_sec_ioctl_set_wep_key(priv, encrypt_key); return status; } /* * This function returns the driver version. */ int mwifiex_drv_get_driver_version(struct mwifiex_adapter *adapter, char *version, int max_len) { union { u32 l; u8 c[4]; } ver; char fw_ver[32]; ver.l = adapter->fw_release_number; sprintf(fw_ver, "%u.%u.%u.p%u", ver.c[2], ver.c[1], ver.c[0], ver.c[3]); snprintf(version, max_len, driver_version, fw_ver); dev_dbg(adapter->dev, "info: MWIFIEX VERSION: %s\n", version); return 0; } /* * Sends IOCTL request to set encoding parameters. * * This function allocates the IOCTL request buffer, fills it * with requisite parameters and calls the IOCTL handler. */ int mwifiex_set_encode(struct mwifiex_private *priv, struct key_params *kp, const u8 *key, int key_len, u8 key_index, const u8 *mac_addr, int disable) { struct mwifiex_ds_encrypt_key encrypt_key; memset(&encrypt_key, 0, sizeof(struct mwifiex_ds_encrypt_key)); encrypt_key.key_len = key_len; if (kp && kp->cipher == WLAN_CIPHER_SUITE_AES_CMAC) encrypt_key.is_igtk_key = true; if (!disable) { encrypt_key.key_index = key_index; if (key_len) memcpy(encrypt_key.key_material, key, key_len); if (mac_addr) memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN); if (kp && kp->seq && kp->seq_len) memcpy(encrypt_key.pn, kp->seq, kp->seq_len); } else { encrypt_key.key_disable = true; if (mac_addr) memcpy(encrypt_key.mac_addr, mac_addr, ETH_ALEN); } return mwifiex_sec_ioctl_encrypt_key(priv, &encrypt_key); } /* * Sends IOCTL request to get extended version. * * This function allocates the IOCTL request buffer, fills it * with requisite parameters and calls the IOCTL handler. 
*/ int mwifiex_get_ver_ext(struct mwifiex_private *priv) { struct mwifiex_ver_ext ver_ext; memset(&ver_ext, 0, sizeof(struct host_cmd_ds_version_ext)); if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_VERSION_EXT, HostCmd_ACT_GEN_GET, 0, &ver_ext)) return -1; return 0; } int mwifiex_remain_on_chan_cfg(struct mwifiex_private *priv, u16 action, struct ieee80211_channel *chan, unsigned int duration) { struct host_cmd_ds_remain_on_chan roc_cfg; u8 sc; memset(&roc_cfg, 0, sizeof(roc_cfg)); roc_cfg.action = cpu_to_le16(action); if (action == HostCmd_ACT_GEN_SET) { roc_cfg.band_cfg = chan->band; sc = mwifiex_chan_type_to_sec_chan_offset(NL80211_CHAN_NO_HT); roc_cfg.band_cfg |= (sc << 2); roc_cfg.channel = ieee80211_frequency_to_channel(chan->center_freq); roc_cfg.duration = cpu_to_le32(duration); } if (mwifiex_send_cmd_sync(priv, HostCmd_CMD_REMAIN_ON_CHAN, action, 0, &roc_cfg)) { dev_err(priv->adapter->dev, "failed to remain on channel\n"); return -1; } return roc_cfg.status; } int mwifiex_set_bss_role(struct mwifiex_private *priv, u8 bss_role) { if (GET_BSS_ROLE(priv) == bss_role) { dev_dbg(priv->adapter->dev, "info: already in the desired role.\n"); return 0; } mwifiex_free_priv(priv); mwifiex_init_priv(priv); priv->bss_role = bss_role; switch (bss_role) { case MWIFIEX_BSS_ROLE_UAP: priv->bss_mode = NL80211_IFTYPE_AP; break; case MWIFIEX_BSS_ROLE_STA: case MWIFIEX_BSS_ROLE_ANY: default: priv->bss_mode = NL80211_IFTYPE_STATION; break; } mwifiex_send_cmd_sync(priv, HostCmd_CMD_SET_BSS_MODE, HostCmd_ACT_GEN_SET, 0, NULL); return mwifiex_sta_init_cmd(priv, false); } /* * Sends IOCTL request to get statistics information. * * This function allocates the IOCTL request buffer, fills it * with requisite parameters and calls the IOCTL handler. */ int mwifiex_get_stats_info(struct mwifiex_private *priv, struct mwifiex_ds_get_stats *log) { return mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_GET_LOG, HostCmd_ACT_GEN_GET, 0, log); } /* * IOCTL request handler to read/write register. 
* * This function prepares the correct firmware command and * issues it. * * Access to the following registers are supported - * - MAC * - BBP * - RF * - PMIC * - CAU */ static int mwifiex_reg_mem_ioctl_reg_rw(struct mwifiex_private *priv, struct mwifiex_ds_reg_rw *reg_rw, u16 action) { u16 cmd_no; switch (le32_to_cpu(reg_rw->type)) { case MWIFIEX_REG_MAC: cmd_no = HostCmd_CMD_MAC_REG_ACCESS; break; case MWIFIEX_REG_BBP: cmd_no = HostCmd_CMD_BBP_REG_ACCESS; break; case MWIFIEX_REG_RF: cmd_no = HostCmd_CMD_RF_REG_ACCESS; break; case MWIFIEX_REG_PMIC: cmd_no = HostCmd_CMD_PMIC_REG_ACCESS; break; case MWIFIEX_REG_CAU: cmd_no = HostCmd_CMD_CAU_REG_ACCESS; break; default: return -1; } return mwifiex_send_cmd_sync(priv, cmd_no, action, 0, reg_rw); } /* * Sends IOCTL request to write to a register. * * This function allocates the IOCTL request buffer, fills it * with requisite parameters and calls the IOCTL handler. */ int mwifiex_reg_write(struct mwifiex_private *priv, u32 reg_type, u32 reg_offset, u32 reg_value) { struct mwifiex_ds_reg_rw reg_rw; reg_rw.type = cpu_to_le32(reg_type); reg_rw.offset = cpu_to_le32(reg_offset); reg_rw.value = cpu_to_le32(reg_value); return mwifiex_reg_mem_ioctl_reg_rw(priv, &reg_rw, HostCmd_ACT_GEN_SET); } /* * Sends IOCTL request to read from a register. * * This function allocates the IOCTL request buffer, fills it * with requisite parameters and calls the IOCTL handler. */ int mwifiex_reg_read(struct mwifiex_private *priv, u32 reg_type, u32 reg_offset, u32 *value) { int ret; struct mwifiex_ds_reg_rw reg_rw; reg_rw.type = cpu_to_le32(reg_type); reg_rw.offset = cpu_to_le32(reg_offset); ret = mwifiex_reg_mem_ioctl_reg_rw(priv, &reg_rw, HostCmd_ACT_GEN_GET); if (ret) goto done; *value = le32_to_cpu(reg_rw.value); done: return ret; } /* * Sends IOCTL request to read from EEPROM. * * This function allocates the IOCTL request buffer, fills it * with requisite parameters and calls the IOCTL handler. 
*/ int mwifiex_eeprom_read(struct mwifiex_private *priv, u16 offset, u16 bytes, u8 *value) { int ret; struct mwifiex_ds_read_eeprom rd_eeprom; rd_eeprom.offset = cpu_to_le16((u16) offset); rd_eeprom.byte_count = cpu_to_le16((u16) bytes); /* Send request to firmware */ ret = mwifiex_send_cmd_sync(priv, HostCmd_CMD_802_11_EEPROM_ACCESS, HostCmd_ACT_GEN_GET, 0, &rd_eeprom); if (!ret) memcpy(value, rd_eeprom.value, MAX_EEPROM_DATA); return ret; } /* * This function sets a generic IE. In addition to generic IE, it can * also handle WPA, WPA2 and WAPI IEs. */ static int mwifiex_set_gen_ie_helper(struct mwifiex_private *priv, u8 *ie_data_ptr, u16 ie_len) { int ret = 0; struct ieee_types_vendor_header *pvendor_ie; const u8 wpa_oui[] = { 0x00, 0x50, 0xf2, 0x01 }; const u8 wps_oui[] = { 0x00, 0x50, 0xf2, 0x04 }; /* If the passed length is zero, reset the buffer */ if (!ie_len) { priv->gen_ie_buf_len = 0; priv->wps.session_enable = false; return 0; } else if (!ie_data_ptr) { return -1; } pvendor_ie = (struct ieee_types_vendor_header *) ie_data_ptr; /* Test to see if it is a WPA IE, if not, then it is a gen IE */ if (((pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC) && (!memcmp(pvendor_ie->oui, wpa_oui, sizeof(wpa_oui)))) || (pvendor_ie->element_id == WLAN_EID_RSN)) { /* IE is a WPA/WPA2 IE so call set_wpa function */ ret = mwifiex_set_wpa_ie_helper(priv, ie_data_ptr, ie_len); priv->wps.session_enable = false; return ret; } else if (pvendor_ie->element_id == WLAN_EID_BSS_AC_ACCESS_DELAY) { /* IE is a WAPI IE so call set_wapi function */ ret = mwifiex_set_wapi_ie(priv, ie_data_ptr, ie_len); return ret; } /* * Verify that the passed length is not larger than the * available space remaining in the buffer */ if (ie_len < (sizeof(priv->gen_ie_buf) - priv->gen_ie_buf_len)) { /* Test to see if it is a WPS IE, if so, enable * wps session flag */ pvendor_ie = (struct ieee_types_vendor_header *) ie_data_ptr; if ((pvendor_ie->element_id == WLAN_EID_VENDOR_SPECIFIC) && 
(!memcmp(pvendor_ie->oui, wps_oui, sizeof(wps_oui)))) { priv->wps.session_enable = true; dev_dbg(priv->adapter->dev, "info: WPS Session Enabled.\n"); ret = mwifiex_set_wps_ie(priv, ie_data_ptr, ie_len); } /* Append the passed data to the end of the genIeBuffer */ memcpy(priv->gen_ie_buf + priv->gen_ie_buf_len, ie_data_ptr, ie_len); /* Increment the stored buffer length by the size passed */ priv->gen_ie_buf_len += ie_len; } else { /* Passed data does not fit in the remaining buffer space */ ret = -1; } /* Return 0, or -1 for error case */ return ret; } /* * IOCTL request handler to set/get generic IE. * * In addition to various generic IEs, this function can also be * used to set the ARP filter. */ static int mwifiex_misc_ioctl_gen_ie(struct mwifiex_private *priv, struct mwifiex_ds_misc_gen_ie *gen_ie, u16 action) { struct mwifiex_adapter *adapter = priv->adapter; switch (gen_ie->type) { case MWIFIEX_IE_TYPE_GEN_IE: if (action == HostCmd_ACT_GEN_GET) { gen_ie->len = priv->wpa_ie_len; memcpy(gen_ie->ie_data, priv->wpa_ie, gen_ie->len); } else { mwifiex_set_gen_ie_helper(priv, gen_ie->ie_data, (u16) gen_ie->len); } break; case MWIFIEX_IE_TYPE_ARP_FILTER: memset(adapter->arp_filter, 0, sizeof(adapter->arp_filter)); if (gen_ie->len > ARP_FILTER_MAX_BUF_SIZE) { adapter->arp_filter_size = 0; dev_err(adapter->dev, "invalid ARP filter size\n"); return -1; } else { memcpy(adapter->arp_filter, gen_ie->ie_data, gen_ie->len); adapter->arp_filter_size = gen_ie->len; } break; default: dev_err(adapter->dev, "invalid IE type\n"); return -1; } return 0; } /* * Sends IOCTL request to set a generic IE. * * This function allocates the IOCTL request buffer, fills it * with requisite parameters and calls the IOCTL handler. 
*/ int mwifiex_set_gen_ie(struct mwifiex_private *priv, u8 *ie, int ie_len) { struct mwifiex_ds_misc_gen_ie gen_ie; if (ie_len > IEEE_MAX_IE_SIZE) return -EFAULT; gen_ie.type = MWIFIEX_IE_TYPE_GEN_IE; gen_ie.len = ie_len; memcpy(gen_ie.ie_data, ie, ie_len); if (mwifiex_misc_ioctl_gen_ie(priv, &gen_ie, HostCmd_ACT_GEN_SET)) return -EFAULT; return 0; }
gpl-2.0
svenkatr/linux
drivers/video/fbdev/igafb.c
2237
15838
/* * linux/drivers/video/igafb.c -- Frame buffer device for IGA 1682 * * Copyright (C) 1998 Vladimir Roganov and Gleb Raiko * * This driver is partly based on the Frame buffer device for ATI Mach64 * and partially on VESA-related code. * * Copyright (C) 1997-1998 Geert Uytterhoeven * Copyright (C) 1998 Bernd Harries * Copyright (C) 1998 Eddie C. Dost (ecd@skynet.be) * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. */ /****************************************************************************** TODO: Despite of IGA Card has advanced graphic acceleration, initial version is almost dummy and does not support it. Support for video modes and acceleration must be added together with accelerated X-Windows driver implementation. Most important thing at this moment is that we have working JavaEngine1 console & X with new console interface. ******************************************************************************/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/delay.h> #include <linux/interrupt.h> #include <linux/fb.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/nvram.h> #include <asm/io.h> #ifdef CONFIG_SPARC #include <asm/prom.h> #include <asm/pcic.h> #endif #include <video/iga.h> struct pci_mmap_map { unsigned long voff; unsigned long poff; unsigned long size; unsigned long prot_flag; unsigned long prot_mask; }; struct iga_par { struct pci_mmap_map *mmap_map; unsigned long frame_buffer_phys; unsigned long io_base; }; struct fb_info fb_info; struct fb_fix_screeninfo igafb_fix __initdata = { .id = "IGA 1682", .type = FB_TYPE_PACKED_PIXELS, .mmio_len = 1000 }; struct fb_var_screeninfo default_var = { /* 640x480, 60 Hz, Non-Interlaced (25.175 MHz dotclock) */ .xres = 640, .yres = 480, 
.xres_virtual = 640, .yres_virtual = 480, .bits_per_pixel = 8, .red = {0, 8, 0 }, .green = {0, 8, 0 }, .blue = {0, 8, 0 }, .height = -1, .width = -1, .accel_flags = FB_ACCEL_NONE, .pixclock = 39722, .left_margin = 48, .right_margin = 16, .upper_margin = 33, .lower_margin = 10, .hsync_len = 96, .vsync_len = 2, .vmode = FB_VMODE_NONINTERLACED }; #ifdef CONFIG_SPARC struct fb_var_screeninfo default_var_1024x768 __initdata = { /* 1024x768, 75 Hz, Non-Interlaced (78.75 MHz dotclock) */ .xres = 1024, .yres = 768, .xres_virtual = 1024, .yres_virtual = 768, .bits_per_pixel = 8, .red = {0, 8, 0 }, .green = {0, 8, 0 }, .blue = {0, 8, 0 }, .height = -1, .width = -1, .accel_flags = FB_ACCEL_NONE, .pixclock = 12699, .left_margin = 176, .right_margin = 16, .upper_margin = 28, .lower_margin = 1, .hsync_len = 96, .vsync_len = 3, .vmode = FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }; struct fb_var_screeninfo default_var_1152x900 __initdata = { /* 1152x900, 76 Hz, Non-Interlaced (110.0 MHz dotclock) */ .xres = 1152, .yres = 900, .xres_virtual = 1152, .yres_virtual = 900, .bits_per_pixel = 8, .red = { 0, 8, 0 }, .green = { 0, 8, 0 }, .blue = { 0, 8, 0 }, .height = -1, .width = -1, .accel_flags = FB_ACCEL_NONE, .pixclock = 9091, .left_margin = 234, .right_margin = 24, .upper_margin = 34, .lower_margin = 3, .hsync_len = 100, .vsync_len = 3, .vmode = FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }; struct fb_var_screeninfo default_var_1280x1024 __initdata = { /* 1280x1024, 75 Hz, Non-Interlaced (135.00 MHz dotclock) */ .xres = 1280, .yres = 1024, .xres_virtual = 1280, .yres_virtual = 1024, .bits_per_pixel = 8, .red = {0, 8, 0 }, .green = {0, 8, 0 }, .blue = {0, 8, 0 }, .height = -1, .width = -1, .accel_flags = 0, .pixclock = 7408, .left_margin = 248, .right_margin = 16, .upper_margin = 38, .lower_margin = 1, .hsync_len = 144, .vsync_len = 3, .vmode = FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED }; /* * Memory-mapped I/O 
functions for Sparc PCI * * On sparc we happen to access I/O with memory mapped functions too. */ #define pci_inb(par, reg) readb(par->io_base+(reg)) #define pci_outb(par, val, reg) writeb(val, par->io_base+(reg)) static inline unsigned int iga_inb(struct iga_par *par, unsigned int reg, unsigned int idx) { pci_outb(par, idx, reg); return pci_inb(par, reg + 1); } static inline void iga_outb(struct iga_par *par, unsigned char val, unsigned int reg, unsigned int idx ) { pci_outb(par, idx, reg); pci_outb(par, val, reg+1); } #endif /* CONFIG_SPARC */ /* * Very important functionality for the JavaEngine1 computer: * make screen border black (usign special IGA registers) */ static void iga_blank_border(struct iga_par *par) { int i; #if 0 /* * PROM does this for us, so keep this code as a reminder * about required read from 0x3DA and writing of 0x20 in the end. */ (void) pci_inb(par, 0x3DA); /* required for every access */ pci_outb(par, IGA_IDX_VGA_OVERSCAN, IGA_ATTR_CTL); (void) pci_inb(par, IGA_ATTR_CTL+1); pci_outb(par, 0x38, IGA_ATTR_CTL); pci_outb(par, 0x20, IGA_ATTR_CTL); /* re-enable visual */ #endif /* * This does not work as it was designed because the overscan * color is looked up in the palette. Therefore, under X11 * overscan changes color. 
*/ for (i=0; i < 3; i++) iga_outb(par, 0, IGA_EXT_CNTRL, IGA_IDX_OVERSCAN_COLOR + i); } #ifdef CONFIG_SPARC static int igafb_mmap(struct fb_info *info, struct vm_area_struct *vma) { struct iga_par *par = (struct iga_par *)info->par; unsigned int size, page, map_size = 0; unsigned long map_offset = 0; int i; if (!par->mmap_map) return -ENXIO; size = vma->vm_end - vma->vm_start; /* Each page, see which map applies */ for (page = 0; page < size; ) { map_size = 0; for (i = 0; par->mmap_map[i].size; i++) { unsigned long start = par->mmap_map[i].voff; unsigned long end = start + par->mmap_map[i].size; unsigned long offset = (vma->vm_pgoff << PAGE_SHIFT) + page; if (start > offset) continue; if (offset >= end) continue; map_size = par->mmap_map[i].size - (offset - start); map_offset = par->mmap_map[i].poff + (offset - start); break; } if (!map_size) { page += PAGE_SIZE; continue; } if (page + map_size > size) map_size = size - page; pgprot_val(vma->vm_page_prot) &= ~(par->mmap_map[i].prot_mask); pgprot_val(vma->vm_page_prot) |= par->mmap_map[i].prot_flag; if (remap_pfn_range(vma, vma->vm_start + page, map_offset >> PAGE_SHIFT, map_size, vma->vm_page_prot)) return -EAGAIN; page += map_size; } if (!map_size) return -EINVAL; vma->vm_flags |= VM_IO; return 0; } #endif /* CONFIG_SPARC */ static int igafb_setcolreg(unsigned regno, unsigned red, unsigned green, unsigned blue, unsigned transp, struct fb_info *info) { /* * Set a single color register. The values supplied are * already rounded down to the hardware's capabilities * (according to the entries in the `var' structure). Return * != 0 for invalid regno. 
*/ struct iga_par *par = (struct iga_par *)info->par; if (regno >= info->cmap.len) return 1; pci_outb(par, regno, DAC_W_INDEX); pci_outb(par, red, DAC_DATA); pci_outb(par, green, DAC_DATA); pci_outb(par, blue, DAC_DATA); if (regno < 16) { switch (info->var.bits_per_pixel) { case 16: ((u16*)(info->pseudo_palette))[regno] = (regno << 10) | (regno << 5) | regno; break; case 24: ((u32*)(info->pseudo_palette))[regno] = (regno << 16) | (regno << 8) | regno; break; case 32: { int i; i = (regno << 8) | regno; ((u32*)(info->pseudo_palette))[regno] = (i << 16) | i; } break; } } return 0; } /* * Framebuffer option structure */ static struct fb_ops igafb_ops = { .owner = THIS_MODULE, .fb_setcolreg = igafb_setcolreg, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, #ifdef CONFIG_SPARC .fb_mmap = igafb_mmap, #endif }; static int __init iga_init(struct fb_info *info, struct iga_par *par) { char vramsz = iga_inb(par, IGA_EXT_CNTRL, IGA_IDX_EXT_BUS_CNTL) & MEM_SIZE_ALIAS; int video_cmap_len; switch (vramsz) { case MEM_SIZE_1M: info->fix.smem_len = 0x100000; break; case MEM_SIZE_2M: info->fix.smem_len = 0x200000; break; case MEM_SIZE_4M: case MEM_SIZE_RESERVED: info->fix.smem_len = 0x400000; break; } if (info->var.bits_per_pixel > 8) video_cmap_len = 16; else video_cmap_len = 256; info->fbops = &igafb_ops; info->flags = FBINFO_DEFAULT; fb_alloc_cmap(&info->cmap, video_cmap_len, 0); if (register_framebuffer(info) < 0) return 0; fb_info(info, "%s frame buffer device at 0x%08lx [%dMB VRAM]\n", info->fix.id, par->frame_buffer_phys, info->fix.smem_len >> 20); iga_blank_border(par); return 1; } static int __init igafb_init(void) { struct fb_info *info; struct pci_dev *pdev; struct iga_par *par; unsigned long addr; int size, iga2000 = 0; if (fb_get_options("igafb", NULL)) return -ENODEV; pdev = pci_get_device(PCI_VENDOR_ID_INTERG, PCI_DEVICE_ID_INTERG_1682, 0); if (pdev == NULL) { /* * XXX We tried to use cyber2000fb.c for IGS 2000. 
* But it does not initialize the chip in JavaStation-E, alas. */ pdev = pci_get_device(PCI_VENDOR_ID_INTERG, 0x2000, 0); if(pdev == NULL) { return -ENXIO; } iga2000 = 1; } /* We leak a reference here but as it cannot be unloaded this is fine. If you write unload code remember to free it in unload */ size = sizeof(struct iga_par) + sizeof(u32)*16; info = framebuffer_alloc(size, &pdev->dev); if (!info) { printk("igafb_init: can't alloc fb_info\n"); pci_dev_put(pdev); return -ENOMEM; } par = info->par; if ((addr = pdev->resource[0].start) == 0) { printk("igafb_init: no memory start\n"); kfree(info); pci_dev_put(pdev); return -ENXIO; } if ((info->screen_base = ioremap(addr, 1024*1024*2)) == 0) { printk("igafb_init: can't remap %lx[2M]\n", addr); kfree(info); pci_dev_put(pdev); return -ENXIO; } par->frame_buffer_phys = addr & PCI_BASE_ADDRESS_MEM_MASK; #ifdef CONFIG_SPARC /* * The following is sparc specific and this is why: * * IGS2000 has its I/O memory mapped and we want * to generate memory cycles on PCI, e.g. do ioremap(), * then readb/writeb() as in Documentation/io-mapping.txt. * * IGS1682 is more traditional, it responds to PCI I/O * cycles, so we want to access it with inb()/outb(). * * On sparc, PCIC converts CPU memory access within * phys window 0x3000xxxx into PCI I/O cycles. Therefore * we may use readb/writeb to access them with IGS1682. * * We do not take io_base_phys from resource[n].start * on IGS1682 because that chip is BROKEN. It does not * have a base register for I/O. We just "know" what its * I/O addresses are. */ if (iga2000) { igafb_fix.mmio_start = par->frame_buffer_phys | 0x00800000; } else { igafb_fix.mmio_start = 0x30000000; /* XXX */ } if ((par->io_base = (int) ioremap(igafb_fix.mmio_start, igafb_fix.smem_len)) == 0) { printk("igafb_init: can't remap %lx[4K]\n", igafb_fix.mmio_start); iounmap((void *)info->screen_base); kfree(info); pci_dev_put(pdev); return -ENXIO; } /* * Figure mmap addresses from PCI config space. 
* We need two regions: for video memory and for I/O ports. * Later one can add region for video coprocessor registers. * However, mmap routine loops until size != 0, so we put * one additional region with size == 0. */ par->mmap_map = kzalloc(4 * sizeof(*par->mmap_map), GFP_ATOMIC); if (!par->mmap_map) { printk("igafb_init: can't alloc mmap_map\n"); iounmap((void *)par->io_base); iounmap(info->screen_base); kfree(info); pci_dev_put(pdev); return -ENOMEM; } /* * Set default vmode and cmode from PROM properties. */ { struct device_node *dp = pci_device_to_OF_node(pdev); int node = dp->node; int width = prom_getintdefault(node, "width", 1024); int height = prom_getintdefault(node, "height", 768); int depth = prom_getintdefault(node, "depth", 8); switch (width) { case 1024: if (height == 768) default_var = default_var_1024x768; break; case 1152: if (height == 900) default_var = default_var_1152x900; break; case 1280: if (height == 1024) default_var = default_var_1280x1024; break; default: break; } switch (depth) { case 8: default_var.bits_per_pixel = 8; break; case 16: default_var.bits_per_pixel = 16; break; case 24: default_var.bits_per_pixel = 24; break; case 32: default_var.bits_per_pixel = 32; break; default: break; } } #endif igafb_fix.smem_start = (unsigned long) info->screen_base; igafb_fix.line_length = default_var.xres*(default_var.bits_per_pixel/8); igafb_fix.visual = default_var.bits_per_pixel <= 8 ? FB_VISUAL_PSEUDOCOLOR : FB_VISUAL_DIRECTCOLOR; info->var = default_var; info->fix = igafb_fix; info->pseudo_palette = (void *)(par + 1); if (!iga_init(info, par)) { iounmap((void *)par->io_base); iounmap(info->screen_base); kfree(par->mmap_map); kfree(info); return -ENODEV; } #ifdef CONFIG_SPARC /* * Add /dev/fb mmap values. 
*/ /* First region is for video memory */ par->mmap_map[0].voff = 0x0; par->mmap_map[0].poff = par->frame_buffer_phys & PAGE_MASK; par->mmap_map[0].size = info->fix.smem_len & PAGE_MASK; par->mmap_map[0].prot_mask = SRMMU_CACHE; par->mmap_map[0].prot_flag = SRMMU_WRITE; /* Second region is for I/O ports */ par->mmap_map[1].voff = par->frame_buffer_phys & PAGE_MASK; par->mmap_map[1].poff = info->fix.smem_start & PAGE_MASK; par->mmap_map[1].size = PAGE_SIZE * 2; /* X wants 2 pages */ par->mmap_map[1].prot_mask = SRMMU_CACHE; par->mmap_map[1].prot_flag = SRMMU_WRITE; #endif /* CONFIG_SPARC */ return 0; } static int __init igafb_setup(char *options) { char *this_opt; if (!options || !*options) return 0; while ((this_opt = strsep(&options, ",")) != NULL) { } return 0; } module_init(igafb_init); MODULE_LICENSE("GPL"); static struct pci_device_id igafb_pci_tbl[] = { { PCI_VENDOR_ID_INTERG, PCI_DEVICE_ID_INTERG_1682, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, { } }; MODULE_DEVICE_TABLE(pci, igafb_pci_tbl);
gpl-2.0
philippedeswert/android_kernel_lge_hammerhead
drivers/usb/serial/cp210x.c
2749
28101
/* * Silicon Laboratories CP210x USB to RS232 serial adaptor driver * * Copyright (C) 2005 Craig Shelley (craig@microtron.org.uk) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License version * 2 as published by the Free Software Foundation. * * Support to set flow control line levels using TIOCMGET and TIOCMSET * thanks to Karl Hiramoto karl@hiramoto.org. RTSCTS hardware flow * control thanks to Munir Nassar nassarmu@real-time.com * */ #include <linux/kernel.h> #include <linux/errno.h> #include <linux/slab.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/usb.h> #include <linux/uaccess.h> #include <linux/usb/serial.h> /* * Version Information */ #define DRIVER_VERSION "v0.09" #define DRIVER_DESC "Silicon Labs CP210x RS232 serial adaptor driver" /* * Function Prototypes */ static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *); static void cp210x_close(struct usb_serial_port *); static void cp210x_get_termios(struct tty_struct *, struct usb_serial_port *port); static void cp210x_get_termios_port(struct usb_serial_port *port, unsigned int *cflagp, unsigned int *baudp); static void cp210x_change_speed(struct tty_struct *, struct usb_serial_port *, struct ktermios *); static void cp210x_set_termios(struct tty_struct *, struct usb_serial_port *, struct ktermios*); static int cp210x_tiocmget(struct tty_struct *); static int cp210x_tiocmset(struct tty_struct *, unsigned int, unsigned int); static int cp210x_tiocmset_port(struct usb_serial_port *port, unsigned int, unsigned int); static void cp210x_break_ctl(struct tty_struct *, int); static int cp210x_startup(struct usb_serial *); static void cp210x_release(struct usb_serial *); static void cp210x_dtr_rts(struct usb_serial_port *p, int on); static bool debug; static const struct usb_device_id id_table[] = { { USB_DEVICE(0x045B, 0x0053) }, /* Renesas RX610 
RX-Stick */ { USB_DEVICE(0x0471, 0x066A) }, /* AKTAKOM ACE-1001 cable */ { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ { USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */ { USB_DEVICE(0x0FCF, 0x1003) }, /* Dynastream ANT development board */ { USB_DEVICE(0x0FCF, 0x1004) }, /* Dynastream ANT2USB */ { USB_DEVICE(0x0FCF, 0x1006) }, /* Dynastream ANT development board */ { USB_DEVICE(0x10A6, 0xAA26) }, /* Knock-off DCU-11 cable */ { USB_DEVICE(0x10AB, 0x10C5) }, /* Siemens MC60 Cable */ { USB_DEVICE(0x10B5, 0xAC70) }, /* Nokia CA-42 USB */ { USB_DEVICE(0x10C4, 0x0F91) }, /* Vstabi */ { USB_DEVICE(0x10C4, 0x1101) }, /* Arkham Technology DS101 Bus Monitor */ { USB_DEVICE(0x10C4, 0x1601) }, /* Arkham Technology DS101 Adapter */ { USB_DEVICE(0x10C4, 0x800A) }, /* SPORTident BSM7-D-USB main station */ { USB_DEVICE(0x10C4, 0x803B) }, /* Pololu USB-serial converter */ { USB_DEVICE(0x10C4, 0x8044) }, /* Cygnal Debug Adapter */ { USB_DEVICE(0x10C4, 0x804E) }, /* Software Bisque Paramount ME build-in converter */ { USB_DEVICE(0x10C4, 0x8053) }, /* Enfora EDG1228 */ { USB_DEVICE(0x10C4, 0x8054) }, /* Enfora GSM2228 */ { USB_DEVICE(0x10C4, 0x8066) }, /* Argussoft In-System Programmer */ { USB_DEVICE(0x10C4, 0x806F) }, /* IMS USB to RS422 Converter Cable */ { USB_DEVICE(0x10C4, 0x807A) }, /* Crumb128 board */ { USB_DEVICE(0x10C4, 0x80CA) }, /* Degree Controls Inc */ { USB_DEVICE(0x10C4, 0x80DD) }, /* Tracient RFID */ { USB_DEVICE(0x10C4, 0x80F6) }, /* Suunto sports instrument */ { USB_DEVICE(0x10C4, 0x8115) }, /* Arygon 
NFC/Mifare Reader */ { USB_DEVICE(0x10C4, 0x813D) }, /* Burnside Telecom Deskmobile */ { USB_DEVICE(0x10C4, 0x813F) }, /* Tams Master Easy Control */ { USB_DEVICE(0x10C4, 0x814A) }, /* West Mountain Radio RIGblaster P&P */ { USB_DEVICE(0x10C4, 0x814B) }, /* West Mountain Radio RIGtalk */ { USB_DEVICE(0x10C4, 0x8156) }, /* B&G H3000 link cable */ { USB_DEVICE(0x10C4, 0x815E) }, /* Helicomm IP-Link 1220-DVM */ { USB_DEVICE(0x10C4, 0x818B) }, /* AVIT Research USB to TTL */ { USB_DEVICE(0x10C4, 0x819F) }, /* MJS USB Toslink Switcher */ { USB_DEVICE(0x10C4, 0x81A6) }, /* ThinkOptics WavIt */ { USB_DEVICE(0x10C4, 0x81A9) }, /* Multiplex RC Interface */ { USB_DEVICE(0x10C4, 0x81AC) }, /* MSD Dash Hawk */ { USB_DEVICE(0x10C4, 0x81AD) }, /* INSYS USB Modem */ { USB_DEVICE(0x10C4, 0x81C8) }, /* Lipowsky Industrie Elektronik GmbH, Baby-JTAG */ { USB_DEVICE(0x10C4, 0x81E2) }, /* Lipowsky Industrie Elektronik GmbH, Baby-LIN */ { USB_DEVICE(0x10C4, 0x81E7) }, /* Aerocomm Radio */ { USB_DEVICE(0x10C4, 0x81E8) }, /* Zephyr Bioharness */ { USB_DEVICE(0x10C4, 0x81F2) }, /* C1007 HF band RFID controller */ { USB_DEVICE(0x10C4, 0x8218) }, /* Lipowsky Industrie Elektronik GmbH, HARP-1 */ { USB_DEVICE(0x10C4, 0x822B) }, /* Modem EDGE(GSM) Comander 2 */ { USB_DEVICE(0x10C4, 0x826B) }, /* Cygnal Integrated Products, Inc., Fasttrax GPS demonstration module */ { USB_DEVICE(0x10C4, 0x8293) }, /* Telegesis ETRX2USB */ { USB_DEVICE(0x10C4, 0x82F9) }, /* Procyon AVS */ { USB_DEVICE(0x10C4, 0x8341) }, /* Siemens MC35PU GPRS Modem */ { USB_DEVICE(0x10C4, 0x8382) }, /* Cygnal Integrated Products, Inc. 
*/ { USB_DEVICE(0x10C4, 0x83A8) }, /* Amber Wireless AMB2560 */ { USB_DEVICE(0x10C4, 0x83D8) }, /* DekTec DTA Plus VHF/UHF Booster/Attenuator */ { USB_DEVICE(0x10C4, 0x8411) }, /* Kyocera GPS Module */ { USB_DEVICE(0x10C4, 0x8418) }, /* IRZ Automation Teleport SG-10 GSM/GPRS Modem */ { USB_DEVICE(0x10C4, 0x846E) }, /* BEI USB Sensor Interface (VCP) */ { USB_DEVICE(0x10C4, 0x8477) }, /* Balluff RFID */ { USB_DEVICE(0x10C4, 0x85EA) }, /* AC-Services IBUS-IF */ { USB_DEVICE(0x10C4, 0x85EB) }, /* AC-Services CIS-IBUS */ { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA80) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA71) }, /* Infinity GPS-MIC-1 Radio Monophone */ { USB_DEVICE(0x10C4, 0xF001) }, /* Elan Digital Systems USBscope50 */ { USB_DEVICE(0x10C4, 0xF002) }, /* Elan Digital Systems USBwave12 */ { USB_DEVICE(0x10C4, 0xF003) }, /* Elan Digital Systems USBpulse100 */ { USB_DEVICE(0x10C4, 0xF004) }, /* Elan Digital Systems USBcount50 */ { USB_DEVICE(0x10C5, 0xEA61) }, /* Silicon Labs MobiData GPRS USB Modem */ { USB_DEVICE(0x10CE, 0xEA6A) }, /* Silicon Labs MobiData GPRS USB Modem 100EU */ { USB_DEVICE(0x13AD, 0x9999) }, /* Baltech card reader */ { USB_DEVICE(0x1555, 0x0004) }, /* Owen AC4 USB-RS485 Converter */ { USB_DEVICE(0x166A, 0x0303) }, /* Clipsal 5500PCU C-Bus USB interface */ { USB_DEVICE(0x16D6, 0x0001) }, /* Jablotron serial interface */ { USB_DEVICE(0x16DC, 0x0010) }, /* W-IE-NE-R Plein & Baus GmbH PL512 Power Supply */ { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */ { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */ { USB_DEVICE(0x16DC, 0x0015) 
}, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */ { USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */ { USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */ { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ { USB_DEVICE(0x3195, 0xF190) }, /* Link Instruments MSO-19 */ { USB_DEVICE(0x413C, 0x9500) }, /* DW700 GPS USB interface */ { } /* Terminating Entry */ }; MODULE_DEVICE_TABLE(usb, id_table); struct cp210x_port_private { __u8 bInterfaceNumber; }; static struct usb_driver cp210x_driver = { .name = "cp210x", .probe = usb_serial_probe, .disconnect = usb_serial_disconnect, .id_table = id_table, }; static struct usb_serial_driver cp210x_device = { .driver = { .owner = THIS_MODULE, .name = "cp210x", }, .id_table = id_table, .num_ports = 1, .bulk_in_size = 256, .bulk_out_size = 256, .open = cp210x_open, .close = cp210x_close, .break_ctl = cp210x_break_ctl, .set_termios = cp210x_set_termios, .tiocmget = cp210x_tiocmget, .tiocmset = cp210x_tiocmset, .attach = cp210x_startup, .release = cp210x_release, .dtr_rts = cp210x_dtr_rts }; static struct usb_serial_driver * const serial_drivers[] = { &cp210x_device, NULL }; /* Config request types */ #define REQTYPE_HOST_TO_DEVICE 0x41 #define REQTYPE_DEVICE_TO_HOST 0xc1 /* Config request codes */ #define CP210X_IFC_ENABLE 0x00 #define CP210X_SET_BAUDDIV 0x01 #define CP210X_GET_BAUDDIV 0x02 #define CP210X_SET_LINE_CTL 0x03 #define CP210X_GET_LINE_CTL 0x04 #define CP210X_SET_BREAK 0x05 #define CP210X_IMM_CHAR 0x06 #define CP210X_SET_MHS 0x07 #define CP210X_GET_MDMSTS 0x08 #define CP210X_SET_XON 0x09 #define CP210X_SET_XOFF 0x0A #define CP210X_SET_EVENTMASK 0x0B #define CP210X_GET_EVENTMASK 0x0C #define CP210X_SET_CHAR 0x0D #define CP210X_GET_CHARS 0x0E 
#define CP210X_GET_PROPS 0x0F #define CP210X_GET_COMM_STATUS 0x10 #define CP210X_RESET 0x11 #define CP210X_PURGE 0x12 #define CP210X_SET_FLOW 0x13 #define CP210X_GET_FLOW 0x14 #define CP210X_EMBED_EVENTS 0x15 #define CP210X_GET_EVENTSTATE 0x16 #define CP210X_SET_CHARS 0x19 #define CP210X_GET_BAUDRATE 0x1D #define CP210X_SET_BAUDRATE 0x1E /* CP210X_IFC_ENABLE */ #define UART_ENABLE 0x0001 #define UART_DISABLE 0x0000 /* CP210X_(SET|GET)_BAUDDIV */ #define BAUD_RATE_GEN_FREQ 0x384000 /* CP210X_(SET|GET)_LINE_CTL */ #define BITS_DATA_MASK 0X0f00 #define BITS_DATA_5 0X0500 #define BITS_DATA_6 0X0600 #define BITS_DATA_7 0X0700 #define BITS_DATA_8 0X0800 #define BITS_DATA_9 0X0900 #define BITS_PARITY_MASK 0x00f0 #define BITS_PARITY_NONE 0x0000 #define BITS_PARITY_ODD 0x0010 #define BITS_PARITY_EVEN 0x0020 #define BITS_PARITY_MARK 0x0030 #define BITS_PARITY_SPACE 0x0040 #define BITS_STOP_MASK 0x000f #define BITS_STOP_1 0x0000 #define BITS_STOP_1_5 0x0001 #define BITS_STOP_2 0x0002 /* CP210X_SET_BREAK */ #define BREAK_ON 0x0001 #define BREAK_OFF 0x0000 /* CP210X_(SET_MHS|GET_MDMSTS) */ #define CONTROL_DTR 0x0001 #define CONTROL_RTS 0x0002 #define CONTROL_CTS 0x0010 #define CONTROL_DSR 0x0020 #define CONTROL_RING 0x0040 #define CONTROL_DCD 0x0080 #define CONTROL_WRITE_DTR 0x0100 #define CONTROL_WRITE_RTS 0x0200 /* * cp210x_get_config * Reads from the CP210x configuration registers * 'size' is specified in bytes. 
* 'data' is a pointer to a pre-allocated array of integers large * enough to hold 'size' bytes (with 4 bytes to each integer) */ static int cp210x_get_config(struct usb_serial_port *port, u8 request, unsigned int *data, int size) { struct usb_serial *serial = port->serial; struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); __le32 *buf; int result, i, length; /* Number of integers required to contain the array */ length = (((size - 1) | 3) + 1)/4; buf = kcalloc(length, sizeof(__le32), GFP_KERNEL); if (!buf) { dev_err(&port->dev, "%s - out of memory.\n", __func__); return -ENOMEM; } /* Issue the request, attempting to read 'size' bytes */ result = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), request, REQTYPE_DEVICE_TO_HOST, 0x0000, port_priv->bInterfaceNumber, buf, size, USB_CTRL_GET_TIMEOUT); /* Convert data into an array of integers */ for (i = 0; i < length; i++) data[i] = le32_to_cpu(buf[i]); kfree(buf); if (result != size) { dbg("%s - Unable to send config request, " "request=0x%x size=%d result=%d", __func__, request, size, result); if (result > 0) result = -EPROTO; return result; } return 0; } /* * cp210x_set_config * Writes to the CP210x configuration registers * Values less than 16 bits wide are sent directly * 'size' is specified in bytes. 
*/ static int cp210x_set_config(struct usb_serial_port *port, u8 request, unsigned int *data, int size) { struct usb_serial *serial = port->serial; struct cp210x_port_private *port_priv = usb_get_serial_port_data(port); __le32 *buf; int result, i, length; /* Number of integers required to contain the array */ length = (((size - 1) | 3) + 1)/4; buf = kmalloc(length * sizeof(__le32), GFP_KERNEL); if (!buf) { dev_err(&port->dev, "%s - out of memory.\n", __func__); return -ENOMEM; } /* Array of integers into bytes */ for (i = 0; i < length; i++) buf[i] = cpu_to_le32(data[i]); if (size > 2) { result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), request, REQTYPE_HOST_TO_DEVICE, 0x0000, port_priv->bInterfaceNumber, buf, size, USB_CTRL_SET_TIMEOUT); } else { result = usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), request, REQTYPE_HOST_TO_DEVICE, data[0], port_priv->bInterfaceNumber, NULL, 0, USB_CTRL_SET_TIMEOUT); } kfree(buf); if ((size > 2 && result != size) || result < 0) { dbg("%s - Unable to send request, " "request=0x%x size=%d result=%d", __func__, request, size, result); if (result > 0) result = -EPROTO; return result; } return 0; } /* * cp210x_set_config_single * Convenience function for calling cp210x_set_config on single data values * without requiring an integer pointer */ static inline int cp210x_set_config_single(struct usb_serial_port *port, u8 request, unsigned int data) { return cp210x_set_config(port, request, &data, 2); } /* * cp210x_quantise_baudrate * Quantises the baud rate as per AN205 Table 1 */ static unsigned int cp210x_quantise_baudrate(unsigned int baud) { if (baud <= 300) baud = 300; else if (baud <= 600) baud = 600; else if (baud <= 1200) baud = 1200; else if (baud <= 1800) baud = 1800; else if (baud <= 2400) baud = 2400; else if (baud <= 4000) baud = 4000; else if (baud <= 4803) baud = 4800; else if (baud <= 7207) baud = 7200; else if (baud <= 9612) baud = 9600; else if (baud <= 14428) baud = 14400; else if 
(baud <= 16062) baud = 16000; else if (baud <= 19250) baud = 19200; else if (baud <= 28912) baud = 28800; else if (baud <= 38601) baud = 38400; else if (baud <= 51558) baud = 51200; else if (baud <= 56280) baud = 56000; else if (baud <= 58053) baud = 57600; else if (baud <= 64111) baud = 64000; else if (baud <= 77608) baud = 76800; else if (baud <= 117028) baud = 115200; else if (baud <= 129347) baud = 128000; else if (baud <= 156868) baud = 153600; else if (baud <= 237832) baud = 230400; else if (baud <= 254234) baud = 250000; else if (baud <= 273066) baud = 256000; else if (baud <= 491520) baud = 460800; else if (baud <= 567138) baud = 500000; else if (baud <= 670254) baud = 576000; else if (baud < 1000000) baud = 921600; else if (baud > 2000000) baud = 2000000; return baud; } static int cp210x_open(struct tty_struct *tty, struct usb_serial_port *port) { int result; dbg("%s - port %d", __func__, port->number); result = cp210x_set_config_single(port, CP210X_IFC_ENABLE, UART_ENABLE); if (result) { dev_err(&port->dev, "%s - Unable to enable UART\n", __func__); return result; } /* Configure the termios structure */ cp210x_get_termios(tty, port); /* The baud rate must be initialised on cp2104 */ if (tty) cp210x_change_speed(tty, port, NULL); return usb_serial_generic_open(tty, port); } static void cp210x_close(struct usb_serial_port *port) { dbg("%s - port %d", __func__, port->number); usb_serial_generic_close(port); mutex_lock(&port->serial->disc_mutex); if (!port->serial->disconnected) cp210x_set_config_single(port, CP210X_IFC_ENABLE, UART_DISABLE); mutex_unlock(&port->serial->disc_mutex); } /* * cp210x_get_termios * Reads the baud rate, data bits, parity, stop bits and flow control mode * from the device, corrects any unsupported values, and configures the * termios structure to reflect the state of the device */ static void cp210x_get_termios(struct tty_struct *tty, struct usb_serial_port *port) { unsigned int baud; if (tty) { 
cp210x_get_termios_port(tty->driver_data, &tty->termios->c_cflag, &baud); tty_encode_baud_rate(tty, baud, baud); } else { unsigned int cflag; cflag = 0; cp210x_get_termios_port(port, &cflag, &baud); } } /* * cp210x_get_termios_port * This is the heart of cp210x_get_termios which always uses a &usb_serial_port. */ static void cp210x_get_termios_port(struct usb_serial_port *port, unsigned int *cflagp, unsigned int *baudp) { unsigned int cflag, modem_ctl[4]; unsigned int baud; unsigned int bits; dbg("%s - port %d", __func__, port->number); cp210x_get_config(port, CP210X_GET_BAUDRATE, &baud, 4); dbg("%s - baud rate = %d", __func__, baud); *baudp = baud; cflag = *cflagp; cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2); cflag &= ~CSIZE; switch (bits & BITS_DATA_MASK) { case BITS_DATA_5: dbg("%s - data bits = 5", __func__); cflag |= CS5; break; case BITS_DATA_6: dbg("%s - data bits = 6", __func__); cflag |= CS6; break; case BITS_DATA_7: dbg("%s - data bits = 7", __func__); cflag |= CS7; break; case BITS_DATA_8: dbg("%s - data bits = 8", __func__); cflag |= CS8; break; case BITS_DATA_9: dbg("%s - data bits = 9 (not supported, using 8 data bits)", __func__); cflag |= CS8; bits &= ~BITS_DATA_MASK; bits |= BITS_DATA_8; cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2); break; default: dbg("%s - Unknown number of data bits, using 8", __func__); cflag |= CS8; bits &= ~BITS_DATA_MASK; bits |= BITS_DATA_8; cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2); break; } switch (bits & BITS_PARITY_MASK) { case BITS_PARITY_NONE: dbg("%s - parity = NONE", __func__); cflag &= ~PARENB; break; case BITS_PARITY_ODD: dbg("%s - parity = ODD", __func__); cflag |= (PARENB|PARODD); break; case BITS_PARITY_EVEN: dbg("%s - parity = EVEN", __func__); cflag &= ~PARODD; cflag |= PARENB; break; case BITS_PARITY_MARK: dbg("%s - parity = MARK", __func__); cflag |= (PARENB|PARODD|CMSPAR); break; case BITS_PARITY_SPACE: dbg("%s - parity = SPACE", __func__); cflag &= ~PARODD; cflag |= 
(PARENB|CMSPAR); break; default: dbg("%s - Unknown parity mode, disabling parity", __func__); cflag &= ~PARENB; bits &= ~BITS_PARITY_MASK; cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2); break; } cflag &= ~CSTOPB; switch (bits & BITS_STOP_MASK) { case BITS_STOP_1: dbg("%s - stop bits = 1", __func__); break; case BITS_STOP_1_5: dbg("%s - stop bits = 1.5 (not supported, using 1 stop bit)", __func__); bits &= ~BITS_STOP_MASK; cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2); break; case BITS_STOP_2: dbg("%s - stop bits = 2", __func__); cflag |= CSTOPB; break; default: dbg("%s - Unknown number of stop bits, using 1 stop bit", __func__); bits &= ~BITS_STOP_MASK; cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2); break; } cp210x_get_config(port, CP210X_GET_FLOW, modem_ctl, 16); if (modem_ctl[0] & 0x0008) { dbg("%s - flow control = CRTSCTS", __func__); cflag |= CRTSCTS; } else { dbg("%s - flow control = NONE", __func__); cflag &= ~CRTSCTS; } *cflagp = cflag; } /* * CP2101 supports the following baud rates: * * 300, 600, 1200, 1800, 2400, 4800, 7200, 9600, 14400, 19200, 28800, * 38400, 56000, 57600, 115200, 128000, 230400, 460800, 921600 * * CP2102 and CP2103 support the following additional rates: * * 4000, 16000, 51200, 64000, 76800, 153600, 250000, 256000, 500000, * 576000 * * The device will map a requested rate to a supported one, but the result * of requests for rates greater than 1053257 is undefined (see AN205). * * CP2104, CP2105 and CP2110 support most rates up to 2M, 921k and 1M baud, * respectively, with an error less than 1%. The actual rates are determined * by * * div = round(freq / (2 x prescale x request)) * actual = freq / (2 x prescale x div) * * For CP2104 and CP2105 freq is 48Mhz and prescale is 4 for request <= 365bps * or 1 otherwise. * For CP2110 freq is 24Mhz and prescale is 4 for request <= 300bps or 1 * otherwise. 
*/ static void cp210x_change_speed(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios) { u32 baud; baud = tty->termios->c_ospeed; /* This maps the requested rate to a rate valid on cp2102 or cp2103, * or to an arbitrary rate in [1M,2M]. * * NOTE: B0 is not implemented. */ baud = cp210x_quantise_baudrate(baud); dbg("%s - setting baud rate to %u", __func__, baud); if (cp210x_set_config(port, CP210X_SET_BAUDRATE, &baud, sizeof(baud))) { dev_warn(&port->dev, "failed to set baud rate to %u\n", baud); if (old_termios) baud = old_termios->c_ospeed; else baud = 9600; } tty_encode_baud_rate(tty, baud, baud); } static void cp210x_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios) { unsigned int cflag, old_cflag; unsigned int bits; unsigned int modem_ctl[4]; dbg("%s - port %d", __func__, port->number); if (!tty) return; cflag = tty->termios->c_cflag; old_cflag = old_termios->c_cflag; if (tty->termios->c_ospeed != old_termios->c_ospeed) cp210x_change_speed(tty, port, old_termios); /* If the number of data bits is to be updated */ if ((cflag & CSIZE) != (old_cflag & CSIZE)) { cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2); bits &= ~BITS_DATA_MASK; switch (cflag & CSIZE) { case CS5: bits |= BITS_DATA_5; dbg("%s - data bits = 5", __func__); break; case CS6: bits |= BITS_DATA_6; dbg("%s - data bits = 6", __func__); break; case CS7: bits |= BITS_DATA_7; dbg("%s - data bits = 7", __func__); break; case CS8: bits |= BITS_DATA_8; dbg("%s - data bits = 8", __func__); break; /*case CS9: bits |= BITS_DATA_9; dbg("%s - data bits = 9", __func__); break;*/ default: dbg("cp210x driver does not " "support the number of bits requested," " using 8 bit mode"); bits |= BITS_DATA_8; break; } if (cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2)) dbg("Number of data bits requested " "not supported by device"); } if ((cflag & (PARENB|PARODD|CMSPAR)) != (old_cflag & (PARENB|PARODD|CMSPAR))) { 
cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2); bits &= ~BITS_PARITY_MASK; if (cflag & PARENB) { if (cflag & CMSPAR) { if (cflag & PARODD) { bits |= BITS_PARITY_MARK; dbg("%s - parity = MARK", __func__); } else { bits |= BITS_PARITY_SPACE; dbg("%s - parity = SPACE", __func__); } } else { if (cflag & PARODD) { bits |= BITS_PARITY_ODD; dbg("%s - parity = ODD", __func__); } else { bits |= BITS_PARITY_EVEN; dbg("%s - parity = EVEN", __func__); } } } if (cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2)) dbg("Parity mode not supported by device"); } if ((cflag & CSTOPB) != (old_cflag & CSTOPB)) { cp210x_get_config(port, CP210X_GET_LINE_CTL, &bits, 2); bits &= ~BITS_STOP_MASK; if (cflag & CSTOPB) { bits |= BITS_STOP_2; dbg("%s - stop bits = 2", __func__); } else { bits |= BITS_STOP_1; dbg("%s - stop bits = 1", __func__); } if (cp210x_set_config(port, CP210X_SET_LINE_CTL, &bits, 2)) dbg("Number of stop bits requested " "not supported by device"); } if ((cflag & CRTSCTS) != (old_cflag & CRTSCTS)) { cp210x_get_config(port, CP210X_GET_FLOW, modem_ctl, 16); dbg("%s - read modem controls = 0x%.4x 0x%.4x 0x%.4x 0x%.4x", __func__, modem_ctl[0], modem_ctl[1], modem_ctl[2], modem_ctl[3]); if (cflag & CRTSCTS) { modem_ctl[0] &= ~0x7B; modem_ctl[0] |= 0x09; modem_ctl[1] = 0x80; dbg("%s - flow control = CRTSCTS", __func__); } else { modem_ctl[0] &= ~0x7B; modem_ctl[0] |= 0x01; modem_ctl[1] |= 0x40; dbg("%s - flow control = NONE", __func__); } dbg("%s - write modem controls = 0x%.4x 0x%.4x 0x%.4x 0x%.4x", __func__, modem_ctl[0], modem_ctl[1], modem_ctl[2], modem_ctl[3]); cp210x_set_config(port, CP210X_SET_FLOW, modem_ctl, 16); } } static int cp210x_tiocmset (struct tty_struct *tty, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; return cp210x_tiocmset_port(port, set, clear); } static int cp210x_tiocmset_port(struct usb_serial_port *port, unsigned int set, unsigned int clear) { unsigned int control = 0; dbg("%s - port %d", __func__, 
port->number); if (set & TIOCM_RTS) { control |= CONTROL_RTS; control |= CONTROL_WRITE_RTS; } if (set & TIOCM_DTR) { control |= CONTROL_DTR; control |= CONTROL_WRITE_DTR; } if (clear & TIOCM_RTS) { control &= ~CONTROL_RTS; control |= CONTROL_WRITE_RTS; } if (clear & TIOCM_DTR) { control &= ~CONTROL_DTR; control |= CONTROL_WRITE_DTR; } dbg("%s - control = 0x%.4x", __func__, control); return cp210x_set_config(port, CP210X_SET_MHS, &control, 2); } static void cp210x_dtr_rts(struct usb_serial_port *p, int on) { if (on) cp210x_tiocmset_port(p, TIOCM_DTR|TIOCM_RTS, 0); else cp210x_tiocmset_port(p, 0, TIOCM_DTR|TIOCM_RTS); } static int cp210x_tiocmget (struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; unsigned int control; int result; dbg("%s - port %d", __func__, port->number); cp210x_get_config(port, CP210X_GET_MDMSTS, &control, 1); result = ((control & CONTROL_DTR) ? TIOCM_DTR : 0) |((control & CONTROL_RTS) ? TIOCM_RTS : 0) |((control & CONTROL_CTS) ? TIOCM_CTS : 0) |((control & CONTROL_DSR) ? TIOCM_DSR : 0) |((control & CONTROL_RING)? TIOCM_RI : 0) |((control & CONTROL_DCD) ? TIOCM_CD : 0); dbg("%s - control = 0x%.2x", __func__, control); return result; } static void cp210x_break_ctl (struct tty_struct *tty, int break_state) { struct usb_serial_port *port = tty->driver_data; unsigned int state; dbg("%s - port %d", __func__, port->number); if (break_state == 0) state = BREAK_OFF; else state = BREAK_ON; dbg("%s - turning break %s", __func__, state == BREAK_OFF ? 
"off" : "on"); cp210x_set_config(port, CP210X_SET_BREAK, &state, 2); } static int cp210x_startup(struct usb_serial *serial) { struct cp210x_port_private *port_priv; int i; /* cp210x buffers behave strangely unless device is reset */ usb_reset_device(serial->dev); for (i = 0; i < serial->num_ports; i++) { port_priv = kzalloc(sizeof(*port_priv), GFP_KERNEL); if (!port_priv) return -ENOMEM; memset(port_priv, 0x00, sizeof(*port_priv)); port_priv->bInterfaceNumber = serial->interface->cur_altsetting->desc.bInterfaceNumber; usb_set_serial_port_data(serial->port[i], port_priv); } return 0; } static void cp210x_release(struct usb_serial *serial) { struct cp210x_port_private *port_priv; int i; for (i = 0; i < serial->num_ports; i++) { port_priv = usb_get_serial_port_data(serial->port[i]); kfree(port_priv); usb_set_serial_port_data(serial->port[i], NULL); } } module_usb_serial_driver(cp210x_driver, serial_drivers); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Enable verbose debugging messages");
gpl-2.0
jongh90/kvm
drivers/media/dvb-frontends/lgs8gl5.c
3773
10080
/* Legend Silicon LGS-8GL5 DMB-TH OFDM demodulator driver Copyright (C) 2008 Sirius International (Hong Kong) Limited Timothy Lee <timothy.lee@siriushk.com> This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/module.h> #include <linux/string.h> #include <linux/slab.h> #include "dvb_frontend.h" #include "lgs8gl5.h" #define REG_RESET 0x02 #define REG_RESET_OFF 0x01 #define REG_03 0x03 #define REG_04 0x04 #define REG_07 0x07 #define REG_09 0x09 #define REG_0A 0x0a #define REG_0B 0x0b #define REG_0C 0x0c #define REG_37 0x37 #define REG_STRENGTH 0x4b #define REG_STRENGTH_MASK 0x7f #define REG_STRENGTH_CARRIER 0x80 #define REG_INVERSION 0x7c #define REG_INVERSION_ON 0x80 #define REG_7D 0x7d #define REG_7E 0x7e #define REG_A2 0xa2 #define REG_STATUS 0xa4 #define REG_STATUS_SYNC 0x04 #define REG_STATUS_LOCK 0x01 struct lgs8gl5_state { struct i2c_adapter *i2c; const struct lgs8gl5_config *config; struct dvb_frontend frontend; }; static int debug; #define dprintk(args...) 
\ do { \ if (debug) \ printk(KERN_DEBUG "lgs8gl5: " args); \ } while (0) /* Writes into demod's register */ static int lgs8gl5_write_reg(struct lgs8gl5_state *state, u8 reg, u8 data) { int ret; u8 buf[] = {reg, data}; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 2 }; ret = i2c_transfer(state->i2c, &msg, 1); if (ret != 1) dprintk("%s: error (reg=0x%02x, val=0x%02x, ret=%i)\n", __func__, reg, data, ret); return (ret != 1) ? -1 : 0; } /* Reads from demod's register */ static int lgs8gl5_read_reg(struct lgs8gl5_state *state, u8 reg) { int ret; u8 b0[] = {reg}; u8 b1[] = {0}; struct i2c_msg msg[2] = { { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 1 }, { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } }; ret = i2c_transfer(state->i2c, msg, 2); if (ret != 2) return -EIO; return b1[0]; } static int lgs8gl5_update_reg(struct lgs8gl5_state *state, u8 reg, u8 data) { lgs8gl5_read_reg(state, reg); lgs8gl5_write_reg(state, reg, data); return 0; } /* Writes into alternate device's register */ /* TODO: Find out what that device is for! */ static int lgs8gl5_update_alt_reg(struct lgs8gl5_state *state, u8 reg, u8 data) { int ret; u8 b0[] = {reg}; u8 b1[] = {0}; u8 b2[] = {reg, data}; struct i2c_msg msg[3] = { { .addr = state->config->demod_address + 2, .flags = 0, .buf = b0, .len = 1 }, { .addr = state->config->demod_address + 2, .flags = I2C_M_RD, .buf = b1, .len = 1 }, { .addr = state->config->demod_address + 2, .flags = 0, .buf = b2, .len = 2 }, }; ret = i2c_transfer(state->i2c, msg, 3); return (ret != 3) ? 
-1 : 0; } static void lgs8gl5_soft_reset(struct lgs8gl5_state *state) { u8 val; dprintk("%s\n", __func__); val = lgs8gl5_read_reg(state, REG_RESET); lgs8gl5_write_reg(state, REG_RESET, val & ~REG_RESET_OFF); lgs8gl5_write_reg(state, REG_RESET, val | REG_RESET_OFF); msleep(5); } /* Starts demodulation */ static void lgs8gl5_start_demod(struct lgs8gl5_state *state) { u8 val; int n; dprintk("%s\n", __func__); lgs8gl5_update_alt_reg(state, 0xc2, 0x28); lgs8gl5_soft_reset(state); lgs8gl5_update_reg(state, REG_07, 0x10); lgs8gl5_update_reg(state, REG_07, 0x10); lgs8gl5_write_reg(state, REG_09, 0x0e); lgs8gl5_write_reg(state, REG_0A, 0xe5); lgs8gl5_write_reg(state, REG_0B, 0x35); lgs8gl5_write_reg(state, REG_0C, 0x30); lgs8gl5_update_reg(state, REG_03, 0x00); lgs8gl5_update_reg(state, REG_7E, 0x01); lgs8gl5_update_alt_reg(state, 0xc5, 0x00); lgs8gl5_update_reg(state, REG_04, 0x02); lgs8gl5_update_reg(state, REG_37, 0x01); lgs8gl5_soft_reset(state); /* Wait for carrier */ for (n = 0; n < 10; n++) { val = lgs8gl5_read_reg(state, REG_STRENGTH); dprintk("Wait for carrier[%d] 0x%02X\n", n, val); if (val & REG_STRENGTH_CARRIER) break; msleep(4); } if (!(val & REG_STRENGTH_CARRIER)) return; /* Wait for lock */ for (n = 0; n < 20; n++) { val = lgs8gl5_read_reg(state, REG_STATUS); dprintk("Wait for lock[%d] 0x%02X\n", n, val); if (val & REG_STATUS_LOCK) break; msleep(12); } if (!(val & REG_STATUS_LOCK)) return; lgs8gl5_write_reg(state, REG_7D, lgs8gl5_read_reg(state, REG_A2)); lgs8gl5_soft_reset(state); } static int lgs8gl5_init(struct dvb_frontend *fe) { struct lgs8gl5_state *state = fe->demodulator_priv; dprintk("%s\n", __func__); lgs8gl5_update_alt_reg(state, 0xc2, 0x28); lgs8gl5_soft_reset(state); lgs8gl5_update_reg(state, REG_07, 0x10); lgs8gl5_update_reg(state, REG_07, 0x10); lgs8gl5_write_reg(state, REG_09, 0x0e); lgs8gl5_write_reg(state, REG_0A, 0xe5); lgs8gl5_write_reg(state, REG_0B, 0x35); lgs8gl5_write_reg(state, REG_0C, 0x30); return 0; } static int 
lgs8gl5_read_status(struct dvb_frontend *fe, fe_status_t *status) { struct lgs8gl5_state *state = fe->demodulator_priv; u8 level = lgs8gl5_read_reg(state, REG_STRENGTH); u8 flags = lgs8gl5_read_reg(state, REG_STATUS); *status = 0; if ((level & REG_STRENGTH_MASK) > 0) *status |= FE_HAS_SIGNAL; if (level & REG_STRENGTH_CARRIER) *status |= FE_HAS_CARRIER; if (flags & REG_STATUS_SYNC) *status |= FE_HAS_SYNC; if (flags & REG_STATUS_LOCK) *status |= FE_HAS_LOCK; return 0; } static int lgs8gl5_read_ber(struct dvb_frontend *fe, u32 *ber) { *ber = 0; return 0; } static int lgs8gl5_read_signal_strength(struct dvb_frontend *fe, u16 *signal_strength) { struct lgs8gl5_state *state = fe->demodulator_priv; u8 level = lgs8gl5_read_reg(state, REG_STRENGTH); *signal_strength = (level & REG_STRENGTH_MASK) << 8; return 0; } static int lgs8gl5_read_snr(struct dvb_frontend *fe, u16 *snr) { struct lgs8gl5_state *state = fe->demodulator_priv; u8 level = lgs8gl5_read_reg(state, REG_STRENGTH); *snr = (level & REG_STRENGTH_MASK) << 8; return 0; } static int lgs8gl5_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { *ucblocks = 0; return 0; } static int lgs8gl5_set_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct lgs8gl5_state *state = fe->demodulator_priv; dprintk("%s\n", __func__); if (p->bandwidth_hz != 8000000) return -EINVAL; if (fe->ops.tuner_ops.set_params) { fe->ops.tuner_ops.set_params(fe); if (fe->ops.i2c_gate_ctrl) fe->ops.i2c_gate_ctrl(fe, 0); } /* lgs8gl5_set_inversion(state, p->inversion); */ lgs8gl5_start_demod(state); return 0; } static int lgs8gl5_get_frontend(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct lgs8gl5_state *state = fe->demodulator_priv; u8 inv = lgs8gl5_read_reg(state, REG_INVERSION); p->inversion = (inv & REG_INVERSION_ON) ? 
INVERSION_ON : INVERSION_OFF; p->code_rate_HP = FEC_1_2; p->code_rate_LP = FEC_7_8; p->guard_interval = GUARD_INTERVAL_1_32; p->transmission_mode = TRANSMISSION_MODE_2K; p->modulation = QAM_64; p->hierarchy = HIERARCHY_NONE; p->bandwidth_hz = 8000000; return 0; } static int lgs8gl5_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *fesettings) { fesettings->min_delay_ms = 240; fesettings->step_size = 0; fesettings->max_drift = 0; return 0; } static void lgs8gl5_release(struct dvb_frontend *fe) { struct lgs8gl5_state *state = fe->demodulator_priv; kfree(state); } static struct dvb_frontend_ops lgs8gl5_ops; struct dvb_frontend* lgs8gl5_attach(const struct lgs8gl5_config *config, struct i2c_adapter *i2c) { struct lgs8gl5_state *state = NULL; dprintk("%s\n", __func__); /* Allocate memory for the internal state */ state = kzalloc(sizeof(struct lgs8gl5_state), GFP_KERNEL); if (state == NULL) goto error; /* Setup the state */ state->config = config; state->i2c = i2c; /* Check if the demod is there */ if (lgs8gl5_read_reg(state, REG_RESET) < 0) goto error; /* Create dvb_frontend */ memcpy(&state->frontend.ops, &lgs8gl5_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; return &state->frontend; error: kfree(state); return NULL; } EXPORT_SYMBOL(lgs8gl5_attach); static struct dvb_frontend_ops lgs8gl5_ops = { .delsys = { SYS_DTMB }, .info = { .name = "Legend Silicon LGS-8GL5 DMB-TH", .frequency_min = 474000000, .frequency_max = 858000000, .frequency_stepsize = 10000, .frequency_tolerance = 0, .caps = FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_32 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_BANDWIDTH_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_HIERARCHY_AUTO | FE_CAN_RECOVER }, .release = lgs8gl5_release, .init = lgs8gl5_init, .set_frontend = lgs8gl5_set_frontend, .get_frontend = lgs8gl5_get_frontend, .get_tune_settings = lgs8gl5_get_tune_settings, .read_status = 
lgs8gl5_read_status, .read_ber = lgs8gl5_read_ber, .read_signal_strength = lgs8gl5_read_signal_strength, .read_snr = lgs8gl5_read_snr, .read_ucblocks = lgs8gl5_read_ucblocks, }; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Turn on/off frontend debugging (default:off)."); MODULE_DESCRIPTION("Legend Silicon LGS-8GL5 DMB-TH Demodulator driver"); MODULE_AUTHOR("Timothy Lee"); MODULE_LICENSE("GPL");
gpl-2.0
zhiweix-dong/linux-yocto-micro-3.19
arch/metag/kernel/perf_callchain.c
4029
2191
/* * Perf callchain handling code. * * Based on the ARM perf implementation. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/perf_event.h> #include <linux/uaccess.h> #include <asm/ptrace.h> #include <asm/stacktrace.h> static bool is_valid_call(unsigned long calladdr) { unsigned int callinsn; /* Check the possible return address is aligned. */ if (!(calladdr & 0x3)) { if (!get_user(callinsn, (unsigned int *)calladdr)) { /* Check for CALLR or SWAP PC,D1RtP. */ if ((callinsn & 0xff000000) == 0xab000000 || callinsn == 0xa3200aa0) return true; } } return false; } static struct metag_frame __user * user_backtrace(struct metag_frame __user *user_frame, struct perf_callchain_entry *entry) { struct metag_frame frame; unsigned long calladdr; /* We cannot rely on having frame pointers in user code. */ while (1) { /* Also check accessibility of one struct frame beyond */ if (!access_ok(VERIFY_READ, user_frame, sizeof(frame))) return 0; if (__copy_from_user_inatomic(&frame, user_frame, sizeof(frame))) return 0; --user_frame; calladdr = frame.lr - 4; if (is_valid_call(calladdr)) { perf_callchain_store(entry, calladdr); return user_frame; } } return 0; } void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) { unsigned long sp = regs->ctx.AX[0].U0; struct metag_frame __user *frame; frame = (struct metag_frame __user *)sp; --frame; while ((entry->nr < PERF_MAX_STACK_DEPTH) && frame) frame = user_backtrace(frame, entry); } /* * Gets called by walk_stackframe() for every stackframe. This will be called * whist unwinding the stackframe and is like a subroutine return so we use * the PC. 
*/ static int callchain_trace(struct stackframe *fr, void *data) { struct perf_callchain_entry *entry = data; perf_callchain_store(entry, fr->pc); return 0; } void perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs) { struct stackframe fr; fr.fp = regs->ctx.AX[1].U0; fr.sp = regs->ctx.AX[0].U0; fr.lr = regs->ctx.DX[4].U1; fr.pc = regs->ctx.CurrPC; walk_stackframe(&fr, callchain_trace, entry); }
gpl-2.0
tommypacker/android_kernel_lge_msm8974
drivers/scsi/arcmsr/arcmsr_hba.c
4029
99897
/* ******************************************************************************* ** O.S : Linux ** FILE NAME : arcmsr_hba.c ** BY : Nick Cheng ** Description: SCSI RAID Device Driver for ** ARECA RAID Host adapter ******************************************************************************* ** Copyright (C) 2002 - 2005, Areca Technology Corporation All rights reserved ** ** Web site: www.areca.com.tw ** E-mail: support@areca.com.tw ** ** This program is free software; you can redistribute it and/or modify ** it under the terms of the GNU General Public License version 2 as ** published by the Free Software Foundation. ** This program is distributed in the hope that it will be useful, ** but WITHOUT ANY WARRANTY; without even the implied warranty of ** MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ** GNU General Public License for more details. ******************************************************************************* ** Redistribution and use in source and binary forms, with or without ** modification, are permitted provided that the following conditions ** are met: ** 1. Redistributions of source code must retain the above copyright ** notice, this list of conditions and the following disclaimer. ** 2. Redistributions in binary form must reproduce the above copyright ** notice, this list of conditions and the following disclaimer in the ** documentation and/or other materials provided with the distribution. ** 3. The name of the author may not be used to endorse or promote products ** derived from this software without specific prior written permission. ** ** THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR ** IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES ** OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
** IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, ** INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES(INCLUDING,BUT ** NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ** DATA, OR PROFITS; OR BUSINESS INTERRUPTION)HOWEVER CAUSED AND ON ANY ** THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ** (INCLUDING NEGLIGENCE OR OTHERWISE)ARISING IN ANY WAY OUT OF THE USE OF ** THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ******************************************************************************* ** For history of changes, see Documentation/scsi/ChangeLog.arcmsr ** Firmware Specification, see Documentation/scsi/arcmsr_spec.txt ******************************************************************************* */ #include <linux/module.h> #include <linux/reboot.h> #include <linux/spinlock.h> #include <linux/pci_ids.h> #include <linux/interrupt.h> #include <linux/moduleparam.h> #include <linux/errno.h> #include <linux/types.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/timer.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/aer.h> #include <asm/dma.h> #include <asm/io.h> #include <asm/uaccess.h> #include <scsi/scsi_host.h> #include <scsi/scsi.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_tcq.h> #include <scsi/scsi_device.h> #include <scsi/scsi_transport.h> #include <scsi/scsicam.h> #include "arcmsr.h" MODULE_AUTHOR("Nick Cheng <support@areca.com.tw>"); MODULE_DESCRIPTION("ARECA (ARC11xx/12xx/16xx/1880) SATA/SAS RAID Host Bus Adapter"); MODULE_LICENSE("Dual BSD/GPL"); MODULE_VERSION(ARCMSR_DRIVER_VERSION); #define ARCMSR_SLEEPTIME 10 #define ARCMSR_RETRYCOUNT 12 wait_queue_head_t wait_q; static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb, struct scsi_cmnd *cmd); static int arcmsr_iop_confirm(struct AdapterControlBlock *acb); static int arcmsr_abort(struct scsi_cmnd *); static int arcmsr_bus_reset(struct scsi_cmnd *); static 
int arcmsr_bios_param(struct scsi_device *sdev, struct block_device *bdev, sector_t capacity, int *info); static int arcmsr_queue_command(struct Scsi_Host *h, struct scsi_cmnd *cmd); static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id); static void arcmsr_remove(struct pci_dev *pdev); static void arcmsr_shutdown(struct pci_dev *pdev); static void arcmsr_iop_init(struct AdapterControlBlock *acb); static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb); static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb); static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb); static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb); static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb); static void arcmsr_request_device_map(unsigned long pacb); static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb); static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb); static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb); static void arcmsr_message_isr_bh_fn(struct work_struct *work); static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb); static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb); static void arcmsr_hbc_message_isr(struct AdapterControlBlock *pACB); static void arcmsr_hardware_reset(struct AdapterControlBlock *acb); static const char *arcmsr_info(struct Scsi_Host *); static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb); static int arcmsr_adjust_disk_queue_depth(struct scsi_device *sdev, int queue_depth, int reason) { if (reason != SCSI_QDEPTH_DEFAULT) return -EOPNOTSUPP; if (queue_depth > ARCMSR_MAX_CMD_PERLUN) queue_depth = ARCMSR_MAX_CMD_PERLUN; scsi_adjust_queue_depth(sdev, MSG_ORDERED_TAG, queue_depth); return queue_depth; } static struct scsi_host_template arcmsr_scsi_host_template = { .module = THIS_MODULE, .name = "ARCMSR ARECA SATA/SAS RAID Controller" ARCMSR_DRIVER_VERSION, 
.info = arcmsr_info, .queuecommand = arcmsr_queue_command, .eh_abort_handler = arcmsr_abort, .eh_bus_reset_handler = arcmsr_bus_reset, .bios_param = arcmsr_bios_param, .change_queue_depth = arcmsr_adjust_disk_queue_depth, .can_queue = ARCMSR_MAX_FREECCB_NUM, .this_id = ARCMSR_SCSI_INITIATOR_ID, .sg_tablesize = ARCMSR_DEFAULT_SG_ENTRIES, .max_sectors = ARCMSR_MAX_XFER_SECTORS_C, .cmd_per_lun = ARCMSR_MAX_CMD_PERLUN, .use_clustering = ENABLE_CLUSTERING, .shost_attrs = arcmsr_host_attrs, }; static struct pci_device_id arcmsr_device_id_table[] = { {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1110)}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1120)}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1130)}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1160)}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1170)}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1200)}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1201)}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1202)}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1210)}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1220)}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1230)}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1260)}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1270)}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1280)}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1380)}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1381)}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1680)}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1681)}, {PCI_DEVICE(PCI_VENDOR_ID_ARECA, PCI_DEVICE_ID_ARECA_1880)}, {0, 0}, /* Terminating entry */ }; MODULE_DEVICE_TABLE(pci, arcmsr_device_id_table); static struct pci_driver arcmsr_pci_driver = { .name = "arcmsr", .id_table = arcmsr_device_id_table, .probe = arcmsr_probe, .remove = arcmsr_remove, .shutdown = arcmsr_shutdown, }; /* 
****************************************************************************
****************************************************************************
*/

/*
** Release the type-B message unit, which is allocated from coherent DMA
** memory.  Types A and C map their message unit directly from PCI BAR
** space, so there is nothing to free for them.
*/
static void arcmsr_free_hbb_mu(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:
	case ACB_ADAPTER_TYPE_C:
		break;
	case ACB_ADAPTER_TYPE_B:{
		dma_free_coherent(&acb->pdev->dev, sizeof(struct MessageUnit_B),
			acb->pmuB, acb->dma_coherent_handle_hbb_mu);
	}
	}
}

/*
** Map the controller's register BAR(s) according to adapter type:
**   type A: BAR 0 only; type B: BAR 0 and BAR 2; type C: BAR 1.
** Returns true on success, false if any ioremap fails (partial mappings
** are undone before returning).
*/
static bool arcmsr_remap_pciregion(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;

	switch (acb->adapter_type){
	case ACB_ADAPTER_TYPE_A:{
		acb->pmuA = ioremap(pci_resource_start(pdev,0), pci_resource_len(pdev,0));
		if (!acb->pmuA) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n",
				acb->host->host_no);
			return false;
		}
		break;
	}
	case ACB_ADAPTER_TYPE_B:{
		void __iomem *mem_base0, *mem_base1;
		mem_base0 = ioremap(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
		if (!mem_base0) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n",
				acb->host->host_no);
			return false;
		}
		mem_base1 = ioremap(pci_resource_start(pdev, 2), pci_resource_len(pdev, 2));
		if (!mem_base1) {
			/* second mapping failed: tear down the first one */
			iounmap(mem_base0);
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n",
				acb->host->host_no);
			return false;
		}
		acb->mem_base0 = mem_base0;
		acb->mem_base1 = mem_base1;
		break;
	}
	case ACB_ADAPTER_TYPE_C:{
		acb->pmuC = ioremap_nocache(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
		if (!acb->pmuC) {
			printk(KERN_NOTICE "arcmsr%d: memory mapping region fail \n",
				acb->host->host_no);
			return false;
		}
		/* ack a message interrupt the firmware may have left pending */
		if (readl(&acb->pmuC->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
				&acb->pmuC->outbound_doorbell_clear);/*clear interrupt*/
			return true;
		}
		break;
	}
	}
	return true;
}

/*
** Undo arcmsr_remap_pciregion(): unmap every BAR mapping created for the
** adapter type.
*/
static void arcmsr_unmap_pciregion(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A:{
		iounmap(acb->pmuA);
	}
	break;
	case ACB_ADAPTER_TYPE_B:{
		iounmap(acb->mem_base0);
		iounmap(acb->mem_base1);
	}
	break;
	case ACB_ADAPTER_TYPE_C:{
		iounmap(acb->pmuC);
	}
	}
}

/* Top-level IRQ handler: delegate to the per-adapter dispatcher. */
static irqreturn_t arcmsr_do_interrupt(int irq, void *dev_id)
{
	irqreturn_t handle_state;
	struct AdapterControlBlock *acb = dev_id;

	handle_state = arcmsr_interrupt(acb);
	return handle_state;
}

/*
** SCSI host bios_param hook: derive a heads/sectors/cylinders geometry.
** Prefer the geometry encoded in the on-disk partition table; fall back
** to 64/32 (255/63 for large disks) when none is readable.
** geom[0]=heads, geom[1]=sectors, geom[2]=cylinders.
*/
static int arcmsr_bios_param(struct scsi_device *sdev,
		struct block_device *bdev, sector_t capacity, int *geom)
{
	/* NOTE(review): total_capacity is int while capacity is sector_t, so
	 * capacities at/above 2^31 sectors truncate here; this matches the
	 * historical upstream code — confirm before changing. */
	int ret, heads, sectors, cylinders, total_capacity;
	unsigned char *buffer;/* return copy of block device's partition table */

	buffer = scsi_bios_ptable(bdev);
	if (buffer) {
		ret = scsi_partsize(buffer, capacity, &geom[2], &geom[0], &geom[1]);
		kfree(buffer);
		if (ret != -1)
			return ret;
	}
	total_capacity = capacity;
	heads = 64;
	sectors = 32;
	cylinders = total_capacity / (heads * sectors);
	if (cylinders > 1024) {
		heads = 255;
		sectors = 63;
		cylinders = total_capacity / (heads * sectors);
	}
	geom[0] = heads;
	geom[1] = sectors;
	geom[2] = cylinders;
	return 0;
}

/*
** Classify the controller from its PCI device ID:
** 0x1880 -> type C, 0x1201 -> type B, everything else -> type A.
*/
static void arcmsr_define_adapter_type(struct AdapterControlBlock *acb)
{
	struct pci_dev *pdev = acb->pdev;
	u16 dev_id;

	pci_read_config_word(pdev, PCI_DEVICE_ID, &dev_id);
	acb->dev_id = dev_id;
	switch (dev_id) {
	case 0x1880: {
		acb->adapter_type = ACB_ADAPTER_TYPE_C;
		}
		break;
	case 0x1201: {
		acb->adapter_type = ACB_ADAPTER_TYPE_B;
		}
		break;
	default:
		acb->adapter_type = ACB_ADAPTER_TYPE_A;
	}
}

/*
** Poll (up to 2000 x 10ms = 20s) for the type-A firmware to signal
** message completion, then acknowledge it.  Returns true on completion,
** false on timeout.
*/
static uint8_t arcmsr_hba_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	int i;

	for (i = 0; i < 2000; i++) {
		if (readl(&reg->outbound_intstatus) &
				ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
			writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT,
				&reg->outbound_intstatus);
			return true;
		}
		msleep(10);
	} /* max 20 seconds */
	return false;
}

/* Type-B variant of the 20-second message-completion poll. */
static uint8_t arcmsr_hbb_wait_msgint_ready(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	int i;

	for (i = 0; i < 2000; i++) {
		if (readl(reg->iop2drv_doorbell) & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell); return true; } msleep(10); } /* max 20 seconds */ return false; } static uint8_t arcmsr_hbc_wait_msgint_ready(struct AdapterControlBlock *pACB) { struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC; int i; for (i = 0; i < 2000; i++) { if (readl(&phbcmu->outbound_doorbell) & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) { writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR, &phbcmu->outbound_doorbell_clear); /*clear interrupt*/ return true; } msleep(10); } /* max 20 seconds */ return false; } static void arcmsr_flush_hba_cache(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; int retry_count = 30; writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0); do { if (arcmsr_hba_wait_msgint_ready(acb)) break; else { retry_count--; printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \ timeout, retry count down = %d \n", acb->host->host_no, retry_count); } } while (retry_count != 0); } static void arcmsr_flush_hbb_cache(struct AdapterControlBlock *acb) { struct MessageUnit_B *reg = acb->pmuB; int retry_count = 30; writel(ARCMSR_MESSAGE_FLUSH_CACHE, reg->drv2iop_doorbell); do { if (arcmsr_hbb_wait_msgint_ready(acb)) break; else { retry_count--; printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \ timeout,retry count down = %d \n", acb->host->host_no, retry_count); } } while (retry_count != 0); } static void arcmsr_flush_hbc_cache(struct AdapterControlBlock *pACB) { struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC; int retry_count = 30;/* enlarge wait flush adapter cache time: 10 minute */ writel(ARCMSR_INBOUND_MESG0_FLUSH_CACHE, &reg->inbound_msgaddr0); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell); do { if (arcmsr_hbc_wait_msgint_ready(pACB)) { break; } else { retry_count--; printk(KERN_NOTICE "arcmsr%d: wait 'flush adapter cache' \ 
timeout,retry count down = %d \n", pACB->host->host_no, retry_count); } } while (retry_count != 0); return; } static void arcmsr_flush_adapter_cache(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_flush_hba_cache(acb); } break; case ACB_ADAPTER_TYPE_B: { arcmsr_flush_hbb_cache(acb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_flush_hbc_cache(acb); } } } static int arcmsr_alloc_ccb_pool(struct AdapterControlBlock *acb) { struct pci_dev *pdev = acb->pdev; void *dma_coherent; dma_addr_t dma_coherent_handle; struct CommandControlBlock *ccb_tmp; int i = 0, j = 0; dma_addr_t cdb_phyaddr; unsigned long roundup_ccbsize; unsigned long max_xfer_len; unsigned long max_sg_entrys; uint32_t firm_config_version; for (i = 0; i < ARCMSR_MAX_TARGETID; i++) for (j = 0; j < ARCMSR_MAX_TARGETLUN; j++) acb->devstate[i][j] = ARECA_RAID_GONE; max_xfer_len = ARCMSR_MAX_XFER_LEN; max_sg_entrys = ARCMSR_DEFAULT_SG_ENTRIES; firm_config_version = acb->firm_cfg_version; if((firm_config_version & 0xFF) >= 3){ max_xfer_len = (ARCMSR_CDB_SG_PAGE_LENGTH << ((firm_config_version >> 8) & 0xFF)) * 1024;/* max 4M byte */ max_sg_entrys = (max_xfer_len/4096); } acb->host->max_sectors = max_xfer_len/512; acb->host->sg_tablesize = max_sg_entrys; roundup_ccbsize = roundup(sizeof(struct CommandControlBlock) + (max_sg_entrys - 1) * sizeof(struct SG64ENTRY), 32); acb->uncache_size = roundup_ccbsize * ARCMSR_MAX_FREECCB_NUM; dma_coherent = dma_alloc_coherent(&pdev->dev, acb->uncache_size, &dma_coherent_handle, GFP_KERNEL); if(!dma_coherent){ printk(KERN_NOTICE "arcmsr%d: dma_alloc_coherent got error\n", acb->host->host_no); return -ENOMEM; } acb->dma_coherent = dma_coherent; acb->dma_coherent_handle = dma_coherent_handle; memset(dma_coherent, 0, acb->uncache_size); ccb_tmp = dma_coherent; acb->vir2phy_offset = (unsigned long)dma_coherent - (unsigned long)dma_coherent_handle; for(i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++){ cdb_phyaddr = dma_coherent_handle + 
offsetof(struct CommandControlBlock, arcmsr_cdb); ccb_tmp->cdb_phyaddr_pattern = ((acb->adapter_type == ACB_ADAPTER_TYPE_C) ? cdb_phyaddr : (cdb_phyaddr >> 5)); acb->pccb_pool[i] = ccb_tmp; ccb_tmp->acb = acb; INIT_LIST_HEAD(&ccb_tmp->list); list_add_tail(&ccb_tmp->list, &acb->ccb_free_list); ccb_tmp = (struct CommandControlBlock *)((unsigned long)ccb_tmp + roundup_ccbsize); dma_coherent_handle = dma_coherent_handle + roundup_ccbsize; } return 0; } static void arcmsr_message_isr_bh_fn(struct work_struct *work) { struct AdapterControlBlock *acb = container_of(work,struct AdapterControlBlock, arcmsr_do_message_isr_bh); switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct MessageUnit_A __iomem *reg = acb->pmuA; char *acb_dev_map = (char *)acb->device_map; uint32_t __iomem *signature = (uint32_t __iomem*) (&reg->message_rwbuffer[0]); char __iomem *devicemap = (char __iomem*) (&reg->message_rwbuffer[21]); int target, lun; struct scsi_device *psdev; char diff; atomic_inc(&acb->rq_map_token); if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) { for(target = 0; target < ARCMSR_MAX_TARGETID -1; target++) { diff = (*acb_dev_map)^readb(devicemap); if (diff != 0) { char temp; *acb_dev_map = readb(devicemap); temp =*acb_dev_map; for(lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) { if((temp & 0x01)==1 && (diff & 0x01) == 1) { scsi_add_device(acb->host, 0, target, lun); }else if((temp & 0x01) == 0 && (diff & 0x01) == 1) { psdev = scsi_device_lookup(acb->host, 0, target, lun); if (psdev != NULL ) { scsi_remove_device(psdev); scsi_device_put(psdev); } } temp >>= 1; diff >>= 1; } } devicemap++; acb_dev_map++; } } break; } case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; char *acb_dev_map = (char *)acb->device_map; uint32_t __iomem *signature = (uint32_t __iomem*)(&reg->message_rwbuffer[0]); char __iomem *devicemap = (char __iomem*)(&reg->message_rwbuffer[21]); int target, lun; struct scsi_device *psdev; char diff; atomic_inc(&acb->rq_map_token); if 
(readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) { for(target = 0; target < ARCMSR_MAX_TARGETID -1; target++) { diff = (*acb_dev_map)^readb(devicemap); if (diff != 0) { char temp; *acb_dev_map = readb(devicemap); temp =*acb_dev_map; for(lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) { if((temp & 0x01)==1 && (diff & 0x01) == 1) { scsi_add_device(acb->host, 0, target, lun); }else if((temp & 0x01) == 0 && (diff & 0x01) == 1) { psdev = scsi_device_lookup(acb->host, 0, target, lun); if (psdev != NULL ) { scsi_remove_device(psdev); scsi_device_put(psdev); } } temp >>= 1; diff >>= 1; } } devicemap++; acb_dev_map++; } } } break; case ACB_ADAPTER_TYPE_C: { struct MessageUnit_C *reg = acb->pmuC; char *acb_dev_map = (char *)acb->device_map; uint32_t __iomem *signature = (uint32_t __iomem *)(&reg->msgcode_rwbuffer[0]); char __iomem *devicemap = (char __iomem *)(&reg->msgcode_rwbuffer[21]); int target, lun; struct scsi_device *psdev; char diff; atomic_inc(&acb->rq_map_token); if (readl(signature) == ARCMSR_SIGNATURE_GET_CONFIG) { for (target = 0; target < ARCMSR_MAX_TARGETID - 1; target++) { diff = (*acb_dev_map)^readb(devicemap); if (diff != 0) { char temp; *acb_dev_map = readb(devicemap); temp = *acb_dev_map; for (lun = 0; lun < ARCMSR_MAX_TARGETLUN; lun++) { if ((temp & 0x01) == 1 && (diff & 0x01) == 1) { scsi_add_device(acb->host, 0, target, lun); } else if ((temp & 0x01) == 0 && (diff & 0x01) == 1) { psdev = scsi_device_lookup(acb->host, 0, target, lun); if (psdev != NULL) { scsi_remove_device(psdev); scsi_device_put(psdev); } } temp >>= 1; diff >>= 1; } } devicemap++; acb_dev_map++; } } } } } static int arcmsr_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct Scsi_Host *host; struct AdapterControlBlock *acb; uint8_t bus,dev_fun; int error; error = pci_enable_device(pdev); if(error){ return -ENODEV; } host = scsi_host_alloc(&arcmsr_scsi_host_template, sizeof(struct AdapterControlBlock)); if(!host){ goto pci_disable_dev; } error = pci_set_dma_mask(pdev, 
DMA_BIT_MASK(64)); if(error){ error = pci_set_dma_mask(pdev, DMA_BIT_MASK(32)); if(error){ printk(KERN_WARNING "scsi%d: No suitable DMA mask available\n", host->host_no); goto scsi_host_release; } } init_waitqueue_head(&wait_q); bus = pdev->bus->number; dev_fun = pdev->devfn; acb = (struct AdapterControlBlock *) host->hostdata; memset(acb,0,sizeof(struct AdapterControlBlock)); acb->pdev = pdev; acb->host = host; host->max_lun = ARCMSR_MAX_TARGETLUN; host->max_id = ARCMSR_MAX_TARGETID; /*16:8*/ host->max_cmd_len = 16; /*this is issue of 64bit LBA ,over 2T byte*/ host->can_queue = ARCMSR_MAX_FREECCB_NUM; /* max simultaneous cmds */ host->cmd_per_lun = ARCMSR_MAX_CMD_PERLUN; host->this_id = ARCMSR_SCSI_INITIATOR_ID; host->unique_id = (bus << 8) | dev_fun; pci_set_drvdata(pdev, host); pci_set_master(pdev); error = pci_request_regions(pdev, "arcmsr"); if(error){ goto scsi_host_release; } spin_lock_init(&acb->eh_lock); spin_lock_init(&acb->ccblist_lock); acb->acb_flags |= (ACB_F_MESSAGE_WQBUFFER_CLEARED | ACB_F_MESSAGE_RQBUFFER_CLEARED | ACB_F_MESSAGE_WQBUFFER_READED); acb->acb_flags &= ~ACB_F_SCSISTOPADAPTER; INIT_LIST_HEAD(&acb->ccb_free_list); arcmsr_define_adapter_type(acb); error = arcmsr_remap_pciregion(acb); if(!error){ goto pci_release_regs; } error = arcmsr_get_firmware_spec(acb); if(!error){ goto unmap_pci_region; } error = arcmsr_alloc_ccb_pool(acb); if(error){ goto free_hbb_mu; } arcmsr_iop_init(acb); error = scsi_add_host(host, &pdev->dev); if(error){ goto RAID_controller_stop; } error = request_irq(pdev->irq, arcmsr_do_interrupt, IRQF_SHARED, "arcmsr", acb); if(error){ goto scsi_host_remove; } host->irq = pdev->irq; scsi_scan_host(host); INIT_WORK(&acb->arcmsr_do_message_isr_bh, arcmsr_message_isr_bh_fn); atomic_set(&acb->rq_map_token, 16); atomic_set(&acb->ante_token_value, 16); acb->fw_flag = FW_NORMAL; init_timer(&acb->eternal_timer); acb->eternal_timer.expires = jiffies + msecs_to_jiffies(6 * HZ); acb->eternal_timer.data = (unsigned long) acb; 
acb->eternal_timer.function = &arcmsr_request_device_map; add_timer(&acb->eternal_timer); if(arcmsr_alloc_sysfs_attr(acb)) goto out_free_sysfs; return 0; out_free_sysfs: scsi_host_remove: scsi_remove_host(host); RAID_controller_stop: arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); arcmsr_free_ccb_pool(acb); free_hbb_mu: arcmsr_free_hbb_mu(acb); unmap_pci_region: arcmsr_unmap_pciregion(acb); pci_release_regs: pci_release_regions(pdev); scsi_host_release: scsi_host_put(host); pci_disable_dev: pci_disable_device(pdev); return -ENODEV; } static uint8_t arcmsr_abort_hba_allcmd(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0); if (!arcmsr_hba_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: wait 'abort all outstanding command' timeout \n" , acb->host->host_no); return false; } return true; } static uint8_t arcmsr_abort_hbb_allcmd(struct AdapterControlBlock *acb) { struct MessageUnit_B *reg = acb->pmuB; writel(ARCMSR_MESSAGE_ABORT_CMD, reg->drv2iop_doorbell); if (!arcmsr_hbb_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: wait 'abort all outstanding command' timeout \n" , acb->host->host_no); return false; } return true; } static uint8_t arcmsr_abort_hbc_allcmd(struct AdapterControlBlock *pACB) { struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC; writel(ARCMSR_INBOUND_MESG0_ABORT_CMD, &reg->inbound_msgaddr0); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell); if (!arcmsr_hbc_wait_msgint_ready(pACB)) { printk(KERN_NOTICE "arcmsr%d: wait 'abort all outstanding command' timeout \n" , pACB->host->host_no); return false; } return true; } static uint8_t arcmsr_abort_allcmd(struct AdapterControlBlock *acb) { uint8_t rtnval = 0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { rtnval = arcmsr_abort_hba_allcmd(acb); } break; case ACB_ADAPTER_TYPE_B: { rtnval = arcmsr_abort_hbb_allcmd(acb); } break; case 
ACB_ADAPTER_TYPE_C: { rtnval = arcmsr_abort_hbc_allcmd(acb); } } return rtnval; } static bool arcmsr_hbb_enable_driver_mode(struct AdapterControlBlock *pacb) { struct MessageUnit_B *reg = pacb->pmuB; writel(ARCMSR_MESSAGE_START_DRIVER_MODE, reg->drv2iop_doorbell); if (!arcmsr_hbb_wait_msgint_ready(pacb)) { printk(KERN_ERR "arcmsr%d: can't set driver mode. \n", pacb->host->host_no); return false; } return true; } static void arcmsr_pci_unmap_dma(struct CommandControlBlock *ccb) { struct scsi_cmnd *pcmd = ccb->pcmd; scsi_dma_unmap(pcmd); } static void arcmsr_ccb_complete(struct CommandControlBlock *ccb) { struct AdapterControlBlock *acb = ccb->acb; struct scsi_cmnd *pcmd = ccb->pcmd; unsigned long flags; atomic_dec(&acb->ccboutstandingcount); arcmsr_pci_unmap_dma(ccb); ccb->startdone = ARCMSR_CCB_DONE; spin_lock_irqsave(&acb->ccblist_lock, flags); list_add_tail(&ccb->list, &acb->ccb_free_list); spin_unlock_irqrestore(&acb->ccblist_lock, flags); pcmd->scsi_done(pcmd); } static void arcmsr_report_sense_info(struct CommandControlBlock *ccb) { struct scsi_cmnd *pcmd = ccb->pcmd; struct SENSE_DATA *sensebuffer = (struct SENSE_DATA *)pcmd->sense_buffer; pcmd->result = DID_OK << 16; if (sensebuffer) { int sense_data_length = sizeof(struct SENSE_DATA) < SCSI_SENSE_BUFFERSIZE ? 
sizeof(struct SENSE_DATA) : SCSI_SENSE_BUFFERSIZE; memset(sensebuffer, 0, SCSI_SENSE_BUFFERSIZE); memcpy(sensebuffer, ccb->arcmsr_cdb.SenseData, sense_data_length); sensebuffer->ErrorCode = SCSI_SENSE_CURRENT_ERRORS; sensebuffer->Valid = 1; } } static u32 arcmsr_disable_outbound_ints(struct AdapterControlBlock *acb) { u32 orig_mask = 0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A : { struct MessageUnit_A __iomem *reg = acb->pmuA; orig_mask = readl(&reg->outbound_intmask); writel(orig_mask|ARCMSR_MU_OUTBOUND_ALL_INTMASKENABLE, \ &reg->outbound_intmask); } break; case ACB_ADAPTER_TYPE_B : { struct MessageUnit_B *reg = acb->pmuB; orig_mask = readl(reg->iop2drv_doorbell_mask); writel(0, reg->iop2drv_doorbell_mask); } break; case ACB_ADAPTER_TYPE_C:{ struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC; /* disable all outbound interrupt */ orig_mask = readl(&reg->host_int_mask); /* disable outbound message0 int */ writel(orig_mask|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask); } break; } return orig_mask; } static void arcmsr_report_ccb_state(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb, bool error) { uint8_t id, lun; id = ccb->pcmd->device->id; lun = ccb->pcmd->device->lun; if (!error) { if (acb->devstate[id][lun] == ARECA_RAID_GONE) acb->devstate[id][lun] = ARECA_RAID_GOOD; ccb->pcmd->result = DID_OK << 16; arcmsr_ccb_complete(ccb); }else{ switch (ccb->arcmsr_cdb.DeviceStatus) { case ARCMSR_DEV_SELECT_TIMEOUT: { acb->devstate[id][lun] = ARECA_RAID_GONE; ccb->pcmd->result = DID_NO_CONNECT << 16; arcmsr_ccb_complete(ccb); } break; case ARCMSR_DEV_ABORTED: case ARCMSR_DEV_INIT_FAIL: { acb->devstate[id][lun] = ARECA_RAID_GONE; ccb->pcmd->result = DID_BAD_TARGET << 16; arcmsr_ccb_complete(ccb); } break; case ARCMSR_DEV_CHECK_CONDITION: { acb->devstate[id][lun] = ARECA_RAID_GOOD; arcmsr_report_sense_info(ccb); arcmsr_ccb_complete(ccb); } break; default: printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d isr get command error 
done, \ but got unknown DeviceStatus = 0x%x \n" , acb->host->host_no , id , lun , ccb->arcmsr_cdb.DeviceStatus); acb->devstate[id][lun] = ARECA_RAID_GONE; ccb->pcmd->result = DID_NO_CONNECT << 16; arcmsr_ccb_complete(ccb); break; } } } static void arcmsr_drain_donequeue(struct AdapterControlBlock *acb, struct CommandControlBlock *pCCB, bool error) { int id, lun; if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) { if (pCCB->startdone == ARCMSR_CCB_ABORTED) { struct scsi_cmnd *abortcmd = pCCB->pcmd; if (abortcmd) { id = abortcmd->device->id; lun = abortcmd->device->lun; abortcmd->result |= DID_ABORT << 16; arcmsr_ccb_complete(pCCB); printk(KERN_NOTICE "arcmsr%d: pCCB ='0x%p' isr got aborted command \n", acb->host->host_no, pCCB); } return; } printk(KERN_NOTICE "arcmsr%d: isr get an illegal ccb command \ done acb = '0x%p'" "ccb = '0x%p' ccbacb = '0x%p' startdone = 0x%x" " ccboutstandingcount = %d \n" , acb->host->host_no , acb , pCCB , pCCB->acb , pCCB->startdone , atomic_read(&acb->ccboutstandingcount)); return; } arcmsr_report_ccb_state(acb, pCCB, error); } static void arcmsr_done4abort_postqueue(struct AdapterControlBlock *acb) { int i = 0; uint32_t flag_ccb; struct ARCMSR_CDB *pARCMSR_CDB; bool error; struct CommandControlBlock *pCCB; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct MessageUnit_A __iomem *reg = acb->pmuA; uint32_t outbound_intstatus; outbound_intstatus = readl(&reg->outbound_intstatus) & acb->outbound_int_enable; /*clear and abort all outbound posted Q*/ writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/ while(((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) { pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));/*frame must be 32 bytes aligned*/ pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? 
true : false; arcmsr_drain_donequeue(acb, pCCB, error); } } break; case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; /*clear all outbound posted Q*/ writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); /* clear doorbell interrupt */ for (i = 0; i < ARCMSR_MAX_HBB_POSTQUEUE; i++) { if ((flag_ccb = readl(&reg->done_qbuffer[i])) != 0) { writel(0, &reg->done_qbuffer[i]); pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+(flag_ccb << 5));/*frame must be 32 bytes aligned*/ pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false; arcmsr_drain_donequeue(acb, pCCB, error); } reg->post_qbuffer[i] = 0; } reg->doneq_index = 0; reg->postq_index = 0; } break; case ACB_ADAPTER_TYPE_C: { struct MessageUnit_C *reg = acb->pmuC; struct ARCMSR_CDB *pARCMSR_CDB; uint32_t flag_ccb, ccb_cdb_phy; bool error; struct CommandControlBlock *pCCB; while ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) && (i++ < ARCMSR_MAX_OUTSTANDING_CMD)) { /*need to do*/ flag_ccb = readl(&reg->outbound_queueport_low); ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0); pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset+ccb_cdb_phy);/*frame must be 32 bytes aligned*/ pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb); error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? 
true : false; arcmsr_drain_donequeue(acb, pCCB, error); } } } } static void arcmsr_remove(struct pci_dev *pdev) { struct Scsi_Host *host = pci_get_drvdata(pdev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; int poll_count = 0; arcmsr_free_sysfs_attr(acb); scsi_remove_host(host); flush_work_sync(&acb->arcmsr_do_message_isr_bh); del_timer_sync(&acb->eternal_timer); arcmsr_disable_outbound_ints(acb); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); acb->acb_flags |= ACB_F_SCSISTOPADAPTER; acb->acb_flags &= ~ACB_F_IOP_INITED; for (poll_count = 0; poll_count < ARCMSR_MAX_OUTSTANDING_CMD; poll_count++){ if (!atomic_read(&acb->ccboutstandingcount)) break; arcmsr_interrupt(acb);/* FIXME: need spinlock */ msleep(25); } if (atomic_read(&acb->ccboutstandingcount)) { int i; arcmsr_abort_allcmd(acb); arcmsr_done4abort_postqueue(acb); for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { struct CommandControlBlock *ccb = acb->pccb_pool[i]; if (ccb->startdone == ARCMSR_CCB_START) { ccb->startdone = ARCMSR_CCB_ABORTED; ccb->pcmd->result = DID_ABORT << 16; arcmsr_ccb_complete(ccb); } } } free_irq(pdev->irq, acb); arcmsr_free_ccb_pool(acb); arcmsr_free_hbb_mu(acb); arcmsr_unmap_pciregion(acb); pci_release_regions(pdev); scsi_host_put(host); pci_disable_device(pdev); pci_set_drvdata(pdev, NULL); } static void arcmsr_shutdown(struct pci_dev *pdev) { struct Scsi_Host *host = pci_get_drvdata(pdev); struct AdapterControlBlock *acb = (struct AdapterControlBlock *)host->hostdata; del_timer_sync(&acb->eternal_timer); arcmsr_disable_outbound_ints(acb); flush_work_sync(&acb->arcmsr_do_message_isr_bh); arcmsr_stop_adapter_bgrb(acb); arcmsr_flush_adapter_cache(acb); } static int arcmsr_module_init(void) { int error = 0; error = pci_register_driver(&arcmsr_pci_driver); return error; } static void arcmsr_module_exit(void) { pci_unregister_driver(&arcmsr_pci_driver); } module_init(arcmsr_module_init); module_exit(arcmsr_module_exit); static void 
/*
 * NOTE(review): fragment — the storage class / return type of this function
 * is on a line above this chunk.  Re-enables the adapter's outbound
 * interrupt sources; intmask_org is presumably the mask value previously
 * saved by arcmsr_disable_outbound_ints() — confirm against caller.
 */
arcmsr_enable_outbound_ints(struct AdapterControlBlock *acb, u32 intmask_org)
{
	u32 mask;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		/* clear the mask bits for postqueue/doorbell/message0 so they interrupt */
		mask = intmask_org & ~(ARCMSR_MU_OUTBOUND_POSTQUEUE_INTMASKENABLE |
			ARCMSR_MU_OUTBOUND_DOORBELL_INTMASKENABLE |
			ARCMSR_MU_OUTBOUND_MESSAGE0_INTMASKENABLE);
		writel(mask, &reg->outbound_intmask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x000000ff;
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/* type B mask register uses set bits to ENABLE these sources */
		mask = intmask_org | (ARCMSR_IOP2DRV_DATA_WRITE_OK |
			ARCMSR_IOP2DRV_DATA_READ_OK |
			ARCMSR_IOP2DRV_CDB_DONE |
			ARCMSR_IOP2DRV_MESSAGE_CMD_DONE);
		writel(mask, reg->iop2drv_doorbell_mask);
		acb->outbound_int_enable = (intmask_org | mask) & 0x0000000f;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *reg = acb->pmuC;
		mask = ~(ARCMSR_HBCMU_UTILITY_A_ISR_MASK |
			ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR_MASK |
			ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR_MASK);
		writel(intmask_org & mask, &reg->host_int_mask);
		acb->outbound_int_enable = ~(intmask_org & mask) & 0x0000000f;
		}
	}
}

/*
 * Translate a SCSI command into the adapter's ARCMSR_CDB inside @ccb.
 * Maps the command's scatter/gather list; each segment is written as a
 * 32-bit (SG32ENTRY) or 64-bit (SG64ENTRY) descriptor depending on
 * whether the DMA address has high bits set.  Returns SUCCESS, or FAILED
 * when DMA mapping fails or yields more segments than the host supports.
 */
static int arcmsr_build_ccb(struct AdapterControlBlock *acb,
	struct CommandControlBlock *ccb, struct scsi_cmnd *pcmd)
{
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;
	int8_t *psge = (int8_t *)&arcmsr_cdb->u;	/* cursor into the SG area */
	__le32 address_lo, address_hi;
	int arccdbsize = 0x30;	/* fixed CDB header size before SG entries */
	__le32 length = 0;
	int i;
	struct scatterlist *sg;
	int nseg;

	ccb->pcmd = pcmd;
	memset(arcmsr_cdb, 0, sizeof(struct ARCMSR_CDB));
	arcmsr_cdb->TargetID = pcmd->device->id;
	arcmsr_cdb->LUN = pcmd->device->lun;
	arcmsr_cdb->Function = 1;
	arcmsr_cdb->Context = 0;
	memcpy(arcmsr_cdb->Cdb, pcmd->cmnd, pcmd->cmd_len);

	nseg = scsi_dma_map(pcmd);
	if (unlikely(nseg > acb->host->sg_tablesize || nseg < 0))
		return FAILED;
	scsi_for_each_sg(pcmd, sg, nseg, i) {
		/* Get the physical address of the current data pointer */
		length = cpu_to_le32(sg_dma_len(sg));
		address_lo = cpu_to_le32(dma_addr_lo32(sg_dma_address(sg)));
		address_hi = cpu_to_le32(dma_addr_hi32(sg_dma_address(sg)));
		if (address_hi == 0) {
			/* fits in 32 bits: compact SG entry */
			struct SG32ENTRY *pdma_sg = (struct SG32ENTRY *)psge;

			pdma_sg->address = address_lo;
			pdma_sg->length = length;
			psge += sizeof (struct SG32ENTRY);
			arccdbsize += sizeof (struct SG32ENTRY);
		} else {
			/* 64-bit address: wide entry, flagged via IS_SG64_ADDR */
			struct SG64ENTRY *pdma_sg = (struct SG64ENTRY *)psge;

			pdma_sg->addresshigh = address_hi;
			pdma_sg->address = address_lo;
			pdma_sg->length = length|cpu_to_le32(IS_SG64_ADDR);
			psge += sizeof (struct SG64ENTRY);
			arccdbsize += sizeof (struct SG64ENTRY);
		}
	}
	arcmsr_cdb->sgcount = (uint8_t)nseg;
	arcmsr_cdb->DataLength = scsi_bufflen(pcmd);
	/* number of 256-byte pages the CDB occupies, rounded up */
	arcmsr_cdb->msgPages = arccdbsize/0x100 + (arccdbsize % 0x100 ? 1 : 0);
	if ( arccdbsize > 256)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_SGL_BSIZE;
	if (pcmd->sc_data_direction == DMA_TO_DEVICE)
		arcmsr_cdb->Flags |= ARCMSR_CDB_FLAG_WRITE;
	ccb->arc_cdb_size = arccdbsize;
	return SUCCESS;
}

/*
 * Hand a built CCB to the adapter via the type-specific inbound post
 * queue and bump the outstanding-command count.
 */
static void arcmsr_post_ccb(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb)
{
	uint32_t cdb_phyaddr_pattern = ccb->cdb_phyaddr_pattern;
	struct ARCMSR_CDB *arcmsr_cdb = (struct ARCMSR_CDB *)&ccb->arcmsr_cdb;

	atomic_inc(&acb->ccboutstandingcount);
	ccb->startdone = ARCMSR_CCB_START;
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;

		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE)
			writel(cdb_phyaddr_pattern | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
				&reg->inbound_queueport);
		else {
			writel(cdb_phyaddr_pattern, &reg->inbound_queueport);
		}
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		uint32_t ending_index, index = reg->postq_index;

		/* zero the slot after ours so the IOP sees the end of the queue */
		ending_index = ((index + 1) % ARCMSR_MAX_HBB_POSTQUEUE);
		writel(0, &reg->post_qbuffer[ending_index]);
		if (arcmsr_cdb->Flags & ARCMSR_CDB_FLAG_SGL_BSIZE) {
			writel(cdb_phyaddr_pattern | ARCMSR_CCBPOST_FLAG_SGL_BSIZE,
				&reg->post_qbuffer[index]);
		} else {
			writel(cdb_phyaddr_pattern, &reg->post_qbuffer[index]);
		}
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE; /* if last index number set it to 0 */
		reg->postq_index = index;
		writel(ARCMSR_DRV2IOP_CDB_POSTED, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)acb->pmuC;
		uint32_t ccb_post_stamp, arc_cdb_size;

		/* encode the CDB size (capped at 0x300) into the posted word */
		arc_cdb_size = (ccb->arc_cdb_size > 0x300) ? 0x300 : ccb->arc_cdb_size;
		ccb_post_stamp = (cdb_phyaddr_pattern | ((arc_cdb_size - 1) >> 6) | 1);
		if (acb->cdb_phyaddr_hi32) {
			writel(acb->cdb_phyaddr_hi32, &phbcmu->inbound_queueport_high);
			writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
		} else {
			writel(ccb_post_stamp, &phbcmu->inbound_queueport_low);
		}
		}
	}
}

/* Ask a type A adapter to stop background rebuild and wait for the ack. */
static void arcmsr_stop_hba_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
	if (!arcmsr_hba_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
			, acb->host->host_no);
	}
}

/* Ask a type B adapter to stop background rebuild and wait for the ack. */
static void arcmsr_stop_hbb_bgrb(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;

	acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_MESSAGE_STOP_BGRB, reg->drv2iop_doorbell);
	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
			, acb->host->host_no);
	}
}

/* Ask a type C adapter to stop background rebuild and wait for the ack. */
static void arcmsr_stop_hbc_bgrb(struct AdapterControlBlock *pACB)
{
	struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;

	pACB->acb_flags &= ~ACB_F_MSG_START_BGRB;
	writel(ARCMSR_INBOUND_MESG0_STOP_BGRB, &reg->inbound_msgaddr0);
	/* type C also needs the doorbell ring to deliver the message */
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	if (!arcmsr_hbc_wait_msgint_ready(pACB)) {
		printk(KERN_NOTICE
			"arcmsr%d: wait 'stop adapter background rebulid' timeout \n"
			, pACB->host->host_no);
	}
	return;
}

/* Dispatch "stop background rebuild" to the adapter-type-specific helper. */
static void arcmsr_stop_adapter_bgrb(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		arcmsr_stop_hba_bgrb(acb);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		arcmsr_stop_hbb_bgrb(acb);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		arcmsr_stop_hbc_bgrb(acb);
		}
	}
}

/* Release the coherent DMA area holding the CCB pool. */
static void arcmsr_free_ccb_pool(struct AdapterControlBlock *acb)
{
	dma_free_coherent(&acb->pdev->dev, acb->uncache_size,
		acb->dma_coherent, acb->dma_coherent_handle);
}

/* Ack to the IOP that the driver has consumed the read Qbuffer. */
void arcmsr_iop_message_read(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell);
		}
	}
}

/* Notify the IOP that the driver has written data into the write Qbuffer. */
static void arcmsr_iop_message_wrote(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		/*
		** push inbound doorbell tell iop, driver data write ok
		** and wait reply on next hwinterrupt for next Qbuffer post
		*/
		writel(ARCMSR_INBOUND_DRIVER_DATA_WRITE_OK, &reg->inbound_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		/*
		** push inbound doorbell tell iop, driver data write ok
		** and wait reply on next hwinterrupt for next Qbuffer post
		*/
		writel(ARCMSR_DRV2IOP_DATA_WRITE_OK, reg->drv2iop_doorbell);
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C __iomem *reg = acb->pmuC;
		/*
		** push inbound doorbell tell iop, driver data write ok
		** and wait reply on next hwinterrupt for next Qbuffer post
		*/
		writel(ARCMSR_HBCMU_DRV2IOP_DATA_WRITE_OK, &reg->inbound_doorbell);
		}
		break;
	}
}

/*
 * Return the IOP's read Qbuffer (IOP -> driver message data) for this
 * adapter type.  NOTE(review): function continues past the end of this
 * span; remaining switch cases follow below.
 */
struct QBUFFER __iomem *arcmsr_get_iop_rqbuffer(struct AdapterControlBlock *acb)
{
	struct QBUFFER __iomem *qbuffer = NULL;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		qbuffer = (struct QBUFFER __iomem *)&reg->message_rbuffer;
		}
		break;
/* NOTE(review): continuation of arcmsr_get_iop_rqbuffer() from above. */
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		qbuffer = (struct QBUFFER __iomem *)reg->message_rbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)acb->pmuC;
		qbuffer = (struct QBUFFER __iomem *)&phbcmu->message_rbuffer;
		}
	}
	return qbuffer;
}

/* Return the IOP's write Qbuffer (driver -> IOP message data) for this adapter type. */
static struct QBUFFER __iomem *arcmsr_get_iop_wqbuffer(struct AdapterControlBlock *acb)
{
	struct QBUFFER __iomem *pqbuffer = NULL;

	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		struct MessageUnit_A __iomem *reg = acb->pmuA;
		pqbuffer = (struct QBUFFER __iomem *) &reg->message_wbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		struct MessageUnit_B *reg = acb->pmuB;
		pqbuffer = (struct QBUFFER __iomem *)reg->message_wbuffer;
		}
		break;
	case ACB_ADAPTER_TYPE_C: {
		struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC;
		pqbuffer = (struct QBUFFER __iomem *)&reg->message_wbuffer;
		}
	}
	return pqbuffer;
}

/*
 * IOP signalled "data written": copy bytes from the IOP's read Qbuffer
 * into the driver's circular rqbuffer.  If the ring has no room, set
 * ACB_F_IOPDATA_OVERFLOW and leave the data for later instead of acking.
 */
static void arcmsr_iop2drv_data_wrote_handle(struct AdapterControlBlock *acb)
{
	struct QBUFFER __iomem *prbuffer;
	struct QBUFFER *pQbuffer;
	uint8_t __iomem *iop_data;
	int32_t my_empty_len, iop_len, rqbuf_firstindex, rqbuf_lastindex;

	rqbuf_lastindex = acb->rqbuf_lastindex;
	rqbuf_firstindex = acb->rqbuf_firstindex;
	prbuffer = arcmsr_get_iop_rqbuffer(acb);
	iop_data = (uint8_t __iomem *)prbuffer->data;
	iop_len = prbuffer->data_len;
	/* free space in the ring; mask works because ARCMSR_MAX_QBUFFER is a power of two */
	my_empty_len = (rqbuf_firstindex - rqbuf_lastindex - 1) & (ARCMSR_MAX_QBUFFER - 1);
	if (my_empty_len >= iop_len) {
		while (iop_len > 0) {
			pQbuffer = (struct QBUFFER *)&acb->rqbuffer[rqbuf_lastindex];
			memcpy(pQbuffer, iop_data, 1);	/* one byte at a time from iomem */
			rqbuf_lastindex++;
			rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			iop_len--;
		}
		acb->rqbuf_lastindex = rqbuf_lastindex;
		arcmsr_iop_message_read(acb);	/* ack: buffer consumed */
	} else {
		acb->acb_flags |= ACB_F_IOPDATA_OVERFLOW;
	}
}

/*
 * IOP signalled "data read": the IOP consumed the previous write Qbuffer,
 * so push up to 124 more pending bytes from the driver's wqbuffer ring.
 */
static void arcmsr_iop2drv_data_read_handle(struct AdapterControlBlock *acb)
{
	acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_READED;
	if (acb->wqbuf_firstindex != acb->wqbuf_lastindex) {
		uint8_t *pQbuffer;
		struct QBUFFER __iomem *pwbuffer;
		uint8_t __iomem *iop_data;
		int32_t allxfer_len = 0;

		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
		pwbuffer = arcmsr_get_iop_wqbuffer(acb);
		iop_data = (uint8_t __iomem *)pwbuffer->data;
		while ((acb->wqbuf_firstindex != acb->wqbuf_lastindex) &&
			(allxfer_len < 124)) {
			pQbuffer = &acb->wqbuffer[acb->wqbuf_firstindex];
			memcpy(iop_data, pQbuffer, 1);
			acb->wqbuf_firstindex++;
			acb->wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			allxfer_len++;
		}
		pwbuffer->data_len = allxfer_len;
		arcmsr_iop_message_wrote(acb);
	}
	if (acb->wqbuf_firstindex == acb->wqbuf_lastindex) {
		acb->acb_flags |= ACB_F_MESSAGE_WQBUFFER_CLEARED;
	}
}

/* Type A doorbell interrupt: read, write-to-clear, then dispatch read/write events. */
static void arcmsr_hba_doorbell_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	outbound_doorbell = readl(&reg->outbound_doorbell);
	writel(outbound_doorbell, &reg->outbound_doorbell);	/* clear interrupt */
	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_WRITE_OK) {
		arcmsr_iop2drv_data_wrote_handle(acb);
	}
	if (outbound_doorbell & ARCMSR_OUTBOUND_IOP331_DATA_READ_OK) {
		arcmsr_iop2drv_data_read_handle(acb);
	}
}

/* Type C doorbell interrupt: clear, then dispatch data-write/read and message events. */
static void arcmsr_hbc_doorbell_isr(struct AdapterControlBlock *pACB)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_C *reg = (struct MessageUnit_C *)pACB->pmuC;
	/*
	*******************************************************************
	**  Maybe here we need to check wrqbuffer_lock is lock or not
	**  DOORBELL: din! don!
	**  check if there are any mail need to pack from firmware
	*******************************************************************
	*/
	outbound_doorbell = readl(&reg->outbound_doorbell);
	writel(outbound_doorbell, &reg->outbound_doorbell_clear);/*clear interrupt*/
	if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_WRITE_OK) {
		arcmsr_iop2drv_data_wrote_handle(pACB);
	}
	if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_DATA_READ_OK) {
		arcmsr_iop2drv_data_read_handle(pACB);
	}
	if (outbound_doorbell & ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
		arcmsr_hbc_message_isr(pACB);    /* messenger of "driver to iop commands" */
	}
	return;
}

/*
 * Type A post-queue interrupt: drain completed CCBs from the outbound
 * queue port until the 0xFFFFFFFF "empty" sentinel is read.
 */
static void arcmsr_hba_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t flag_ccb;
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	struct ARCMSR_CDB *pARCMSR_CDB;
	struct CommandControlBlock *pCCB;
	bool error;

	while ((flag_ccb = readl(&reg->outbound_queueport)) != 0xFFFFFFFF) {
		pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
			(flag_ccb << 5));/*frame must be 32 bytes aligned*/
		pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_drain_donequeue(acb, pCCB, error);
	}
}

/*
 * Type B post-queue interrupt: walk done_qbuffer from doneq_index until a
 * zero entry, completing each CCB.  NOTE(review): function continues past
 * the end of this span.
 */
static void arcmsr_hbb_postqueue_isr(struct AdapterControlBlock *acb)
{
	uint32_t index;
	uint32_t flag_ccb;
	struct MessageUnit_B *reg = acb->pmuB;
	struct ARCMSR_CDB *pARCMSR_CDB;
	struct CommandControlBlock *pCCB;
	bool error;

	index = reg->doneq_index;
	while ((flag_ccb = readl(&reg->done_qbuffer[index])) != 0) {
		writel(0, &reg->done_qbuffer[index]);	/* consume the slot */
		pARCMSR_CDB = (struct ARCMSR_CDB *)(acb->vir2phy_offset +
			(flag_ccb << 5));/*frame must be 32 bytes aligned*/
		pCCB = container_of(pARCMSR_CDB, struct CommandControlBlock, arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ?
/* NOTE(review): continuation of arcmsr_hbb_postqueue_isr() from above. */
			true : false;
		arcmsr_drain_donequeue(acb, pCCB, error);
		index++;
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
	}
}

/*
 * Type C post-queue interrupt: drain completed CCBs while the post-queue
 * ISR bit stays set, throttling the IOP after
 * ARCMSR_HBC_ISR_THROTTLING_LEVEL completions in one pass.
 */
static void arcmsr_hbc_postqueue_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_C *phbcmu;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, ccb_cdb_phy, throttling = 0;
	int error;

	phbcmu = (struct MessageUnit_C *)acb->pmuC;
	/* areca cdb command done */
	/* Use correct offset and size for syncing */
	while (readl(&phbcmu->host_int_status) &
		ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR){
		/* check if command done with no error*/
		flag_ccb = readl(&phbcmu->outbound_queueport_low);
		ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0);/*frame must be 32 bytes aligned*/
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false;
		/* check if command done with no error */
		arcmsr_drain_donequeue(acb, ccb, error);
		if (throttling == ARCMSR_HBC_ISR_THROTTLING_LEVEL) {
			writel(ARCMSR_HBCMU_DRV2IOP_POSTQUEUE_THROTTLING,
				&phbcmu->inbound_doorbell);
			break;
		}
		throttling++;
	}
}
/*
**********************************************************************************
** Handle a message interrupt
**
** The only message interrupt we expect is in response to a query for the current adapter config.
** We want this in order to compare the drivemap so that we can detect newly-attached drives.
**********************************************************************************
*/
static void arcmsr_hba_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A *reg  = acb->pmuA;

	/*clear interrupt and message state*/
	writel(ARCMSR_MU_OUTBOUND_MESSAGE0_INT, &reg->outbound_intstatus);
	/* the config query reply is processed in process context */
	schedule_work(&acb->arcmsr_do_message_isr_bh);
}
static void arcmsr_hbb_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg  = acb->pmuB;

	/*clear interrupt and message state*/
	writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
	schedule_work(&acb->arcmsr_do_message_isr_bh);
}
/*
**********************************************************************************
** Handle a message interrupt
**
** The only message interrupt we expect is in response to a query for the
** current adapter config.
** We want this in order to compare the drivemap so that we can detect newly-attached drives.
**********************************************************************************
*/
static void arcmsr_hbc_message_isr(struct AdapterControlBlock *acb)
{
	struct MessageUnit_C *reg  = acb->pmuC;

	/*clear interrupt and message state*/
	writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
		&reg->outbound_doorbell_clear);
	schedule_work(&acb->arcmsr_do_message_isr_bh);
}

/*
 * Type A top-level interrupt handler.  Returns 1 when the interrupt is
 * not ours (shared IRQ), 0 after dispatching doorbell/postqueue/message
 * sub-handlers.
 */
static int arcmsr_handle_hba_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_intstatus;
	struct MessageUnit_A __iomem *reg = acb->pmuA;

	outbound_intstatus = readl(&reg->outbound_intstatus) &
		acb->outbound_int_enable;
	if (!(outbound_intstatus & ARCMSR_MU_OUTBOUND_HANDLE_INT)) {
		return 1;
	}
	writel(outbound_intstatus, &reg->outbound_intstatus);	/* clear interrupt */
	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_DOORBELL_INT) {
		arcmsr_hba_doorbell_isr(acb);
	}
	if (outbound_intstatus & ARCMSR_MU_OUTBOUND_POSTQUEUE_INT) {
		arcmsr_hba_postqueue_isr(acb);
	}
	if(outbound_intstatus & ARCMSR_MU_OUTBOUND_MESSAGE0_INT) {
		/* messenger of "driver to iop commands" */
		arcmsr_hba_message_isr(acb);
	}
	return 0;
}

/* Type B top-level interrupt handler; 1 = not ours (shared IRQ), 0 = handled. */
static int arcmsr_handle_hbb_isr(struct AdapterControlBlock *acb)
{
	uint32_t outbound_doorbell;
	struct MessageUnit_B *reg = acb->pmuB;

	outbound_doorbell = readl(reg->iop2drv_doorbell) &
		acb->outbound_int_enable;
	if (!outbound_doorbell)
		return 1;
	writel(~outbound_doorbell, reg->iop2drv_doorbell);
	/*in case the last action of doorbell interrupt clearance is cached,
	this action can push HW to write down the clear bit*/
	readl(reg->iop2drv_doorbell);
	writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell);
	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_WRITE_OK) {
		arcmsr_iop2drv_data_wrote_handle(acb);
	}
	if (outbound_doorbell & ARCMSR_IOP2DRV_DATA_READ_OK) {
		arcmsr_iop2drv_data_read_handle(acb);
	}
	if (outbound_doorbell & ARCMSR_IOP2DRV_CDB_DONE) {
		arcmsr_hbb_postqueue_isr(acb);
	}
	if(outbound_doorbell & ARCMSR_IOP2DRV_MESSAGE_CMD_DONE) {
		/* messenger of "driver to iop commands" */
		arcmsr_hbb_message_isr(acb);
	}
	return 0;
}

/* Type C top-level interrupt handler; 1 = not ours (shared IRQ), 0 = handled. */
static int arcmsr_handle_hbc_isr(struct AdapterControlBlock *pACB)
{
	uint32_t host_interrupt_status;
	struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC;
	/*
	*********************************************
	**   check outbound intstatus
	*********************************************
	*/
	host_interrupt_status = readl(&phbcmu->host_int_status);
	if (!host_interrupt_status) {
		/*it must be share irq*/
		return 1;
	}
	/* MU ioctl transfer doorbell interrupts*/
	if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_DOORBELL_ISR) {
		arcmsr_hbc_doorbell_isr(pACB);   /* messenger of "ioctl message read write" */
	}
	/* MU post queue interrupts*/
	if (host_interrupt_status & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) {
		arcmsr_hbc_postqueue_isr(pACB);  /* messenger of "scsi commands" */
	}
	return 0;
}

/*
 * Common interrupt entry: dispatch to the adapter-type-specific handler.
 * NOTE(review): function continues past the end of this span.
 */
static irqreturn_t arcmsr_interrupt(struct AdapterControlBlock *acb)
{
	switch (acb->adapter_type) {
	case ACB_ADAPTER_TYPE_A: {
		if (arcmsr_handle_hba_isr(acb)) {
			return IRQ_NONE;
		}
		}
		break;
	case ACB_ADAPTER_TYPE_B: {
		if (arcmsr_handle_hbb_isr(acb)) {
			return IRQ_NONE;
		}
		}
		break;
	case
/* NOTE(review): continuation of arcmsr_interrupt() from above. */
	     ACB_ADAPTER_TYPE_C: {
		if (arcmsr_handle_hbc_isr(acb)) {
			return IRQ_NONE;
		}
		}
	}
	return IRQ_HANDLED;
}

/*
 * Quiesce the adapter before shutdown/suspend: stop background rebuild
 * and flush the adapter cache with outbound interrupts masked.
 */
static void arcmsr_iop_parking(struct AdapterControlBlock *acb)
{
	if (acb) {
		/* stop adapter background rebuild */
		if (acb->acb_flags & ACB_F_MSG_START_BGRB) {
			uint32_t intmask_org;
			acb->acb_flags &= ~ACB_F_MSG_START_BGRB;
			intmask_org = arcmsr_disable_outbound_ints(acb);
			arcmsr_stop_adapter_bgrb(acb);
			arcmsr_flush_adapter_cache(acb);
			arcmsr_enable_outbound_ints(acb, intmask_org);
		}
	}
}

/*
 * Push up to 124 pending bytes from the driver's wqbuffer ring into the
 * IOP's write Qbuffer, but only if the IOP has read the previous batch
 * (ACB_F_MESSAGE_WQBUFFER_READED set).
 */
void arcmsr_post_ioctldata2iop(struct AdapterControlBlock *acb)
{
	int32_t wqbuf_firstindex, wqbuf_lastindex;
	uint8_t *pQbuffer;
	struct QBUFFER __iomem *pwbuffer;
	uint8_t __iomem *iop_data;
	int32_t allxfer_len = 0;

	pwbuffer = arcmsr_get_iop_wqbuffer(acb);
	iop_data = (uint8_t __iomem *)pwbuffer->data;
	if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_READED) {
		acb->acb_flags &= (~ACB_F_MESSAGE_WQBUFFER_READED);
		wqbuf_firstindex = acb->wqbuf_firstindex;
		wqbuf_lastindex = acb->wqbuf_lastindex;
		while ((wqbuf_firstindex != wqbuf_lastindex) && (allxfer_len < 124)) {
			pQbuffer = &acb->wqbuffer[wqbuf_firstindex];
			memcpy(iop_data, pQbuffer, 1);
			wqbuf_firstindex++;
			wqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
			iop_data++;
			allxfer_len++;
		}
		acb->wqbuf_firstindex = wqbuf_firstindex;
		pwbuffer->data_len = allxfer_len;
		arcmsr_iop_message_wrote(acb);
	}
}

/*
 * Service an Areca ioctl delivered as a SCSI WRITE_BUFFER/READ_BUFFER
 * command: decode the 4-byte control code from the CDB and operate on the
 * driver<->IOP message Qbuffers accordingly.  Returns 0 on success or
 * ARCMSR_MESSAGE_FAIL; when the firmware is wedged (FW_DEADLOCK) the
 * ReturnCode field reports BUS_HANG_ON to user space.
 */
static int arcmsr_iop_message_xfer(struct AdapterControlBlock *acb,
	struct scsi_cmnd *cmd)
{
	struct CMD_MESSAGE_FIELD *pcmdmessagefld;
	int retvalue = 0, transfer_len = 0;
	char *buffer;
	struct scatterlist *sg;
	uint32_t controlcode = (uint32_t ) cmd->cmnd[5] << 24 |
		(uint32_t ) cmd->cmnd[6] << 16 |
		(uint32_t ) cmd->cmnd[7] << 8 |
		(uint32_t ) cmd->cmnd[8];
	/* 4 bytes: Areca io control code */
	sg = scsi_sglist(cmd);
	buffer = kmap_atomic(sg_page(sg)) + sg->offset;
	if (scsi_sg_count(cmd) > 1) {
		/* only single-segment payloads are supported */
		retvalue = ARCMSR_MESSAGE_FAIL;
		goto message_out;
	}
	transfer_len += sg->length;
	if (transfer_len > sizeof(struct CMD_MESSAGE_FIELD)) {
		retvalue = ARCMSR_MESSAGE_FAIL;
		goto message_out;
	}
	pcmdmessagefld = (struct CMD_MESSAGE_FIELD *) buffer;
	switch(controlcode) {
	case ARCMSR_MESSAGE_READ_RQBUFFER: {
		/* drain up to 1031 bytes of IOP->driver data to user space */
		unsigned char *ver_addr;
		uint8_t *pQbuffer, *ptmpQbuffer;
		int32_t allxfer_len = 0;

		ver_addr = kmalloc(1032, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}
		ptmpQbuffer = ver_addr;
		while ((acb->rqbuf_firstindex != acb->rqbuf_lastindex)
			&& (allxfer_len < 1031)) {
			pQbuffer = &acb->rqbuffer[acb->rqbuf_firstindex];
			memcpy(ptmpQbuffer, pQbuffer, 1);
			acb->rqbuf_firstindex++;
			acb->rqbuf_firstindex %= ARCMSR_MAX_QBUFFER;
			ptmpQbuffer++;
			allxfer_len++;
		}
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			/* room freed up: pull the deferred IOP data now and ack it */
			struct QBUFFER __iomem *prbuffer;
			uint8_t __iomem *iop_data;
			int32_t iop_len;

			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			prbuffer = arcmsr_get_iop_rqbuffer(acb);
			iop_data = prbuffer->data;
			iop_len = readl(&prbuffer->data_len);
			while (iop_len > 0) {
				acb->rqbuffer[acb->rqbuf_lastindex] = readb(iop_data);
				acb->rqbuf_lastindex++;
				acb->rqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
				iop_data++;
				iop_len--;
			}
			arcmsr_iop_message_read(acb);
		}
		memcpy(pcmdmessagefld->messagedatabuffer, ver_addr, allxfer_len);
		pcmdmessagefld->cmdmessage.Length = allxfer_len;
		if(acb->fw_flag == FW_DEADLOCK) {
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		}else{
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		}
		kfree(ver_addr);
		}
		break;
	case ARCMSR_MESSAGE_WRITE_WQBUFFER: {
		/* queue user-supplied bytes for delivery to the IOP */
		unsigned char *ver_addr;
		int32_t my_empty_len, user_len, wqbuf_firstindex, wqbuf_lastindex;
		uint8_t *pQbuffer, *ptmpuserbuffer;

		ver_addr = kmalloc(1032, GFP_ATOMIC);
		if (!ver_addr) {
			retvalue = ARCMSR_MESSAGE_FAIL;
			goto message_out;
		}
		if(acb->fw_flag == FW_DEADLOCK) {
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		}else{
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		}
		ptmpuserbuffer = ver_addr;
		user_len = pcmdmessagefld->cmdmessage.Length;
		memcpy(ptmpuserbuffer, pcmdmessagefld->messagedatabuffer, user_len);
		wqbuf_lastindex = acb->wqbuf_lastindex;
		wqbuf_firstindex = acb->wqbuf_firstindex;
		if (wqbuf_lastindex != wqbuf_firstindex) {
			/* previous write still pending: flush it and report busy via sense */
			struct SENSE_DATA *sensebuffer =
				(struct SENSE_DATA *)cmd->sense_buffer;
			arcmsr_post_ioctldata2iop(acb);
			/* has error report sensedata */
			sensebuffer->ErrorCode = 0x70;
			sensebuffer->SenseKey = ILLEGAL_REQUEST;
			sensebuffer->AdditionalSenseLength = 0x0A;
			sensebuffer->AdditionalSenseCode = 0x20;
			sensebuffer->Valid = 1;
			retvalue = ARCMSR_MESSAGE_FAIL;
		} else {
			my_empty_len = (wqbuf_firstindex-wqbuf_lastindex - 1)
				&(ARCMSR_MAX_QBUFFER - 1);
			if (my_empty_len >= user_len) {
				while (user_len > 0) {
					pQbuffer = &acb->wqbuffer[acb->wqbuf_lastindex];
					memcpy(pQbuffer, ptmpuserbuffer, 1);
					acb->wqbuf_lastindex++;
					acb->wqbuf_lastindex %= ARCMSR_MAX_QBUFFER;
					ptmpuserbuffer++;
					user_len--;
				}
				if (acb->acb_flags & ACB_F_MESSAGE_WQBUFFER_CLEARED) {
					acb->acb_flags &=
						~ACB_F_MESSAGE_WQBUFFER_CLEARED;
					arcmsr_post_ioctldata2iop(acb);
				}
			} else {
				/* has error report sensedata */
				struct SENSE_DATA *sensebuffer =
					(struct SENSE_DATA *)cmd->sense_buffer;
				sensebuffer->ErrorCode = 0x70;
				sensebuffer->SenseKey = ILLEGAL_REQUEST;
				sensebuffer->AdditionalSenseLength = 0x0A;
				sensebuffer->AdditionalSenseCode = 0x20;
				sensebuffer->Valid = 1;
				retvalue = ARCMSR_MESSAGE_FAIL;
			}
		}
		kfree(ver_addr);
		}
		break;
	case ARCMSR_MESSAGE_CLEAR_RQBUFFER: {
		/* reset the IOP->driver ring, acking any deferred overflow data */
		uint8_t *pQbuffer = acb->rqbuffer;
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |= ACB_F_MESSAGE_RQBUFFER_CLEARED;
		acb->rqbuf_firstindex = 0;
		acb->rqbuf_lastindex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		if(acb->fw_flag == FW_DEADLOCK) {
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		}else{
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		}
		}
		break;
	case ARCMSR_MESSAGE_CLEAR_WQBUFFER: {
		/* reset the driver->IOP ring */
		uint8_t *pQbuffer = acb->wqbuffer;
		if(acb->fw_flag == FW_DEADLOCK) {
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		}else{
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		}
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |=
			(ACB_F_MESSAGE_WQBUFFER_CLEARED |
			ACB_F_MESSAGE_WQBUFFER_READED);
		acb->wqbuf_firstindex = 0;
		acb->wqbuf_lastindex = 0;
		memset(pQbuffer, 0, ARCMSR_MAX_QBUFFER);
		}
		break;
	case ARCMSR_MESSAGE_CLEAR_ALLQBUFFER: {
		/* reset both rings at once */
		uint8_t *pQbuffer;
		if (acb->acb_flags & ACB_F_IOPDATA_OVERFLOW) {
			acb->acb_flags &= ~ACB_F_IOPDATA_OVERFLOW;
			arcmsr_iop_message_read(acb);
		}
		acb->acb_flags |=
			(ACB_F_MESSAGE_WQBUFFER_CLEARED
			| ACB_F_MESSAGE_RQBUFFER_CLEARED
			| ACB_F_MESSAGE_WQBUFFER_READED);
		acb->rqbuf_firstindex = 0;
		acb->rqbuf_lastindex = 0;
		acb->wqbuf_firstindex = 0;
		acb->wqbuf_lastindex = 0;
		pQbuffer = acb->rqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		pQbuffer = acb->wqbuffer;
		memset(pQbuffer, 0, sizeof(struct QBUFFER));
		if(acb->fw_flag == FW_DEADLOCK) {
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		}else{
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_OK;
		}
		}
		break;
	case ARCMSR_MESSAGE_RETURN_CODE_3F: {
		if(acb->fw_flag == FW_DEADLOCK) {
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_BUS_HANG_ON;
		}else{
			pcmdmessagefld->cmdmessage.ReturnCode =
				ARCMSR_MESSAGE_RETURNCODE_3F;
		}
		break;
		}
	case ARCMSR_MESSAGE_SAY_HELLO: {
		int8_t *hello_string = "Hello! I am ARCMSR";
		if(acb->fw_flag == FW_DEADLOCK) {
			pcmdmessagefld->cmdmessage.ReturnCode =
/* NOTE(review): continuation of arcmsr_handle_virtual_command()'s INQUIRY case. */
		/* Product Revision */
		sg = scsi_sglist(cmd);
		buffer = kmap_atomic(sg_page(sg)) + sg->offset;
		memcpy(buffer, inqdata, sizeof(inqdata));
		sg = scsi_sglist(cmd);
		kunmap_atomic(buffer - sg->offset);
		cmd->scsi_done(cmd);
	}
	break;
	case WRITE_BUFFER:
	case READ_BUFFER: {
		/* Areca ioctl channel: tunneled through buffer read/write commands */
		if (arcmsr_iop_message_xfer(acb, cmd))
			cmd->result = (DID_ERROR << 16);
		cmd->scsi_done(cmd);
	}
	break;
	default:
		cmd->scsi_done(cmd);
	}
}

/*
 * queuecommand (lock-held variant, wrapped by DEF_SCSI_QCMD below).
 * Target id 16 is the virtual device used for IOP message transfer; cache
 * sync / diagnostics against a vanished RAID set complete immediately.
 */
static int arcmsr_queue_command_lck(struct scsi_cmnd *cmd,
	void (* done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata;
	struct CommandControlBlock *ccb;
	int target = cmd->device->id;
	int lun = cmd->device->lun;
	uint8_t scsicmd = cmd->cmnd[0];

	cmd->scsi_done = done;
	cmd->host_scribble = NULL;
	cmd->result = 0;
	if ((scsicmd == SYNCHRONIZE_CACHE) ||(scsicmd == SEND_DIAGNOSTIC)){
		if(acb->devstate[target][lun] == ARECA_RAID_GONE) {
			cmd->result = (DID_NO_CONNECT << 16);
		}
		cmd->scsi_done(cmd);
		return 0;
	}
	if (target == 16) {
		/* virtual device for iop message transfer */
		arcmsr_handle_virtual_command(acb, cmd);
		return 0;
	}
	if (atomic_read(&acb->ccboutstandingcount) >= ARCMSR_MAX_OUTSTANDING_CMD)
		return SCSI_MLQUEUE_HOST_BUSY;
	ccb = arcmsr_get_freeccb(acb);
	if (!ccb)
		return SCSI_MLQUEUE_HOST_BUSY;
	if (arcmsr_build_ccb( acb, ccb, cmd ) == FAILED) {
		cmd->result = (DID_ERROR << 16) | (RESERVATION_CONFLICT << 1);
		cmd->scsi_done(cmd);
		return 0;
	}
	arcmsr_post_ccb(acb, ccb);
	return 0;
}

static DEF_SCSI_QCMD(arcmsr_queue_command)

/*
 * Type A: issue GET_CONFIG, then copy model/version/device-map strings
 * and firmware parameters out of the message r/w buffer.  Returns false
 * on message-interrupt timeout.
 */
static bool arcmsr_get_hba_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	char *acb_firm_model = acb->firm_model;
	char *acb_firm_version = acb->firm_version;
	char *acb_device_map = acb->device_map;
	char __iomem *iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);
	char __iomem *iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);
	char __iomem *iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);
	int count;

	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
	if (!arcmsr_hba_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
miscellaneous data' timeout \n", acb->host->host_no);
		return false;
	}
	count = 8;
	while (count){
		*acb_firm_model = readb(iop_firm_model);
		acb_firm_model++;
		iop_firm_model++;
		count--;
	}
	count = 16;
	while (count){
		*acb_firm_version = readb(iop_firm_version);
		acb_firm_version++;
		iop_firm_version++;
		count--;
	}
	count=16;
	while(count){
		*acb_device_map = readb(iop_device_map);
		acb_device_map++;
		iop_device_map++;
		count--;
	}
	printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
		acb->host->host_no,
		acb->firm_version,
		acb->firm_model);
	acb->signature = readl(&reg->message_rwbuffer[0]);
	acb->firm_request_len = readl(&reg->message_rwbuffer[1]);
	acb->firm_numbers_queue = readl(&reg->message_rwbuffer[2]);
	acb->firm_sdram_size = readl(&reg->message_rwbuffer[3]);
	acb->firm_hd_channels = readl(&reg->message_rwbuffer[4]);
	acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]);  /*firm_cfg_version,25,100-103*/
	return true;
}

/*
 * Type B: allocate the MessageUnit_B bookkeeping structure, wire up its
 * register pointers into BAR0/BAR1, then query firmware config as above.
 */
static bool arcmsr_get_hbb_config(struct AdapterControlBlock *acb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	struct pci_dev *pdev = acb->pdev;
	void *dma_coherent;
	dma_addr_t dma_coherent_handle;
	char *acb_firm_model = acb->firm_model;
	char *acb_firm_version = acb->firm_version;
	char *acb_device_map = acb->device_map;
	char __iomem *iop_firm_model;	/*firm_model,15,60-67*/
	char __iomem *iop_firm_version;	/*firm_version,17,68-83*/
	char __iomem *iop_device_map;	/*firm_version,21,84-99*/
	int count;

	dma_coherent = dma_alloc_coherent(&pdev->dev, sizeof(struct MessageUnit_B),
		&dma_coherent_handle, GFP_KERNEL);
	if (!dma_coherent){
		printk(KERN_NOTICE
			"arcmsr%d: dma_alloc_coherent got error for hbb mu\n",
			acb->host->host_no);
		return false;
	}
	acb->dma_coherent_handle_hbb_mu = dma_coherent_handle;
	reg = (struct MessageUnit_B *)dma_coherent;
	acb->pmuB = reg;
	/* doorbell registers live in mem_base0, message buffers in mem_base1 */
	reg->drv2iop_doorbell= (uint32_t __iomem *)((unsigned long)acb->mem_base0 +
		ARCMSR_DRV2IOP_DOORBELL);
	reg->drv2iop_doorbell_mask = (uint32_t __iomem *)((unsigned long)acb->mem_base0 +
		ARCMSR_DRV2IOP_DOORBELL_MASK);
	reg->iop2drv_doorbell = (uint32_t __iomem *)((unsigned long)acb->mem_base0 +
		ARCMSR_IOP2DRV_DOORBELL);
	reg->iop2drv_doorbell_mask = (uint32_t __iomem *)((unsigned long)acb->mem_base0 +
		ARCMSR_IOP2DRV_DOORBELL_MASK);
	reg->message_wbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 +
		ARCMSR_MESSAGE_WBUFFER);
	reg->message_rbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 +
		ARCMSR_MESSAGE_RBUFFER);
	reg->message_rwbuffer = (uint32_t __iomem *)((unsigned long)acb->mem_base1 +
		ARCMSR_MESSAGE_RWBUFFER);
	iop_firm_model = (char __iomem *)(&reg->message_rwbuffer[15]);	/*firm_model,15,60-67*/
	iop_firm_version = (char __iomem *)(&reg->message_rwbuffer[17]);	/*firm_version,17,68-83*/
	iop_device_map = (char __iomem *)(&reg->message_rwbuffer[21]);	/*firm_version,21,84-99*/

	writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell);
	if (!arcmsr_hbb_wait_msgint_ready(acb)) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
miscellaneous data' timeout \n", acb->host->host_no);
		return false;
	}
	count = 8;
	while (count){
		*acb_firm_model = readb(iop_firm_model);
		acb_firm_model++;
		iop_firm_model++;
		count--;
	}
	count = 16;
	while (count){
		*acb_firm_version = readb(iop_firm_version);
		acb_firm_version++;
		iop_firm_version++;
		count--;
	}
	count = 16;
	while(count){
		*acb_device_map = readb(iop_device_map);
		acb_device_map++;
		iop_device_map++;
		count--;
	}
	printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
		acb->host->host_no,
		acb->firm_version,
		acb->firm_model);
	acb->signature = readl(&reg->message_rwbuffer[1]);
	/*firm_signature,1,00-03*/
	acb->firm_request_len = readl(&reg->message_rwbuffer[2]);
	/*firm_request_len,1,04-07*/
	acb->firm_numbers_queue = readl(&reg->message_rwbuffer[3]);
	/*firm_numbers_queue,2,08-11*/
	acb->firm_sdram_size = readl(&reg->message_rwbuffer[4]);
	/*firm_sdram_size,3,12-15*/
	acb->firm_hd_channels = readl(&reg->message_rwbuffer[5]);
	/*firm_ide_channels,4,16-19*/
	acb->firm_cfg_version = readl(&reg->message_rwbuffer[25]);  /*firm_cfg_version,25,100-103*/
	/*firm_ide_channels,4,16-19*/
	return true;
}

/*
 * Type C: mask all interrupts, busy-wait for firmware-ready, issue
 * GET_CONFIG via message register + doorbell, then poll (up to ~20 ms)
 * for the reply before copying out model/version and firmware parameters.
 * NOTE(review): the firmware-ready do/while below has no timeout — it
 * spins until ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK appears.
 */
static bool arcmsr_get_hbc_config(struct AdapterControlBlock *pACB)
{
	uint32_t intmask_org, Index, firmware_state = 0;
	struct MessageUnit_C *reg = pACB->pmuC;
	char *acb_firm_model = pACB->firm_model;
	char *acb_firm_version = pACB->firm_version;
	char *iop_firm_model = (char *)(&reg->msgcode_rwbuffer[15]);	/*firm_model,15,60-67*/
	char *iop_firm_version = (char *)(&reg->msgcode_rwbuffer[17]);	/*firm_version,17,68-83*/
	int count;

	/* disable all outbound interrupt */
	intmask_org = readl(&reg->host_int_mask); /* disable outbound message0 int */
	writel(intmask_org|ARCMSR_HBCMU_ALL_INTMASKENABLE, &reg->host_int_mask);
	/* wait firmware ready */
	do {
		firmware_state = readl(&reg->outbound_msgaddr1);
	} while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0);
	/* post "get config" instruction */
	writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0);
	writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell);
	/* wait message ready */
	for (Index = 0; Index < 2000; Index++) {
		if (readl(&reg->outbound_doorbell) &
			ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE) {
			writel(ARCMSR_HBCMU_IOP2DRV_MESSAGE_CMD_DONE_DOORBELL_CLEAR,
				&reg->outbound_doorbell_clear);/*clear interrupt*/
			break;
		}
		udelay(10);
	} /*max 1 seconds*/
	if (Index >= 2000) {
		printk(KERN_NOTICE "arcmsr%d: wait 'get adapter firmware \
miscellaneous data' timeout \n", pACB->host->host_no);
		return false;
	}
	count = 8;
	while (count) {
		*acb_firm_model = readb(iop_firm_model);
		acb_firm_model++;
		iop_firm_model++;
		count--;
	}
	count = 16;
	while (count) {
		*acb_firm_version = readb(iop_firm_version);
		acb_firm_version++;
		iop_firm_version++;
		count--;
	}
	printk(KERN_NOTICE "Areca RAID Controller%d: F/W %s & Model %s\n",
		pACB->host->host_no,
		pACB->firm_version,
		pACB->firm_model);
	pACB->firm_request_len = readl(&reg->msgcode_rwbuffer[1]);	/*firm_request_len,1,04-07*/
	pACB->firm_numbers_queue = readl(&reg->msgcode_rwbuffer[2]);	/*firm_numbers_queue,2,08-11*/
	pACB->firm_sdram_size = readl(&reg->msgcode_rwbuffer[3]);	/*firm_sdram_size,3,12-15*/
	pACB->firm_hd_channels = readl(&reg->msgcode_rwbuffer[4]);	/*firm_ide_channels,4,16-19*/
	pACB->firm_cfg_version = readl(&reg->msgcode_rwbuffer[25]);	/*firm_cfg_version,25,100-103*/
	/*all interrupt service will be enable at arcmsr_iop_init*/
	return true;
}

/* Dispatch the firmware-config query to the adapter-type-specific helper. */
static bool arcmsr_get_firmware_spec(struct AdapterControlBlock *acb)
{
	if (acb->adapter_type == ACB_ADAPTER_TYPE_A)
		return arcmsr_get_hba_config(acb);
	else if (acb->adapter_type == ACB_ADAPTER_TYPE_B)
		return arcmsr_get_hbb_config(acb);
	else
		return arcmsr_get_hbc_config(acb);
}

/*
 * Poll the type A outbound queue until @poll_ccb completes (SUCCESS) or
 * ~100 retries at 25 ms elapse (FAILED).  Used by error handling paths;
 * other completed/aborted CCBs encountered while polling are finished too.
 */
static int arcmsr_polling_hba_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_A __iomem *reg = acb->pmuA;
	struct CommandControlBlock *ccb;
	struct ARCMSR_CDB *arcmsr_cdb;
	uint32_t flag_ccb, outbound_intstatus, poll_ccb_done = 0, poll_count = 0;
	int rtn;
	bool error;

polling_hba_ccb_retry:
	poll_count++;
	outbound_intstatus = readl(&reg->outbound_intstatus) &
		acb->outbound_int_enable;
	writel(outbound_intstatus, &reg->outbound_intstatus);/*clear interrupt*/
	while (1) {
		if ((flag_ccb = readl(&reg->outbound_queueport)) == 0xFFFFFFFF) {
			if (poll_ccb_done){
				rtn = SUCCESS;
				break;
			}else {
				msleep(25);
				if (poll_count > 100){
					rtn = FAILED;
					break;
				}
				goto polling_hba_ccb_retry;
			}
		}
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done = (ccb == poll_ccb) ? 1:0;
		if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) {
			if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) {
				printk(KERN_NOTICE
					"arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'"
					" poll command abort successfully \n"
					, acb->host->host_no
					, ccb->pcmd->device->id
					, ccb->pcmd->device->lun
					, ccb);
				ccb->pcmd->result = DID_ABORT << 16;
				arcmsr_ccb_complete(ccb);
				continue;
			}
			printk(KERN_NOTICE
				"arcmsr%d: polling get an illegal ccb"
				" command done ccb = '0x%p'"
				"ccboutstandingcount = %d \n"
				, acb->host->host_no
				, ccb
				, atomic_read(&acb->ccboutstandingcount));
			continue;
		}
		error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false;
		arcmsr_report_ccb_state(acb, ccb, error);
	}
	return rtn;
}

/*
 * Type B variant of the completion poll.  NOTE(review): function
 * continues past the end of this chunk.
 */
static int arcmsr_polling_hbb_ccbdone(struct AdapterControlBlock *acb,
	struct CommandControlBlock *poll_ccb)
{
	struct MessageUnit_B *reg = acb->pmuB;
	struct ARCMSR_CDB *arcmsr_cdb;
	struct CommandControlBlock *ccb;
	uint32_t flag_ccb, poll_ccb_done = 0, poll_count = 0;
	int index, rtn;
	bool error;

polling_hbb_ccb_retry:
	poll_count++;
	/* clear doorbell interrupt */
	writel(ARCMSR_DOORBELL_INT_CLEAR_PATTERN, reg->iop2drv_doorbell);
	while(1){
		index = reg->doneq_index;
		if ((flag_ccb = readl(&reg->done_qbuffer[index])) == 0) {
			if (poll_ccb_done){
				rtn = SUCCESS;
				break;
			}else {
				msleep(25);
				if (poll_count > 100){
					rtn = FAILED;
					break;
				}
				goto polling_hbb_ccb_retry;
			}
		}
		writel(0, &reg->done_qbuffer[index]);
		index++;
		/*if last index number set it to 0 */
		index %= ARCMSR_MAX_HBB_POSTQUEUE;
		reg->doneq_index = index;
		/* check if command done with no error*/
		arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + (flag_ccb << 5));
		ccb = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb);
		poll_ccb_done = (ccb == poll_ccb) ?
1:0; if ((ccb->acb != acb) || (ccb->startdone != ARCMSR_CCB_START)) { if ((ccb->startdone == ARCMSR_CCB_ABORTED) || (ccb == poll_ccb)) { printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'" " poll command abort successfully \n" ,acb->host->host_no ,ccb->pcmd->device->id ,ccb->pcmd->device->lun ,ccb); ccb->pcmd->result = DID_ABORT << 16; arcmsr_ccb_complete(ccb); continue; } printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb" " command done ccb = '0x%p'" "ccboutstandingcount = %d \n" , acb->host->host_no , ccb , atomic_read(&acb->ccboutstandingcount)); continue; } error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE0) ? true : false; arcmsr_report_ccb_state(acb, ccb, error); } return rtn; } static int arcmsr_polling_hbc_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb) { struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC; uint32_t flag_ccb, ccb_cdb_phy; struct ARCMSR_CDB *arcmsr_cdb; bool error; struct CommandControlBlock *pCCB; uint32_t poll_ccb_done = 0, poll_count = 0; int rtn; polling_hbc_ccb_retry: poll_count++; while (1) { if ((readl(&reg->host_int_status) & ARCMSR_HBCMU_OUTBOUND_POSTQUEUE_ISR) == 0) { if (poll_ccb_done) { rtn = SUCCESS; break; } else { msleep(25); if (poll_count > 100) { rtn = FAILED; break; } goto polling_hbc_ccb_retry; } } flag_ccb = readl(&reg->outbound_queueport_low); ccb_cdb_phy = (flag_ccb & 0xFFFFFFF0); arcmsr_cdb = (struct ARCMSR_CDB *)(acb->vir2phy_offset + ccb_cdb_phy);/*frame must be 32 bytes aligned*/ pCCB = container_of(arcmsr_cdb, struct CommandControlBlock, arcmsr_cdb); poll_ccb_done = (pCCB == poll_ccb) ? 
1 : 0; /* check ifcommand done with no error*/ if ((pCCB->acb != acb) || (pCCB->startdone != ARCMSR_CCB_START)) { if (pCCB->startdone == ARCMSR_CCB_ABORTED) { printk(KERN_NOTICE "arcmsr%d: scsi id = %d lun = %d ccb = '0x%p'" " poll command abort successfully \n" , acb->host->host_no , pCCB->pcmd->device->id , pCCB->pcmd->device->lun , pCCB); pCCB->pcmd->result = DID_ABORT << 16; arcmsr_ccb_complete(pCCB); continue; } printk(KERN_NOTICE "arcmsr%d: polling get an illegal ccb" " command done ccb = '0x%p'" "ccboutstandingcount = %d \n" , acb->host->host_no , pCCB , atomic_read(&acb->ccboutstandingcount)); continue; } error = (flag_ccb & ARCMSR_CCBREPLY_FLAG_ERROR_MODE1) ? true : false; arcmsr_report_ccb_state(acb, pCCB, error); } return rtn; } static int arcmsr_polling_ccbdone(struct AdapterControlBlock *acb, struct CommandControlBlock *poll_ccb) { int rtn = 0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { rtn = arcmsr_polling_hba_ccbdone(acb, poll_ccb); } break; case ACB_ADAPTER_TYPE_B: { rtn = arcmsr_polling_hbb_ccbdone(acb, poll_ccb); } break; case ACB_ADAPTER_TYPE_C: { rtn = arcmsr_polling_hbc_ccbdone(acb, poll_ccb); } } return rtn; } static int arcmsr_iop_confirm(struct AdapterControlBlock *acb) { uint32_t cdb_phyaddr, cdb_phyaddr_hi32; dma_addr_t dma_coherent_handle; /* ******************************************************************** ** here we need to tell iop 331 our freeccb.HighPart ** if freeccb.HighPart is not zero ******************************************************************** */ dma_coherent_handle = acb->dma_coherent_handle; cdb_phyaddr = (uint32_t)(dma_coherent_handle); cdb_phyaddr_hi32 = (uint32_t)((cdb_phyaddr >> 16) >> 16); acb->cdb_phyaddr_hi32 = cdb_phyaddr_hi32; /* *********************************************************************** ** if adapter type B, set window of "post command Q" *********************************************************************** */ switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { if 
(cdb_phyaddr_hi32 != 0) { struct MessageUnit_A __iomem *reg = acb->pmuA; uint32_t intmask_org; intmask_org = arcmsr_disable_outbound_ints(acb); writel(ARCMSR_SIGNATURE_SET_CONFIG, \ &reg->message_rwbuffer[0]); writel(cdb_phyaddr_hi32, &reg->message_rwbuffer[1]); writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, \ &reg->inbound_msgaddr0); if (!arcmsr_hba_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: ""set ccb high \ part physical address timeout\n", acb->host->host_no); return 1; } arcmsr_enable_outbound_ints(acb, intmask_org); } } break; case ACB_ADAPTER_TYPE_B: { unsigned long post_queue_phyaddr; uint32_t __iomem *rwbuffer; struct MessageUnit_B *reg = acb->pmuB; uint32_t intmask_org; intmask_org = arcmsr_disable_outbound_ints(acb); reg->postq_index = 0; reg->doneq_index = 0; writel(ARCMSR_MESSAGE_SET_POST_WINDOW, reg->drv2iop_doorbell); if (!arcmsr_hbb_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d:can not set diver mode\n", \ acb->host->host_no); return 1; } post_queue_phyaddr = acb->dma_coherent_handle_hbb_mu; rwbuffer = reg->message_rwbuffer; /* driver "set config" signature */ writel(ARCMSR_SIGNATURE_SET_CONFIG, rwbuffer++); /* normal should be zero */ writel(cdb_phyaddr_hi32, rwbuffer++); /* postQ size (256 + 8)*4 */ writel(post_queue_phyaddr, rwbuffer++); /* doneQ size (256 + 8)*4 */ writel(post_queue_phyaddr + 1056, rwbuffer++); /* ccb maxQ size must be --> [(256 + 8)*4]*/ writel(1056, rwbuffer); writel(ARCMSR_MESSAGE_SET_CONFIG, reg->drv2iop_doorbell); if (!arcmsr_hbb_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \ timeout \n",acb->host->host_no); return 1; } arcmsr_hbb_enable_driver_mode(acb); arcmsr_enable_outbound_ints(acb, intmask_org); } break; case ACB_ADAPTER_TYPE_C: { if (cdb_phyaddr_hi32 != 0) { struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC; printk(KERN_NOTICE "arcmsr%d: cdb_phyaddr_hi32=0x%x\n", acb->adapter_index, cdb_phyaddr_hi32); writel(ARCMSR_SIGNATURE_SET_CONFIG, 
&reg->msgcode_rwbuffer[0]); writel(cdb_phyaddr_hi32, &reg->msgcode_rwbuffer[1]); writel(ARCMSR_INBOUND_MESG0_SET_CONFIG, &reg->inbound_msgaddr0); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell); if (!arcmsr_hbc_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: 'set command Q window' \ timeout \n", acb->host->host_no); return 1; } } } } return 0; } static void arcmsr_wait_firmware_ready(struct AdapterControlBlock *acb) { uint32_t firmware_state = 0; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct MessageUnit_A __iomem *reg = acb->pmuA; do { firmware_state = readl(&reg->outbound_msgaddr1); } while ((firmware_state & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0); } break; case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; do { firmware_state = readl(reg->iop2drv_doorbell); } while ((firmware_state & ARCMSR_MESSAGE_FIRMWARE_OK) == 0); writel(ARCMSR_DRV2IOP_END_OF_INTERRUPT, reg->drv2iop_doorbell); } break; case ACB_ADAPTER_TYPE_C: { struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC; do { firmware_state = readl(&reg->outbound_msgaddr1); } while ((firmware_state & ARCMSR_HBCMU_MESSAGE_FIRMWARE_OK) == 0); } } } static void arcmsr_request_hba_device_map(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); return; } else { acb->fw_flag = FW_NORMAL; if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)){ atomic_set(&acb->rq_map_token, 16); } atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token)); if (atomic_dec_and_test(&acb->rq_map_token)) { mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); return; } writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * 
HZ)); } return; } static void arcmsr_request_hbb_device_map(struct AdapterControlBlock *acb) { struct MessageUnit_B __iomem *reg = acb->pmuB; if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0 ) || ((acb->acb_flags & ACB_F_ABORT) != 0 )){ mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); return; } else { acb->fw_flag = FW_NORMAL; if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) { atomic_set(&acb->rq_map_token, 16); } atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token)); if (atomic_dec_and_test(&acb->rq_map_token)) { mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); return; } writel(ARCMSR_MESSAGE_GET_CONFIG, reg->drv2iop_doorbell); mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); } return; } static void arcmsr_request_hbc_device_map(struct AdapterControlBlock *acb) { struct MessageUnit_C __iomem *reg = acb->pmuC; if (unlikely(atomic_read(&acb->rq_map_token) == 0) || ((acb->acb_flags & ACB_F_BUS_RESET) != 0) || ((acb->acb_flags & ACB_F_ABORT) != 0)) { mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); return; } else { acb->fw_flag = FW_NORMAL; if (atomic_read(&acb->ante_token_value) == atomic_read(&acb->rq_map_token)) { atomic_set(&acb->rq_map_token, 16); } atomic_set(&acb->ante_token_value, atomic_read(&acb->rq_map_token)); if (atomic_dec_and_test(&acb->rq_map_token)) { mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); return; } writel(ARCMSR_INBOUND_MESG0_GET_CONFIG, &reg->inbound_msgaddr0); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &reg->inbound_doorbell); mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); } return; } static void arcmsr_request_device_map(unsigned long pacb) { struct AdapterControlBlock *acb = (struct AdapterControlBlock *)pacb; switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { arcmsr_request_hba_device_map(acb); } break; case ACB_ADAPTER_TYPE_B: 
{ arcmsr_request_hbb_device_map(acb); } break; case ACB_ADAPTER_TYPE_C: { arcmsr_request_hbc_device_map(acb); } } } static void arcmsr_start_hba_bgrb(struct AdapterControlBlock *acb) { struct MessageUnit_A __iomem *reg = acb->pmuA; acb->acb_flags |= ACB_F_MSG_START_BGRB; writel(ARCMSR_INBOUND_MESG0_START_BGRB, &reg->inbound_msgaddr0); if (!arcmsr_hba_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ rebulid' timeout \n", acb->host->host_no); } } static void arcmsr_start_hbb_bgrb(struct AdapterControlBlock *acb) { struct MessageUnit_B *reg = acb->pmuB; acb->acb_flags |= ACB_F_MSG_START_BGRB; writel(ARCMSR_MESSAGE_START_BGRB, reg->drv2iop_doorbell); if (!arcmsr_hbb_wait_msgint_ready(acb)) { printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ rebulid' timeout \n",acb->host->host_no); } } static void arcmsr_start_hbc_bgrb(struct AdapterControlBlock *pACB) { struct MessageUnit_C *phbcmu = (struct MessageUnit_C *)pACB->pmuC; pACB->acb_flags |= ACB_F_MSG_START_BGRB; writel(ARCMSR_INBOUND_MESG0_START_BGRB, &phbcmu->inbound_msgaddr0); writel(ARCMSR_HBCMU_DRV2IOP_MESSAGE_CMD_DONE, &phbcmu->inbound_doorbell); if (!arcmsr_hbc_wait_msgint_ready(pACB)) { printk(KERN_NOTICE "arcmsr%d: wait 'start adapter background \ rebulid' timeout \n", pACB->host->host_no); } return; } static void arcmsr_start_adapter_bgrb(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: arcmsr_start_hba_bgrb(acb); break; case ACB_ADAPTER_TYPE_B: arcmsr_start_hbb_bgrb(acb); break; case ACB_ADAPTER_TYPE_C: arcmsr_start_hbc_bgrb(acb); } } static void arcmsr_clear_doorbell_queue_buffer(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: { struct MessageUnit_A __iomem *reg = acb->pmuA; uint32_t outbound_doorbell; /* empty doorbell Qbuffer if door bell ringed */ outbound_doorbell = readl(&reg->outbound_doorbell); /*clear doorbell interrupt */ writel(outbound_doorbell, 
&reg->outbound_doorbell); writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell); } break; case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; /*clear interrupt and message state*/ writel(ARCMSR_MESSAGE_INT_CLEAR_PATTERN, reg->iop2drv_doorbell); writel(ARCMSR_DRV2IOP_DATA_READ_OK, reg->drv2iop_doorbell); /* let IOP know data has been read */ } break; case ACB_ADAPTER_TYPE_C: { struct MessageUnit_C *reg = (struct MessageUnit_C *)acb->pmuC; uint32_t outbound_doorbell; /* empty doorbell Qbuffer if door bell ringed */ outbound_doorbell = readl(&reg->outbound_doorbell); writel(outbound_doorbell, &reg->outbound_doorbell_clear); writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell); } } } static void arcmsr_enable_eoi_mode(struct AdapterControlBlock *acb) { switch (acb->adapter_type) { case ACB_ADAPTER_TYPE_A: return; case ACB_ADAPTER_TYPE_B: { struct MessageUnit_B *reg = acb->pmuB; writel(ARCMSR_MESSAGE_ACTIVE_EOI_MODE, reg->drv2iop_doorbell); if (!arcmsr_hbb_wait_msgint_ready(acb)) { printk(KERN_NOTICE "ARCMSR IOP enables EOI_MODE TIMEOUT"); return; } } break; case ACB_ADAPTER_TYPE_C: return; } return; } static void arcmsr_hardware_reset(struct AdapterControlBlock *acb) { uint8_t value[64]; int i, count = 0; struct MessageUnit_A __iomem *pmuA = acb->pmuA; struct MessageUnit_C __iomem *pmuC = acb->pmuC; u32 temp = 0; /* backup pci config data */ printk(KERN_NOTICE "arcmsr%d: executing hw bus reset .....\n", acb->host->host_no); for (i = 0; i < 64; i++) { pci_read_config_byte(acb->pdev, i, &value[i]); } /* hardware reset signal */ if ((acb->dev_id == 0x1680)) { writel(ARCMSR_ARC1680_BUS_RESET, &pmuA->reserved1[0]); } else if ((acb->dev_id == 0x1880)) { do { count++; writel(0xF, &pmuC->write_sequence); writel(0x4, &pmuC->write_sequence); writel(0xB, &pmuC->write_sequence); writel(0x2, &pmuC->write_sequence); writel(0x7, &pmuC->write_sequence); writel(0xD, &pmuC->write_sequence); } while ((((temp = readl(&pmuC->host_diagnostic)) | 
ARCMSR_ARC1880_DiagWrite_ENABLE) == 0) && (count < 5)); writel(ARCMSR_ARC1880_RESET_ADAPTER, &pmuC->host_diagnostic); } else { pci_write_config_byte(acb->pdev, 0x84, 0x20); } msleep(2000); /* write back pci config data */ for (i = 0; i < 64; i++) { pci_write_config_byte(acb->pdev, i, value[i]); } msleep(1000); return; } static void arcmsr_iop_init(struct AdapterControlBlock *acb) { uint32_t intmask_org; /* disable all outbound interrupt */ intmask_org = arcmsr_disable_outbound_ints(acb); arcmsr_wait_firmware_ready(acb); arcmsr_iop_confirm(acb); /*start background rebuild*/ arcmsr_start_adapter_bgrb(acb); /* empty doorbell Qbuffer if door bell ringed */ arcmsr_clear_doorbell_queue_buffer(acb); arcmsr_enable_eoi_mode(acb); /* enable outbound Post Queue,outbound doorbell Interrupt */ arcmsr_enable_outbound_ints(acb, intmask_org); acb->acb_flags |= ACB_F_IOP_INITED; } static uint8_t arcmsr_iop_reset(struct AdapterControlBlock *acb) { struct CommandControlBlock *ccb; uint32_t intmask_org; uint8_t rtnval = 0x00; int i = 0; unsigned long flags; if (atomic_read(&acb->ccboutstandingcount) != 0) { /* disable all outbound interrupt */ intmask_org = arcmsr_disable_outbound_ints(acb); /* talk to iop 331 outstanding command aborted */ rtnval = arcmsr_abort_allcmd(acb); /* clear all outbound posted Q */ arcmsr_done4abort_postqueue(acb); for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { ccb = acb->pccb_pool[i]; if (ccb->startdone == ARCMSR_CCB_START) { scsi_dma_unmap(ccb->pcmd); ccb->startdone = ARCMSR_CCB_DONE; ccb->ccb_flags = 0; spin_lock_irqsave(&acb->ccblist_lock, flags); list_add_tail(&ccb->list, &acb->ccb_free_list); spin_unlock_irqrestore(&acb->ccblist_lock, flags); } } atomic_set(&acb->ccboutstandingcount, 0); /* enable all outbound interrupt */ arcmsr_enable_outbound_ints(acb, intmask_org); return rtnval; } return rtnval; } static int arcmsr_bus_reset(struct scsi_cmnd *cmd) { struct AdapterControlBlock *acb; uint32_t intmask_org, outbound_doorbell; int retry_count = 0; int 
rtn = FAILED; acb = (struct AdapterControlBlock *) cmd->device->host->hostdata; printk(KERN_ERR "arcmsr: executing bus reset eh.....num_resets = %d, num_aborts = %d \n", acb->num_resets, acb->num_aborts); acb->num_resets++; switch(acb->adapter_type){ case ACB_ADAPTER_TYPE_A:{ if (acb->acb_flags & ACB_F_BUS_RESET){ long timeout; printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n"); timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ); if (timeout) { return SUCCESS; } } acb->acb_flags |= ACB_F_BUS_RESET; if (!arcmsr_iop_reset(acb)) { struct MessageUnit_A __iomem *reg; reg = acb->pmuA; arcmsr_hardware_reset(acb); acb->acb_flags &= ~ACB_F_IOP_INITED; sleep_again: ssleep(ARCMSR_SLEEPTIME); if ((readl(&reg->outbound_msgaddr1) & ARCMSR_OUTBOUND_MESG1_FIRMWARE_OK) == 0) { printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count); if (retry_count > ARCMSR_RETRYCOUNT) { acb->fw_flag = FW_DEADLOCK; printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no); return FAILED; } retry_count++; goto sleep_again; } acb->acb_flags |= ACB_F_IOP_INITED; /* disable all outbound interrupt */ intmask_org = arcmsr_disable_outbound_ints(acb); arcmsr_get_firmware_spec(acb); arcmsr_start_adapter_bgrb(acb); /* clear Qbuffer if door bell ringed */ outbound_doorbell = readl(&reg->outbound_doorbell); writel(outbound_doorbell, &reg->outbound_doorbell); /*clear interrupt */ writel(ARCMSR_INBOUND_DRIVER_DATA_READ_OK, &reg->inbound_doorbell); /* enable outbound Post Queue,outbound doorbell Interrupt */ arcmsr_enable_outbound_ints(acb, intmask_org); atomic_set(&acb->rq_map_token, 16); atomic_set(&acb->ante_token_value, 16); acb->fw_flag = FW_NORMAL; mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); acb->acb_flags &= ~ACB_F_BUS_RESET; rtn = SUCCESS; printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n"); } else { acb->acb_flags &= 
~ACB_F_BUS_RESET; atomic_set(&acb->rq_map_token, 16); atomic_set(&acb->ante_token_value, 16); acb->fw_flag = FW_NORMAL; mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ)); rtn = SUCCESS; } break; } case ACB_ADAPTER_TYPE_B:{ acb->acb_flags |= ACB_F_BUS_RESET; if (!arcmsr_iop_reset(acb)) { acb->acb_flags &= ~ACB_F_BUS_RESET; rtn = FAILED; } else { acb->acb_flags &= ~ACB_F_BUS_RESET; atomic_set(&acb->rq_map_token, 16); atomic_set(&acb->ante_token_value, 16); acb->fw_flag = FW_NORMAL; mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); rtn = SUCCESS; } break; } case ACB_ADAPTER_TYPE_C:{ if (acb->acb_flags & ACB_F_BUS_RESET) { long timeout; printk(KERN_ERR "arcmsr: there is an bus reset eh proceeding.......\n"); timeout = wait_event_timeout(wait_q, (acb->acb_flags & ACB_F_BUS_RESET) == 0, 220*HZ); if (timeout) { return SUCCESS; } } acb->acb_flags |= ACB_F_BUS_RESET; if (!arcmsr_iop_reset(acb)) { struct MessageUnit_C __iomem *reg; reg = acb->pmuC; arcmsr_hardware_reset(acb); acb->acb_flags &= ~ACB_F_IOP_INITED; sleep: ssleep(ARCMSR_SLEEPTIME); if ((readl(&reg->host_diagnostic) & 0x04) != 0) { printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, retry=%d\n", acb->host->host_no, retry_count); if (retry_count > ARCMSR_RETRYCOUNT) { acb->fw_flag = FW_DEADLOCK; printk(KERN_ERR "arcmsr%d: waiting for hw bus reset return, RETRY TERMINATED!!\n", acb->host->host_no); return FAILED; } retry_count++; goto sleep; } acb->acb_flags |= ACB_F_IOP_INITED; /* disable all outbound interrupt */ intmask_org = arcmsr_disable_outbound_ints(acb); arcmsr_get_firmware_spec(acb); arcmsr_start_adapter_bgrb(acb); /* clear Qbuffer if door bell ringed */ outbound_doorbell = readl(&reg->outbound_doorbell); writel(outbound_doorbell, &reg->outbound_doorbell_clear); /*clear interrupt */ writel(ARCMSR_HBCMU_DRV2IOP_DATA_READ_OK, &reg->inbound_doorbell); /* enable outbound Post Queue,outbound doorbell Interrupt */ arcmsr_enable_outbound_ints(acb, intmask_org); 
atomic_set(&acb->rq_map_token, 16); atomic_set(&acb->ante_token_value, 16); acb->fw_flag = FW_NORMAL; mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6 * HZ)); acb->acb_flags &= ~ACB_F_BUS_RESET; rtn = SUCCESS; printk(KERN_ERR "arcmsr: scsi bus reset eh returns with success\n"); } else { acb->acb_flags &= ~ACB_F_BUS_RESET; atomic_set(&acb->rq_map_token, 16); atomic_set(&acb->ante_token_value, 16); acb->fw_flag = FW_NORMAL; mod_timer(&acb->eternal_timer, jiffies + msecs_to_jiffies(6*HZ)); rtn = SUCCESS; } break; } } return rtn; } static int arcmsr_abort_one_cmd(struct AdapterControlBlock *acb, struct CommandControlBlock *ccb) { int rtn; rtn = arcmsr_polling_ccbdone(acb, ccb); return rtn; } static int arcmsr_abort(struct scsi_cmnd *cmd) { struct AdapterControlBlock *acb = (struct AdapterControlBlock *)cmd->device->host->hostdata; int i = 0; int rtn = FAILED; printk(KERN_NOTICE "arcmsr%d: abort device command of scsi id = %d lun = %d \n", acb->host->host_no, cmd->device->id, cmd->device->lun); acb->acb_flags |= ACB_F_ABORT; acb->num_aborts++; /* ************************************************ ** the all interrupt service routine is locked ** we need to handle it as soon as possible and exit ************************************************ */ if (!atomic_read(&acb->ccboutstandingcount)) return rtn; for (i = 0; i < ARCMSR_MAX_FREECCB_NUM; i++) { struct CommandControlBlock *ccb = acb->pccb_pool[i]; if (ccb->startdone == ARCMSR_CCB_START && ccb->pcmd == cmd) { ccb->startdone = ARCMSR_CCB_ABORTED; rtn = arcmsr_abort_one_cmd(acb, ccb); break; } } acb->acb_flags &= ~ACB_F_ABORT; return rtn; } static const char *arcmsr_info(struct Scsi_Host *host) { struct AdapterControlBlock *acb = (struct AdapterControlBlock *) host->hostdata; static char buf[256]; char *type; int raid6 = 1; switch (acb->pdev->device) { case PCI_DEVICE_ID_ARECA_1110: case PCI_DEVICE_ID_ARECA_1200: case PCI_DEVICE_ID_ARECA_1202: case PCI_DEVICE_ID_ARECA_1210: raid6 = 0; /*FALLTHRU*/ case 
PCI_DEVICE_ID_ARECA_1120: case PCI_DEVICE_ID_ARECA_1130: case PCI_DEVICE_ID_ARECA_1160: case PCI_DEVICE_ID_ARECA_1170: case PCI_DEVICE_ID_ARECA_1201: case PCI_DEVICE_ID_ARECA_1220: case PCI_DEVICE_ID_ARECA_1230: case PCI_DEVICE_ID_ARECA_1260: case PCI_DEVICE_ID_ARECA_1270: case PCI_DEVICE_ID_ARECA_1280: type = "SATA"; break; case PCI_DEVICE_ID_ARECA_1380: case PCI_DEVICE_ID_ARECA_1381: case PCI_DEVICE_ID_ARECA_1680: case PCI_DEVICE_ID_ARECA_1681: case PCI_DEVICE_ID_ARECA_1880: type = "SAS"; break; default: type = "X-TYPE"; break; } sprintf(buf, "Areca %s Host Adapter RAID Controller%s\n %s", type, raid6 ? "( RAID6 capable)" : "", ARCMSR_DRIVER_VERSION); return buf; }
gpl-2.0
codekidX/android_kernel_frost_i9082
drivers/watchdog/txx9wdt.c
4285
6406
/* * txx9wdt: A Hardware Watchdog Driver for TXx9 SoCs * * Copyright (C) 2007 Atsushi Nemoto <anemo@mba.ocn.ne.jp> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/types.h> #include <linux/miscdevice.h> #include <linux/watchdog.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/uaccess.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/io.h> #include <asm/txx9tmr.h> #define TIMER_MARGIN 60 /* Default is 60 seconds */ static int timeout = TIMER_MARGIN; /* in seconds */ module_param(timeout, int, 0); MODULE_PARM_DESC(timeout, "Watchdog timeout in seconds. " "(0<timeout<((2^" __MODULE_STRING(TXX9_TIMER_BITS) ")/(IMCLK/256)), " "default=" __MODULE_STRING(TIMER_MARGIN) ")"); static int nowayout = WATCHDOG_NOWAYOUT; module_param(nowayout, int, 0); MODULE_PARM_DESC(nowayout, "Watchdog cannot be stopped once started " "(default=" __MODULE_STRING(WATCHDOG_NOWAYOUT) ")"); #define WD_TIMER_CCD 7 /* 1/256 */ #define WD_TIMER_CLK (clk_get_rate(txx9_imclk) / (2 << WD_TIMER_CCD)) #define WD_MAX_TIMEOUT ((0xffffffff >> (32 - TXX9_TIMER_BITS)) / WD_TIMER_CLK) static unsigned long txx9wdt_alive; static int expect_close; static struct txx9_tmr_reg __iomem *txx9wdt_reg; static struct clk *txx9_imclk; static DEFINE_SPINLOCK(txx9_lock); static void txx9wdt_ping(void) { spin_lock(&txx9_lock); __raw_writel(TXx9_TMWTMR_TWIE | TXx9_TMWTMR_TWC, &txx9wdt_reg->wtmr); spin_unlock(&txx9_lock); } static void txx9wdt_start(void) { spin_lock(&txx9_lock); __raw_writel(WD_TIMER_CLK * timeout, &txx9wdt_reg->cpra); __raw_writel(WD_TIMER_CCD, &txx9wdt_reg->ccdr); __raw_writel(0, &txx9wdt_reg->tisr); /* clear pending interrupt */ __raw_writel(TXx9_TMTCR_TCE | TXx9_TMTCR_CCDE | TXx9_TMTCR_TMODE_WDOG, &txx9wdt_reg->tcr); 
__raw_writel(TXx9_TMWTMR_TWIE | TXx9_TMWTMR_TWC, &txx9wdt_reg->wtmr); spin_unlock(&txx9_lock); } static void txx9wdt_stop(void) { spin_lock(&txx9_lock); __raw_writel(TXx9_TMWTMR_WDIS, &txx9wdt_reg->wtmr); __raw_writel(__raw_readl(&txx9wdt_reg->tcr) & ~TXx9_TMTCR_TCE, &txx9wdt_reg->tcr); spin_unlock(&txx9_lock); } static int txx9wdt_open(struct inode *inode, struct file *file) { if (test_and_set_bit(0, &txx9wdt_alive)) return -EBUSY; if (__raw_readl(&txx9wdt_reg->tcr) & TXx9_TMTCR_TCE) { clear_bit(0, &txx9wdt_alive); return -EBUSY; } if (nowayout) __module_get(THIS_MODULE); txx9wdt_start(); return nonseekable_open(inode, file); } static int txx9wdt_release(struct inode *inode, struct file *file) { if (expect_close) txx9wdt_stop(); else { printk(KERN_CRIT "txx9wdt: " "Unexpected close, not stopping watchdog!\n"); txx9wdt_ping(); } clear_bit(0, &txx9wdt_alive); expect_close = 0; return 0; } static ssize_t txx9wdt_write(struct file *file, const char __user *data, size_t len, loff_t *ppos) { if (len) { if (!nowayout) { size_t i; expect_close = 0; for (i = 0; i != len; i++) { char c; if (get_user(c, data + i)) return -EFAULT; if (c == 'V') expect_close = 1; } } txx9wdt_ping(); } return len; } static long txx9wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { void __user *argp = (void __user *)arg; int __user *p = argp; int new_timeout; static const struct watchdog_info ident = { .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING | WDIOF_MAGICCLOSE, .firmware_version = 0, .identity = "Hardware Watchdog for TXx9", }; switch (cmd) { case WDIOC_GETSUPPORT: return copy_to_user(argp, &ident, sizeof(ident)) ? 
-EFAULT : 0; case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: return put_user(0, p); case WDIOC_KEEPALIVE: txx9wdt_ping(); return 0; case WDIOC_SETTIMEOUT: if (get_user(new_timeout, p)) return -EFAULT; if (new_timeout < 1 || new_timeout > WD_MAX_TIMEOUT) return -EINVAL; timeout = new_timeout; txx9wdt_stop(); txx9wdt_start(); /* Fall */ case WDIOC_GETTIMEOUT: return put_user(timeout, p); default: return -ENOTTY; } } static const struct file_operations txx9wdt_fops = { .owner = THIS_MODULE, .llseek = no_llseek, .write = txx9wdt_write, .unlocked_ioctl = txx9wdt_ioctl, .open = txx9wdt_open, .release = txx9wdt_release, }; static struct miscdevice txx9wdt_miscdev = { .minor = WATCHDOG_MINOR, .name = "watchdog", .fops = &txx9wdt_fops, }; static int __init txx9wdt_probe(struct platform_device *dev) { struct resource *res; int ret; txx9_imclk = clk_get(NULL, "imbus_clk"); if (IS_ERR(txx9_imclk)) { ret = PTR_ERR(txx9_imclk); txx9_imclk = NULL; goto exit; } ret = clk_enable(txx9_imclk); if (ret) { clk_put(txx9_imclk); txx9_imclk = NULL; goto exit; } res = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!res) goto exit_busy; if (!devm_request_mem_region(&dev->dev, res->start, resource_size(res), "txx9wdt")) goto exit_busy; txx9wdt_reg = devm_ioremap(&dev->dev, res->start, resource_size(res)); if (!txx9wdt_reg) goto exit_busy; ret = misc_register(&txx9wdt_miscdev); if (ret) { goto exit; } printk(KERN_INFO "Hardware Watchdog Timer for TXx9: " "timeout=%d sec (max %ld) (nowayout= %d)\n", timeout, WD_MAX_TIMEOUT, nowayout); return 0; exit_busy: ret = -EBUSY; exit: if (txx9_imclk) { clk_disable(txx9_imclk); clk_put(txx9_imclk); } return ret; } static int __exit txx9wdt_remove(struct platform_device *dev) { misc_deregister(&txx9wdt_miscdev); clk_disable(txx9_imclk); clk_put(txx9_imclk); return 0; } static void txx9wdt_shutdown(struct platform_device *dev) { txx9wdt_stop(); } static struct platform_driver txx9wdt_driver = { .remove = __exit_p(txx9wdt_remove), .shutdown = 
txx9wdt_shutdown, .driver = { .name = "txx9wdt", .owner = THIS_MODULE, }, }; static int __init watchdog_init(void) { return platform_driver_probe(&txx9wdt_driver, txx9wdt_probe); } static void __exit watchdog_exit(void) { platform_driver_unregister(&txx9wdt_driver); } module_init(watchdog_init); module_exit(watchdog_exit); MODULE_DESCRIPTION("TXx9 Watchdog Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR); MODULE_ALIAS("platform:txx9wdt");
gpl-2.0