repo_name: string
path: string
copies: string
size: string
content: string
license: string
slayher/android_kernel_omap
crypto/lrw.c
4915
7802
/* LRW: as defined by Cyril Guyot in * http://grouper.ieee.org/groups/1619/email/pdf00017.pdf * * Copyright (c) 2006 Rik Snel <rsnel@cube.dyndns.org> * * Based on ecb.c * Copyright (c) 2006 Herbert Xu <herbert@gondor.apana.org.au> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ /* This implementation is checked against the test vectors in the above * document and by a test vector provided by Ken Buchanan at * http://www.mail-archive.com/stds-p1619@listserv.ieee.org/msg00173.html * * The test vectors are included in the testing module tcrypt.[ch] */ #include <crypto/algapi.h> #include <linux/err.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/scatterlist.h> #include <linux/slab.h> #include <crypto/b128ops.h> #include <crypto/gf128mul.h> struct priv { struct crypto_cipher *child; /* optimizes multiplying a random (non incrementing, as at the * start of a new sector) value with key2, we could also have * used 4k optimization tables or no optimization at all. In the * latter case we would have to store key2 here */ struct gf128mul_64k *table; /* stores: * key2*{ 0,0,...0,0,0,0,1 }, key2*{ 0,0,...0,0,0,1,1 }, * key2*{ 0,0,...0,0,1,1,1 }, key2*{ 0,0,...0,1,1,1,1 } * key2*{ 0,0,...1,1,1,1,1 }, etc * needed for optimized multiplication of incrementing values * with key2 */ be128 mulinc[128]; }; static inline void setbit128_bbe(void *b, int bit) { __set_bit(bit ^ (0x80 - #ifdef __BIG_ENDIAN BITS_PER_LONG #else BITS_PER_BYTE #endif ), b); } static int setkey(struct crypto_tfm *parent, const u8 *key, unsigned int keylen) { struct priv *ctx = crypto_tfm_ctx(parent); struct crypto_cipher *child = ctx->child; int err, i; be128 tmp = { 0 }; int bsize = crypto_cipher_blocksize(child); crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); crypto_cipher_set_flags(child, crypto_tfm_get_flags(parent) & CRYPTO_TFM_REQ_MASK); if ((err = crypto_cipher_setkey(child, key, keylen - bsize))) return err; crypto_tfm_set_flags(parent, crypto_cipher_get_flags(child) & CRYPTO_TFM_RES_MASK); if (ctx->table) gf128mul_free_64k(ctx->table); /* initialize multiplication table for Key2 */ ctx->table = gf128mul_init_64k_bbe((be128 *)(key + keylen - bsize)); if (!ctx->table) return -ENOMEM; /* initialize optimization table */ for (i = 0; i < 128; i++) { setbit128_bbe(&tmp, i); ctx->mulinc[i] = tmp; gf128mul_64k_bbe(&ctx->mulinc[i], ctx->table); } return 0; } struct sinfo { be128 t; struct crypto_tfm *tfm; void (*fn)(struct crypto_tfm *, u8 *, const u8 *); }; static inline void inc(be128 *iv) { be64_add_cpu(&iv->b, 1); if (!iv->b) be64_add_cpu(&iv->a, 1); } static inline void lrw_round(struct sinfo *s, void *dst, const void *src) { be128_xor(dst, &s->t, src); /* PP <- T xor P */ s->fn(s->tfm, dst, dst); /* CC <- E(Key2,PP) */ be128_xor(dst, dst, &s->t); /* C <- T xor CC */ } /* this returns the number of consecutive 1 bits starting * from the right, get_index128(00 00 00 00 00 00 ... 
00 00 10 FB) = 2 */ static inline int get_index128(be128 *block) { int x; __be32 *p = (__be32 *) block; for (p += 3, x = 0; x < 128; p--, x += 32) { u32 val = be32_to_cpup(p); if (!~val) continue; return x + ffz(val); } return x; } static int crypt(struct blkcipher_desc *d, struct blkcipher_walk *w, struct priv *ctx, void (*fn)(struct crypto_tfm *, u8 *, const u8 *)) { int err; unsigned int avail; const int bs = crypto_cipher_blocksize(ctx->child); struct sinfo s = { .tfm = crypto_cipher_tfm(ctx->child), .fn = fn }; be128 *iv; u8 *wsrc; u8 *wdst; err = blkcipher_walk_virt(d, w); if (!(avail = w->nbytes)) return err; wsrc = w->src.virt.addr; wdst = w->dst.virt.addr; /* calculate first value of T */ iv = (be128 *)w->iv; s.t = *iv; /* T <- I*Key2 */ gf128mul_64k_bbe(&s.t, ctx->table); goto first; for (;;) { do { /* T <- I*Key2, using the optimization * discussed in the specification */ be128_xor(&s.t, &s.t, &ctx->mulinc[get_index128(iv)]); inc(iv); first: lrw_round(&s, wdst, wsrc); wsrc += bs; wdst += bs; } while ((avail -= bs) >= bs); err = blkcipher_walk_done(d, w, avail); if (!(avail = w->nbytes)) break; wsrc = w->src.virt.addr; wdst = w->dst.virt.addr; } return err; } static int encrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk w; blkcipher_walk_init(&w, dst, src, nbytes); return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->child)->cia_encrypt); } static int decrypt(struct blkcipher_desc *desc, struct scatterlist *dst, struct scatterlist *src, unsigned int nbytes) { struct priv *ctx = crypto_blkcipher_ctx(desc->tfm); struct blkcipher_walk w; blkcipher_walk_init(&w, dst, src, nbytes); return crypt(desc, &w, ctx, crypto_cipher_alg(ctx->child)->cia_decrypt); } static int init_tfm(struct crypto_tfm *tfm) { struct crypto_cipher *cipher; struct crypto_instance *inst = (void *)tfm->__crt_alg; struct crypto_spawn *spawn = crypto_instance_ctx(inst); struct priv *ctx = crypto_tfm_ctx(tfm); u32 *flags = &tfm->crt_flags; cipher = crypto_spawn_cipher(spawn); if (IS_ERR(cipher)) return PTR_ERR(cipher); if (crypto_cipher_blocksize(cipher) != 16) { *flags |= CRYPTO_TFM_RES_BAD_BLOCK_LEN; return -EINVAL; } ctx->child = cipher; return 0; } static void exit_tfm(struct crypto_tfm *tfm) { struct priv *ctx = crypto_tfm_ctx(tfm); if (ctx->table) gf128mul_free_64k(ctx->table); crypto_free_cipher(ctx->child); } static struct crypto_instance *alloc(struct rtattr **tb) { struct crypto_instance *inst; struct crypto_alg *alg; int err; err = crypto_check_attr_type(tb, CRYPTO_ALG_TYPE_BLKCIPHER); if (err) return ERR_PTR(err); alg = crypto_get_attr_alg(tb, CRYPTO_ALG_TYPE_CIPHER, CRYPTO_ALG_TYPE_MASK); if (IS_ERR(alg)) return ERR_CAST(alg); inst = crypto_alloc_instance("lrw", alg); if (IS_ERR(inst)) goto out_put_alg; inst->alg.cra_flags = CRYPTO_ALG_TYPE_BLKCIPHER; inst->alg.cra_priority = alg->cra_priority; inst->alg.cra_blocksize = alg->cra_blocksize; if (alg->cra_alignmask < 7) inst->alg.cra_alignmask = 7; else inst->alg.cra_alignmask = alg->cra_alignmask; inst->alg.cra_type = &crypto_blkcipher_type; if (!(alg->cra_blocksize % 4)) inst->alg.cra_alignmask |= 3; inst->alg.cra_blkcipher.ivsize = alg->cra_blocksize; inst->alg.cra_blkcipher.min_keysize = alg->cra_cipher.cia_min_keysize + alg->cra_blocksize; inst->alg.cra_blkcipher.max_keysize = alg->cra_cipher.cia_max_keysize + alg->cra_blocksize; inst->alg.cra_ctxsize = sizeof(struct priv); inst->alg.cra_init = init_tfm; inst->alg.cra_exit 
= exit_tfm; inst->alg.cra_blkcipher.setkey = setkey; inst->alg.cra_blkcipher.encrypt = encrypt; inst->alg.cra_blkcipher.decrypt = decrypt; out_put_alg: crypto_mod_put(alg); return inst; } static void free(struct crypto_instance *inst) { crypto_drop_spawn(crypto_instance_ctx(inst)); kfree(inst); } static struct crypto_template crypto_tmpl = { .name = "lrw", .alloc = alloc, .free = free, .module = THIS_MODULE, }; static int __init crypto_module_init(void) { return crypto_register_template(&crypto_tmpl); } static void __exit crypto_module_exit(void) { crypto_unregister_template(&crypto_tmpl); } module_init(crypto_module_init); module_exit(crypto_module_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("LRW block cipher mode");
gpl-2.0
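A minimal sketch of the per-block round structure used by lrw.c above (PP <- T xor P, CC <- E(PP), C <- T xor CC), lifted out of the kernel crypto API so it compiles standalone. The xor128() helper and toy_encrypt() stand-in for the child block cipher are hypothetical; the real code uses be128_xor() and the child cipher's cia_encrypt, and T itself comes from the GF(2^128) multiplication I*Key2 that the mulinc table accelerates.

#include <stdint.h>
#include <stdio.h>

static void xor128(uint8_t *dst, const uint8_t *a, const uint8_t *b)
{
	int i;

	for (i = 0; i < 16; i++)
		dst[i] = a[i] ^ b[i];
}

/* toy stand-in for the child cipher call s->fn(); NOT a real cipher */
static void toy_encrypt(uint8_t *block)
{
	int i;

	for (i = 0; i < 16; i++)
		block[i] = (uint8_t)(block[i] + 0x5a);
}

static void lrw_round_sketch(uint8_t *dst, const uint8_t *src, const uint8_t *t)
{
	xor128(dst, t, src);	/* PP <- T xor P */
	toy_encrypt(dst);	/* CC <- E(PP), the child cipher in lrw.c */
	xor128(dst, dst, t);	/* C  <- T xor CC */
}

int main(void)
{
	uint8_t t[16] = { 0 }, src[16] = { 0 }, dst[16];
	int i;

	t[15] = 1;	/* pretend T = I * Key2 has already been computed */
	lrw_round_sketch(dst, src, t);
	for (i = 0; i < 16; i++)
		printf("%02x", dst[i]);
	printf("\n");
	return 0;
}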
adrientetar/semc-msm-3.4
arch/arm/mach-omap1/pm_bus.c
5171
1729
/* * Runtime PM support code for OMAP1 * * Author: Kevin Hilman, Deep Root Systems, LLC * * Copyright (C) 2010 Texas Instruments, Inc. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/pm_runtime.h> #include <linux/pm_clock.h> #include <linux/platform_device.h> #include <linux/mutex.h> #include <linux/clk.h> #include <linux/err.h> #include <plat/omap_device.h> #include <plat/omap-pm.h> #ifdef CONFIG_PM_RUNTIME static int omap1_pm_runtime_suspend(struct device *dev) { int ret; dev_dbg(dev, "%s\n", __func__); ret = pm_generic_runtime_suspend(dev); if (ret) return ret; ret = pm_clk_suspend(dev); if (ret) { pm_generic_runtime_resume(dev); return ret; } return 0; } static int omap1_pm_runtime_resume(struct device *dev) { dev_dbg(dev, "%s\n", __func__); pm_clk_resume(dev); return pm_generic_runtime_resume(dev); } static struct dev_pm_domain default_pm_domain = { .ops = { .runtime_suspend = omap1_pm_runtime_suspend, .runtime_resume = omap1_pm_runtime_resume, USE_PLATFORM_PM_SLEEP_OPS }, }; #define OMAP1_PM_DOMAIN (&default_pm_domain) #else #define OMAP1_PM_DOMAIN NULL #endif /* CONFIG_PM_RUNTIME */ static struct pm_clk_notifier_block platform_bus_notifier = { .pm_domain = OMAP1_PM_DOMAIN, .con_ids = { "ick", "fck", NULL, }, }; static int __init omap1_pm_runtime_init(void) { if (!cpu_class_is_omap1()) return -ENODEV; pm_clk_add_notifier(&platform_bus_type, &platform_bus_notifier); return 0; } core_initcall(omap1_pm_runtime_init);
gpl-2.0
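The suspend path in pm_bus.c above is a two-step operation with rollback: the generic runtime suspend runs first, and if the clock suspend then fails, a generic resume undoes step one so the device is not left half-suspended. A compilable sketch of that pattern, with hypothetical stand-ins for pm_generic_runtime_suspend(), pm_clk_suspend() and pm_generic_runtime_resume():

#include <stdio.h>

/* hypothetical stand-ins for pm_generic_runtime_suspend(),
 * pm_clk_suspend() and pm_generic_runtime_resume() */
static int generic_suspend(void) { return 0; }
static int generic_resume(void) { return 0; }
static int clk_suspend(void) { return -1; /* pretend the clock op fails */ }

static int runtime_suspend(void)
{
	int ret;

	ret = generic_suspend();
	if (ret)
		return ret;

	ret = clk_suspend();
	if (ret) {
		/* roll back step one so the device is not left half-suspended */
		generic_resume();
		return ret;
	}
	return 0;
}

int main(void)
{
	printf("runtime_suspend() -> %d\n", runtime_suspend());
	return 0;
}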
garwynn/D710SPR_GB27_Kernel
fs/hfs/bfind.c
8499
4663
/* * linux/fs/hfs/bfind.c * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) * (C) 2003 Ardis Technologies <roman@ardistech.com> * * Search routines for btrees */ #include <linux/slab.h> #include "btree.h" int hfs_find_init(struct hfs_btree *tree, struct hfs_find_data *fd) { void *ptr; fd->tree = tree; fd->bnode = NULL; ptr = kmalloc(tree->max_key_len * 2 + 4, GFP_KERNEL); if (!ptr) return -ENOMEM; fd->search_key = ptr; fd->key = ptr + tree->max_key_len + 2; dprint(DBG_BNODE_REFS, "find_init: %d (%p)\n", tree->cnid, __builtin_return_address(0)); mutex_lock(&tree->tree_lock); return 0; } void hfs_find_exit(struct hfs_find_data *fd) { hfs_bnode_put(fd->bnode); kfree(fd->search_key); dprint(DBG_BNODE_REFS, "find_exit: %d (%p)\n", fd->tree->cnid, __builtin_return_address(0)); mutex_unlock(&fd->tree->tree_lock); fd->tree = NULL; } /* Find the record in bnode that best matches key (not greater than...)*/ int __hfs_brec_find(struct hfs_bnode *bnode, struct hfs_find_data *fd) { int cmpval; u16 off, len, keylen; int rec; int b, e; int res; b = 0; e = bnode->num_recs - 1; res = -ENOENT; do { rec = (e + b) / 2; len = hfs_brec_lenoff(bnode, rec, &off); keylen = hfs_brec_keylen(bnode, rec); if (keylen == 0) { res = -EINVAL; goto fail; } hfs_bnode_read(bnode, fd->key, off, keylen); cmpval = bnode->tree->keycmp(fd->key, fd->search_key); if (!cmpval) { e = rec; res = 0; goto done; } if (cmpval < 0) b = rec + 1; else e = rec - 1; } while (b <= e); if (rec != e && e >= 0) { len = hfs_brec_lenoff(bnode, e, &off); keylen = hfs_brec_keylen(bnode, e); if (keylen == 0) { res = -EINVAL; goto fail; } hfs_bnode_read(bnode, fd->key, off, keylen); } done: fd->record = e; fd->keyoffset = off; fd->keylength = keylen; fd->entryoffset = off + keylen; fd->entrylength = len - keylen; fail: return res; } /* Traverse a B*Tree from the root to a leaf finding best fit to key */ /* Return allocated copy of node found, set recnum to best record */ int hfs_brec_find(struct hfs_find_data *fd) { struct hfs_btree *tree; struct hfs_bnode *bnode; u32 nidx, parent; __be32 data; int height, res; tree = fd->tree; if (fd->bnode) hfs_bnode_put(fd->bnode); fd->bnode = NULL; nidx = tree->root; if (!nidx) return -ENOENT; height = tree->depth; res = 0; parent = 0; for (;;) { bnode = hfs_bnode_find(tree, nidx); if (IS_ERR(bnode)) { res = PTR_ERR(bnode); bnode = NULL; break; } if (bnode->height != height) goto invalid; if (bnode->type != (--height ? 
HFS_NODE_INDEX : HFS_NODE_LEAF)) goto invalid; bnode->parent = parent; res = __hfs_brec_find(bnode, fd); if (!height) break; if (fd->record < 0) goto release; parent = nidx; hfs_bnode_read(bnode, &data, fd->entryoffset, 4); nidx = be32_to_cpu(data); hfs_bnode_put(bnode); } fd->bnode = bnode; return res; invalid: printk(KERN_ERR "hfs: inconsistency in B*Tree (%d,%d,%d,%u,%u)\n", height, bnode->height, bnode->type, nidx, parent); res = -EIO; release: hfs_bnode_put(bnode); return res; } int hfs_brec_read(struct hfs_find_data *fd, void *rec, int rec_len) { int res; res = hfs_brec_find(fd); if (res) return res; if (fd->entrylength > rec_len) return -EINVAL; hfs_bnode_read(fd->bnode, rec, fd->entryoffset, fd->entrylength); return 0; } int hfs_brec_goto(struct hfs_find_data *fd, int cnt) { struct hfs_btree *tree; struct hfs_bnode *bnode; int idx, res = 0; u16 off, len, keylen; bnode = fd->bnode; tree = bnode->tree; if (cnt < 0) { cnt = -cnt; while (cnt > fd->record) { cnt -= fd->record + 1; fd->record = bnode->num_recs - 1; idx = bnode->prev; if (!idx) { res = -ENOENT; goto out; } hfs_bnode_put(bnode); bnode = hfs_bnode_find(tree, idx); if (IS_ERR(bnode)) { res = PTR_ERR(bnode); bnode = NULL; goto out; } } fd->record -= cnt; } else { while (cnt >= bnode->num_recs - fd->record) { cnt -= bnode->num_recs - fd->record; fd->record = 0; idx = bnode->next; if (!idx) { res = -ENOENT; goto out; } hfs_bnode_put(bnode); bnode = hfs_bnode_find(tree, idx); if (IS_ERR(bnode)) { res = PTR_ERR(bnode); bnode = NULL; goto out; } } fd->record += cnt; } len = hfs_brec_lenoff(bnode, fd->record, &off); keylen = hfs_brec_keylen(bnode, fd->record); if (keylen == 0) { res = -EINVAL; goto out; } fd->keyoffset = off; fd->keylength = keylen; fd->entryoffset = off + keylen; fd->entrylength = len - keylen; hfs_bnode_read(bnode, fd->key, off, keylen); out: fd->bnode = bnode; return res; }
gpl-2.0
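__hfs_brec_find() above is a binary search that lands on the best record not greater than the search key: on an exact match it returns with e == rec, otherwise the loop exits with e pointing at the last smaller record (or -1). A standalone sketch of the same b/e bookkeeping over a plain int array (find_le() and the array are illustrative, not part of the HFS code):

#include <stdio.h>

static int find_le(const int *keys, int nrecs, int want, int *exact)
{
	int b = 0, e = nrecs - 1, rec;

	while (b <= e) {
		rec = (e + b) / 2;
		if (keys[rec] == want) {
			*exact = 1;
			return rec;	/* exact match, like res = 0 */
		}
		if (keys[rec] < want)
			b = rec + 1;
		else
			e = rec - 1;
	}
	*exact = 0;
	return e;	/* best record <= want, or -1 (like res = -ENOENT) */
}

int main(void)
{
	int keys[] = { 3, 7, 10, 42 };
	int exact;
	int idx = find_le(keys, 4, 11, &exact);

	printf("idx=%d exact=%d\n", idx, exact);	/* idx=2 (key 10), exact=0 */
	return 0;
}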
kaostao/linux
arch/sh/boards/mach-x3proto/ilsel.c
12339
4202
/* * arch/sh/boards/mach-x3proto/ilsel.c * * Helper routines for SH-X3 proto board ILSEL. * * Copyright (C) 2007 - 2010 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/bitmap.h> #include <linux/io.h> #include <mach/ilsel.h> /* * ILSEL is split across: * * ILSEL0 - 0xb8100004 [ Levels 1 - 4 ] * ILSEL1 - 0xb8100006 [ Levels 5 - 8 ] * ILSEL2 - 0xb8100008 [ Levels 9 - 12 ] * ILSEL3 - 0xb810000a [ Levels 13 - 15 ] * * With each level being relative to an ilsel_source_t. */ #define ILSEL_BASE 0xb8100004 #define ILSEL_LEVELS 15 /* * ILSEL level map, in descending order from the highest level down. * * Supported levels are 1 - 15 spread across ILSEL0 - ILSEL3, mapping * directly to IRLs. As the IRQs are numbered in reverse order relative * to the interrupt level, the level map is carefully managed to ensure a * 1:1 mapping between the bit position and the IRQ number. * * This careful construction allows ilsel_enable*() to be referenced * directly for hooking up an ILSEL set and getting back an IRQ which can * subsequently be used for internal accounting in the (optional) disable * path. */ static unsigned long ilsel_level_map; static inline unsigned int ilsel_offset(unsigned int bit) { return ILSEL_LEVELS - bit - 1; } static inline unsigned long mk_ilsel_addr(unsigned int bit) { return ILSEL_BASE + ((ilsel_offset(bit) >> 1) & ~0x1); } static inline unsigned int mk_ilsel_shift(unsigned int bit) { return (ilsel_offset(bit) & 0x3) << 2; } static void __ilsel_enable(ilsel_source_t set, unsigned int bit) { unsigned int tmp, shift; unsigned long addr; pr_notice("enabling ILSEL set %d\n", set); addr = mk_ilsel_addr(bit); shift = mk_ilsel_shift(bit); pr_debug("%s: bit#%d: addr - 0x%08lx (shift %d, set %d)\n", __func__, bit, addr, shift, set); tmp = __raw_readw(addr); tmp &= ~(0xf << shift); tmp |= set << shift; __raw_writew(tmp, addr); } /** * ilsel_enable - Enable an ILSEL set. * @set: ILSEL source (see ilsel_source_t enum in include/asm-sh/ilsel.h). * * Enables a given non-aliased ILSEL source (<= ILSEL_KEY) at the highest * available interrupt level. Callers should take care to order callsites * noting descending interrupt levels. Aliasing FPGA and external board * IRQs need to use ilsel_enable_fixed(). * * The return value is an IRQ number that can later be taken down with * ilsel_disable(). */ int ilsel_enable(ilsel_source_t set) { unsigned int bit; if (unlikely(set > ILSEL_KEY)) { pr_err("Aliased sources must use ilsel_enable_fixed()\n"); return -EINVAL; } do { bit = find_first_zero_bit(&ilsel_level_map, ILSEL_LEVELS); } while (test_and_set_bit(bit, &ilsel_level_map)); __ilsel_enable(set, bit); return bit; } EXPORT_SYMBOL_GPL(ilsel_enable); /** * ilsel_enable_fixed - Enable an ILSEL set at a fixed interrupt level * @set: ILSEL source (see ilsel_source_t enum in include/asm-sh/ilsel.h). * @level: Interrupt level (1 - 15) * * Enables a given ILSEL source at a fixed interrupt level. Necessary * both for level reservation as well as for aliased sources that only * exist on special ILSEL#s. * * Returns an IRQ number (as ilsel_enable()). 
*/ int ilsel_enable_fixed(ilsel_source_t set, unsigned int level) { unsigned int bit = ilsel_offset(level - 1); if (test_and_set_bit(bit, &ilsel_level_map)) return -EBUSY; __ilsel_enable(set, bit); return bit; } EXPORT_SYMBOL_GPL(ilsel_enable_fixed); /** * ilsel_disable - Disable an ILSEL set * @irq: Bit position for ILSEL set value (retval from enable routines) * * Disable a previously enabled ILSEL set. */ void ilsel_disable(unsigned int irq) { unsigned long addr; unsigned int tmp; pr_notice("disabling ILSEL set %d\n", irq); addr = mk_ilsel_addr(irq); tmp = __raw_readw(addr); tmp &= ~(0xf << mk_ilsel_shift(irq)); __raw_writew(tmp, addr); clear_bit(irq, &ilsel_level_map); } EXPORT_SYMBOL_GPL(ilsel_disable);
gpl-2.0
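The address and shift helpers in ilsel.c pack four 4-bit level slots per 16-bit register, with bit positions reversed relative to interrupt levels. A standalone sketch that reuses the same arithmetic to print which register and nibble each bit maps to (ILSEL_BASE and ILSEL_LEVELS are copied from the file; the printout itself is illustrative):

#include <stdio.h>

#define ILSEL_BASE	0xb8100004UL
#define ILSEL_LEVELS	15

static unsigned int ilsel_offset(unsigned int bit)
{
	return ILSEL_LEVELS - bit - 1;
}

static unsigned long mk_ilsel_addr(unsigned int bit)
{
	return ILSEL_BASE + ((ilsel_offset(bit) >> 1) & ~0x1);
}

static unsigned int mk_ilsel_shift(unsigned int bit)
{
	return (ilsel_offset(bit) & 0x3) << 2;
}

int main(void)
{
	unsigned int bit;

	/* bit 0 is the highest level (15) in ILSEL3; bit 14 is level 1 in ILSEL0 */
	for (bit = 0; bit < ILSEL_LEVELS; bit++)
		printf("bit %2u -> level %2u, reg 0x%08lx, shift %2u\n",
		       bit, ilsel_offset(bit) + 1, mk_ilsel_addr(bit),
		       mk_ilsel_shift(bit));
	return 0;
}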
cybojenix/android_kernel_nvidia_kalamata
drivers/media/video/sn9c102/sn9c102_tas5110c1b.c
12851
4411
/*************************************************************************** * Plug-in for TAS5110C1B image sensor connected to the SN9C1xx PC Camera * * Controllers * * * * Copyright (C) 2004-2007 by Luca Risolia <luca.risolia@studio.unibo.it> * * * * This program is free software; you can redistribute it and/or modify * * it under the terms of the GNU General Public License as published by * * the Free Software Foundation; either version 2 of the License, or * * (at your option) any later version. * * * * This program is distributed in the hope that it will be useful, * * but WITHOUT ANY WARRANTY; without even the implied warranty of * * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * * GNU General Public License for more details. * * * * You should have received a copy of the GNU General Public License * * along with this program; if not, write to the Free Software * * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * ***************************************************************************/ #include "sn9c102_sensor.h" #include "sn9c102_devtable.h" static int tas5110c1b_init(struct sn9c102_device* cam) { int err = 0; err = sn9c102_write_const_regs(cam, {0x01, 0x01}, {0x44, 0x01}, {0x00, 0x10}, {0x00, 0x11}, {0x0a, 0x14}, {0x60, 0x17}, {0x06, 0x18}, {0xfb, 0x19}); err += sn9c102_i2c_write(cam, 0xc0, 0x80); return err; } static int tas5110c1b_set_ctrl(struct sn9c102_device* cam, const struct v4l2_control* ctrl) { int err = 0; switch (ctrl->id) { case V4L2_CID_GAIN: err += sn9c102_i2c_write(cam, 0x20, 0xf6 - ctrl->value); break; default: return -EINVAL; } return err ? -EIO : 0; } static int tas5110c1b_set_crop(struct sn9c102_device* cam, const struct v4l2_rect* rect) { struct sn9c102_sensor* s = sn9c102_get_sensor(cam); int err = 0; u8 h_start = (u8)(rect->left - s->cropcap.bounds.left) + 69, v_start = (u8)(rect->top - s->cropcap.bounds.top) + 9; err += sn9c102_write_reg(cam, h_start, 0x12); err += sn9c102_write_reg(cam, v_start, 0x13); /* Don't change ! 
*/ err += sn9c102_write_reg(cam, 0x14, 0x1a); err += sn9c102_write_reg(cam, 0x0a, 0x1b); err += sn9c102_write_reg(cam, sn9c102_pread_reg(cam, 0x19), 0x19); return err; } static int tas5110c1b_set_pix_format(struct sn9c102_device* cam, const struct v4l2_pix_format* pix) { int err = 0; if (pix->pixelformat == V4L2_PIX_FMT_SN9C10X) err += sn9c102_write_reg(cam, 0x2b, 0x19); else err += sn9c102_write_reg(cam, 0xfb, 0x19); return err; } static const struct sn9c102_sensor tas5110c1b = { .name = "TAS5110C1B", .maintainer = "Luca Risolia <luca.risolia@studio.unibo.it>", .supported_bridge = BRIDGE_SN9C101 | BRIDGE_SN9C102, .sysfs_ops = SN9C102_I2C_WRITE, .frequency = SN9C102_I2C_100KHZ, .interface = SN9C102_I2C_3WIRES, .init = &tas5110c1b_init, .qctrl = { { .id = V4L2_CID_GAIN, .type = V4L2_CTRL_TYPE_INTEGER, .name = "global gain", .minimum = 0x00, .maximum = 0xf6, .step = 0x01, .default_value = 0x40, .flags = 0, }, }, .set_ctrl = &tas5110c1b_set_ctrl, .cropcap = { .bounds = { .left = 0, .top = 0, .width = 352, .height = 288, }, .defrect = { .left = 0, .top = 0, .width = 352, .height = 288, }, }, .set_crop = &tas5110c1b_set_crop, .pix_format = { .width = 352, .height = 288, .pixelformat = V4L2_PIX_FMT_SBGGR8, .priv = 8, }, .set_pix_format = &tas5110c1b_set_pix_format }; int sn9c102_probe_tas5110c1b(struct sn9c102_device* cam) { const struct usb_device_id tas5110c1b_id_table[] = { { USB_DEVICE(0x0c45, 0x6001), }, { USB_DEVICE(0x0c45, 0x6005), }, { USB_DEVICE(0x0c45, 0x60ab), }, { } }; /* Sensor detection is based on USB pid/vid */ if (!sn9c102_match_id(cam, tas5110c1b_id_table)) return -ENODEV; sn9c102_attach_sensor(cam, &tas5110c1b); return 0; }
gpl-2.0
Tilde88/android_kernel_lge_msm8996
drivers/video/msm/ba/msm_ba.c
52
22830
/* * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/errno.h> #include <linux/mutex.h> #include <linux/videodev2.h> #include <media/v4l2-device.h> #include <media/msm_ba.h> #include "msm_ba_internal.h" #include "msm_ba_debug.h" #include "msm_ba_common.h" #define MSM_BA_DEV_NAME "msm_ba_8064" #define MSM_BA_MAX_EVENTS 10 int msm_ba_poll(void *instance, struct file *filp, struct poll_table_struct *wait) { struct msm_ba_inst *inst = instance; int rc = 0; if (!inst) return -EINVAL; poll_wait(filp, &inst->event_handler.wait, wait); if (v4l2_event_pending(&inst->event_handler)) rc |= POLLPRI; return rc; } EXPORT_SYMBOL(msm_ba_poll); int msm_ba_querycap(void *instance, struct v4l2_capability *cap) { struct msm_ba_inst *inst = instance; if (!inst || !cap) { dprintk(BA_ERR, "Invalid input, inst = 0x%p, cap = 0x%p", inst, cap); return -EINVAL; } strlcpy(cap->driver, MSM_BA_DRV_NAME, sizeof(cap->driver)); strlcpy(cap->card, MSM_BA_DEV_NAME, sizeof(cap->card)); cap->bus_info[0] = 0; cap->version = MSM_BA_VERSION; cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; memset(cap->reserved, 0x00, sizeof(cap->reserved)); return 0; } EXPORT_SYMBOL(msm_ba_querycap); int msm_ba_g_priority(void *instance, enum v4l2_priority *prio) { struct msm_ba_inst *inst = instance; struct msm_ba_input *ba_input = NULL; int rc = 0; if (!inst || !prio) { dprintk(BA_ERR, "Invalid prio, inst = 0x%p, prio = 0x%p", inst, prio); return -EINVAL; } ba_input = msm_ba_find_input(inst->sd_input.index); if (!ba_input) { dprintk(BA_ERR, "Could not find input index: %d", inst->sd_input.index); return -EINVAL; } *prio = ba_input->prio; return rc; } EXPORT_SYMBOL(msm_ba_g_priority); int msm_ba_s_priority(void *instance, enum v4l2_priority prio) { struct msm_ba_inst *inst = instance; struct msm_ba_input *ba_input = NULL; int rc = 0; dprintk(BA_DBG, "Enter %s, prio: %d", __func__, prio); if (!inst) return -EINVAL; ba_input = msm_ba_find_input(inst->sd_input.index); if (!ba_input) { dprintk(BA_ERR, "Could not find input index: %d", inst->sd_input.index); return -EINVAL; } ba_input->prio = prio; inst->input_prio = prio; return rc; } EXPORT_SYMBOL(msm_ba_s_priority); int msm_ba_s_parm(void *instance, struct v4l2_streamparm *a) { struct msm_ba_inst *inst = instance; if (!inst || !a) return -EINVAL; return -EINVAL; } EXPORT_SYMBOL(msm_ba_s_parm); int msm_ba_enum_input(void *instance, struct v4l2_input *input) { struct msm_ba_input *ba_input = NULL; struct msm_ba_inst *inst = instance; int status = 0; int rc = 0; if (!inst || !input) return -EINVAL; if (input->index >= inst->dev_ctxt->num_inputs) return -EINVAL; ba_input = msm_ba_find_input(input->index); if (ba_input) { input->type = V4L2_INPUT_TYPE_CAMERA; input->std = V4L2_STD_ALL; strlcpy(input->name, ba_input->name, sizeof(input->name)); if (BA_INPUT_HDMI == ba_input->input_type || BA_INPUT_MHL == ba_input->input_type) input->capabilities = V4L2_IN_CAP_CUSTOM_TIMINGS; else input->capabilities = 
V4L2_IN_CAP_STD; dprintk(BA_DBG, "msm_ba_find_input: name %s", input->name); /* get current signal status */ rc = v4l2_subdev_call( ba_input->sd, video, g_input_status, &status); if (rc) { dprintk(BA_ERR, "g_input_status failed (%d) for sd: %s", rc, ba_input->sd->name); } else { input->status = status; ba_input->signal_status = status; } } return rc; } EXPORT_SYMBOL(msm_ba_enum_input); int msm_ba_g_input(void *instance, unsigned int *index) { struct msm_ba_inst *inst = instance; struct msm_ba_input *ba_input = NULL; int rc = 0; if (!inst || !index) return -EINVAL; do { /* First find current input */ ba_input = msm_ba_find_input(inst->sd_input.index); if (ba_input) { if (BA_INPUT_USERTYPE_KERNEL == ba_input->input_user_type) { inst->sd_input.index++; continue; } break; } } while (ba_input); if (ba_input) *index = inst->sd_input.index; else rc = -ENOENT; return rc; } EXPORT_SYMBOL(msm_ba_g_input); int msm_ba_s_input(void *instance, unsigned int index) { struct msm_ba_inst *inst = instance; struct msm_ba_input *ba_input = NULL; int rc = 0; int rc_sig = 0; if (!inst) return -EINVAL; if (index > inst->dev_ctxt->num_inputs) return -EINVAL; /* Find requested input */ ba_input = msm_ba_find_input(index); if (!ba_input) { dprintk(BA_ERR, "Could not find input index: %d", index); return -EINVAL; } if (!ba_input->sd) { dprintk(BA_ERR, "No sd registered"); return -EINVAL; } if (ba_input->in_use && ba_input->prio == V4L2_PRIORITY_RECORD && ba_input->prio != inst->input_prio) { dprintk(BA_WARN, "Input %d in use", index); return -EBUSY; } if (ba_input->ba_out_in_use) { if (inst->ext_ops) { if (inst->restore) { dprintk(BA_DBG, "Stream off in set input: %d", ba_input->bridge_chip_ip); rc_sig = v4l2_subdev_call(ba_input->sd, video, s_stream, 0); if (rc_sig) dprintk(BA_ERR, "%s: Error in stream off. 
rc_sig %d", __func__, rc_sig); } } else { dprintk(BA_WARN, "Sd %d in use", ba_input->ba_out); return -EBUSY; } } rc = v4l2_subdev_call(ba_input->sd, video, s_routing, ba_input->bridge_chip_ip, 0, 0); if (rc) { dprintk(BA_ERR, "Error: %d setting input: %d", rc, ba_input->bridge_chip_ip); return rc; } msm_ba_reset_ip_in_use_from_sd(ba_input->sd); inst->sd_input.index = index; strlcpy(inst->sd_input.name, ba_input->name, sizeof(inst->sd_input.name)); inst->sd = ba_input->sd; ba_input->in_use = 1; /* get current signal status */ rc_sig = v4l2_subdev_call( ba_input->sd, video, g_input_status, &ba_input->signal_status); dprintk(BA_DBG, "Set input %s : %d - signal status: %d", ba_input->name, index, ba_input->signal_status); if (!rc_sig && !ba_input->signal_status) { struct v4l2_event sd_event = { .id = 0, .type = V4L2_EVENT_MSM_BA_SIGNAL_IN_LOCK}; int *ptr = (int *)sd_event.u.data; ptr[0] = index; ptr[1] = ba_input->signal_status; msm_ba_queue_v4l2_event(inst, &sd_event); } return rc; } EXPORT_SYMBOL(msm_ba_s_input); int msm_ba_enum_output(void *instance, struct v4l2_output *output) { struct msm_ba_input *ba_input = NULL; struct msm_ba_inst *inst = instance; int rc = 0; if (!inst || !output) return -EINVAL; ba_input = msm_ba_find_output(output->index); if (!ba_input) return -EINVAL; output->type = V4L2_OUTPUT_TYPE_ANALOG; output->std = V4L2_STD_ALL; strlcpy(output->name, ba_input->sd->name, sizeof(output->name)); output->capabilities = V4L2_OUT_CAP_STD; return rc; } EXPORT_SYMBOL(msm_ba_enum_output); int msm_ba_g_output(void *instance, unsigned int *index) { struct msm_ba_inst *inst = instance; int rc = 0; if (!inst || !index) return -EINVAL; *index = inst->sd_output.index; return rc; } EXPORT_SYMBOL(msm_ba_g_output); int msm_ba_s_output(void *instance, unsigned int index) { struct msm_ba_inst *inst = instance; struct msm_ba_input *ba_input = NULL; int rc = 0; if (!inst) return -EINVAL; ba_input = msm_ba_find_output(index); if (ba_input) { if (!ba_input->sd) { dprintk(BA_ERR, "No sd registered"); return -EINVAL; } ba_input->ba_out = index; inst->sd_output.index = index; inst->sd = ba_input->sd; inst->sd_input.index = ba_input->ba_ip_idx; } else { dprintk(BA_ERR, "Could not find output index: %d", index); rc = -EINVAL; } return rc; } EXPORT_SYMBOL(msm_ba_s_output); int msm_ba_enum_fmt(void *instance, struct v4l2_fmtdesc *f) { struct msm_ba_inst *inst = instance; if (!inst || !f) return -EINVAL; return -EINVAL; } EXPORT_SYMBOL(msm_ba_enum_fmt); int msm_ba_s_fmt(void *instance, struct v4l2_format *f) { struct msm_ba_inst *inst = instance; if (!inst || !f) return -EINVAL; return -EINVAL; } EXPORT_SYMBOL(msm_ba_s_fmt); int msm_ba_g_fmt(void *instance, struct v4l2_format *f) { struct msm_ba_inst *inst = instance; struct v4l2_subdev *sd = NULL; struct msm_ba_input *ba_input = NULL; v4l2_std_id new_std = V4L2_STD_UNKNOWN; struct v4l2_dv_timings sd_dv_timings; struct v4l2_mbus_framefmt sd_mbus_fmt; int rc = 0; if (!inst || !f) return -EINVAL; sd = inst->sd; if (!sd) { dprintk(BA_ERR, "No sd registered"); return -EINVAL; } ba_input = msm_ba_find_input(inst->sd_input.index); if (!ba_input) { dprintk(BA_ERR, "Could not find input index: %d", inst->sd_input.index); return -EINVAL; } if (BA_INPUT_HDMI != ba_input->input_type) { rc = v4l2_subdev_call(sd, video, querystd, &new_std); if (rc) { dprintk(BA_ERR, "querystd failed %d for sd: %s", rc, sd->name); return -EINVAL; } inst->sd_input.std = new_std; } else { rc = v4l2_subdev_call(sd, video, g_dv_timings, &sd_dv_timings); if (rc) { dprintk(BA_ERR, 
"g_dv_timings failed %d for sd: %s", rc, sd->name); return -EINVAL; } } rc = v4l2_subdev_call(sd, video, g_mbus_fmt, &sd_mbus_fmt); if (rc) { dprintk(BA_ERR, "g_mbus_fmt failed %d for sd: %s", rc, sd->name); } else { f->fmt.pix.height = sd_mbus_fmt.height; f->fmt.pix.width = sd_mbus_fmt.width; switch (sd_mbus_fmt.code) { case V4L2_MBUS_FMT_YUYV8_2X8: f->fmt.pix.pixelformat = V4L2_PIX_FMT_YUYV; break; case V4L2_MBUS_FMT_YVYU8_2X8: f->fmt.pix.pixelformat = V4L2_PIX_FMT_YVYU; break; case V4L2_MBUS_FMT_VYUY8_2X8: f->fmt.pix.pixelformat = V4L2_PIX_FMT_VYUY; break; case V4L2_MBUS_FMT_UYVY8_2X8: f->fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY; break; default: dprintk(BA_ERR, "Unknown sd_mbus_fmt.code 0x%x", sd_mbus_fmt.code); f->fmt.pix.pixelformat = V4L2_PIX_FMT_UYVY; break; } } return rc; } EXPORT_SYMBOL(msm_ba_g_fmt); int msm_ba_s_ctrl(void *instance, struct v4l2_control *control) { struct msm_ba_inst *inst = instance; if (!inst || !control) return -EINVAL; return v4l2_s_ctrl(NULL, &inst->ctrl_handler, control); } EXPORT_SYMBOL(msm_ba_s_ctrl); int msm_ba_g_ctrl(void *instance, struct v4l2_control *control) { struct msm_ba_inst *inst = instance; if (!inst || !control) return -EINVAL; return v4l2_g_ctrl(&inst->ctrl_handler, control); } EXPORT_SYMBOL(msm_ba_g_ctrl); int msm_ba_s_ext_ctrl(void *instance, struct v4l2_ext_controls *control) { struct msm_ba_inst *inst = instance; if (!inst || !control) return -EINVAL; return -EINVAL; } EXPORT_SYMBOL(msm_ba_s_ext_ctrl); int msm_ba_streamon(void *instance, enum v4l2_buf_type i) { struct msm_ba_inst *inst = instance; struct v4l2_subdev *sd = NULL; int rc = 0; if (!inst) return -EINVAL; sd = inst->sd; if (!sd) { dprintk(BA_ERR, "No sd registered"); return -EINVAL; } rc = v4l2_subdev_call(sd, video, s_stream, 1); if (rc) dprintk(BA_ERR, "Stream on failed on input: %d", inst->sd_input.index); else msm_ba_set_out_in_use(sd, 1); dprintk(BA_DBG, "Stream on: %s : %d", inst->sd_input.name, inst->sd_input.index); return rc; } EXPORT_SYMBOL(msm_ba_streamon); int msm_ba_streamoff(void *instance, enum v4l2_buf_type i) { struct msm_ba_inst *inst = instance; struct v4l2_subdev *sd = NULL; int rc = 0; if (!inst) return -EINVAL; sd = inst->sd; if (!sd) { dprintk(BA_ERR, "No sd registered"); return -EINVAL; } rc = v4l2_subdev_call(sd, video, s_stream, 0); if (rc) dprintk(BA_ERR, "Stream off failed on input: %d", inst->sd_input.index); dprintk(BA_DBG, "Stream off: %s : %d", inst->sd_input.name, inst->sd_input.index); msm_ba_set_out_in_use(sd, 0); return rc; } EXPORT_SYMBOL(msm_ba_streamoff); long msm_ba_private_ioctl(void *instance, int cmd, void *arg) { long rc = 0; struct msm_ba_inst *inst = instance; struct v4l2_subdev *sd = NULL; int *s_ioctl = arg; dprintk(BA_DBG, "Enter %s with command: 0x%x", __func__, cmd); if (!inst) return -EINVAL; switch (cmd) { case VIDIOC_HDMI_RX_CEC_S_LOGICAL: { dprintk(BA_DBG, "VIDIOC_HDMI_RX_CEC_S_LOGICAL"); sd = inst->sd; if (!sd) { dprintk(BA_ERR, "No sd registered"); return -EINVAL; } if (s_ioctl) { rc = v4l2_subdev_call(sd, core, ioctl, cmd, s_ioctl); if (rc) dprintk(BA_ERR, "%s failed: %ld on cmd: 0x%x", __func__, rc, cmd); } else { dprintk(BA_ERR, "%s: NULL argument provided", __func__); rc = -EINVAL; } } break; case VIDIOC_HDMI_RX_CEC_CLEAR_LOGICAL: { dprintk(BA_DBG, "VIDIOC_HDMI_RX_CEC_CLEAR_LOGICAL"); sd = inst->sd; if (!sd) { dprintk(BA_ERR, "No sd registered"); return -EINVAL; } rc = v4l2_subdev_call(sd, core, ioctl, cmd, s_ioctl); if (rc) dprintk(BA_ERR, "%s failed: %ld on cmd: 0x%x", __func__, rc, cmd); } break; case 
VIDIOC_HDMI_RX_CEC_G_PHYSICAL: { dprintk(BA_DBG, "VIDIOC_HDMI_RX_CEC_G_PHYSICAL"); sd = inst->sd; if (!sd) { dprintk(BA_ERR, "No sd registered"); return -EINVAL; } if (s_ioctl) { rc = v4l2_subdev_call(sd, core, ioctl, cmd, s_ioctl); if (rc) dprintk(BA_ERR, "%s failed: %ld on cmd: 0x%x", __func__, rc, cmd); } else { dprintk(BA_ERR, "%s: NULL argument provided", __func__); rc = -EINVAL; } } break; case VIDIOC_HDMI_RX_CEC_G_CONNECTED: { dprintk(BA_DBG, "VIDIOC_HDMI_RX_CEC_G_CONNECTED"); sd = inst->sd; if (!sd) { dprintk(BA_ERR, "No sd registered"); return -EINVAL; } if (s_ioctl) { rc = v4l2_subdev_call(sd, core, ioctl, cmd, s_ioctl); if (rc) dprintk(BA_ERR, "%s failed: %ld on cmd: 0x%x", __func__, rc, cmd); } else { dprintk(BA_ERR, "%s: NULL argument provided", __func__); rc = -EINVAL; } } break; case VIDIOC_HDMI_RX_CEC_S_ENABLE: { dprintk(BA_DBG, "VIDIOC_HDMI_RX_CEC_S_ENABLE"); sd = inst->sd; if (!sd) { dprintk(BA_ERR, "No sd registered"); return -EINVAL; } if (s_ioctl) { rc = v4l2_subdev_call(sd, core, ioctl, cmd, s_ioctl); if (rc) dprintk(BA_ERR, "%s failed: %ld on cmd: 0x%x", __func__, rc, cmd); } else { dprintk(BA_ERR, "%s: NULL argument provided", __func__); rc = -EINVAL; } } break; default: dprintk(BA_WARN, "Not a typewriter! Command: 0x%x", cmd); rc = -ENOTTY; break; } return rc; } EXPORT_SYMBOL(msm_ba_private_ioctl); int msm_ba_save_restore_input(void *instance, enum msm_ba_save_restore_ip sr) { struct msm_ba_inst *inst = instance; struct msm_ba_input *ba_input = NULL; int rc = 0; if (!inst) return -EINVAL; if (BA_SR_RESTORE_IP == sr && inst->restore) { dprintk(BA_DBG, "Restoring input: %d", inst->saved_input); rc = v4l2_subdev_call(inst->sd, video, s_routing, inst->saved_input, 0, 0); if (rc) dprintk(BA_ERR, "Failed to restore input: %d", inst->saved_input); msm_ba_reset_ip_in_use_from_sd(inst->sd); ba_input = msm_ba_find_input_from_sd(inst->sd, inst->saved_input); if (ba_input) ba_input->in_use = 1; else dprintk(BA_WARN, "Could not find input %d from sd: %s", inst->saved_input, inst->sd->name); inst->restore = 0; inst->saved_input = BA_IP_MAX; dprintk(BA_DBG, "Stream on from save restore"); rc = msm_ba_streamon(inst, V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE); } else if (BA_SR_SAVE_IP == sr) { ba_input = msm_ba_find_input(inst->sd_input.index); if (ba_input == NULL) { dprintk(BA_ERR, "Could not find input %d", inst->sd_input.index); } else if (ba_input->ba_out_in_use) { inst->restore = 1; inst->saved_input = msm_ba_find_ip_in_use_from_sd(inst->sd); if (inst->saved_input == BA_IP_MAX) { dprintk(BA_ERR, "Could not find input to save"); inst->restore = 0; } dprintk(BA_DBG, "Saving input: %d", inst->saved_input); rc = -EBUSY; } } else { dprintk(BA_DBG, "Nothing to do in save and restore"); } return rc; } EXPORT_SYMBOL(msm_ba_save_restore_input); void msm_ba_release_subdev_node(struct video_device *vdev) { struct v4l2_subdev *sd = video_get_drvdata(vdev); sd->devnode = NULL; kfree(vdev); } static int msm_ba_register_v4l2_subdev(struct v4l2_device *v4l2_dev, struct v4l2_subdev *sd) { struct video_device *vdev; int rc = 0; dprintk(BA_DBG, "Enter %s: v4l2_dev 0x%p, v4l2_subdev 0x%p", __func__, v4l2_dev, sd); if (NULL == v4l2_dev || NULL == sd || !sd->name[0]) { dprintk(BA_ERR, "Invalid input"); return -EINVAL; } rc = v4l2_device_register_subdev(v4l2_dev, sd); if (rc < 0) { dprintk(BA_ERR, "%s(%d), V4L2 subdev register failed for %s rc: %d", __func__, __LINE__, sd->name, rc); return rc; } if (sd->flags & V4L2_SUBDEV_FL_HAS_DEVNODE) { vdev = video_device_alloc(); if (NULL == vdev) { dprintk(BA_ERR, 
"%s Not enough memory", __func__); return -ENOMEM; } video_set_drvdata(vdev, sd); strlcpy(vdev->name, sd->name, sizeof(vdev->name)); vdev->v4l2_dev = v4l2_dev; vdev->fops = &v4l2_subdev_fops; vdev->release = msm_ba_release_subdev_node; rc = __video_register_device(vdev, VFL_TYPE_SUBDEV, -1, 1, sd->owner); if (rc < 0) { dprintk(BA_ERR, "%s Error registering video device %s", __func__, sd->name); kfree(vdev); } else { #if defined(CONFIG_MEDIA_CONTROLLER) sd->entity.info.v4l.major = VIDEO_MAJOR; sd->entity.info.v4l.minor = vdev->minor; sd->entity.name = video_device_node_name(vdev); #endif sd->devnode = vdev; } } dprintk(BA_DBG, "Exit %s with rc: %d", __func__, rc); return rc; } int msm_ba_register_subdev_node(struct v4l2_subdev *sd) { struct ba_ctxt *ba_ctxt; int rc = 0; ba_ctxt = msm_ba_get_ba_context(); rc = msm_ba_register_v4l2_subdev(&ba_ctxt->dev_ctxt->v4l2_dev, sd); if (!rc) { ba_ctxt->dev_ctxt->num_ba_subdevs++; msm_ba_add_inputs(sd); } return rc; } EXPORT_SYMBOL(msm_ba_register_subdev_node); static void __msm_ba_sd_unregister(struct v4l2_subdev *sub_dev) { struct ba_ctxt *ba_ctxt; ba_ctxt = msm_ba_get_ba_context(); mutex_lock(&ba_ctxt->ba_cs); v4l2_device_unregister_subdev(sub_dev); ba_ctxt->dev_ctxt->num_ba_subdevs--; msm_ba_del_inputs(sub_dev); dprintk(BA_DBG, "%s(%d), BA Unreg Sub Device : num ba devices %d : %s", __func__, __LINE__, ba_ctxt->dev_ctxt->num_ba_subdevs, sub_dev->name); mutex_unlock(&ba_ctxt->ba_cs); } int msm_ba_unregister_subdev_node(struct v4l2_subdev *sub_dev) { struct ba_ctxt *ba_ctxt; ba_ctxt = msm_ba_get_ba_context(); if (!ba_ctxt || !ba_ctxt->dev_ctxt) return -ENODEV; if (!sub_dev) return -EINVAL; __msm_ba_sd_unregister(sub_dev); return 0; } EXPORT_SYMBOL(msm_ba_unregister_subdev_node); static int msm_ba_setup_event_queue(void *inst, struct video_device *pvdev) { int rc = 0; struct msm_ba_inst *ba_inst = (struct msm_ba_inst *)inst; v4l2_fh_init(&ba_inst->event_handler, pvdev); v4l2_fh_add(&ba_inst->event_handler); return rc; } int msm_ba_subscribe_event(void *inst, const struct v4l2_event_subscription *sub) { int rc = 0; struct msm_ba_inst *ba_inst = (struct msm_ba_inst *)inst; if (!inst || !sub) return -EINVAL; rc = v4l2_event_subscribe(&ba_inst->event_handler, sub, MSM_BA_MAX_EVENTS, NULL); return rc; } EXPORT_SYMBOL(msm_ba_subscribe_event); int msm_ba_unsubscribe_event(void *inst, const struct v4l2_event_subscription *sub) { int rc = 0; struct msm_ba_inst *ba_inst = (struct msm_ba_inst *)inst; if (!inst || !sub) return -EINVAL; rc = v4l2_event_unsubscribe(&ba_inst->event_handler, sub); return rc; } EXPORT_SYMBOL(msm_ba_unsubscribe_event); void msm_ba_subdev_event_hndlr(struct v4l2_subdev *sd, unsigned int notification, void *arg) { struct msm_ba_dev *dev_ctxt = NULL; struct msm_ba_input *ba_input; struct msm_ba_sd_event *ba_sd_event; int bridge_chip_ip; if (!sd || !arg) { dprintk(BA_ERR, "%s null v4l2 subdev or arg", __func__); return; } bridge_chip_ip = ((int *)((struct v4l2_event *)arg)->u.data)[0]; ba_input = msm_ba_find_input_from_sd(sd, bridge_chip_ip); if (!ba_input) { dprintk(BA_WARN, "Could not find input %d from sd: %s", bridge_chip_ip, sd->name); return; } ba_sd_event = kzalloc(sizeof(*ba_sd_event), GFP_KERNEL); if (!ba_sd_event) { dprintk(BA_ERR, "%s out of memory", __func__); return; } dev_ctxt = get_ba_dev(); ba_sd_event->sd_event = *(struct v4l2_event *)arg; ((int *)ba_sd_event->sd_event.u.data)[0] = ba_input->ba_ip_idx; mutex_lock(&dev_ctxt->dev_cs); list_add_tail(&ba_sd_event->list, &dev_ctxt->sd_events); mutex_unlock(&dev_ctxt->dev_cs); 
schedule_delayed_work(&dev_ctxt->sd_events_work, 0); } void *msm_ba_open(const struct msm_ba_ext_ops *ext_ops) { struct msm_ba_inst *inst = NULL; struct msm_ba_dev *dev_ctxt = NULL; int rc = 0; dev_ctxt = get_ba_dev(); inst = kzalloc(sizeof(*inst), GFP_KERNEL); if (!inst) { dprintk(BA_ERR, "Failed to allocate memory"); return NULL; } mutex_init(&inst->inst_cs); init_waitqueue_head(&inst->kernel_event_queue); inst->state = MSM_BA_DEV_UNINIT_DONE; inst->dev_ctxt = dev_ctxt; rc = msm_ba_ctrl_init(inst); if (rc) { dprintk(BA_WARN, "Failed to initialize controls: %d", rc); msm_ba_ctrl_deinit(inst); } if (!list_empty(&(inst->dev_ctxt->v4l2_dev.subdevs))) inst->sd = list_first_entry(&(inst->dev_ctxt->v4l2_dev.subdevs), struct v4l2_subdev, list); msm_ba_setup_event_queue(inst, dev_ctxt->vdev); mutex_lock(&dev_ctxt->dev_cs); list_add_tail(&inst->list, &dev_ctxt->instances); mutex_unlock(&dev_ctxt->dev_cs); dev_ctxt->state = BA_DEV_INIT; dev_ctxt->state = BA_DEV_INIT_DONE; inst->state = MSM_BA_DEV_INIT_DONE; inst->sd_input.index = 0; inst->input_prio = V4L2_PRIORITY_DEFAULT; inst->debugfs_root = msm_ba_debugfs_init_inst(inst, dev_ctxt->debugfs_root); inst->ext_ops = ext_ops; return inst; } EXPORT_SYMBOL(msm_ba_open); int msm_ba_close(void *instance) { struct msm_ba_inst *inst = instance; struct msm_ba_inst *temp; struct msm_ba_dev *dev_ctxt; struct list_head *ptr; struct list_head *next; int rc = 0; if (!inst) return -EINVAL; dev_ctxt = inst->dev_ctxt; mutex_lock(&dev_ctxt->dev_cs); list_for_each_safe(ptr, next, &dev_ctxt->instances) { temp = list_entry(ptr, struct msm_ba_inst, list); if (temp == inst) list_del(&inst->list); } mutex_unlock(&dev_ctxt->dev_cs); msm_ba_ctrl_deinit(inst); v4l2_fh_del(&inst->event_handler); v4l2_fh_exit(&inst->event_handler); debugfs_remove_recursive(inst->debugfs_root); dprintk(BA_DBG, "Closed BA instance: %p", inst); kfree(inst); return rc; } EXPORT_SYMBOL(msm_ba_close);
gpl-2.0
wkritzinger/asuswrt-merlin
release/src/router/samba3/examples/libsmbclient/smbwrapper/smbw.c
52
23066
/* Unix SMB/Netbios implementation. Version 2.0 SMB wrapper functions Copyright (C) Andrew Tridgell 1998 Copyright (C) Derrell Lipman 2003-2005 This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <stdio.h> #include <stdlib.h> #include <unistd.h> #include <stdarg.h> #include <assert.h> #include "smbw.h" #include "bsd-strlfunc.h" typedef enum StartupType { StartupType_Fake, StartupType_Real } StartupType; int smbw_fd_map[__FD_SETSIZE]; int smbw_ref_count[__FD_SETSIZE]; char smbw_cwd[PATH_MAX]; char smbw_prefix[] = SMBW_PREFIX; /* needs to be here because of dumb include files on some systems */ int creat_bits = O_WRONLY|O_CREAT|O_TRUNC; int smbw_initialized = 0; static int debug_level = 0; static SMBCCTX *smbw_ctx; extern int smbw_debug; /***************************************************** smbw_ref -- manipulate reference counts ******************************************************/ int smbw_ref(int client_fd, Ref_Count_Type type, ...) { va_list ap; /* client id values begin at SMBC_BASE_FC. */ client_fd -= SMBC_BASE_FD; va_start(ap, type); switch(type) { case SMBW_RCT_Increment: return ++smbw_ref_count[client_fd]; case SMBW_RCT_Decrement: return --smbw_ref_count[client_fd]; case SMBW_RCT_Get: return smbw_ref_count[client_fd]; case SMBW_RCT_Set: return (smbw_ref_count[client_fd] = va_arg(ap, int)); } va_end(ap); /* never gets here */ return -1; } /* * Return a username and password given a server and share name * * Returns 0 upon success; * non-zero otherwise, and errno is set to indicate the error. */ static void get_envvar_auth_data(const char *srv, const char *shr, char *wg, int wglen, char *un, int unlen, char *pw, int pwlen) { char *u; char *p; char *w; /* Fall back to environment variables */ w = getenv("WORKGROUP"); if (w == NULL) w = ""; u = getenv("USER"); if (u == NULL) u = ""; p = getenv("PASSWORD"); if (p == NULL) p = ""; smbw_strlcpy(wg, w, wglen); smbw_strlcpy(un, u, unlen); smbw_strlcpy(pw, p, pwlen); } static smbc_get_auth_data_fn get_auth_data_fn = get_envvar_auth_data; /***************************************************** set the get auth data function ******************************************************/ void smbw_set_auth_data_fn(smbc_get_auth_data_fn fn) { get_auth_data_fn = fn; } /***************************************************** ensure that all connections are terminated upon exit ******************************************************/ static void do_shutdown(void) { if (smbw_ctx != NULL) { smbc_free_context(smbw_ctx, 1); } } /***************************************************** initialise structures *******************************************************/ static void do_init(StartupType startupType) { int i; char *p; smbw_initialized = 1; /* this must be first to avoid recursion! 
*/ smbw_ctx = NULL; /* don't free context until it's established */ /* initially, no file descriptors are mapped */ for (i = 0; i < __FD_SETSIZE; i++) { smbw_fd_map[i] = -1; smbw_ref_count[i] = 0; } /* See if we've been told to start in a particular directory */ if ((p=getenv("SMBW_DIR")) != NULL) { smbw_strlcpy(smbw_cwd, p, PATH_MAX); /* we don't want the old directory to be busy */ (* smbw_libc.chdir)("/"); } else { *smbw_cwd = '\0'; } if ((p=getenv("DEBUG"))) { debug_level = atoi(p); } if ((smbw_ctx = smbc_new_context()) == NULL) { fprintf(stderr, "Could not create a context.\n"); exit(1); } smbw_ctx->debug = debug_level; smbw_ctx->callbacks.auth_fn = get_auth_data_fn; smbw_ctx->options.browse_max_lmb_count = 0; smbw_ctx->options.urlencode_readdir_entries = 1; smbw_ctx->options.one_share_per_server = 1; if (smbc_init_context(smbw_ctx) == NULL) { fprintf(stderr, "Could not initialize context.\n"); exit(1); } smbc_set_context(smbw_ctx); /* if not real startup, exit handler has already been established */ if (startupType == StartupType_Real) { atexit(do_shutdown); } } /***************************************************** initialise structures, real start up vs a fork() *******************************************************/ void smbw_init(void) { do_init(StartupType_Real); } /***************************************************** determine if a file descriptor is a smb one *******************************************************/ int smbw_fd(int smbw_fd) { SMBW_INIT(); return (smbw_fd >= 0 && smbw_fd < __FD_SETSIZE && smbw_fd_map[smbw_fd] >= SMBC_BASE_FD); /* minimum smbc_ fd */ } /***************************************************** determine if a path is a smb one *******************************************************/ int smbw_path(const char *name) { int len; int ret; int saved_errno; saved_errno = errno; SMBW_INIT(); len = strlen(smbw_prefix); ret = ((strncmp(name, smbw_prefix, len) == 0 && (name[len] == '\0' || name[len] == '/')) || (*name != '/' && *smbw_cwd != '\0')); errno = saved_errno; return ret; } /***************************************************** remove redundant stuff from a filename *******************************************************/ void smbw_clean_fname(char *name) { char *p, *p2; int l; int modified = 1; if (!name) return; DEBUG(10, ("Clean [%s]...\n", name)); while (modified) { modified = 0; if ((p=strstr(name,"/./"))) { modified = 1; while (*p) { p[0] = p[2]; p++; } DEBUG(10, ("\tclean 1 (/./) produced [%s]\n", name)); } if ((p=strstr(name,"//"))) { modified = 1; while (*p) { p[0] = p[1]; p++; } DEBUG(10, ("\tclean 2 (//) produced [%s]\n", name)); } if (strcmp(name,"/../")==0) { modified = 1; name[1] = 0; DEBUG(10,("\tclean 3 (^/../$) produced [%s]\n", name)); } if ((p=strstr(name,"/../"))) { modified = 1; for (p2 = (p > name ? p-1 : p); p2 > name; p2--) { if (p2[0] == '/') break; } if (p2 > name) p2++; while (*p2) { p2[0] = p[3]; p2++; p++; } DEBUG(10, ("\tclean 4 (/../) produced [%s]\n", name)); } if (strcmp(name,"/..")==0) { modified = 1; name[1] = 0; DEBUG(10, ("\tclean 5 (^/..$) produced [%s]\n", name)); } l = strlen(name); p = l>=3?(name+l-3):name; if (strcmp(p,"/..")==0) { modified = 1; for (p2=p-1;p2>name;p2--) { if (p2[0] == '/') break; } if (p2==name) { p[0] = '/'; p[1] = 0; } else { p2[0] = 0; } DEBUG(10, ("\tclean 6 (/..) produced [%s]\n", name)); } l = strlen(name); p = l>=2?(name+l-2):name; if (strcmp(p,"/.")==0) { modified = 1; if (p == name) { p[1] = 0; } else { p[0] = 0; } DEBUG(10, ("\tclean 7 (/.) 
produced [%s]\n", name)); } if (strncmp(p=name,"./",2) == 0) { modified = 1; do { p[0] = p[2]; } while (*p++); DEBUG(10, ("\tclean 8 (^./) produced [%s]\n", name)); } l = strlen(p=name); if (l > 1 && p[l-1] == '/') { modified = 1; p[l-1] = 0; DEBUG(10, ("\tclean 9 (/) produced [%s]\n", name)); } } } void smbw_fix_path(const char *src, char *dest) { const char *p; int len = strlen(smbw_prefix); if (*src == '/') { for (p = src + len; *p == '/'; p++) ; snprintf(dest, PATH_MAX, "smb://%s", p); } else { snprintf(dest, PATH_MAX, "%s/%s", smbw_cwd, src); } smbw_clean_fname(dest + 5); DEBUG(10, ("smbw_fix_path(%s) returning [%s]\n", src, dest)); } /***************************************************** a wrapper for open() *******************************************************/ int smbw_open(const char *fname, int flags, mode_t mode) { int client_fd; int smbw_fd; char path[PATH_MAX]; SMBW_INIT(); if (!fname) { errno = EINVAL; return -1; } smbw_fd = (smbw_libc.open)(SMBW_DUMMY, O_WRONLY, 0200); if (smbw_fd == -1) { errno = EMFILE; return -1; } smbw_fix_path(fname, path); if (flags == creat_bits) { client_fd = smbc_creat(path, mode); } else { client_fd = smbc_open(path, flags, mode); } if (client_fd < 0) { (* smbw_libc.close)(smbw_fd); return -1; } smbw_fd_map[smbw_fd] = client_fd; smbw_ref(client_fd, SMBW_RCT_Increment); return smbw_fd; } /***************************************************** a wrapper for pread() there should really be an smbc_pread() to avoid the two lseek()s required in this kludge. *******************************************************/ ssize_t smbw_pread(int smbw_fd, void *buf, size_t count, SMBW_OFF_T ofs) { int client_fd; ssize_t ret; int saved_errno; SMBW_OFF_T old_ofs; if (count == 0) { return 0; } client_fd = smbw_fd_map[smbw_fd]; if ((old_ofs = smbc_lseek(client_fd, 0, SEEK_CUR)) < 0 || smbc_lseek(client_fd, ofs, SEEK_SET) < 0) { return -1; } if ((ret = smbc_read(client_fd, buf, count)) < 0) { saved_errno = errno; (void) smbc_lseek(client_fd, old_ofs, SEEK_SET); errno = saved_errno; return -1; } return ret; } /***************************************************** a wrapper for read() *******************************************************/ ssize_t smbw_read(int smbw_fd, void *buf, size_t count) { int client_fd; client_fd = smbw_fd_map[smbw_fd]; return smbc_read(client_fd, buf, count); } /***************************************************** a wrapper for write() *******************************************************/ ssize_t smbw_write(int smbw_fd, void *buf, size_t count) { int client_fd; client_fd = smbw_fd_map[smbw_fd]; return smbc_write(client_fd, buf, count); } /***************************************************** a wrapper for pwrite() *******************************************************/ ssize_t smbw_pwrite(int smbw_fd, void *buf, size_t count, SMBW_OFF_T ofs) { int saved_errno; int client_fd; ssize_t ret; SMBW_OFF_T old_ofs; if (count == 0) { return 0; } client_fd = smbw_fd_map[smbw_fd]; if ((old_ofs = smbc_lseek(client_fd, 0, SEEK_CUR)) < 0 || smbc_lseek(client_fd, ofs, SEEK_SET) < 0) { return -1; } if ((ret = smbc_write(client_fd, buf, count)) < 0) { saved_errno = errno; (void) smbc_lseek(client_fd, old_ofs, SEEK_SET); errno = saved_errno; return -1; } return ret; } /***************************************************** a wrapper for close() *******************************************************/ int smbw_close(int smbw_fd) { int client_fd; client_fd = smbw_fd_map[smbw_fd]; if (smbw_ref(client_fd, SMBW_RCT_Decrement) > 0) { return 0; } (* 
smbw_libc.close)(smbw_fd); smbw_fd_map[smbw_fd] = -1; return smbc_close(client_fd); } /***************************************************** a wrapper for fcntl() *******************************************************/ int smbw_fcntl(int smbw_fd, int cmd, long arg) { return 0; } /***************************************************** a wrapper for access() *******************************************************/ int smbw_access(const char *name, int mode) { struct SMBW_stat st; SMBW_INIT(); if (smbw_stat(name, &st)) return -1; if (((mode & R_OK) && !(st.s_mode & S_IRUSR)) || ((mode & W_OK) && !(st.s_mode & S_IWUSR)) || ((mode & X_OK) && !(st.s_mode & S_IXUSR))) { errno = EACCES; return -1; } return 0; } /***************************************************** a wrapper for readlink() - needed for correct errno setting *******************************************************/ int smbw_readlink(const char *fname, char *buf, size_t bufsize) { struct SMBW_stat st; int ret; SMBW_INIT(); ret = smbw_stat(fname, &st); if (ret != 0) { return -1; } /* it exists - say it isn't a link */ errno = EINVAL; return -1; } /***************************************************** a wrapper for unlink() *******************************************************/ int smbw_unlink(const char *fname) { char path[PATH_MAX]; SMBW_INIT(); smbw_fix_path(fname, path); return smbc_unlink(path); } /***************************************************** a wrapper for rename() *******************************************************/ int smbw_rename(const char *oldname, const char *newname) { char path_old[PATH_MAX]; char path_new[PATH_MAX]; SMBW_INIT(); smbw_fix_path(oldname, path_old); smbw_fix_path(newname, path_new); return smbc_rename(path_old, path_new); } /***************************************************** a wrapper for utimes *******************************************************/ int smbw_utimes(const char *fname, void *buf) { char path[PATH_MAX]; smbw_fix_path(fname, path); return smbc_utimes(path, buf); } /***************************************************** a wrapper for utime *******************************************************/ int smbw_utime(const char *fname, void *buf) { char path[PATH_MAX]; smbw_fix_path(fname, path); return smbc_utime(path, buf); } /***************************************************** a wrapper for chown() *******************************************************/ int smbw_chown(const char *fname, uid_t owner, gid_t group) { /* always indicate that this is not supported. 
*/ errno = ENOTSUP; return -1; } /***************************************************** a wrapper for chmod() *******************************************************/ int smbw_chmod(const char *fname, mode_t newmode) { char path[PATH_MAX]; smbw_fix_path(fname, path); return smbc_chmod(path, newmode); } /***************************************************** a wrapper for lseek() *******************************************************/ SMBW_OFF_T smbw_lseek(int smbw_fd, SMBW_OFF_T offset, int whence) { int client_fd; SMBW_OFF_T ret; client_fd = smbw_fd_map[smbw_fd]; ret = smbc_lseek(client_fd, offset, whence); if (smbw_debug) { printf("smbw_lseek(%d/%d, 0x%llx) returned 0x%llx\n", smbw_fd, client_fd, (unsigned long long) offset, (unsigned long long) ret); } return ret; } /***************************************************** a wrapper for dup() *******************************************************/ int smbw_dup(int smbw_fd) { int fd2; fd2 = (smbw_libc.dup)(smbw_fd); if (fd2 == -1) { return -1; } smbw_fd_map[fd2] = smbw_fd_map[smbw_fd]; smbw_ref(smbw_fd_map[smbw_fd], SMBW_RCT_Increment); return fd2; } /***************************************************** a wrapper for dup2() *******************************************************/ int smbw_dup2(int smbw_fd, int fd2) { if ((* smbw_libc.dup2)(smbw_fd, fd2) != fd2) { return -1; } smbw_fd_map[fd2] = smbw_fd_map[smbw_fd]; smbw_ref(smbw_fd_map[smbw_fd], SMBW_RCT_Increment); return fd2; } /***************************************************** when we fork we have to close all connections and files in the child *******************************************************/ int smbw_fork(void) { int i; pid_t child_pid; int p[2]; char c = 0; SMBW_INIT(); if (pipe(p)) return (* smbw_libc.fork)(); child_pid = (* smbw_libc.fork)(); if (child_pid) { /* block the parent for a moment until the sockets are closed */ (* smbw_libc.close)(p[1]); (* smbw_libc.read)(p[0], &c, 1); (* smbw_libc.close)(p[0]); return child_pid; } (* smbw_libc.close)(p[0]); /* close all server connections and locally-opened files */ for (i = 0; i < __FD_SETSIZE; i++) { if (smbw_fd_map[i] > 0 && smbw_ref(smbw_fd_map[i], SMBW_RCT_Get) > 0) { smbc_close(smbw_fd_map[i]); smbw_ref(smbw_fd_map[i], SMBW_RCT_Set, 0); (* smbw_libc.close)(i); } smbw_fd_map[i] = -1; } /* unblock the parent */ write(p[1], &c, 1); (* smbw_libc.close)(p[1]); /* specify directory to start in, if it's simulated smb */ if (*smbw_cwd != '\0') { setenv("SMBW_DIR", smbw_cwd, 1); } else { unsetenv("SMBW_DIR"); } /* Re-initialize this library for the child */ do_init(StartupType_Fake); /* and continue in the child */ return 0; } int smbw_setxattr(const char *fname, const char *name, const void *value, size_t size, int flags) { char path[PATH_MAX]; if (strcmp(name, "system.posix_acl_access") == 0) { name = "system.*"; } smbw_fix_path(fname, path); return smbc_setxattr(path, name, value, size, flags); } int smbw_lsetxattr(const char *fname, const char *name, const void *value, size_t size, int flags) { char path[PATH_MAX]; if (strcmp(name, "system.posix_acl_access") == 0) { name = "system.*"; } smbw_fix_path(fname, path); return smbc_lsetxattr(path, name, value, size, flags); } int smbw_fsetxattr(int smbw_fd, const char *name, const void *value, size_t size, int flags) { int client_fd; if (strcmp(name, "system.posix_acl_access") == 0) { name = "system.*"; } client_fd = smbw_fd_map[smbw_fd]; return smbc_fsetxattr(client_fd, name, value, size, flags); } int smbw_getxattr(const char *fname, const char *name, const void *value, 
size_t size) { char path[PATH_MAX]; if (strcmp(name, "system.posix_acl_access") == 0) { name = "system.*"; } smbw_fix_path(fname, path); return smbc_getxattr(path, name, value, size); } int smbw_lgetxattr(const char *fname, const char *name, const void *value, size_t size) { char path[PATH_MAX]; if (strcmp(name, "system.posix_acl_access") == 0) { name = "system.*"; } smbw_fix_path(fname, path); return smbc_lgetxattr(path, name, value, size); } int smbw_fgetxattr(int smbw_fd, const char *name, const void *value, size_t size) { int client_fd; if (strcmp(name, "system.posix_acl_access") == 0) { name = "system.*"; } client_fd = smbw_fd_map[smbw_fd]; return smbc_fgetxattr(client_fd, name, value, size); } int smbw_removexattr(const char *fname, const char *name) { char path[PATH_MAX]; if (strcmp(name, "system.posix_acl_access") == 0) { name = "system.*"; } smbw_fix_path(fname, path); return smbc_removexattr(path, name); } int smbw_lremovexattr(const char *fname, const char *name) { char path[PATH_MAX]; if (strcmp(name, "system.posix_acl_access") == 0) { name = "system.*"; } smbw_fix_path(fname, path); return smbc_lremovexattr(path, name); } int smbw_fremovexattr(int smbw_fd, const char *name) { int client_fd; if (strcmp(name, "system.posix_acl_access") == 0) { name = "system.*"; } client_fd = smbw_fd_map[smbw_fd]; return smbc_fremovexattr(client_fd, name); } int smbw_listxattr(const char *fname, char *list, size_t size) { char path[PATH_MAX]; smbw_fix_path(fname, path); return smbc_listxattr(path, list, size); } int smbw_llistxattr(const char *fname, char *list, size_t size) { char path[PATH_MAX]; smbw_fix_path(fname, path); return smbc_llistxattr(path, list, size); } int smbw_flistxattr(int smbw_fd, char *list, size_t size) { int client_fd; client_fd = smbw_fd_map[smbw_fd]; return smbc_flistxattr(client_fd, list, size); }
gpl-2.0
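The smbw wrappers above all hang off one bookkeeping idea: every wrapper-level file descriptor maps to a libsmbclient descriptor, and dup()/dup2()/fork() adjust a reference count on that mapping so the underlying SMB handle is closed exactly once. A minimal userspace sketch of that scheme (hypothetical names, not the Samba API):

#include <stdio.h>

#define MAX_FDS 1024

static int fd_map[MAX_FDS];	/* wrapper fd -> client fd, -1 if unused */
static int fd_ref[MAX_FDS];	/* reference count per client fd */

static void map_init(void)
{
	for (int i = 0; i < MAX_FDS; i++)
		fd_map[i] = -1;
}

static void map_dup(int oldfd, int newfd)
{
	/* both wrapper fds now alias the same client fd */
	fd_map[newfd] = fd_map[oldfd];
	fd_ref[fd_map[oldfd]]++;
}

static void map_close(int fd)
{
	int client = fd_map[fd];

	fd_map[fd] = -1;
	if (client >= 0 && --fd_ref[client] == 0)
		printf("last reference gone: close client fd %d\n", client);
}

int main(void)
{
	map_init();
	fd_map[3] = 7;		/* "open": wrapper fd 3 -> client fd 7 */
	fd_ref[7] = 1;
	map_dup(3, 4);		/* dup(3) == 4: refcount becomes 2 */
	map_close(3);		/* client fd 7 stays open */
	map_close(4);		/* refcount hits 0: client fd is closed */
	return 0;
}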
wangenau/samsung-kernel-msm7x30
drivers/infiniband/hw/qib/qib_fs.c
52
14927
/* * Copyright (c) 2006, 2007, 2008, 2009 QLogic Corporation. All rights reserved. * Copyright (c) 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <linux/module.h> #include <linux/fs.h> #include <linux/mount.h> #include <linux/pagemap.h> #include <linux/init.h> #include <linux/namei.h> #include "qib.h" #define QIBFS_MAGIC 0x726a77 static struct super_block *qib_super; #define private2dd(file) ((file)->f_dentry->d_inode->i_private) static int qibfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, const struct file_operations *fops, void *data) { int error; struct inode *inode = new_inode(dir->i_sb); if (!inode) { error = -EPERM; goto bail; } inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_uid = 0; inode->i_gid = 0; inode->i_blocks = 0; inode->i_atime = CURRENT_TIME; inode->i_mtime = inode->i_atime; inode->i_ctime = inode->i_atime; inode->i_private = data; if (S_ISDIR(mode)) { inode->i_op = &simple_dir_inode_operations; inc_nlink(inode); inc_nlink(dir); } inode->i_fop = fops; d_instantiate(dentry, inode); error = 0; bail: return error; } static int create_file(const char *name, umode_t mode, struct dentry *parent, struct dentry **dentry, const struct file_operations *fops, void *data) { int error; *dentry = NULL; mutex_lock(&parent->d_inode->i_mutex); *dentry = lookup_one_len(name, parent, strlen(name)); if (!IS_ERR(*dentry)) error = qibfs_mknod(parent->d_inode, *dentry, mode, fops, data); else error = PTR_ERR(*dentry); mutex_unlock(&parent->d_inode->i_mutex); return error; } static ssize_t driver_stats_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { return simple_read_from_buffer(buf, count, ppos, &qib_stats, sizeof qib_stats); } /* * driver stats field names, one line per stat, single string. Used by * programs like ipathstats to print the stats in a way which works for * different versions of drivers, without changing program source. * if qlogic_ib_stats changes, this needs to change. Names need to be * 12 chars or less (w/o newline), for proper display by ipathstats utility. 
*/ static const char qib_statnames[] = "KernIntr\n" "ErrorIntr\n" "Tx_Errs\n" "Rcv_Errs\n" "H/W_Errs\n" "NoPIOBufs\n" "CtxtsOpen\n" "RcvLen_Errs\n" "EgrBufFull\n" "EgrHdrFull\n" ; static ssize_t driver_names_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { return simple_read_from_buffer(buf, count, ppos, qib_statnames, sizeof qib_statnames - 1); /* no null */ } static const struct file_operations driver_ops[] = { { .read = driver_stats_read, .llseek = generic_file_llseek, }, { .read = driver_names_read, .llseek = generic_file_llseek, }, }; /* read the per-device counters */ static ssize_t dev_counters_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { u64 *counters; size_t avail; struct qib_devdata *dd = private2dd(file); avail = dd->f_read_cntrs(dd, *ppos, NULL, &counters); return simple_read_from_buffer(buf, count, ppos, counters, avail); } /* read the per-device counters */ static ssize_t dev_names_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { char *names; size_t avail; struct qib_devdata *dd = private2dd(file); avail = dd->f_read_cntrs(dd, *ppos, &names, NULL); return simple_read_from_buffer(buf, count, ppos, names, avail); } static const struct file_operations cntr_ops[] = { { .read = dev_counters_read, .llseek = generic_file_llseek, }, { .read = dev_names_read, .llseek = generic_file_llseek, }, }; /* * Could use file->f_dentry->d_inode->i_ino to figure out which file, * instead of separate routine for each, but for now, this works... */ /* read the per-port names (same for each port) */ static ssize_t portnames_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { char *names; size_t avail; struct qib_devdata *dd = private2dd(file); avail = dd->f_read_portcntrs(dd, *ppos, 0, &names, NULL); return simple_read_from_buffer(buf, count, ppos, names, avail); } /* read the per-port counters for port 1 (pidx 0) */ static ssize_t portcntrs_1_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { u64 *counters; size_t avail; struct qib_devdata *dd = private2dd(file); avail = dd->f_read_portcntrs(dd, *ppos, 0, NULL, &counters); return simple_read_from_buffer(buf, count, ppos, counters, avail); } /* read the per-port counters for port 2 (pidx 1) */ static ssize_t portcntrs_2_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { u64 *counters; size_t avail; struct qib_devdata *dd = private2dd(file); avail = dd->f_read_portcntrs(dd, *ppos, 1, NULL, &counters); return simple_read_from_buffer(buf, count, ppos, counters, avail); } static const struct file_operations portcntr_ops[] = { { .read = portnames_read, .llseek = generic_file_llseek, }, { .read = portcntrs_1_read, .llseek = generic_file_llseek, }, { .read = portcntrs_2_read, .llseek = generic_file_llseek, }, }; /* * read the per-port QSFP data for port 1 (pidx 0) */ static ssize_t qsfp_1_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct qib_devdata *dd = private2dd(file); char *tmp; int ret; tmp = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!tmp) return -ENOMEM; ret = qib_qsfp_dump(dd->pport, tmp, PAGE_SIZE); if (ret > 0) ret = simple_read_from_buffer(buf, count, ppos, tmp, ret); kfree(tmp); return ret; } /* * read the per-port QSFP data for port 2 (pidx 1) */ static ssize_t qsfp_2_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct qib_devdata *dd = private2dd(file); char *tmp; int ret; if (dd->num_pports < 2) return -ENODEV; tmp = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!tmp) 
return -ENOMEM; ret = qib_qsfp_dump(dd->pport + 1, tmp, PAGE_SIZE); if (ret > 0) ret = simple_read_from_buffer(buf, count, ppos, tmp, ret); kfree(tmp); return ret; } static const struct file_operations qsfp_ops[] = { { .read = qsfp_1_read, .llseek = generic_file_llseek, }, { .read = qsfp_2_read, .llseek = generic_file_llseek, }, }; static ssize_t flash_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct qib_devdata *dd; ssize_t ret; loff_t pos; char *tmp; pos = *ppos; if (pos < 0) { ret = -EINVAL; goto bail; } if (pos >= sizeof(struct qib_flash)) { ret = 0; goto bail; } if (count > sizeof(struct qib_flash) - pos) count = sizeof(struct qib_flash) - pos; tmp = kmalloc(count, GFP_KERNEL); if (!tmp) { ret = -ENOMEM; goto bail; } dd = private2dd(file); if (qib_eeprom_read(dd, pos, tmp, count)) { qib_dev_err(dd, "failed to read from flash\n"); ret = -ENXIO; goto bail_tmp; } if (copy_to_user(buf, tmp, count)) { ret = -EFAULT; goto bail_tmp; } *ppos = pos + count; ret = count; bail_tmp: kfree(tmp); bail: return ret; } static ssize_t flash_write(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { struct qib_devdata *dd; ssize_t ret; loff_t pos; char *tmp; pos = *ppos; if (pos != 0) { ret = -EINVAL; goto bail; } if (count != sizeof(struct qib_flash)) { ret = -EINVAL; goto bail; } tmp = kmalloc(count, GFP_KERNEL); if (!tmp) { ret = -ENOMEM; goto bail; } if (copy_from_user(tmp, buf, count)) { ret = -EFAULT; goto bail_tmp; } dd = private2dd(file); if (qib_eeprom_write(dd, pos, tmp, count)) { ret = -ENXIO; qib_dev_err(dd, "failed to write to flash\n"); goto bail_tmp; } *ppos = pos + count; ret = count; bail_tmp: kfree(tmp); bail: return ret; } static const struct file_operations flash_ops = { .read = flash_read, .write = flash_write, .llseek = default_llseek, }; static int add_cntr_files(struct super_block *sb, struct qib_devdata *dd) { struct dentry *dir, *tmp; char unit[10]; int ret, i; /* create the per-unit directory */ snprintf(unit, sizeof unit, "%u", dd->unit); ret = create_file(unit, S_IFDIR|S_IRUGO|S_IXUGO, sb->s_root, &dir, &simple_dir_operations, dd); if (ret) { printk(KERN_ERR "create_file(%s) failed: %d\n", unit, ret); goto bail; } /* create the files in the new directory */ ret = create_file("counters", S_IFREG|S_IRUGO, dir, &tmp, &cntr_ops[0], dd); if (ret) { printk(KERN_ERR "create_file(%s/counters) failed: %d\n", unit, ret); goto bail; } ret = create_file("counter_names", S_IFREG|S_IRUGO, dir, &tmp, &cntr_ops[1], dd); if (ret) { printk(KERN_ERR "create_file(%s/counter_names) failed: %d\n", unit, ret); goto bail; } ret = create_file("portcounter_names", S_IFREG|S_IRUGO, dir, &tmp, &portcntr_ops[0], dd); if (ret) { printk(KERN_ERR "create_file(%s/%s) failed: %d\n", unit, "portcounter_names", ret); goto bail; } for (i = 1; i <= dd->num_pports; i++) { char fname[24]; sprintf(fname, "port%dcounters", i); /* create the files in the new directory */ ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp, &portcntr_ops[i], dd); if (ret) { printk(KERN_ERR "create_file(%s/%s) failed: %d\n", unit, fname, ret); goto bail; } if (!(dd->flags & QIB_HAS_QSFP)) continue; sprintf(fname, "qsfp%d", i); ret = create_file(fname, S_IFREG|S_IRUGO, dir, &tmp, &qsfp_ops[i - 1], dd); if (ret) { printk(KERN_ERR "create_file(%s/%s) failed: %d\n", unit, fname, ret); goto bail; } } ret = create_file("flash", S_IFREG|S_IWUSR|S_IRUGO, dir, &tmp, &flash_ops, dd); if (ret) printk(KERN_ERR "create_file(%s/flash) failed: %d\n", unit, ret); bail: return ret; } static int 
remove_file(struct dentry *parent, char *name) { struct dentry *tmp; int ret; tmp = lookup_one_len(name, parent, strlen(name)); if (IS_ERR(tmp)) { ret = PTR_ERR(tmp); goto bail; } spin_lock(&tmp->d_lock); if (!(d_unhashed(tmp) && tmp->d_inode)) { dget_dlock(tmp); __d_drop(tmp); spin_unlock(&tmp->d_lock); simple_unlink(parent->d_inode, tmp); } else { spin_unlock(&tmp->d_lock); } ret = 0; bail: /* * We don't expect clients to care about the return value, but * it's there if they need it. */ return ret; } static int remove_device_files(struct super_block *sb, struct qib_devdata *dd) { struct dentry *dir, *root; char unit[10]; int ret, i; root = dget(sb->s_root); mutex_lock(&root->d_inode->i_mutex); snprintf(unit, sizeof unit, "%u", dd->unit); dir = lookup_one_len(unit, root, strlen(unit)); if (IS_ERR(dir)) { ret = PTR_ERR(dir); printk(KERN_ERR "Lookup of %s failed\n", unit); goto bail; } remove_file(dir, "counters"); remove_file(dir, "counter_names"); remove_file(dir, "portcounter_names"); for (i = 0; i < dd->num_pports; i++) { char fname[24]; sprintf(fname, "port%dcounters", i + 1); remove_file(dir, fname); if (dd->flags & QIB_HAS_QSFP) { sprintf(fname, "qsfp%d", i + 1); remove_file(dir, fname); } } remove_file(dir, "flash"); d_delete(dir); ret = simple_rmdir(root->d_inode, dir); bail: mutex_unlock(&root->d_inode->i_mutex); dput(root); return ret; } /* * This fills everything in when the fs is mounted, to handle umount/mount * after device init. The direct add_cntr_files() call handles adding * them from the init code, when the fs is already mounted. */ static int qibfs_fill_super(struct super_block *sb, void *data, int silent) { struct qib_devdata *dd, *tmp; unsigned long flags; int ret; static struct tree_descr files[] = { [2] = {"driver_stats", &driver_ops[0], S_IRUGO}, [3] = {"driver_stats_names", &driver_ops[1], S_IRUGO}, {""}, }; ret = simple_fill_super(sb, QIBFS_MAGIC, files); if (ret) { printk(KERN_ERR "simple_fill_super failed: %d\n", ret); goto bail; } spin_lock_irqsave(&qib_devs_lock, flags); list_for_each_entry_safe(dd, tmp, &qib_dev_list, list) { spin_unlock_irqrestore(&qib_devs_lock, flags); ret = add_cntr_files(sb, dd); if (ret) goto bail; spin_lock_irqsave(&qib_devs_lock, flags); } spin_unlock_irqrestore(&qib_devs_lock, flags); bail: return ret; } static struct dentry *qibfs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { struct dentry *ret; ret = mount_single(fs_type, flags, data, qibfs_fill_super); if (!IS_ERR(ret)) qib_super = ret->d_sb; return ret; } static void qibfs_kill_super(struct super_block *s) { kill_litter_super(s); qib_super = NULL; } int qibfs_add(struct qib_devdata *dd) { int ret; /* * On first unit initialized, qib_super will not yet exist * because nobody has yet tried to mount the filesystem, so * we can't consider that to be an error; if an error occurs * during the mount, that will get a complaint, so this is OK. * add_cntr_files() for all units is done at mount from * qibfs_fill_super(), so one way or another, everything works. 
*/ if (qib_super == NULL) ret = 0; else ret = add_cntr_files(qib_super, dd); return ret; } int qibfs_remove(struct qib_devdata *dd) { int ret = 0; if (qib_super) ret = remove_device_files(qib_super, dd); return ret; } static struct file_system_type qibfs_fs_type = { .owner = THIS_MODULE, .name = "ipathfs", .mount = qibfs_mount, .kill_sb = qibfs_kill_super, }; MODULE_ALIAS_FS("ipathfs"); int __init qib_init_qibfs(void) { return register_filesystem(&qibfs_fs_type); } int __exit qib_exit_qibfs(void) { return unregister_filesystem(&qibfs_fs_type); }
gpl-2.0
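Almost every qibfs read handler above delegates to simple_read_from_buffer(), which serves a slice of an in-kernel buffer, clamping the transfer to the bytes remaining past *ppos and advancing the offset. A userspace sketch of that contract (memcpy() standing in for copy_to_user()):

#include <stdio.h>
#include <string.h>
#include <sys/types.h>

static ssize_t read_from_buffer(char *to, size_t count, off_t *ppos,
				const char *from, size_t available)
{
	off_t pos = *ppos;

	if (pos < 0)
		return -1;		/* the kernel returns -EINVAL here */
	if ((size_t)pos >= available)
		return 0;		/* EOF */
	if (count > available - pos)
		count = available - pos;	/* clamp to what is left */
	memcpy(to, from + pos, count);		/* kernel: copy_to_user() */
	*ppos = pos + count;
	return count;
}

int main(void)
{
	const char names[] = "KernIntr\nErrorIntr\n";
	char buf[8];
	off_t pos = 0;
	ssize_t n;

	/* repeated short reads walk through the stats-name buffer */
	while ((n = read_from_buffer(buf, sizeof(buf), &pos, names,
				     sizeof(names) - 1)) > 0)
		fwrite(buf, 1, n, stdout);
	return 0;
}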
MyAOSP/kernel_moto_wingray
arch/m68k/kernel/asm-offsets_mm.c
308
3815
/* * This program is used to generate definitions needed by * assembly language modules. * * We use the technique used in the OSF Mach kernel code: * generate asm statements containing #defines, * compile this file to assembler, and then extract the * #defines from the assembly-language output. */ #define ASM_OFFSETS_C #include <linux/stddef.h> #include <linux/sched.h> #include <linux/kernel_stat.h> #include <linux/kbuild.h> #include <asm/bootinfo.h> #include <asm/irq.h> #include <asm/amigahw.h> #include <linux/font.h> int main(void) { /* offsets into the task struct */ DEFINE(TASK_THREAD, offsetof(struct task_struct, thread)); DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info)); DEFINE(TASK_MM, offsetof(struct task_struct, mm)); #ifdef CONFIG_MMU DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info)); #endif /* offsets into the thread struct */ DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp)); DEFINE(THREAD_USP, offsetof(struct thread_struct, usp)); DEFINE(THREAD_SR, offsetof(struct thread_struct, sr)); DEFINE(THREAD_FS, offsetof(struct thread_struct, fs)); DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp)); DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0)); DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp)); DEFINE(THREAD_FPCNTL, offsetof(struct thread_struct, fpcntl)); DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate)); /* offsets into the thread_info struct */ DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count)); DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags)); /* offsets into the pt_regs */ DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0)); DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0)); DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1)); DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2)); DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3)); DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4)); DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5)); DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0)); DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1)); DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2)); DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc)); DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr)); /* bitfields are a bit difficult */ DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4); /* offsets into the irq_cpustat_t struct */ DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending)); /* offsets into the bi_record struct */ DEFINE(BIR_TAG, offsetof(struct bi_record, tag)); DEFINE(BIR_SIZE, offsetof(struct bi_record, size)); DEFINE(BIR_DATA, offsetof(struct bi_record, data)); /* offsets into font_desc (drivers/video/console/font.h) */ DEFINE(FONT_DESC_IDX, offsetof(struct font_desc, idx)); DEFINE(FONT_DESC_NAME, offsetof(struct font_desc, name)); DEFINE(FONT_DESC_WIDTH, offsetof(struct font_desc, width)); DEFINE(FONT_DESC_HEIGHT, offsetof(struct font_desc, height)); DEFINE(FONT_DESC_DATA, offsetof(struct font_desc, data)); DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref)); /* signal defines */ DEFINE(LSIGSEGV, SIGSEGV); DEFINE(LSEGV_MAPERR, SEGV_MAPERR); DEFINE(LSIGTRAP, SIGTRAP); DEFINE(LTRAP_TRACE, TRAP_TRACE); /* offsets into the custom struct */ DEFINE(CUSTOMBASE, &amiga_custom); DEFINE(C_INTENAR, offsetof(struct CUSTOM, intenar)); DEFINE(C_INTREQR, offsetof(struct CUSTOM, intreqr)); DEFINE(C_INTENA, offsetof(struct CUSTOM, intena)); DEFINE(C_INTREQ, offsetof(struct CUSTOM, intreq)); DEFINE(C_SERDATR, offsetof(struct CUSTOM, serdatr)); DEFINE(C_SERDAT, offsetof(struct CUSTOM, serdat)); 
DEFINE(C_SERPER, offsetof(struct CUSTOM, serper)); DEFINE(CIAABASE, &ciaa); DEFINE(CIABBASE, &ciab); DEFINE(C_PRA, offsetof(struct CIA, pra)); DEFINE(ZTWOBASE, zTwoBase); return 0; }
gpl-2.0
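The DEFINE() macro used throughout asm-offsets_mm.c comes from linux/kbuild.h: it plants a "->SYMBOL value" marker line in the compiler's assembly output, and the build scripts scrape those markers into asm-offsets.h as #defines, giving assembly code access to C struct offsets. A self-contained sketch of the same trick with a hypothetical struct; build with gcc -S and inspect the .s file (the marker lines are not valid assembler input, so a file like this is only ever compiled to assembly, never linked):

#include <stddef.h>

/* same mechanism as linux/kbuild.h */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

struct demo_thread {
	unsigned long ksp;
	unsigned long usp;
	unsigned short sr;
};

int main(void)
{
	DEFINE(DEMO_KSP, offsetof(struct demo_thread, ksp));
	DEFINE(DEMO_USP, offsetof(struct demo_thread, usp));
	DEFINE(DEMO_SR, offsetof(struct demo_thread, sr));
	return 0;
}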
googyanas/Googy-Max4-CM-Kernel
fs/lockd/mon.c
564
14132
/* * linux/fs/lockd/mon.c * * The kernel statd client. * * Copyright (C) 1996, Olaf Kirch <okir@monad.swb.de> */ #include <linux/types.h> #include <linux/utsname.h> #include <linux/kernel.h> #include <linux/ktime.h> #include <linux/slab.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/xprtsock.h> #include <linux/sunrpc/svc.h> #include <linux/lockd/lockd.h> #include <asm/unaligned.h> #define NLMDBG_FACILITY NLMDBG_MONITOR #define NSM_PROGRAM 100024 #define NSM_VERSION 1 enum { NSMPROC_NULL, NSMPROC_STAT, NSMPROC_MON, NSMPROC_UNMON, NSMPROC_UNMON_ALL, NSMPROC_SIMU_CRASH, NSMPROC_NOTIFY, }; struct nsm_args { struct nsm_private *priv; u32 prog; /* RPC callback info */ u32 vers; u32 proc; char *mon_name; char *nodename; }; struct nsm_res { u32 status; u32 state; }; static const struct rpc_program nsm_program; static LIST_HEAD(nsm_handles); static DEFINE_SPINLOCK(nsm_lock); /* * Local NSM state */ u32 __read_mostly nsm_local_state; bool __read_mostly nsm_use_hostnames; static inline struct sockaddr *nsm_addr(const struct nsm_handle *nsm) { return (struct sockaddr *)&nsm->sm_addr; } static struct rpc_clnt *nsm_create(struct net *net) { struct sockaddr_in sin = { .sin_family = AF_INET, .sin_addr.s_addr = htonl(INADDR_LOOPBACK), }; struct rpc_create_args args = { .net = net, .protocol = XPRT_TRANSPORT_UDP, .address = (struct sockaddr *)&sin, .addrsize = sizeof(sin), .servername = "rpc.statd", .program = &nsm_program, .version = NSM_VERSION, .authflavor = RPC_AUTH_NULL, .flags = RPC_CLNT_CREATE_NOPING, }; return rpc_create(&args); } static int nsm_mon_unmon(struct nsm_handle *nsm, u32 proc, struct nsm_res *res, struct net *net) { struct rpc_clnt *clnt; int status; struct nsm_args args = { .priv = &nsm->sm_priv, .prog = NLM_PROGRAM, .vers = 3, .proc = NLMPROC_NSM_NOTIFY, .mon_name = nsm->sm_mon_name, .nodename = utsname()->nodename, }; struct rpc_message msg = { .rpc_argp = &args, .rpc_resp = res, }; clnt = nsm_create(net); if (IS_ERR(clnt)) { status = PTR_ERR(clnt); dprintk("lockd: failed to create NSM upcall transport, " "status=%d\n", status); goto out; } memset(res, 0, sizeof(*res)); msg.rpc_proc = &clnt->cl_procinfo[proc]; status = rpc_call_sync(clnt, &msg, 0); if (status == -ECONNREFUSED) { dprintk("lockd: NSM upcall RPC failed, status=%d, forcing rebind\n", status); rpc_force_rebind(clnt); status = rpc_call_sync(clnt, &msg, RPC_TASK_SOFTCONN); } if (status < 0) dprintk("lockd: NSM upcall RPC failed, status=%d\n", status); else status = 0; rpc_shutdown_client(clnt); out: return status; } /** * nsm_monitor - Notify a peer in case we reboot * @host: pointer to nlm_host of peer to notify * * If this peer is not already monitored, this function sends an * upcall to the local rpc.statd to record the name/address of * the peer to notify in case we reboot. * * Returns zero if the peer is monitored by the local rpc.statd; * otherwise a negative errno value is returned. */ int nsm_monitor(const struct nlm_host *host) { struct nsm_handle *nsm = host->h_nsmhandle; struct nsm_res res; int status; dprintk("lockd: nsm_monitor(%s)\n", nsm->sm_name); if (nsm->sm_monitored) return 0; /* * Choose whether to record the caller_name or IP address of * this peer in the local rpc.statd's database. */ nsm->sm_mon_name = nsm_use_hostnames ? 
nsm->sm_name : nsm->sm_addrbuf; status = nsm_mon_unmon(nsm, NSMPROC_MON, &res, host->net); if (unlikely(res.status != 0)) status = -EIO; if (unlikely(status < 0)) { printk(KERN_NOTICE "lockd: cannot monitor %s\n", nsm->sm_name); return status; } nsm->sm_monitored = 1; if (unlikely(nsm_local_state != res.state)) { nsm_local_state = res.state; dprintk("lockd: NSM state changed to %d\n", nsm_local_state); } return 0; } /** * nsm_unmonitor - Unregister peer notification * @host: pointer to nlm_host of peer to stop monitoring * * If this peer is monitored, this function sends an upcall to * tell the local rpc.statd not to send this peer a notification * when we reboot. */ void nsm_unmonitor(const struct nlm_host *host) { struct nsm_handle *nsm = host->h_nsmhandle; struct nsm_res res; int status; if (atomic_read(&nsm->sm_count) == 1 && nsm->sm_monitored && !nsm->sm_sticky) { dprintk("lockd: nsm_unmonitor(%s)\n", nsm->sm_name); status = nsm_mon_unmon(nsm, NSMPROC_UNMON, &res, host->net); if (res.status != 0) status = -EIO; if (status < 0) printk(KERN_NOTICE "lockd: cannot unmonitor %s\n", nsm->sm_name); else nsm->sm_monitored = 0; } } static struct nsm_handle *nsm_lookup_hostname(const char *hostname, const size_t len) { struct nsm_handle *nsm; list_for_each_entry(nsm, &nsm_handles, sm_link) if (strlen(nsm->sm_name) == len && memcmp(nsm->sm_name, hostname, len) == 0) return nsm; return NULL; } static struct nsm_handle *nsm_lookup_addr(const struct sockaddr *sap) { struct nsm_handle *nsm; list_for_each_entry(nsm, &nsm_handles, sm_link) if (rpc_cmp_addr(nsm_addr(nsm), sap)) return nsm; return NULL; } static struct nsm_handle *nsm_lookup_priv(const struct nsm_private *priv) { struct nsm_handle *nsm; list_for_each_entry(nsm, &nsm_handles, sm_link) if (memcmp(nsm->sm_priv.data, priv->data, sizeof(priv->data)) == 0) return nsm; return NULL; } /* * Construct a unique cookie to match this nsm_handle to this monitored * host. It is passed to the local rpc.statd via NSMPROC_MON, and * returned via NLMPROC_SM_NOTIFY, in the "priv" field of these * requests. * * The NSM protocol requires that these cookies be unique while the * system is running. We prefer a stronger requirement of making them * unique across reboots. If user space bugs cause a stale cookie to * be sent to the kernel, it could cause the wrong host to lose its * lock state if cookies were not unique across reboots. * * The cookies are exposed only to local user space via loopback. They * do not appear on the physical network. If we want greater security * for some reason, nsm_init_private() could perform a one-way hash to * obscure the contents of the cookie. 
*/ static void nsm_init_private(struct nsm_handle *nsm) { u64 *p = (u64 *)&nsm->sm_priv.data; struct timespec ts; s64 ns; ktime_get_ts(&ts); ns = timespec_to_ns(&ts); put_unaligned(ns, p); put_unaligned((unsigned long)nsm, p + 1); } static struct nsm_handle *nsm_create_handle(const struct sockaddr *sap, const size_t salen, const char *hostname, const size_t hostname_len) { struct nsm_handle *new; new = kzalloc(sizeof(*new) + hostname_len + 1, GFP_KERNEL); if (unlikely(new == NULL)) return NULL; atomic_set(&new->sm_count, 1); new->sm_name = (char *)(new + 1); memcpy(nsm_addr(new), sap, salen); new->sm_addrlen = salen; nsm_init_private(new); if (rpc_ntop(nsm_addr(new), new->sm_addrbuf, sizeof(new->sm_addrbuf)) == 0) (void)snprintf(new->sm_addrbuf, sizeof(new->sm_addrbuf), "unsupported address family"); memcpy(new->sm_name, hostname, hostname_len); new->sm_name[hostname_len] = '\0'; return new; } /** * nsm_get_handle - Find or create a cached nsm_handle * @sap: pointer to socket address of handle to find * @salen: length of socket address * @hostname: pointer to C string containing hostname to find * @hostname_len: length of C string * * Behavior is modulated by the global nsm_use_hostnames variable. * * Returns a cached nsm_handle after bumping its ref count, or * returns a fresh nsm_handle if a handle that matches @sap and/or * @hostname cannot be found in the handle cache. Returns NULL if * an error occurs. */ struct nsm_handle *nsm_get_handle(const struct sockaddr *sap, const size_t salen, const char *hostname, const size_t hostname_len) { struct nsm_handle *cached, *new = NULL; if (hostname && memchr(hostname, '/', hostname_len) != NULL) { if (printk_ratelimit()) { printk(KERN_WARNING "Invalid hostname \"%.*s\" " "in NFS lock request\n", (int)hostname_len, hostname); } return NULL; } retry: spin_lock(&nsm_lock); if (nsm_use_hostnames && hostname != NULL) cached = nsm_lookup_hostname(hostname, hostname_len); else cached = nsm_lookup_addr(sap); if (cached != NULL) { atomic_inc(&cached->sm_count); spin_unlock(&nsm_lock); kfree(new); dprintk("lockd: found nsm_handle for %s (%s), " "cnt %d\n", cached->sm_name, cached->sm_addrbuf, atomic_read(&cached->sm_count)); return cached; } if (new != NULL) { list_add(&new->sm_link, &nsm_handles); spin_unlock(&nsm_lock); dprintk("lockd: created nsm_handle for %s (%s)\n", new->sm_name, new->sm_addrbuf); return new; } spin_unlock(&nsm_lock); new = nsm_create_handle(sap, salen, hostname, hostname_len); if (unlikely(new == NULL)) return NULL; goto retry; } /** * nsm_reboot_lookup - match NLMPROC_SM_NOTIFY arguments to an nsm_handle * @info: pointer to NLMPROC_SM_NOTIFY arguments * * Returns a matching nsm_handle if found in the nsm cache. The returned * nsm_handle's reference count is bumped. Otherwise returns NULL if some * error occurred. 
*/ struct nsm_handle *nsm_reboot_lookup(const struct nlm_reboot *info) { struct nsm_handle *cached; spin_lock(&nsm_lock); cached = nsm_lookup_priv(&info->priv); if (unlikely(cached == NULL)) { spin_unlock(&nsm_lock); dprintk("lockd: never saw rebooted peer '%.*s' before\n", info->len, info->mon); return cached; } atomic_inc(&cached->sm_count); spin_unlock(&nsm_lock); dprintk("lockd: host %s (%s) rebooted, cnt %d\n", cached->sm_name, cached->sm_addrbuf, atomic_read(&cached->sm_count)); return cached; } /** * nsm_release - Release an NSM handle * @nsm: pointer to handle to be released * */ void nsm_release(struct nsm_handle *nsm) { if (atomic_dec_and_lock(&nsm->sm_count, &nsm_lock)) { list_del(&nsm->sm_link); spin_unlock(&nsm_lock); dprintk("lockd: destroyed nsm_handle for %s (%s)\n", nsm->sm_name, nsm->sm_addrbuf); kfree(nsm); } } /* * XDR functions for NSM. * * See http://www.opengroup.org/ for details on the Network * Status Monitor wire protocol. */ static void encode_nsm_string(struct xdr_stream *xdr, const char *string) { const u32 len = strlen(string); __be32 *p; BUG_ON(len > SM_MAXSTRLEN); p = xdr_reserve_space(xdr, 4 + len); xdr_encode_opaque(p, string, len); } /* * "mon_name" specifies the host to be monitored. */ static void encode_mon_name(struct xdr_stream *xdr, const struct nsm_args *argp) { encode_nsm_string(xdr, argp->mon_name); } /* * The "my_id" argument specifies the hostname and RPC procedure * to be called when the status manager receives notification * (via the NLMPROC_SM_NOTIFY call) that the state of host "mon_name" * has changed. */ static void encode_my_id(struct xdr_stream *xdr, const struct nsm_args *argp) { __be32 *p; encode_nsm_string(xdr, argp->nodename); p = xdr_reserve_space(xdr, 4 + 4 + 4); *p++ = cpu_to_be32(argp->prog); *p++ = cpu_to_be32(argp->vers); *p = cpu_to_be32(argp->proc); } /* * The "mon_id" argument specifies the non-private arguments * of an NSMPROC_MON or NSMPROC_UNMON call. */ static void encode_mon_id(struct xdr_stream *xdr, const struct nsm_args *argp) { encode_mon_name(xdr, argp); encode_my_id(xdr, argp); } /* * The "priv" argument may contain private information required * by the NSMPROC_MON call. This information will be supplied in the * NLMPROC_SM_NOTIFY call. 
*/ static void encode_priv(struct xdr_stream *xdr, const struct nsm_args *argp) { __be32 *p; p = xdr_reserve_space(xdr, SM_PRIV_SIZE); xdr_encode_opaque_fixed(p, argp->priv->data, SM_PRIV_SIZE); } static void nsm_xdr_enc_mon(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nsm_args *argp) { encode_mon_id(xdr, argp); encode_priv(xdr, argp); } static void nsm_xdr_enc_unmon(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nsm_args *argp) { encode_mon_id(xdr, argp); } static int nsm_xdr_dec_stat_res(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nsm_res *resp) { __be32 *p; p = xdr_inline_decode(xdr, 4 + 4); if (unlikely(p == NULL)) return -EIO; resp->status = be32_to_cpup(p++); resp->state = be32_to_cpup(p); dprintk("lockd: %s status %d state %d\n", __func__, resp->status, resp->state); return 0; } static int nsm_xdr_dec_stat(struct rpc_rqst *rqstp, struct xdr_stream *xdr, struct nsm_res *resp) { __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) return -EIO; resp->state = be32_to_cpup(p); dprintk("lockd: %s state %d\n", __func__, resp->state); return 0; } #define SM_my_name_sz (1+XDR_QUADLEN(SM_MAXSTRLEN)) #define SM_my_id_sz (SM_my_name_sz+3) #define SM_mon_name_sz (1+XDR_QUADLEN(SM_MAXSTRLEN)) #define SM_mon_id_sz (SM_mon_name_sz+SM_my_id_sz) #define SM_priv_sz (XDR_QUADLEN(SM_PRIV_SIZE)) #define SM_mon_sz (SM_mon_id_sz+SM_priv_sz) #define SM_monres_sz 2 #define SM_unmonres_sz 1 static struct rpc_procinfo nsm_procedures[] = { [NSMPROC_MON] = { .p_proc = NSMPROC_MON, .p_encode = (kxdreproc_t)nsm_xdr_enc_mon, .p_decode = (kxdrdproc_t)nsm_xdr_dec_stat_res, .p_arglen = SM_mon_sz, .p_replen = SM_monres_sz, .p_statidx = NSMPROC_MON, .p_name = "MONITOR", }, [NSMPROC_UNMON] = { .p_proc = NSMPROC_UNMON, .p_encode = (kxdreproc_t)nsm_xdr_enc_unmon, .p_decode = (kxdrdproc_t)nsm_xdr_dec_stat, .p_arglen = SM_mon_id_sz, .p_replen = SM_unmonres_sz, .p_statidx = NSMPROC_UNMON, .p_name = "UNMONITOR", }, }; static const struct rpc_version nsm_version1 = { .number = 1, .nrprocs = ARRAY_SIZE(nsm_procedures), .procs = nsm_procedures }; static const struct rpc_version *nsm_version[] = { [1] = &nsm_version1, }; static struct rpc_stat nsm_stats; static const struct rpc_program nsm_program = { .name = "statd", .number = NSM_PROGRAM, .nrvers = ARRAY_SIZE(nsm_version), .version = nsm_version, .stats = &nsm_stats };
gpl-2.0
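nsm_get_handle() above is a classic lookup-then-allocate pattern: the handle list is protected by a spinlock, but kzalloc(GFP_KERNEL) may sleep, so allocation happens with the lock dropped and the lookup is retried afterwards in case another CPU inserted the entry first. A userspace sketch of the same pattern, with a mutex standing in for the spinlock (hypothetical names):

#include <pthread.h>
#include <stdlib.h>
#include <string.h>

struct entry {
	struct entry *next;
	char name[32];
};

static struct entry *head;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static struct entry *lookup(const char *name)
{
	for (struct entry *e = head; e; e = e->next)
		if (strcmp(e->name, name) == 0)
			return e;
	return NULL;
}

struct entry *get_entry(const char *name)
{
	struct entry *cached, *new = NULL;

retry:
	pthread_mutex_lock(&lock);
	cached = lookup(name);
	if (cached) {
		pthread_mutex_unlock(&lock);
		free(new);	/* lost the race: drop our spare copy */
		return cached;
	}
	if (new) {		/* second pass: publish our allocation */
		new->next = head;
		head = new;
		pthread_mutex_unlock(&lock);
		return new;
	}
	pthread_mutex_unlock(&lock);
	new = calloc(1, sizeof(*new));	/* may block; done unlocked */
	if (!new)
		return NULL;
	strncpy(new->name, name, sizeof(new->name) - 1);
	goto retry;
}

int main(void)
{
	struct entry *a = get_entry("peer1");
	struct entry *b = get_entry("peer1");

	return !(a && a == b);	/* the second call must hit the cache */
}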
lipro/linux-tqs
arch/powerpc/platforms/52xx/efika.c
820
6050
/* * Efika 5K2 platform code * Some code largely inspired by the lite5200b platform. * * Copyright (C) 2006 bplan GmbH * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. */ #include <linux/init.h> #include <generated/utsrelease.h> #include <linux/pci.h> #include <linux/of.h> #include <asm/prom.h> #include <asm/time.h> #include <asm/machdep.h> #include <asm/rtas.h> #include <asm/mpc52xx.h> #define EFIKA_PLATFORM_NAME "Efika" /* ------------------------------------------------------------------------ */ /* PCI accesses through RTAS */ /* ------------------------------------------------------------------------ */ #ifdef CONFIG_PCI /* * Access functions for PCI config space using RTAS calls. */ static int rtas_read_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 * val) { struct pci_controller *hose = pci_bus_to_host(bus); unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8) | (((bus->number - hose->first_busno) & 0xff) << 16) | (hose->global_number << 24); int ret = -1; int rval; rval = rtas_call(rtas_token("read-pci-config"), 2, 2, &ret, addr, len); *val = ret; return rval ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL; } static int rtas_write_config(struct pci_bus *bus, unsigned int devfn, int offset, int len, u32 val) { struct pci_controller *hose = pci_bus_to_host(bus); unsigned long addr = (offset & 0xff) | ((devfn & 0xff) << 8) | (((bus->number - hose->first_busno) & 0xff) << 16) | (hose->global_number << 24); int rval; rval = rtas_call(rtas_token("write-pci-config"), 3, 1, NULL, addr, len, val); return rval ? PCIBIOS_DEVICE_NOT_FOUND : PCIBIOS_SUCCESSFUL; } static struct pci_ops rtas_pci_ops = { .read = rtas_read_config, .write = rtas_write_config, }; static void __init efika_pcisetup(void) { const int *bus_range; int len; struct pci_controller *hose; struct device_node *root; struct device_node *pcictrl; root = of_find_node_by_path("/"); if (root == NULL) { printk(KERN_WARNING EFIKA_PLATFORM_NAME ": Unable to find the root node\n"); return; } for (pcictrl = NULL;;) { pcictrl = of_get_next_child(root, pcictrl); if ((pcictrl == NULL) || (strcmp(pcictrl->name, "pci") == 0)) break; } of_node_put(root); if (pcictrl == NULL) { printk(KERN_WARNING EFIKA_PLATFORM_NAME ": Unable to find the PCI bridge node\n"); return; } bus_range = of_get_property(pcictrl, "bus-range", &len); if (bus_range == NULL || len < 2 * sizeof(int)) { printk(KERN_WARNING EFIKA_PLATFORM_NAME ": Can't get bus-range for %s\n", pcictrl->full_name); return; } if (bus_range[1] == bus_range[0]) printk(KERN_INFO EFIKA_PLATFORM_NAME ": PCI bus %d", bus_range[0]); else printk(KERN_INFO EFIKA_PLATFORM_NAME ": PCI buses %d..%d", bus_range[0], bus_range[1]); printk(" controlled by %s\n", pcictrl->full_name); printk("\n"); hose = pcibios_alloc_controller(of_node_get(pcictrl)); if (!hose) { printk(KERN_WARNING EFIKA_PLATFORM_NAME ": Can't allocate PCI controller structure for %s\n", pcictrl->full_name); return; } hose->first_busno = bus_range[0]; hose->last_busno = bus_range[1]; hose->ops = &rtas_pci_ops; pci_process_bridge_OF_ranges(hose, pcictrl, 0); } #else static void __init efika_pcisetup(void) {} #endif /* ------------------------------------------------------------------------ */ /* Platform setup */ /* ------------------------------------------------------------------------ */ static void efika_show_cpuinfo(struct seq_file *m) { struct device_node *root; const 
char *revision; const char *codegendescription; const char *codegenvendor; root = of_find_node_by_path("/"); if (!root) return; revision = of_get_property(root, "revision", NULL); codegendescription = of_get_property(root, "CODEGEN,description", NULL); codegenvendor = of_get_property(root, "CODEGEN,vendor", NULL); if (codegendescription) seq_printf(m, "machine\t\t: %s\n", codegendescription); else seq_printf(m, "machine\t\t: Efika\n"); if (revision) seq_printf(m, "revision\t: %s\n", revision); if (codegenvendor) seq_printf(m, "vendor\t\t: %s\n", codegenvendor); of_node_put(root); } #ifdef CONFIG_PM static void efika_suspend_prepare(void __iomem *mbar) { u8 pin = 4; /* GPIO_WKUP_4 (GPIO_PSC6_0 - IRDA_RX) */ u8 level = 1; /* wakeup on high level */ /* IOW. to wake it up, short pins 1 and 3 on IRDA connector */ mpc52xx_set_wakeup_gpio(pin, level); } #endif static void __init efika_setup_arch(void) { rtas_initialize(); /* Map important registers from the internal memory map */ mpc52xx_map_common_devices(); efika_pcisetup(); #ifdef CONFIG_PM mpc52xx_suspend.board_suspend_prepare = efika_suspend_prepare; mpc52xx_pm_init(); #endif if (ppc_md.progress) ppc_md.progress("Linux/PPC " UTS_RELEASE " running on Efika ;-)\n", 0x0); } static int __init efika_probe(void) { char *model = of_get_flat_dt_prop(of_get_flat_dt_root(), "model", NULL); if (model == NULL) return 0; if (strcmp(model, "EFIKA5K2")) return 0; ISA_DMA_THRESHOLD = ~0L; DMA_MODE_READ = 0x44; DMA_MODE_WRITE = 0x48; return 1; } define_machine(efika) { .name = EFIKA_PLATFORM_NAME, .probe = efika_probe, .setup_arch = efika_setup_arch, .init = mpc52xx_declare_of_platform_devices, .show_cpuinfo = efika_show_cpuinfo, .init_IRQ = mpc52xx_init_irq, .get_irq = mpc52xx_get_irq, .restart = rtas_restart, .power_off = rtas_power_off, .halt = rtas_halt, .set_rtc_time = rtas_set_rtc_time, .get_rtc_time = rtas_get_rtc_time, .progress = rtas_progress, .get_boot_time = rtas_get_boot_time, .calibrate_decr = generic_calibrate_decr, #ifdef CONFIG_PCI .phys_mem_access_prot = pci_phys_mem_access_prot, #endif };
gpl-2.0
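Both RTAS accessors above pack the whole config-space location into one address word before the firmware call: register offset in bits 0-7, devfn in bits 8-15, the hose-relative bus number in bits 16-23 and the controller (hose) number in bits 24-31. A small sketch of that encoding:

#include <stdio.h>

static unsigned long rtas_cfg_addr(unsigned int hose_no, unsigned int bus,
				   unsigned int first_busno,
				   unsigned int devfn, unsigned int offset)
{
	return (offset & 0xff) |
	       ((devfn & 0xff) << 8) |
	       (((bus - first_busno) & 0xff) << 16) |
	       ((unsigned long)hose_no << 24);
}

int main(void)
{
	/* bus 0, device 2 function 0 (devfn 0x10), vendor ID register 0 */
	printf("addr = 0x%08lx\n", rtas_cfg_addr(0, 0, 0, 0x10, 0x00));
	return 0;
}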
Motorhead1991/android_kernel_samsung_amazing
arch/x86/kernel/process_32.c
1076
10567
/* * Copyright (C) 1995 Linus Torvalds * * Pentium III FXSR, SSE support * Gareth Hughes <gareth@valinux.com>, May 2000 */ /* * This file handles the architecture-dependent parts of process handling. */ #include <linux/stackprotector.h> #include <linux/cpu.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/elfcore.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/user.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/reboot.h> #include <linux/init.h> #include <linux/mc146818rtc.h> #include <linux/module.h> #include <linux/kallsyms.h> #include <linux/ptrace.h> #include <linux/personality.h> #include <linux/tick.h> #include <linux/percpu.h> #include <linux/prctl.h> #include <linux/ftrace.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/kdebug.h> #include <asm/pgtable.h> #include <asm/system.h> #include <asm/ldt.h> #include <asm/processor.h> #include <asm/i387.h> #include <asm/desc.h> #ifdef CONFIG_MATH_EMULATION #include <asm/math_emu.h> #endif #include <linux/err.h> #include <asm/tlbflush.h> #include <asm/cpu.h> #include <asm/idle.h> #include <asm/syscalls.h> #include <asm/debugreg.h> asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); /* * Return saved PC of a blocked thread. */ unsigned long thread_saved_pc(struct task_struct *tsk) { return ((unsigned long *)tsk->thread.sp)[3]; } #ifndef CONFIG_SMP static inline void play_dead(void) { BUG(); } #endif /* * The idle thread. There's no useful work to be * done, so just try to conserve power and have a * low exit latency (i.e. sit in a loop waiting for * somebody to say that they'd like to reschedule) */ void cpu_idle(void) { int cpu = smp_processor_id(); /* * If we're the non-boot CPU, nothing set the stack canary up * for us. CPU0 already has it initialized but no harm in * doing it again. This is a good place for updating it, as * we won't ever return from this function (so the invalid * canaries already on the stack won't ever trigger). 
*/ boot_init_stack_canary(); current_thread_info()->status |= TS_POLLING; /* endless idle loop with no priority at all */ while (1) { tick_nohz_stop_sched_tick(1); while (!need_resched()) { check_pgt_cache(); rmb(); if (cpu_is_offline(cpu)) play_dead(); local_irq_disable(); /* Don't trace irqs off for idle */ stop_critical_timings(); pm_idle(); start_critical_timings(); } tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); schedule(); preempt_disable(); } } void __show_regs(struct pt_regs *regs, int all) { unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; unsigned long d0, d1, d2, d3, d6, d7; unsigned long sp; unsigned short ss, gs; if (user_mode_vm(regs)) { sp = regs->sp; ss = regs->ss & 0xffff; gs = get_user_gs(regs); } else { sp = kernel_stack_pointer(regs); savesegment(ss, ss); savesegment(gs, gs); } show_regs_common(); printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", (u16)regs->cs, regs->ip, regs->flags, smp_processor_id()); print_symbol("EIP is at %s\n", regs->ip); printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", regs->ax, regs->bx, regs->cx, regs->dx); printk(KERN_DEFAULT "ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n", regs->si, regs->di, regs->bp, sp); printk(KERN_DEFAULT " DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n", (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss); if (!all) return; cr0 = read_cr0(); cr2 = read_cr2(); cr3 = read_cr3(); cr4 = read_cr4_safe(); printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4); get_debugreg(d0, 0); get_debugreg(d1, 1); get_debugreg(d2, 2); get_debugreg(d3, 3); printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n", d0, d1, d2, d3); get_debugreg(d6, 6); get_debugreg(d7, 7); printk(KERN_DEFAULT "DR6: %08lx DR7: %08lx\n", d6, d7); } void release_thread(struct task_struct *dead_task) { BUG_ON(dead_task->mm); release_vm86_irqs(dead_task); } /* * This gets called before we allocate a new thread and copy * the current task into it. */ void prepare_to_copy(struct task_struct *tsk) { unlazy_fpu(tsk); } int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long unused, struct task_struct *p, struct pt_regs *regs) { struct pt_regs *childregs; struct task_struct *tsk; int err; childregs = task_pt_regs(p); *childregs = *regs; childregs->ax = 0; childregs->sp = sp; p->thread.sp = (unsigned long) childregs; p->thread.sp0 = (unsigned long) (childregs+1); p->thread.ip = (unsigned long) ret_from_fork; task_user_gs(p) = get_user_gs(regs); p->thread.io_bitmap_ptr = NULL; tsk = current; err = -ENOMEM; memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) { p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr, IO_BITMAP_BYTES, GFP_KERNEL); if (!p->thread.io_bitmap_ptr) { p->thread.io_bitmap_max = 0; return -ENOMEM; } set_tsk_thread_flag(p, TIF_IO_BITMAP); } err = 0; /* * Set a new TLS for the child thread? 
*/ if (clone_flags & CLONE_SETTLS) err = do_set_thread_area(p, -1, (struct user_desc __user *)childregs->si, 0); if (err && p->thread.io_bitmap_ptr) { kfree(p->thread.io_bitmap_ptr); p->thread.io_bitmap_max = 0; } return err; } void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) { set_user_gs(regs, 0); regs->fs = 0; regs->ds = __USER_DS; regs->es = __USER_DS; regs->ss = __USER_DS; regs->cs = __USER_CS; regs->ip = new_ip; regs->sp = new_sp; /* * Free the old FP and other extended state */ free_thread_xstate(current); } EXPORT_SYMBOL_GPL(start_thread); /* * switch_to(x,y) should switch tasks from x to y. * * We fsave/fwait so that an exception goes off at the right time * (as a call from the fsave or fwait in effect) rather than to * the wrong process. Lazy FP saving no longer makes any sense * with modern CPUs, and this simplifies a lot of things (SMP * and UP become the same). * * NOTE! We used to use the x86 hardware context switching. The * reason for not using it any more becomes apparent when you * try to recover gracefully from saved state that is no longer * valid (stale segment register values in particular). With the * hardware task-switch, there is no way to fix up bad state in * a reasonable manner. * * The fact that Intel documents the hardware task-switching to * be slow is a fairly red herring - this code is not noticeably * faster. However, there _is_ some room for improvement here, * so the performance issues may eventually be a valid point. * More important, however, is the fact that this allows us much * more flexibility. * * The return value (in %ax) will be the "prev" task after * the task-switch, and shows up in ret_from_fork in entry.S, * for example. */ __notrace_funcgraph struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev = &prev_p->thread, *next = &next_p->thread; int cpu = smp_processor_id(); struct tss_struct *tss = &per_cpu(init_tss, cpu); bool preload_fpu; /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ /* * If the task has used the FPU in the last 5 timeslices, just do a full * restore of the math state immediately to avoid the trap; the * chances of needing FPU soon are obviously high now */ preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5; __unlazy_fpu(prev_p); /* we're going to use this soon, after a few expensive things */ if (preload_fpu) prefetch(next->fpu.state); /* * Reload esp0. */ load_sp0(tss, next); /* * Save away %gs. No need to save %fs, as it was saved on the * stack on entry. No need to save %es and %ds, as those are * always kernel segments while inside the kernel. Doing this * before setting the new TLS descriptors avoids the situation * where we temporarily have non-reloadable segments in %fs * and %gs. This could be an issue if the NMI handler ever * used %fs or %gs (it does not today), or if the kernel is * running inside of a hypervisor layer. */ lazy_save_gs(prev->gs); /* * Load the per-thread Thread-Local Storage descriptor. */ load_TLS(next, cpu); /* * Restore IOPL if needed. In normal use, the flags restore * in the switch assembly will handle this. But if the kernel * is running virtualized at a non-zero CPL, the popf will * not restore flags, so it must be done in a separate step. 
*/ if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl)) set_iopl_mask(next->iopl); /* * Now maybe handle debug registers and/or IO bitmaps */ if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV || task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) __switch_to_xtra(prev_p, next_p, tss); /* If we're going to preload the fpu context, make sure clts is run while we're batching the cpu state updates. */ if (preload_fpu) clts(); /* * Leave lazy mode, flushing any hypercalls made here. * This must be done before restoring TLS segments so * the GDT and LDT are properly updated, and must be * done before math_state_restore, so the TS bit is up * to date. */ arch_end_context_switch(next_p); if (preload_fpu) __math_state_restore(); /* * Restore %gs if needed (which is common) */ if (prev->gs | next->gs) lazy_load_gs(next->gs); percpu_write(current_task, next_p); return prev_p; } #define top_esp (THREAD_SIZE - sizeof(unsigned long)) #define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long)) unsigned long get_wchan(struct task_struct *p) { unsigned long bp, sp, ip; unsigned long stack_page; int count = 0; if (!p || p == current || p->state == TASK_RUNNING) return 0; stack_page = (unsigned long)task_stack_page(p); sp = p->thread.sp; if (!stack_page || sp < stack_page || sp > top_esp+stack_page) return 0; /* include/asm-i386/system.h:switch_to() pushes bp last. */ bp = *(unsigned long *) sp; do { if (bp < stack_page || bp > top_ebp+stack_page) return 0; ip = *(unsigned long *) (bp+4); if (!in_sched_functions(ip)) return ip; bp = *(unsigned long *) bp; } while (count++ < 16); return 0; }
gpl-2.0
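get_wchan() above walks saved frame pointers: on x86 with frame pointers enabled, each saved %ebp points at the caller's frame and the word just above it is the return address, and the walk stops when a pointer leaves the stack page or after 16 frames. A userspace sketch of the same walk, assuming frame pointers are kept (build with -fno-omit-frame-pointer; the frame layout is compiler- and ABI-dependent, so this is illustrative only):

#include <stdio.h>

__attribute__((noinline)) static void show_backtrace(void)
{
	unsigned long *bp = __builtin_frame_address(0);
	int count = 0;

	/* bp[0] = caller's saved frame pointer, bp[1] = return address */
	while (bp && count++ < 16) {
		unsigned long *next = (unsigned long *)bp[0];

		printf("frame %d: return address %p\n", count, (void *)bp[1]);
		if (next <= bp)	/* stack grows down; caller frames sit higher */
			break;
		bp = next;
	}
}

__attribute__((noinline)) static void inner(void)
{
	show_backtrace();
}

int main(void)
{
	inner();
	return 0;
}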
LiYihai/linux-2.6.39.4-notes
arch/x86/kernel/process_32.c
1076
10567
/* * Copyright (C) 1995 Linus Torvalds * * Pentium III FXSR, SSE support * Gareth Hughes <gareth@valinux.com>, May 2000 */ /* * This file handles the architecture-dependent parts of process handling.. */ #include <linux/stackprotector.h> #include <linux/cpu.h> #include <linux/errno.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/elfcore.h> #include <linux/smp.h> #include <linux/stddef.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/user.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/reboot.h> #include <linux/init.h> #include <linux/mc146818rtc.h> #include <linux/module.h> #include <linux/kallsyms.h> #include <linux/ptrace.h> #include <linux/personality.h> #include <linux/tick.h> #include <linux/percpu.h> #include <linux/prctl.h> #include <linux/ftrace.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/kdebug.h> #include <asm/pgtable.h> #include <asm/system.h> #include <asm/ldt.h> #include <asm/processor.h> #include <asm/i387.h> #include <asm/desc.h> #ifdef CONFIG_MATH_EMULATION #include <asm/math_emu.h> #endif #include <linux/err.h> #include <asm/tlbflush.h> #include <asm/cpu.h> #include <asm/idle.h> #include <asm/syscalls.h> #include <asm/debugreg.h> asmlinkage void ret_from_fork(void) __asm__("ret_from_fork"); /* * Return saved PC of a blocked thread. */ unsigned long thread_saved_pc(struct task_struct *tsk) { return ((unsigned long *)tsk->thread.sp)[3]; } #ifndef CONFIG_SMP static inline void play_dead(void) { BUG(); } #endif /* * The idle thread. There's no useful work to be * done, so just try to conserve power and have a * low exit latency (ie sit in a loop waiting for * somebody to say that they'd like to reschedule) */ void cpu_idle(void) { int cpu = smp_processor_id(); /* * If we're the non-boot CPU, nothing set the stack canary up * for us. CPU0 already has it initialized but no harm in * doing it again. This is a good place for updating it, as * we wont ever return from this function (so the invalid * canaries already on the stack wont ever trigger). 
*/ boot_init_stack_canary(); current_thread_info()->status |= TS_POLLING; /* endless idle loop with no priority at all */ while (1) { tick_nohz_stop_sched_tick(1); while (!need_resched()) { check_pgt_cache(); rmb(); if (cpu_is_offline(cpu)) play_dead(); local_irq_disable(); /* Don't trace irqs off for idle */ stop_critical_timings(); pm_idle(); start_critical_timings(); } tick_nohz_restart_sched_tick(); preempt_enable_no_resched(); schedule(); preempt_disable(); } } void __show_regs(struct pt_regs *regs, int all) { unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L; unsigned long d0, d1, d2, d3, d6, d7; unsigned long sp; unsigned short ss, gs; if (user_mode_vm(regs)) { sp = regs->sp; ss = regs->ss & 0xffff; gs = get_user_gs(regs); } else { sp = kernel_stack_pointer(regs); savesegment(ss, ss); savesegment(gs, gs); } show_regs_common(); printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n", (u16)regs->cs, regs->ip, regs->flags, smp_processor_id()); print_symbol("EIP is at %s\n", regs->ip); printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n", regs->ax, regs->bx, regs->cx, regs->dx); printk(KERN_DEFAULT "ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n", regs->si, regs->di, regs->bp, sp); printk(KERN_DEFAULT " DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n", (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss); if (!all) return; cr0 = read_cr0(); cr2 = read_cr2(); cr3 = read_cr3(); cr4 = read_cr4_safe(); printk(KERN_DEFAULT "CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n", cr0, cr2, cr3, cr4); get_debugreg(d0, 0); get_debugreg(d1, 1); get_debugreg(d2, 2); get_debugreg(d3, 3); printk(KERN_DEFAULT "DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n", d0, d1, d2, d3); get_debugreg(d6, 6); get_debugreg(d7, 7); printk(KERN_DEFAULT "DR6: %08lx DR7: %08lx\n", d6, d7); } void release_thread(struct task_struct *dead_task) { BUG_ON(dead_task->mm); release_vm86_irqs(dead_task); } /* * This gets called before we allocate a new thread and copy * the current task into it. */ void prepare_to_copy(struct task_struct *tsk) { unlazy_fpu(tsk); } int copy_thread(unsigned long clone_flags, unsigned long sp, unsigned long unused, struct task_struct *p, struct pt_regs *regs) { struct pt_regs *childregs; struct task_struct *tsk; int err; childregs = task_pt_regs(p); *childregs = *regs; childregs->ax = 0; childregs->sp = sp; p->thread.sp = (unsigned long) childregs; p->thread.sp0 = (unsigned long) (childregs+1); p->thread.ip = (unsigned long) ret_from_fork; task_user_gs(p) = get_user_gs(regs); p->thread.io_bitmap_ptr = NULL; tsk = current; err = -ENOMEM; memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps)); if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) { p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr, IO_BITMAP_BYTES, GFP_KERNEL); if (!p->thread.io_bitmap_ptr) { p->thread.io_bitmap_max = 0; return -ENOMEM; } set_tsk_thread_flag(p, TIF_IO_BITMAP); } err = 0; /* * Set a new TLS for the child thread? 
*/ if (clone_flags & CLONE_SETTLS) err = do_set_thread_area(p, -1, (struct user_desc __user *)childregs->si, 0); if (err && p->thread.io_bitmap_ptr) { kfree(p->thread.io_bitmap_ptr); p->thread.io_bitmap_max = 0; } return err; } void start_thread(struct pt_regs *regs, unsigned long new_ip, unsigned long new_sp) { set_user_gs(regs, 0); regs->fs = 0; regs->ds = __USER_DS; regs->es = __USER_DS; regs->ss = __USER_DS; regs->cs = __USER_CS; regs->ip = new_ip; regs->sp = new_sp; /* * Free the old FP and other extended state */ free_thread_xstate(current); } EXPORT_SYMBOL_GPL(start_thread); /* * switch_to(x,yn) should switch tasks from x to y. * * We fsave/fwait so that an exception goes off at the right time * (as a call from the fsave or fwait in effect) rather than to * the wrong process. Lazy FP saving no longer makes any sense * with modern CPU's, and this simplifies a lot of things (SMP * and UP become the same). * * NOTE! We used to use the x86 hardware context switching. The * reason for not using it any more becomes apparent when you * try to recover gracefully from saved state that is no longer * valid (stale segment register values in particular). With the * hardware task-switch, there is no way to fix up bad state in * a reasonable manner. * * The fact that Intel documents the hardware task-switching to * be slow is a fairly red herring - this code is not noticeably * faster. However, there _is_ some room for improvement here, * so the performance issues may eventually be a valid point. * More important, however, is the fact that this allows us much * more flexibility. * * The return value (in %ax) will be the "prev" task after * the task-switch, and shows up in ret_from_fork in entry.S, * for example. */ __notrace_funcgraph struct task_struct * __switch_to(struct task_struct *prev_p, struct task_struct *next_p) { struct thread_struct *prev = &prev_p->thread, *next = &next_p->thread; int cpu = smp_processor_id(); struct tss_struct *tss = &per_cpu(init_tss, cpu); bool preload_fpu; /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ /* * If the task has used fpu the last 5 timeslices, just do a full * restore of the math state immediately to avoid the trap; the * chances of needing FPU soon are obviously high now */ preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5; __unlazy_fpu(prev_p); /* we're going to use this soon, after a few expensive things */ if (preload_fpu) prefetch(next->fpu.state); /* * Reload esp0. */ load_sp0(tss, next); /* * Save away %gs. No need to save %fs, as it was saved on the * stack on entry. No need to save %es and %ds, as those are * always kernel segments while inside the kernel. Doing this * before setting the new TLS descriptors avoids the situation * where we temporarily have non-reloadable segments in %fs * and %gs. This could be an issue if the NMI handler ever * used %fs or %gs (it does not today), or if the kernel is * running inside of a hypervisor layer. */ lazy_save_gs(prev->gs); /* * Load the per-thread Thread-Local Storage descriptor. */ load_TLS(next, cpu); /* * Restore IOPL if needed. In normal use, the flags restore * in the switch assembly will handle this. But if the kernel * is running virtualized at a non-zero CPL, the popf will * not restore flags, so it must be done in a separate step. 
*/ if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl)) set_iopl_mask(next->iopl); /* * Now maybe handle debug registers and/or IO bitmaps */ if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV || task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT)) __switch_to_xtra(prev_p, next_p, tss); /* If we're going to preload the fpu context, make sure clts is run while we're batching the cpu state updates. */ if (preload_fpu) clts(); /* * Leave lazy mode, flushing any hypercalls made here. * This must be done before restoring TLS segments so * the GDT and LDT are properly updated, and must be * done before math_state_restore, so the TS bit is up * to date. */ arch_end_context_switch(next_p); if (preload_fpu) __math_state_restore(); /* * Restore %gs if needed (which is common) */ if (prev->gs | next->gs) lazy_load_gs(next->gs); percpu_write(current_task, next_p); return prev_p; } #define top_esp (THREAD_SIZE - sizeof(unsigned long)) #define top_ebp (THREAD_SIZE - 2*sizeof(unsigned long)) unsigned long get_wchan(struct task_struct *p) { unsigned long bp, sp, ip; unsigned long stack_page; int count = 0; if (!p || p == current || p->state == TASK_RUNNING) return 0; stack_page = (unsigned long)task_stack_page(p); sp = p->thread.sp; if (!stack_page || sp < stack_page || sp > top_esp+stack_page) return 0; /* include/asm-i386/system.h:switch_to() pushes bp last. */ bp = *(unsigned long *) sp; do { if (bp < stack_page || bp > top_ebp+stack_page) return 0; ip = *(unsigned long *) (bp+4); if (!in_sched_functions(ip)) return ip; bp = *(unsigned long *) bp; } while (count++ < 16); return 0; }
gpl-2.0
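The get_wchan() scan at the end of the file above walks the i386 frame-pointer chain: each frame stores the caller's %ebp at *bp and the return address at *(bp + 4), bounded by the task's stack page and a 16-frame limit. A minimal userspace sketch of the same walk, assuming GCC or Clang on x86 built with -fno-omit-frame-pointer; everything below is hypothetical demo code, not kernel API:

/* frame_walk.c - hedged sketch of a frame-pointer walk in the spirit of
 * get_wchan() above. Build with: gcc -O0 -fno-omit-frame-pointer frame_walk.c */
#include <stdio.h>
#include <stdint.h>

static void __attribute__((noinline)) walk(void)
{
        uintptr_t *bp = __builtin_frame_address(0);
        int count = 0;

        /* Each frame: bp[0] = caller's frame pointer, bp[1] = return
         * address - the layout get_wchan() assumes at bp and bp+4. */
        while (bp && count++ < 16) {
                uintptr_t *prev = (uintptr_t *)bp[0];

                printf("frame %2d: bp=%p ret=%#lx\n", count,
                       (void *)bp, (unsigned long)bp[1]);
                /* mirror get_wchan()'s bounds check: the chain must move
                 * strictly toward older (higher) frames, or we stop */
                if (prev <= bp)
                        break;
                bp = prev;
        }
}

static void __attribute__((noinline)) a(void) { walk(); }
static void __attribute__((noinline)) b(void) { a(); }

int main(void)
{
        b();
        return 0;
}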
Arasthel/kernel_motorola_msm
arch/arm/mach-w90x900/mach-nuc950evb.c
4660
1100
/*
 * linux/arch/arm/mach-w90x900/mach-nuc950evb.c
 *
 * Based on mach-s3c2410/mach-smdk2410.c by Jonas Dietsche
 *
 * Copyright (C) 2008 Nuvoton technology corporation.
 *
 * Wan ZongShun <mcuos.com@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the License.
 *
 * history:
 * Wang Qiang (rurality.linux@gmail.com) add LCD support
 *
 */

#include <linux/platform_device.h>
#include <asm/mach/arch.h>
#include <asm/mach/map.h>
#include <asm/mach-types.h>
#include <mach/map.h>
#include <linux/platform_data/video-nuc900fb.h>

#include "nuc950.h"

static void __init nuc950evb_map_io(void)
{
        nuc950_map_io();
        nuc950_init_clocks();
}

static void __init nuc950evb_init(void)
{
        nuc950_board_init();
}

MACHINE_START(W90P950EVB, "W90P950EVB")
        /* Maintainer: Wan ZongShun */
        .map_io         = nuc950evb_map_io,
        .init_irq       = nuc900_init_irq,
        .init_machine   = nuc950evb_init,
        .init_time      = nuc900_timer_init,
        .restart        = nuc9xx_restart,
MACHINE_END
gpl-2.0
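The MACHINE_START/MACHINE_END block above is a descriptor: the board contributes a named table of callbacks (map_io, init_irq, init_machine, ...) and the ARM boot code invokes them at fixed stages. A hedged stand-in sketch of that pattern; the struct below is invented for illustration and is not the kernel's machine_desc layout:

#include <stdio.h>

struct board_desc {                     /* hypothetical stand-in */
        const char *name;
        void (*map_io)(void);
        void (*init_machine)(void);
};

static void demo_map_io(void)       { puts("map_io: static mappings + clocks"); }
static void demo_init_machine(void) { puts("init_machine: register devices"); }

static const struct board_desc w90p950evb = {
        .name           = "W90P950EVB",
        .map_io         = demo_map_io,
        .init_machine   = demo_init_machine,
};

int main(void)
{
        /* the arch boot code would invoke these in order */
        printf("booting %s\n", w90p950evb.name);
        w90p950evb.map_io();
        w90p950evb.init_machine();
        return 0;
}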
kurisuxx/android_kernel_samsung_klimtlte
sound/pci/hda/patch_ca0110.c
4916
15238
/* * HD audio interface patch for Creative X-Fi CA0110-IBG chip * * Copyright (c) 2008 Takashi Iwai <tiwai@suse.de> * * This driver is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This driver is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/pci.h> #include <linux/module.h> #include <sound/core.h> #include "hda_codec.h" #include "hda_local.h" /* */ struct ca0110_spec { struct auto_pin_cfg autocfg; struct hda_multi_out multiout; hda_nid_t out_pins[AUTO_CFG_MAX_OUTS]; hda_nid_t dacs[AUTO_CFG_MAX_OUTS]; hda_nid_t hp_dac; hda_nid_t input_pins[AUTO_PIN_LAST]; hda_nid_t adcs[AUTO_PIN_LAST]; hda_nid_t dig_out; hda_nid_t dig_in; unsigned int num_inputs; char input_labels[AUTO_PIN_LAST][32]; struct hda_pcm pcm_rec[2]; /* PCM information */ }; /* * PCM callbacks */ static int ca0110_playback_pcm_open(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct ca0110_spec *spec = codec->spec; return snd_hda_multi_out_analog_open(codec, &spec->multiout, substream, hinfo); } static int ca0110_playback_pcm_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, unsigned int format, struct snd_pcm_substream *substream) { struct ca0110_spec *spec = codec->spec; return snd_hda_multi_out_analog_prepare(codec, &spec->multiout, stream_tag, format, substream); } static int ca0110_playback_pcm_cleanup(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct ca0110_spec *spec = codec->spec; return snd_hda_multi_out_analog_cleanup(codec, &spec->multiout); } /* * Digital out */ static int ca0110_dig_playback_pcm_open(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct ca0110_spec *spec = codec->spec; return snd_hda_multi_out_dig_open(codec, &spec->multiout); } static int ca0110_dig_playback_pcm_close(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct ca0110_spec *spec = codec->spec; return snd_hda_multi_out_dig_close(codec, &spec->multiout); } static int ca0110_dig_playback_pcm_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, unsigned int format, struct snd_pcm_substream *substream) { struct ca0110_spec *spec = codec->spec; return snd_hda_multi_out_dig_prepare(codec, &spec->multiout, stream_tag, format, substream); } /* * Analog capture */ static int ca0110_capture_pcm_prepare(struct hda_pcm_stream *hinfo, struct hda_codec *codec, unsigned int stream_tag, unsigned int format, struct snd_pcm_substream *substream) { struct ca0110_spec *spec = codec->spec; snd_hda_codec_setup_stream(codec, spec->adcs[substream->number], stream_tag, 0, format); return 0; } static int ca0110_capture_pcm_cleanup(struct hda_pcm_stream *hinfo, struct hda_codec *codec, struct snd_pcm_substream *substream) { struct ca0110_spec *spec = 
codec->spec; snd_hda_codec_cleanup_stream(codec, spec->adcs[substream->number]); return 0; } /* */ static const char * const dirstr[2] = { "Playback", "Capture" }; static int _add_switch(struct hda_codec *codec, hda_nid_t nid, const char *pfx, int chan, int dir) { char namestr[44]; int type = dir ? HDA_INPUT : HDA_OUTPUT; struct snd_kcontrol_new knew = HDA_CODEC_MUTE_MONO(namestr, nid, chan, 0, type); sprintf(namestr, "%s %s Switch", pfx, dirstr[dir]); return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec)); } static int _add_volume(struct hda_codec *codec, hda_nid_t nid, const char *pfx, int chan, int dir) { char namestr[44]; int type = dir ? HDA_INPUT : HDA_OUTPUT; struct snd_kcontrol_new knew = HDA_CODEC_VOLUME_MONO(namestr, nid, chan, 0, type); sprintf(namestr, "%s %s Volume", pfx, dirstr[dir]); return snd_hda_ctl_add(codec, nid, snd_ctl_new1(&knew, codec)); } #define add_out_switch(codec, nid, pfx) _add_switch(codec, nid, pfx, 3, 0) #define add_out_volume(codec, nid, pfx) _add_volume(codec, nid, pfx, 3, 0) #define add_in_switch(codec, nid, pfx) _add_switch(codec, nid, pfx, 3, 1) #define add_in_volume(codec, nid, pfx) _add_volume(codec, nid, pfx, 3, 1) #define add_mono_switch(codec, nid, pfx, chan) \ _add_switch(codec, nid, pfx, chan, 0) #define add_mono_volume(codec, nid, pfx, chan) \ _add_volume(codec, nid, pfx, chan, 0) static int ca0110_build_controls(struct hda_codec *codec) { struct ca0110_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; static const char * const prefix[AUTO_CFG_MAX_OUTS] = { "Front", "Surround", NULL, "Side", "Multi" }; hda_nid_t mutenid; int i, err; for (i = 0; i < spec->multiout.num_dacs; i++) { if (get_wcaps(codec, spec->out_pins[i]) & AC_WCAP_OUT_AMP) mutenid = spec->out_pins[i]; else mutenid = spec->multiout.dac_nids[i]; if (!prefix[i]) { err = add_mono_switch(codec, mutenid, "Center", 1); if (err < 0) return err; err = add_mono_switch(codec, mutenid, "LFE", 1); if (err < 0) return err; err = add_mono_volume(codec, spec->multiout.dac_nids[i], "Center", 1); if (err < 0) return err; err = add_mono_volume(codec, spec->multiout.dac_nids[i], "LFE", 1); if (err < 0) return err; } else { err = add_out_switch(codec, mutenid, prefix[i]); if (err < 0) return err; err = add_out_volume(codec, spec->multiout.dac_nids[i], prefix[i]); if (err < 0) return err; } } if (cfg->hp_outs) { if (get_wcaps(codec, cfg->hp_pins[0]) & AC_WCAP_OUT_AMP) mutenid = cfg->hp_pins[0]; else mutenid = spec->multiout.dac_nids[i]; err = add_out_switch(codec, mutenid, "Headphone"); if (err < 0) return err; if (spec->hp_dac) { err = add_out_volume(codec, spec->hp_dac, "Headphone"); if (err < 0) return err; } } for (i = 0; i < spec->num_inputs; i++) { const char *label = spec->input_labels[i]; if (get_wcaps(codec, spec->input_pins[i]) & AC_WCAP_IN_AMP) mutenid = spec->input_pins[i]; else mutenid = spec->adcs[i]; err = add_in_switch(codec, mutenid, label); if (err < 0) return err; err = add_in_volume(codec, spec->adcs[i], label); if (err < 0) return err; } if (spec->dig_out) { err = snd_hda_create_spdif_out_ctls(codec, spec->dig_out, spec->dig_out); if (err < 0) return err; err = snd_hda_create_spdif_share_sw(codec, &spec->multiout); if (err < 0) return err; spec->multiout.share_spdif = 1; } if (spec->dig_in) { err = snd_hda_create_spdif_in_ctls(codec, spec->dig_in); if (err < 0) return err; err = add_in_volume(codec, spec->dig_in, "IEC958"); } return 0; } /* */ static const struct hda_pcm_stream ca0110_pcm_analog_playback = { .substreams = 1, .channels_min = 2, 
.channels_max = 8, .ops = { .open = ca0110_playback_pcm_open, .prepare = ca0110_playback_pcm_prepare, .cleanup = ca0110_playback_pcm_cleanup }, }; static const struct hda_pcm_stream ca0110_pcm_analog_capture = { .substreams = 1, .channels_min = 2, .channels_max = 2, .ops = { .prepare = ca0110_capture_pcm_prepare, .cleanup = ca0110_capture_pcm_cleanup }, }; static const struct hda_pcm_stream ca0110_pcm_digital_playback = { .substreams = 1, .channels_min = 2, .channels_max = 2, .ops = { .open = ca0110_dig_playback_pcm_open, .close = ca0110_dig_playback_pcm_close, .prepare = ca0110_dig_playback_pcm_prepare }, }; static const struct hda_pcm_stream ca0110_pcm_digital_capture = { .substreams = 1, .channels_min = 2, .channels_max = 2, }; static int ca0110_build_pcms(struct hda_codec *codec) { struct ca0110_spec *spec = codec->spec; struct hda_pcm *info = spec->pcm_rec; codec->pcm_info = info; codec->num_pcms = 0; info->name = "CA0110 Analog"; info->stream[SNDRV_PCM_STREAM_PLAYBACK] = ca0110_pcm_analog_playback; info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->dacs[0]; info->stream[SNDRV_PCM_STREAM_PLAYBACK].channels_max = spec->multiout.max_channels; info->stream[SNDRV_PCM_STREAM_CAPTURE] = ca0110_pcm_analog_capture; info->stream[SNDRV_PCM_STREAM_CAPTURE].substreams = spec->num_inputs; info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->adcs[0]; codec->num_pcms++; if (!spec->dig_out && !spec->dig_in) return 0; info++; info->name = "CA0110 Digital"; info->pcm_type = HDA_PCM_TYPE_SPDIF; if (spec->dig_out) { info->stream[SNDRV_PCM_STREAM_PLAYBACK] = ca0110_pcm_digital_playback; info->stream[SNDRV_PCM_STREAM_PLAYBACK].nid = spec->dig_out; } if (spec->dig_in) { info->stream[SNDRV_PCM_STREAM_CAPTURE] = ca0110_pcm_digital_capture; info->stream[SNDRV_PCM_STREAM_CAPTURE].nid = spec->dig_in; } codec->num_pcms++; return 0; } static void init_output(struct hda_codec *codec, hda_nid_t pin, hda_nid_t dac) { if (pin) { snd_hda_codec_write(codec, pin, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_HP); if (get_wcaps(codec, pin) & AC_WCAP_OUT_AMP) snd_hda_codec_write(codec, pin, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_UNMUTE); } if (dac) snd_hda_codec_write(codec, dac, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_OUT_ZERO); } static void init_input(struct hda_codec *codec, hda_nid_t pin, hda_nid_t adc) { if (pin) { snd_hda_codec_write(codec, pin, 0, AC_VERB_SET_PIN_WIDGET_CONTROL, PIN_VREF80); if (get_wcaps(codec, pin) & AC_WCAP_IN_AMP) snd_hda_codec_write(codec, pin, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)); } if (adc) snd_hda_codec_write(codec, adc, 0, AC_VERB_SET_AMP_GAIN_MUTE, AMP_IN_UNMUTE(0)); } static int ca0110_init(struct hda_codec *codec) { struct ca0110_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; int i; for (i = 0; i < spec->multiout.num_dacs; i++) init_output(codec, spec->out_pins[i], spec->multiout.dac_nids[i]); init_output(codec, cfg->hp_pins[0], spec->hp_dac); init_output(codec, cfg->dig_out_pins[0], spec->dig_out); for (i = 0; i < spec->num_inputs; i++) init_input(codec, spec->input_pins[i], spec->adcs[i]); init_input(codec, cfg->dig_in_pin, spec->dig_in); return 0; } static void ca0110_free(struct hda_codec *codec) { kfree(codec->spec); } static const struct hda_codec_ops ca0110_patch_ops = { .build_controls = ca0110_build_controls, .build_pcms = ca0110_build_pcms, .init = ca0110_init, .free = ca0110_free, }; static void parse_line_outs(struct hda_codec *codec) { struct ca0110_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; int i, n; unsigned int def_conf; 
hda_nid_t nid; n = 0; for (i = 0; i < cfg->line_outs; i++) { nid = cfg->line_out_pins[i]; def_conf = snd_hda_codec_get_pincfg(codec, nid); if (!def_conf) continue; /* invalid pin */ if (snd_hda_get_connections(codec, nid, &spec->dacs[i], 1) != 1) continue; spec->out_pins[n++] = nid; } spec->multiout.dac_nids = spec->dacs; spec->multiout.num_dacs = n; spec->multiout.max_channels = n * 2; } static void parse_hp_out(struct hda_codec *codec) { struct ca0110_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; int i; unsigned int def_conf; hda_nid_t nid, dac; if (!cfg->hp_outs) return; nid = cfg->hp_pins[0]; def_conf = snd_hda_codec_get_pincfg(codec, nid); if (!def_conf) { cfg->hp_outs = 0; return; } if (snd_hda_get_connections(codec, nid, &dac, 1) != 1) return; for (i = 0; i < cfg->line_outs; i++) if (dac == spec->dacs[i]) break; if (i >= cfg->line_outs) { spec->hp_dac = dac; spec->multiout.hp_nid = dac; } } static void parse_input(struct hda_codec *codec) { struct ca0110_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; hda_nid_t nid, pin; int n, i, j; n = 0; nid = codec->start_nid; for (i = 0; i < codec->num_nodes; i++, nid++) { unsigned int wcaps = get_wcaps(codec, nid); unsigned int type = get_wcaps_type(wcaps); if (type != AC_WID_AUD_IN) continue; if (snd_hda_get_connections(codec, nid, &pin, 1) != 1) continue; if (pin == cfg->dig_in_pin) { spec->dig_in = nid; continue; } for (j = 0; j < cfg->num_inputs; j++) if (cfg->inputs[j].pin == pin) break; if (j >= cfg->num_inputs) continue; spec->input_pins[n] = pin; snd_hda_get_pin_label(codec, pin, cfg, spec->input_labels[n], sizeof(spec->input_labels[n]), NULL); spec->adcs[n] = nid; n++; } spec->num_inputs = n; } static void parse_digital(struct hda_codec *codec) { struct ca0110_spec *spec = codec->spec; struct auto_pin_cfg *cfg = &spec->autocfg; if (cfg->dig_outs && snd_hda_get_connections(codec, cfg->dig_out_pins[0], &spec->dig_out, 1) == 1) spec->multiout.dig_out_nid = spec->dig_out; } static int ca0110_parse_auto_config(struct hda_codec *codec) { struct ca0110_spec *spec = codec->spec; int err; err = snd_hda_parse_pin_def_config(codec, &spec->autocfg, NULL); if (err < 0) return err; parse_line_outs(codec); parse_hp_out(codec); parse_digital(codec); parse_input(codec); return 0; } static int patch_ca0110(struct hda_codec *codec) { struct ca0110_spec *spec; int err; spec = kzalloc(sizeof(*spec), GFP_KERNEL); if (!spec) return -ENOMEM; codec->spec = spec; codec->bus->needs_damn_long_delay = 1; err = ca0110_parse_auto_config(codec); if (err < 0) goto error; codec->patch_ops = ca0110_patch_ops; return 0; error: kfree(codec->spec); codec->spec = NULL; return err; } /* * patch entries */ static const struct hda_codec_preset snd_hda_preset_ca0110[] = { { .id = 0x1102000a, .name = "CA0110-IBG", .patch = patch_ca0110 }, { .id = 0x1102000b, .name = "CA0110-IBG", .patch = patch_ca0110 }, { .id = 0x1102000d, .name = "SB0880 X-Fi", .patch = patch_ca0110 }, {} /* terminator */ }; MODULE_ALIAS("snd-hda-codec-id:1102000a"); MODULE_ALIAS("snd-hda-codec-id:1102000b"); MODULE_ALIAS("snd-hda-codec-id:1102000d"); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("Creative CA0110-IBG HD-audio codec"); static struct hda_codec_preset_list ca0110_list = { .preset = snd_hda_preset_ca0110, .owner = THIS_MODULE, }; static int __init patch_ca0110_init(void) { return snd_hda_add_codec_preset(&ca0110_list); } static void __exit patch_ca0110_exit(void) { snd_hda_delete_codec_preset(&ca0110_list); } module_init(patch_ca0110_init) 
module_exit(patch_ca0110_exit)
gpl-2.0
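The _add_switch()/_add_volume() helpers above lean on an ordering subtlety: the HDA_CODEC_*_MONO initializer captures a pointer to the stack buffer namestr, sprintf() fills that buffer afterwards, and snd_ctl_new1() copies the string when the template is consumed. A hedged, self-contained sketch of the same pattern; the types and function names are stand-ins, not the real ALSA structs:

#include <stdio.h>
#include <string.h>

struct kctl_template {                  /* stand-in for snd_kcontrol_new */
        const char *name;
};

static const char * const dirstr[2] = { "Playback", "Capture" };

static void register_ctl(const struct kctl_template *t)
{
        /* like snd_ctl_new1(), copy the string so the caller's stack
         * buffer does not need to outlive registration */
        char copy[44];

        strncpy(copy, t->name, sizeof(copy) - 1);
        copy[sizeof(copy) - 1] = '\0';
        printf("registered control \"%s\"\n", copy);
}

static void add_switch(const char *pfx, int dir)
{
        char namestr[44];
        struct kctl_template knew = { namestr };  /* pointer captured first */

        sprintf(namestr, "%s %s Switch", pfx, dirstr[dir]); /* filled later */
        register_ctl(&knew);
}

int main(void)
{
        add_switch("Front", 0);
        add_switch("IEC958", 1);
        return 0;
}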
tdro/android_kernel_kobo_macallan
arch/x86/kernel/cpu/mcheck/mce-inject.c
4916
6035
/* * Machine check injection support. * Copyright 2008 Intel Corporation. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; version 2 * of the License. * * Authors: * Andi Kleen * Ying Huang */ #include <linux/uaccess.h> #include <linux/module.h> #include <linux/timer.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/fs.h> #include <linux/preempt.h> #include <linux/smp.h> #include <linux/notifier.h> #include <linux/kdebug.h> #include <linux/cpu.h> #include <linux/sched.h> #include <linux/gfp.h> #include <asm/mce.h> #include <asm/apic.h> #include <asm/nmi.h> /* Update fake mce registers on current CPU. */ static void inject_mce(struct mce *m) { struct mce *i = &per_cpu(injectm, m->extcpu); /* Make sure no one reads partially written injectm */ i->finished = 0; mb(); m->finished = 0; /* First set the fields after finished */ i->extcpu = m->extcpu; mb(); /* Now write record in order, finished last (except above) */ memcpy(i, m, sizeof(struct mce)); /* Finally activate it */ mb(); i->finished = 1; } static void raise_poll(struct mce *m) { unsigned long flags; mce_banks_t b; memset(&b, 0xff, sizeof(mce_banks_t)); local_irq_save(flags); machine_check_poll(0, &b); local_irq_restore(flags); m->finished = 0; } static void raise_exception(struct mce *m, struct pt_regs *pregs) { struct pt_regs regs; unsigned long flags; if (!pregs) { memset(&regs, 0, sizeof(struct pt_regs)); regs.ip = m->ip; regs.cs = m->cs; pregs = &regs; } /* in mcheck exeception handler, irq will be disabled */ local_irq_save(flags); do_machine_check(pregs, 0); local_irq_restore(flags); m->finished = 0; } static cpumask_var_t mce_inject_cpumask; static int mce_raise_notify(unsigned int cmd, struct pt_regs *regs) { int cpu = smp_processor_id(); struct mce *m = &__get_cpu_var(injectm); if (!cpumask_test_cpu(cpu, mce_inject_cpumask)) return NMI_DONE; cpumask_clear_cpu(cpu, mce_inject_cpumask); if (m->inject_flags & MCJ_EXCEPTION) raise_exception(m, regs); else if (m->status) raise_poll(m); return NMI_HANDLED; } static void mce_irq_ipi(void *info) { int cpu = smp_processor_id(); struct mce *m = &__get_cpu_var(injectm); if (cpumask_test_cpu(cpu, mce_inject_cpumask) && m->inject_flags & MCJ_EXCEPTION) { cpumask_clear_cpu(cpu, mce_inject_cpumask); raise_exception(m, NULL); } } /* Inject mce on current CPU */ static int raise_local(void) { struct mce *m = &__get_cpu_var(injectm); int context = MCJ_CTX(m->inject_flags); int ret = 0; int cpu = m->extcpu; if (m->inject_flags & MCJ_EXCEPTION) { printk(KERN_INFO "Triggering MCE exception on CPU %d\n", cpu); switch (context) { case MCJ_CTX_IRQ: /* * Could do more to fake interrupts like * calling irq_enter, but the necessary * machinery isn't exported currently. 
*/ /*FALL THROUGH*/ case MCJ_CTX_PROCESS: raise_exception(m, NULL); break; default: printk(KERN_INFO "Invalid MCE context\n"); ret = -EINVAL; } printk(KERN_INFO "MCE exception done on CPU %d\n", cpu); } else if (m->status) { printk(KERN_INFO "Starting machine check poll CPU %d\n", cpu); raise_poll(m); mce_notify_irq(); printk(KERN_INFO "Machine check poll done on CPU %d\n", cpu); } else m->finished = 0; return ret; } static void raise_mce(struct mce *m) { int context = MCJ_CTX(m->inject_flags); inject_mce(m); if (context == MCJ_CTX_RANDOM) return; #ifdef CONFIG_X86_LOCAL_APIC if (m->inject_flags & (MCJ_IRQ_BRAODCAST | MCJ_NMI_BROADCAST)) { unsigned long start; int cpu; get_online_cpus(); cpumask_copy(mce_inject_cpumask, cpu_online_mask); cpumask_clear_cpu(get_cpu(), mce_inject_cpumask); for_each_online_cpu(cpu) { struct mce *mcpu = &per_cpu(injectm, cpu); if (!mcpu->finished || MCJ_CTX(mcpu->inject_flags) != MCJ_CTX_RANDOM) cpumask_clear_cpu(cpu, mce_inject_cpumask); } if (!cpumask_empty(mce_inject_cpumask)) { if (m->inject_flags & MCJ_IRQ_BRAODCAST) { /* * don't wait because mce_irq_ipi is necessary * to be sync with following raise_local */ preempt_disable(); smp_call_function_many(mce_inject_cpumask, mce_irq_ipi, NULL, 0); preempt_enable(); } else if (m->inject_flags & MCJ_NMI_BROADCAST) apic->send_IPI_mask(mce_inject_cpumask, NMI_VECTOR); } start = jiffies; while (!cpumask_empty(mce_inject_cpumask)) { if (!time_before(jiffies, start + 2*HZ)) { printk(KERN_ERR "Timeout waiting for mce inject %lx\n", *cpumask_bits(mce_inject_cpumask)); break; } cpu_relax(); } raise_local(); put_cpu(); put_online_cpus(); } else #endif raise_local(); } /* Error injection interface */ static ssize_t mce_write(struct file *filp, const char __user *ubuf, size_t usize, loff_t *off) { struct mce m; if (!capable(CAP_SYS_ADMIN)) return -EPERM; /* * There are some cases where real MSR reads could slip * through. */ if (!boot_cpu_has(X86_FEATURE_MCE) || !boot_cpu_has(X86_FEATURE_MCA)) return -EIO; if ((unsigned long)usize > sizeof(struct mce)) usize = sizeof(struct mce); if (copy_from_user(&m, ubuf, usize)) return -EFAULT; if (m.extcpu >= num_possible_cpus() || !cpu_online(m.extcpu)) return -EINVAL; /* * Need to give user space some time to set everything up, * so do it a jiffie or two later everywhere. */ schedule_timeout(2); raise_mce(&m); return usize; } static int inject_init(void) { if (!alloc_cpumask_var(&mce_inject_cpumask, GFP_KERNEL)) return -ENOMEM; printk(KERN_INFO "Machine check injector initialized\n"); register_mce_write_callback(mce_write); register_nmi_handler(NMI_LOCAL, mce_raise_notify, 0, "mce_notify"); return 0; } module_init(inject_init); /* * Cannot tolerate unloading currently because we cannot * guarantee all openers of mce_chrdev will get a reference to us. */ MODULE_LICENSE("GPL");
gpl-2.0
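inject_mce() above publishes the fake record with a small protocol: clear finished, write the fields, then set finished last, separated by mb(), so a concurrent reader never consumes a half-written record. A userspace analogue is sketched below using C11 acquire/release atomics in place of the kernel barriers; the record layout is invented for the demo:

#include <stdatomic.h>
#include <stdio.h>

struct record {
        int payload;
        atomic_int finished;
};

static struct record shared;

static void publish(int value)
{
        /* make sure no one reads a partially written record */
        atomic_store_explicit(&shared.finished, 0, memory_order_release);
        shared.payload = value;                       /* fields first */
        atomic_store_explicit(&shared.finished, 1,    /* flag last */
                              memory_order_release);
}

static int try_consume(int *out)
{
        if (!atomic_load_explicit(&shared.finished, memory_order_acquire))
                return 0;                             /* not published yet */
        *out = shared.payload;
        return 1;
}

int main(void)
{
        int v;

        publish(42);
        if (try_consume(&v))
                printf("consumed %d\n", v);
        return 0;
}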
rmtew/MediaTek-HelioX10-Kernel
alps/kernel-3.10/mm/quicklist.c
9524
2454
/*
 * Quicklist support.
 *
 * Quicklists are lightweight lists of pages that have a defined state
 * on alloc and free. Pages must be in the quicklist-specific defined state
 * (zero by default) when the page is freed. It seems that the initial idea
 * for such lists first came from Dave Miller and then various other people
 * improved on it.
 *
 * Copyright (C) 2007 SGI,
 *      Christoph Lameter <clameter@sgi.com>
 *              Generalized, added support for multiple lists and
 *              constructors / destructors.
 */
#include <linux/kernel.h>

#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/quicklist.h>

DEFINE_PER_CPU(struct quicklist [CONFIG_NR_QUICK], quicklist);

#define FRACTION_OF_NODE_MEM    16

static unsigned long max_pages(unsigned long min_pages)
{
        unsigned long node_free_pages, max;
        int node = numa_node_id();
        struct zone *zones = NODE_DATA(node)->node_zones;
        int num_cpus_on_node;

        node_free_pages =
#ifdef CONFIG_ZONE_DMA
                zone_page_state(&zones[ZONE_DMA], NR_FREE_PAGES) +
#endif
#ifdef CONFIG_ZONE_DMA32
                zone_page_state(&zones[ZONE_DMA32], NR_FREE_PAGES) +
#endif
                zone_page_state(&zones[ZONE_NORMAL], NR_FREE_PAGES);

        max = node_free_pages / FRACTION_OF_NODE_MEM;

        num_cpus_on_node = cpumask_weight(cpumask_of_node(node));
        max /= num_cpus_on_node;

        return max(max, min_pages);
}

static long min_pages_to_free(struct quicklist *q,
        unsigned long min_pages, long max_free)
{
        long pages_to_free;

        pages_to_free = q->nr_pages - max_pages(min_pages);

        return min(pages_to_free, max_free);
}

/*
 * Trim down the number of pages in the quicklist
 */
void quicklist_trim(int nr, void (*dtor)(void *),
        unsigned long min_pages, unsigned long max_free)
{
        long pages_to_free;
        struct quicklist *q;

        q = &get_cpu_var(quicklist)[nr];
        if (q->nr_pages > min_pages) {
                pages_to_free = min_pages_to_free(q, min_pages, max_free);

                while (pages_to_free > 0) {
                        /*
                         * We pass a gfp_t of 0 to quicklist_alloc here
                         * because we will never call into the page allocator.
                         */
                        void *p = quicklist_alloc(nr, 0, NULL);

                        if (dtor)
                                dtor(p);
                        free_page((unsigned long)p);
                        pages_to_free--;
                }
        }
        put_cpu_var(quicklist);
}

unsigned long quicklist_total_size(void)
{
        unsigned long count = 0;
        int cpu;
        struct quicklist *ql, *q;

        for_each_online_cpu(cpu) {
                ql = per_cpu(quicklist, cpu);
                for (q = ql; q < ql + CONFIG_NR_QUICK; q++)
                        count += q->nr_pages;
        }

        return count;
}
gpl-2.0
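The sizing logic above caps each per-CPU quicklist at the node's free pages divided by FRACTION_OF_NODE_MEM and by the CPUs on the node, floored at a caller-supplied minimum, and frees at most max_free pages per trim pass. A standalone sketch of that arithmetic; the input numbers are made up, not real zone counters:

#include <stdio.h>

#define FRACTION_OF_NODE_MEM 16

static unsigned long max_pages(unsigned long node_free_pages,
                               int cpus_on_node, unsigned long min_pages)
{
        unsigned long max = node_free_pages / FRACTION_OF_NODE_MEM;

        max /= cpus_on_node;
        return max > min_pages ? max : min_pages;
}

static long pages_to_free(unsigned long nr_pages, unsigned long cap,
                          long max_free)
{
        long excess = (long)nr_pages - (long)cap;

        /* may be <= 0; the trim loop only frees while this is > 0 */
        return excess < max_free ? excess : max_free;
}

int main(void)
{
        /* 1M free 4K pages (~4 GiB) on an 8-CPU node, floor of 25 pages */
        unsigned long cap = max_pages(1UL << 20, 8, 25);
        long n = pages_to_free(12000, cap, 1024);

        printf("cap=%lu pages, free up to %ld this pass\n", cap, n);
        return 0;
}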
bubby323/samsung-kernel-c1spr
crypto/tea.c
10036
7253
/* * Cryptographic API. * * TEA, XTEA, and XETA crypto alogrithms * * The TEA and Xtended TEA algorithms were developed by David Wheeler * and Roger Needham at the Computer Laboratory of Cambridge University. * * Due to the order of evaluation in XTEA many people have incorrectly * implemented it. XETA (XTEA in the wrong order), exists for * compatibility with these implementations. * * Copyright (c) 2004 Aaron Grothe ajgrothe@yahoo.com * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/init.h> #include <linux/module.h> #include <linux/mm.h> #include <asm/byteorder.h> #include <linux/crypto.h> #include <linux/types.h> #define TEA_KEY_SIZE 16 #define TEA_BLOCK_SIZE 8 #define TEA_ROUNDS 32 #define TEA_DELTA 0x9e3779b9 #define XTEA_KEY_SIZE 16 #define XTEA_BLOCK_SIZE 8 #define XTEA_ROUNDS 32 #define XTEA_DELTA 0x9e3779b9 struct tea_ctx { u32 KEY[4]; }; struct xtea_ctx { u32 KEY[4]; }; static int tea_setkey(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct tea_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *key = (const __le32 *)in_key; ctx->KEY[0] = le32_to_cpu(key[0]); ctx->KEY[1] = le32_to_cpu(key[1]); ctx->KEY[2] = le32_to_cpu(key[2]); ctx->KEY[3] = le32_to_cpu(key[3]); return 0; } static void tea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { u32 y, z, n, sum = 0; u32 k0, k1, k2, k3; struct tea_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *in = (const __le32 *)src; __le32 *out = (__le32 *)dst; y = le32_to_cpu(in[0]); z = le32_to_cpu(in[1]); k0 = ctx->KEY[0]; k1 = ctx->KEY[1]; k2 = ctx->KEY[2]; k3 = ctx->KEY[3]; n = TEA_ROUNDS; while (n-- > 0) { sum += TEA_DELTA; y += ((z << 4) + k0) ^ (z + sum) ^ ((z >> 5) + k1); z += ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3); } out[0] = cpu_to_le32(y); out[1] = cpu_to_le32(z); } static void tea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { u32 y, z, n, sum; u32 k0, k1, k2, k3; struct tea_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *in = (const __le32 *)src; __le32 *out = (__le32 *)dst; y = le32_to_cpu(in[0]); z = le32_to_cpu(in[1]); k0 = ctx->KEY[0]; k1 = ctx->KEY[1]; k2 = ctx->KEY[2]; k3 = ctx->KEY[3]; sum = TEA_DELTA << 5; n = TEA_ROUNDS; while (n-- > 0) { z -= ((y << 4) + k2) ^ (y + sum) ^ ((y >> 5) + k3); y -= ((z << 4) + k0) ^ (z + sum) ^ ((z >> 5) + k1); sum -= TEA_DELTA; } out[0] = cpu_to_le32(y); out[1] = cpu_to_le32(z); } static int xtea_setkey(struct crypto_tfm *tfm, const u8 *in_key, unsigned int key_len) { struct xtea_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *key = (const __le32 *)in_key; ctx->KEY[0] = le32_to_cpu(key[0]); ctx->KEY[1] = le32_to_cpu(key[1]); ctx->KEY[2] = le32_to_cpu(key[2]); ctx->KEY[3] = le32_to_cpu(key[3]); return 0; } static void xtea_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { u32 y, z, sum = 0; u32 limit = XTEA_DELTA * XTEA_ROUNDS; struct xtea_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *in = (const __le32 *)src; __le32 *out = (__le32 *)dst; y = le32_to_cpu(in[0]); z = le32_to_cpu(in[1]); while (sum != limit) { y += ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum&3]); sum += XTEA_DELTA; z += ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 &3]); } out[0] = cpu_to_le32(y); out[1] = cpu_to_le32(z); } static void xtea_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { u32 y, z, sum; struct tea_ctx *ctx = crypto_tfm_ctx(tfm); const 
__le32 *in = (const __le32 *)src; __le32 *out = (__le32 *)dst; y = le32_to_cpu(in[0]); z = le32_to_cpu(in[1]); sum = XTEA_DELTA * XTEA_ROUNDS; while (sum) { z -= ((y << 4 ^ y >> 5) + y) ^ (sum + ctx->KEY[sum>>11 & 3]); sum -= XTEA_DELTA; y -= ((z << 4 ^ z >> 5) + z) ^ (sum + ctx->KEY[sum & 3]); } out[0] = cpu_to_le32(y); out[1] = cpu_to_le32(z); } static void xeta_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { u32 y, z, sum = 0; u32 limit = XTEA_DELTA * XTEA_ROUNDS; struct xtea_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *in = (const __le32 *)src; __le32 *out = (__le32 *)dst; y = le32_to_cpu(in[0]); z = le32_to_cpu(in[1]); while (sum != limit) { y += (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum&3]; sum += XTEA_DELTA; z += (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 &3]; } out[0] = cpu_to_le32(y); out[1] = cpu_to_le32(z); } static void xeta_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src) { u32 y, z, sum; struct tea_ctx *ctx = crypto_tfm_ctx(tfm); const __le32 *in = (const __le32 *)src; __le32 *out = (__le32 *)dst; y = le32_to_cpu(in[0]); z = le32_to_cpu(in[1]); sum = XTEA_DELTA * XTEA_ROUNDS; while (sum) { z -= (y << 4 ^ y >> 5) + (y ^ sum) + ctx->KEY[sum>>11 & 3]; sum -= XTEA_DELTA; y -= (z << 4 ^ z >> 5) + (z ^ sum) + ctx->KEY[sum & 3]; } out[0] = cpu_to_le32(y); out[1] = cpu_to_le32(z); } static struct crypto_alg tea_alg = { .cra_name = "tea", .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = TEA_BLOCK_SIZE, .cra_ctxsize = sizeof (struct tea_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(tea_alg.cra_list), .cra_u = { .cipher = { .cia_min_keysize = TEA_KEY_SIZE, .cia_max_keysize = TEA_KEY_SIZE, .cia_setkey = tea_setkey, .cia_encrypt = tea_encrypt, .cia_decrypt = tea_decrypt } } }; static struct crypto_alg xtea_alg = { .cra_name = "xtea", .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = XTEA_BLOCK_SIZE, .cra_ctxsize = sizeof (struct xtea_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(xtea_alg.cra_list), .cra_u = { .cipher = { .cia_min_keysize = XTEA_KEY_SIZE, .cia_max_keysize = XTEA_KEY_SIZE, .cia_setkey = xtea_setkey, .cia_encrypt = xtea_encrypt, .cia_decrypt = xtea_decrypt } } }; static struct crypto_alg xeta_alg = { .cra_name = "xeta", .cra_flags = CRYPTO_ALG_TYPE_CIPHER, .cra_blocksize = XTEA_BLOCK_SIZE, .cra_ctxsize = sizeof (struct xtea_ctx), .cra_alignmask = 3, .cra_module = THIS_MODULE, .cra_list = LIST_HEAD_INIT(xtea_alg.cra_list), .cra_u = { .cipher = { .cia_min_keysize = XTEA_KEY_SIZE, .cia_max_keysize = XTEA_KEY_SIZE, .cia_setkey = xtea_setkey, .cia_encrypt = xeta_encrypt, .cia_decrypt = xeta_decrypt } } }; static int __init tea_mod_init(void) { int ret = 0; ret = crypto_register_alg(&tea_alg); if (ret < 0) goto out; ret = crypto_register_alg(&xtea_alg); if (ret < 0) { crypto_unregister_alg(&tea_alg); goto out; } ret = crypto_register_alg(&xeta_alg); if (ret < 0) { crypto_unregister_alg(&tea_alg); crypto_unregister_alg(&xtea_alg); goto out; } out: return ret; } static void __exit tea_mod_fini(void) { crypto_unregister_alg(&tea_alg); crypto_unregister_alg(&xtea_alg); crypto_unregister_alg(&xeta_alg); } MODULE_ALIAS("xtea"); MODULE_ALIAS("xeta"); module_init(tea_mod_init); module_exit(tea_mod_fini); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION("TEA, XTEA & XETA Cryptographic Algorithms");
gpl-2.0
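The TEA core above is easy to check in isolation: 32 rounds with delta 0x9e3779b9, and decryption starts from sum = TEA_DELTA << 5 (i.e. delta * 32) and runs the rounds backwards. A self-contained round-trip of just the cipher core on host-order words, without the crypto API plumbing or the __le32 conversions, is sketched below:

#include <stdio.h>
#include <stdint.h>

#define TEA_ROUNDS 32
#define TEA_DELTA  0x9e3779b9u

static void tea_enc(uint32_t v[2], const uint32_t k[4])
{
        uint32_t y = v[0], z = v[1], sum = 0;
        int n;

        for (n = 0; n < TEA_ROUNDS; n++) {
                sum += TEA_DELTA;
                y += ((z << 4) + k[0]) ^ (z + sum) ^ ((z >> 5) + k[1]);
                z += ((y << 4) + k[2]) ^ (y + sum) ^ ((y >> 5) + k[3]);
        }
        v[0] = y; v[1] = z;
}

static void tea_dec(uint32_t v[2], const uint32_t k[4])
{
        uint32_t y = v[0], z = v[1], sum = TEA_DELTA << 5; /* delta * 32 */
        int n;

        for (n = 0; n < TEA_ROUNDS; n++) {
                z -= ((y << 4) + k[2]) ^ (y + sum) ^ ((y >> 5) + k[3]);
                y -= ((z << 4) + k[0]) ^ (z + sum) ^ ((z >> 5) + k[1]);
                sum -= TEA_DELTA;
        }
        v[0] = y; v[1] = z;
}

int main(void)
{
        const uint32_t key[4] = { 1, 2, 3, 4 };
        uint32_t blk[2] = { 0xdeadbeef, 0x01234567 };

        tea_enc(blk, key);
        printf("ct: %08x %08x\n", (unsigned)blk[0], (unsigned)blk[1]);
        tea_dec(blk, key);
        /* should print the original deadbeef 01234567 */
        printf("pt: %08x %08x\n", (unsigned)blk[0], (unsigned)blk[1]);
        return 0;
}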
k2wl/android_kernel_samsung_delos3geur
mm/kmemleak-test.c
11060
3367
/* * mm/kmemleak-test.c * * Copyright (C) 2008 ARM Limited * Written by Catalin Marinas <catalin.marinas@arm.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/list.h> #include <linux/percpu.h> #include <linux/fdtable.h> #include <linux/kmemleak.h> struct test_node { long header[25]; struct list_head list; long footer[25]; }; static LIST_HEAD(test_list); static DEFINE_PER_CPU(void *, kmemleak_test_pointer); /* * Some very simple testing. This function needs to be extended for * proper testing. */ static int __init kmemleak_test_init(void) { struct test_node *elem; int i; printk(KERN_INFO "Kmemleak testing\n"); /* make some orphan objects */ pr_info("kmemleak: kmalloc(32) = %p\n", kmalloc(32, GFP_KERNEL)); pr_info("kmemleak: kmalloc(32) = %p\n", kmalloc(32, GFP_KERNEL)); pr_info("kmemleak: kmalloc(1024) = %p\n", kmalloc(1024, GFP_KERNEL)); pr_info("kmemleak: kmalloc(1024) = %p\n", kmalloc(1024, GFP_KERNEL)); pr_info("kmemleak: kmalloc(2048) = %p\n", kmalloc(2048, GFP_KERNEL)); pr_info("kmemleak: kmalloc(2048) = %p\n", kmalloc(2048, GFP_KERNEL)); pr_info("kmemleak: kmalloc(4096) = %p\n", kmalloc(4096, GFP_KERNEL)); pr_info("kmemleak: kmalloc(4096) = %p\n", kmalloc(4096, GFP_KERNEL)); #ifndef CONFIG_MODULES pr_info("kmemleak: kmem_cache_alloc(files_cachep) = %p\n", kmem_cache_alloc(files_cachep, GFP_KERNEL)); pr_info("kmemleak: kmem_cache_alloc(files_cachep) = %p\n", kmem_cache_alloc(files_cachep, GFP_KERNEL)); #endif pr_info("kmemleak: vmalloc(64) = %p\n", vmalloc(64)); pr_info("kmemleak: vmalloc(64) = %p\n", vmalloc(64)); pr_info("kmemleak: vmalloc(64) = %p\n", vmalloc(64)); pr_info("kmemleak: vmalloc(64) = %p\n", vmalloc(64)); pr_info("kmemleak: vmalloc(64) = %p\n", vmalloc(64)); /* * Add elements to a list. They should only appear as orphan * after the module is removed. */ for (i = 0; i < 10; i++) { elem = kzalloc(sizeof(*elem), GFP_KERNEL); pr_info("kmemleak: kzalloc(sizeof(*elem)) = %p\n", elem); if (!elem) return -ENOMEM; INIT_LIST_HEAD(&elem->list); list_add_tail(&elem->list, &test_list); } for_each_possible_cpu(i) { per_cpu(kmemleak_test_pointer, i) = kmalloc(129, GFP_KERNEL); pr_info("kmemleak: kmalloc(129) = %p\n", per_cpu(kmemleak_test_pointer, i)); } return 0; } module_init(kmemleak_test_init); static void __exit kmemleak_test_exit(void) { struct test_node *elem, *tmp; /* * Remove the list elements without actually freeing the * memory. */ list_for_each_entry_safe(elem, tmp, &test_list, list) list_del(&elem->list); } module_exit(kmemleak_test_exit); MODULE_LICENSE("GPL");
gpl-2.0
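What the test module above calls an orphan is simply an allocation whose last pointer is dropped, as opposed to the elements kept reachable through test_list. A hedged userspace analogue, with plain malloc and a hand-rolled list standing in for kmalloc and list_head:

#include <stdio.h>
#include <stdlib.h>

struct node {                           /* stand-in for struct test_node */
        struct node *next;
        long payload[25];
};

static struct node *test_list;

int main(void)
{
        int i;

        /* orphan: the pointer is discarded immediately, like the
         * kmalloc(32) calls above - only a leak detector would ever
         * find this allocation again */
        printf("orphan = %p\n", malloc(32));

        /* reachable: linked into a global list, so not a leak until the
         * list is torn down without freeing the elements */
        for (i = 0; i < 10; i++) {
                struct node *elem = calloc(1, sizeof(*elem));

                if (!elem)
                        return 1;
                elem->next = test_list;
                test_list = elem;
                printf("reachable elem = %p\n", (void *)elem);
        }
        return 0;
}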
Dazzozo/android_kernel_huawei_u8815
drivers/input/joystick/turbografx.c
12084
8111
/* * Copyright (c) 1998-2001 Vojtech Pavlik * * Based on the work of: * Steffen Schwenke */ /* * TurboGraFX parallel port interface driver for Linux. */ /* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * Should you need to contact me, the author, you can do so either by * e-mail - mail your message to <vojtech@ucw.cz>, or by paper mail: * Vojtech Pavlik, Simunkova 1594, Prague 8, 182 00 Czech Republic */ #include <linux/kernel.h> #include <linux/parport.h> #include <linux/input.h> #include <linux/module.h> #include <linux/init.h> #include <linux/mutex.h> #include <linux/slab.h> MODULE_AUTHOR("Vojtech Pavlik <vojtech@ucw.cz>"); MODULE_DESCRIPTION("TurboGraFX parallel port interface driver"); MODULE_LICENSE("GPL"); #define TGFX_MAX_PORTS 3 #define TGFX_MAX_DEVICES 7 struct tgfx_config { int args[TGFX_MAX_DEVICES + 1]; unsigned int nargs; }; static struct tgfx_config tgfx_cfg[TGFX_MAX_PORTS] __initdata; module_param_array_named(map, tgfx_cfg[0].args, int, &tgfx_cfg[0].nargs, 0); MODULE_PARM_DESC(map, "Describes first set of devices (<parport#>,<js1>,<js2>,..<js7>"); module_param_array_named(map2, tgfx_cfg[1].args, int, &tgfx_cfg[1].nargs, 0); MODULE_PARM_DESC(map2, "Describes second set of devices"); module_param_array_named(map3, tgfx_cfg[2].args, int, &tgfx_cfg[2].nargs, 0); MODULE_PARM_DESC(map3, "Describes third set of devices"); #define TGFX_REFRESH_TIME HZ/100 /* 10 ms */ #define TGFX_TRIGGER 0x08 #define TGFX_UP 0x10 #define TGFX_DOWN 0x20 #define TGFX_LEFT 0x40 #define TGFX_RIGHT 0x80 #define TGFX_THUMB 0x02 #define TGFX_THUMB2 0x04 #define TGFX_TOP 0x01 #define TGFX_TOP2 0x08 static int tgfx_buttons[] = { BTN_TRIGGER, BTN_THUMB, BTN_THUMB2, BTN_TOP, BTN_TOP2 }; static struct tgfx { struct pardevice *pd; struct timer_list timer; struct input_dev *dev[TGFX_MAX_DEVICES]; char name[TGFX_MAX_DEVICES][64]; char phys[TGFX_MAX_DEVICES][32]; int sticks; int used; struct mutex sem; } *tgfx_base[TGFX_MAX_PORTS]; /* * tgfx_timer() reads and analyzes TurboGraFX joystick data. 
*/ static void tgfx_timer(unsigned long private) { struct tgfx *tgfx = (void *) private; struct input_dev *dev; int data1, data2, i; for (i = 0; i < 7; i++) if (tgfx->sticks & (1 << i)) { dev = tgfx->dev[i]; parport_write_data(tgfx->pd->port, ~(1 << i)); data1 = parport_read_status(tgfx->pd->port) ^ 0x7f; data2 = parport_read_control(tgfx->pd->port) ^ 0x04; /* CAVEAT parport */ input_report_abs(dev, ABS_X, !!(data1 & TGFX_RIGHT) - !!(data1 & TGFX_LEFT)); input_report_abs(dev, ABS_Y, !!(data1 & TGFX_DOWN ) - !!(data1 & TGFX_UP )); input_report_key(dev, BTN_TRIGGER, (data1 & TGFX_TRIGGER)); input_report_key(dev, BTN_THUMB, (data2 & TGFX_THUMB )); input_report_key(dev, BTN_THUMB2, (data2 & TGFX_THUMB2 )); input_report_key(dev, BTN_TOP, (data2 & TGFX_TOP )); input_report_key(dev, BTN_TOP2, (data2 & TGFX_TOP2 )); input_sync(dev); } mod_timer(&tgfx->timer, jiffies + TGFX_REFRESH_TIME); } static int tgfx_open(struct input_dev *dev) { struct tgfx *tgfx = input_get_drvdata(dev); int err; err = mutex_lock_interruptible(&tgfx->sem); if (err) return err; if (!tgfx->used++) { parport_claim(tgfx->pd); parport_write_control(tgfx->pd->port, 0x04); mod_timer(&tgfx->timer, jiffies + TGFX_REFRESH_TIME); } mutex_unlock(&tgfx->sem); return 0; } static void tgfx_close(struct input_dev *dev) { struct tgfx *tgfx = input_get_drvdata(dev); mutex_lock(&tgfx->sem); if (!--tgfx->used) { del_timer_sync(&tgfx->timer); parport_write_control(tgfx->pd->port, 0x00); parport_release(tgfx->pd); } mutex_unlock(&tgfx->sem); } /* * tgfx_probe() probes for tg gamepads. */ static struct tgfx __init *tgfx_probe(int parport, int *n_buttons, int n_devs) { struct tgfx *tgfx; struct input_dev *input_dev; struct parport *pp; struct pardevice *pd; int i, j; int err; pp = parport_find_number(parport); if (!pp) { printk(KERN_ERR "turbografx.c: no such parport\n"); err = -EINVAL; goto err_out; } pd = parport_register_device(pp, "turbografx", NULL, NULL, NULL, PARPORT_DEV_EXCL, NULL); if (!pd) { printk(KERN_ERR "turbografx.c: parport busy already - lp.o loaded?\n"); err = -EBUSY; goto err_put_pp; } tgfx = kzalloc(sizeof(struct tgfx), GFP_KERNEL); if (!tgfx) { printk(KERN_ERR "turbografx.c: Not enough memory\n"); err = -ENOMEM; goto err_unreg_pardev; } mutex_init(&tgfx->sem); tgfx->pd = pd; init_timer(&tgfx->timer); tgfx->timer.data = (long) tgfx; tgfx->timer.function = tgfx_timer; for (i = 0; i < n_devs; i++) { if (n_buttons[i] < 1) continue; if (n_buttons[i] > 6) { printk(KERN_ERR "turbografx.c: Invalid number of buttons %d\n", n_buttons[i]); err = -EINVAL; goto err_unreg_devs; } tgfx->dev[i] = input_dev = input_allocate_device(); if (!input_dev) { printk(KERN_ERR "turbografx.c: Not enough memory for input device\n"); err = -ENOMEM; goto err_unreg_devs; } tgfx->sticks |= (1 << i); snprintf(tgfx->name[i], sizeof(tgfx->name[i]), "TurboGraFX %d-button Multisystem joystick", n_buttons[i]); snprintf(tgfx->phys[i], sizeof(tgfx->phys[i]), "%s/input%d", tgfx->pd->port->name, i); input_dev->name = tgfx->name[i]; input_dev->phys = tgfx->phys[i]; input_dev->id.bustype = BUS_PARPORT; input_dev->id.vendor = 0x0003; input_dev->id.product = n_buttons[i]; input_dev->id.version = 0x0100; input_set_drvdata(input_dev, tgfx); input_dev->open = tgfx_open; input_dev->close = tgfx_close; input_dev->evbit[0] = BIT_MASK(EV_KEY) | BIT_MASK(EV_ABS); input_set_abs_params(input_dev, ABS_X, -1, 1, 0, 0); input_set_abs_params(input_dev, ABS_Y, -1, 1, 0, 0); for (j = 0; j < n_buttons[i]; j++) set_bit(tgfx_buttons[j], input_dev->keybit); err = 
input_register_device(tgfx->dev[i]); if (err) goto err_free_dev; } if (!tgfx->sticks) { printk(KERN_ERR "turbografx.c: No valid devices specified\n"); err = -EINVAL; goto err_free_tgfx; } parport_put_port(pp); return tgfx; err_free_dev: input_free_device(tgfx->dev[i]); err_unreg_devs: while (--i >= 0) if (tgfx->dev[i]) input_unregister_device(tgfx->dev[i]); err_free_tgfx: kfree(tgfx); err_unreg_pardev: parport_unregister_device(pd); err_put_pp: parport_put_port(pp); err_out: return ERR_PTR(err); } static void tgfx_remove(struct tgfx *tgfx) { int i; for (i = 0; i < TGFX_MAX_DEVICES; i++) if (tgfx->dev[i]) input_unregister_device(tgfx->dev[i]); parport_unregister_device(tgfx->pd); kfree(tgfx); } static int __init tgfx_init(void) { int i; int have_dev = 0; int err = 0; for (i = 0; i < TGFX_MAX_PORTS; i++) { if (tgfx_cfg[i].nargs == 0 || tgfx_cfg[i].args[0] < 0) continue; if (tgfx_cfg[i].nargs < 2) { printk(KERN_ERR "turbografx.c: at least one joystick must be specified\n"); err = -EINVAL; break; } tgfx_base[i] = tgfx_probe(tgfx_cfg[i].args[0], tgfx_cfg[i].args + 1, tgfx_cfg[i].nargs - 1); if (IS_ERR(tgfx_base[i])) { err = PTR_ERR(tgfx_base[i]); break; } have_dev = 1; } if (err) { while (--i >= 0) if (tgfx_base[i]) tgfx_remove(tgfx_base[i]); return err; } return have_dev ? 0 : -ENODEV; } static void __exit tgfx_exit(void) { int i; for (i = 0; i < TGFX_MAX_PORTS; i++) if (tgfx_base[i]) tgfx_remove(tgfx_base[i]); } module_init(tgfx_init); module_exit(tgfx_exit);
gpl-2.0
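tgfx_timer() above inverts the active-low parport status with ^ 0x7f and turns each pair of direction bits into a -1/0/+1 axis value via !!(right) - !!(left). A small standalone decode of one status byte; the sample value is made up:

#include <stdio.h>

#define TGFX_TRIGGER 0x08
#define TGFX_UP      0x10
#define TGFX_DOWN    0x20
#define TGFX_LEFT    0x40
#define TGFX_RIGHT   0x80

int main(void)
{
        int raw = 0x3f;                 /* hypothetical parport status */
        int data1 = raw ^ 0x7f;         /* active-low lines -> active-high */
        int x = !!(data1 & TGFX_RIGHT) - !!(data1 & TGFX_LEFT);
        int y = !!(data1 & TGFX_DOWN)  - !!(data1 & TGFX_UP);

        /* prints x=-1 y=+0 trigger=0 for the sample byte */
        printf("x=%+d y=%+d trigger=%d\n", x, y, !!(data1 & TGFX_TRIGGER));
        return 0;
}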
MyAOSP/kernel_samsung_manta
arch/arm/plat-iop/cp6.c
12596
1422
/*
 * IOP Coprocessor-6 access handler
 * Copyright (c) 2006, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */
#include <linux/init.h>
#include <asm/traps.h>
#include <asm/ptrace.h>

static int cp6_trap(struct pt_regs *regs, unsigned int instr)
{
        u32 temp;

        /* enable cp6 access */
        asm volatile (
                "mrc    p15, 0, %0, c15, c1, 0\n\t"
                "orr    %0, %0, #(1 << 6)\n\t"
                "mcr    p15, 0, %0, c15, c1, 0\n\t"
                : "=r"(temp));

        return 0;
}

/* permit kernel space cp6 access
 * deny user space cp6 access
 */
static struct undef_hook cp6_hook = {
        .instr_mask     = 0x0f000ff0,
        .instr_val      = 0x0e000610,
        .cpsr_mask      = MODE_MASK,
        .cpsr_val       = SVC_MODE,
        .fn             = cp6_trap,
};

void __init iop_init_cp6_handler(void)
{
        register_undef_hook(&cp6_hook);
}
gpl-2.0
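The undef_hook above fires for any undefined-instruction word where (instr & instr_mask) == instr_val, i.e. any coprocessor register transfer (mrc/mcr) whose coprocessor field selects cp6. A sketch of that matching with hand-assembled sample encodings; the values are illustrative, not taken from a real trap:

#include <stdio.h>
#include <stdint.h>

struct undef_hook {
        uint32_t instr_mask;
        uint32_t instr_val;
};

static const struct undef_hook cp6_hook = {
        .instr_mask = 0x0f000ff0,
        .instr_val  = 0x0e000610,   /* mrc/mcr targeting cp6 */
};

static int matches(const struct undef_hook *h, uint32_t instr)
{
        return (instr & h->instr_mask) == h->instr_val;
}

int main(void)
{
        uint32_t cp6_insn = 0xee070610; /* a cp6 register transfer */
        uint32_t other    = 0xee070f10; /* cp15 access: must not match */

        printf("cp6:  %d\n", matches(&cp6_hook, cp6_insn));  /* 1 */
        printf("cp15: %d\n", matches(&cp6_hook, other));     /* 0 */
        return 0;
}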
jinhu/QEMU-s5l89xx-port
hw/ide/mmio.c
53
4169
/* * QEMU IDE Emulation: mmio support (for embedded). * * Copyright (c) 2003 Fabrice Bellard * Copyright (c) 2006 Openedhand Ltd. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. */ #include <hw/hw.h> #include "block.h" #include "block_int.h" #include "sysemu.h" #include "dma.h" #include <hw/ide/internal.h> /***********************************************************/ /* MMIO based ide port * This emulates IDE device connected directly to the CPU bus without * dedicated ide controller, which is often seen on embedded boards. */ typedef struct { IDEBus bus; int shift; } MMIOState; static void mmio_ide_reset(void *opaque) { MMIOState *s = opaque; ide_bus_reset(&s->bus); } static uint32_t mmio_ide_read (void *opaque, target_phys_addr_t addr) { MMIOState *s = opaque; addr >>= s->shift; if (addr & 7) return ide_ioport_read(&s->bus, addr); else return ide_data_readw(&s->bus, 0); } static void mmio_ide_write (void *opaque, target_phys_addr_t addr, uint32_t val) { MMIOState *s = opaque; addr >>= s->shift; if (addr & 7) ide_ioport_write(&s->bus, addr, val); else ide_data_writew(&s->bus, 0, val); } static CPUReadMemoryFunc * const mmio_ide_reads[] = { mmio_ide_read, mmio_ide_read, mmio_ide_read, }; static CPUWriteMemoryFunc * const mmio_ide_writes[] = { mmio_ide_write, mmio_ide_write, mmio_ide_write, }; static uint32_t mmio_ide_status_read (void *opaque, target_phys_addr_t addr) { MMIOState *s= opaque; return ide_status_read(&s->bus, 0); } static void mmio_ide_cmd_write (void *opaque, target_phys_addr_t addr, uint32_t val) { MMIOState *s = opaque; ide_cmd_write(&s->bus, 0, val); } static CPUReadMemoryFunc * const mmio_ide_status[] = { mmio_ide_status_read, mmio_ide_status_read, mmio_ide_status_read, }; static CPUWriteMemoryFunc * const mmio_ide_cmd[] = { mmio_ide_cmd_write, mmio_ide_cmd_write, mmio_ide_cmd_write, }; static const VMStateDescription vmstate_ide_mmio = { .name = "mmio-ide", .version_id = 3, .minimum_version_id = 0, .minimum_version_id_old = 0, .fields = (VMStateField []) { VMSTATE_IDE_BUS(bus, MMIOState), VMSTATE_IDE_DRIVES(bus.ifs, MMIOState), VMSTATE_END_OF_LIST() } }; void mmio_ide_init (target_phys_addr_t membase, target_phys_addr_t membase2, qemu_irq irq, int shift, DriveInfo *hd0, DriveInfo *hd1) { MMIOState *s = qemu_mallocz(sizeof(MMIOState)); int mem1, mem2; ide_init2_with_non_qdev_drives(&s->bus, hd0, hd1, irq); s->shift = shift; mem1 = cpu_register_io_memory(mmio_ide_reads, mmio_ide_writes, s, DEVICE_NATIVE_ENDIAN); mem2 = cpu_register_io_memory(mmio_ide_status, mmio_ide_cmd, s, 
DEVICE_NATIVE_ENDIAN); cpu_register_physical_memory(membase, 16 << shift, mem1); cpu_register_physical_memory(membase2, 2 << shift, mem2); vmstate_register(NULL, 0, &vmstate_ide_mmio, s); qemu_register_reset(mmio_ide_reset, s); }
gpl-2.0
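The MMIO decode above shifts the guest-physical offset down by the board-specific shift and then uses the low three bits: nonzero selects a classic task-file register, zero selects the 16-bit data port. A standalone sketch of that address math, assuming a shift of 2 (registers spaced 4 bytes apart):

#include <stdio.h>
#include <stdint.h>

static const char *decode(uint64_t addr, int shift)
{
        addr >>= shift;
        if (addr & 7)
                return "task-file register (ide_ioport_* path)";
        return "data register (16-bit ide_data_* path)";
}

int main(void)
{
        int shift = 2;
        uint64_t off;

        for (off = 0; off < (uint64_t)8 << shift; off += (uint64_t)1 << shift)
                printf("offset %#06llx -> reg %llu: %s\n",
                       (unsigned long long)off,
                       (unsigned long long)((off >> shift) & 7),
                       decode(off, shift));
        return 0;
}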
TheTypoMaster/binutils-gdb
bfd/dwarf1.c
53
14857
/* DWARF 1 find nearest line (_bfd_dwarf1_find_nearest_line). Copyright (C) 1998-2015 Free Software Foundation, Inc. Written by Gavin Romig-Koch of Cygnus Solutions (gavin@cygnus.com). This file is part of BFD. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin Street - Fifth Floor, Boston, MA 02110-1301, USA. */ #include "sysdep.h" #include "bfd.h" #include "libiberty.h" #include "libbfd.h" #include "elf-bfd.h" #include "elf/dwarf.h" /* dwarf1_debug is the starting point for all dwarf1 info. */ struct dwarf1_debug { /* The bfd we are working with. */ bfd* abfd; /* Pointer to the symbol table. */ asymbol** syms; /* List of already parsed compilation units. */ struct dwarf1_unit* lastUnit; /* The buffer for the .debug section. Zero indicates that the .debug section failed to load. */ bfd_byte *debug_section; /* Pointer to the end of the .debug_info section memory buffer. */ bfd_byte *debug_section_end; /* The buffer for the .line section. */ bfd_byte *line_section; /* End of that buffer. */ bfd_byte *line_section_end; /* The current or next unread die within the .debug section. */ bfd_byte *currentDie; }; /* One dwarf1_unit for each parsed compilation unit die. */ struct dwarf1_unit { /* Linked starting from stash->lastUnit. */ struct dwarf1_unit* prev; /* Name of the compilation unit. */ char *name; /* The highest and lowest address used in the compilation unit. */ unsigned long low_pc; unsigned long high_pc; /* Does this unit have a statement list? */ int has_stmt_list; /* If any, the offset of the line number table in the .line section. */ unsigned long stmt_list_offset; /* If non-zero, a pointer to the first child of this unit. */ bfd_byte *first_child; /* How many line entries? */ unsigned long line_count; /* The decoded line number table (line_count entries). */ struct linenumber* linenumber_table; /* The list of functions in this unit. */ struct dwarf1_func* func_list; }; /* One dwarf1_func for each parsed function die. */ struct dwarf1_func { /* Linked starting from aUnit->func_list. */ struct dwarf1_func* prev; /* Name of function. */ char* name; /* The highest and lowest address used in the compilation unit. */ unsigned long low_pc; unsigned long high_pc; }; /* Used to return info about a parsed die. */ struct die_info { unsigned long length; unsigned long sibling; unsigned long low_pc; unsigned long high_pc; unsigned long stmt_list_offset; char* name; int has_stmt_list; unsigned short tag; }; /* Parsed line number information. */ struct linenumber { /* First address in the line. */ unsigned long addr; /* The line number. */ unsigned long linenumber; }; /* Find the form of an attr, from the attr field. */ #define FORM_FROM_ATTR(attr) ((attr) & 0xF) /* Implicitly specified. */ /* Return a newly allocated dwarf1_unit. It should be cleared and then attached into the 'stash' at 'stash->lastUnit'. 
*/ static struct dwarf1_unit* alloc_dwarf1_unit (struct dwarf1_debug* stash) { bfd_size_type amt = sizeof (struct dwarf1_unit); struct dwarf1_unit* x = (struct dwarf1_unit *) bfd_zalloc (stash->abfd, amt); if (x) { x->prev = stash->lastUnit; stash->lastUnit = x; } return x; } /* Return a newly allocated dwarf1_func. It must be cleared and attached into 'aUnit' at 'aUnit->func_list'. */ static struct dwarf1_func * alloc_dwarf1_func (struct dwarf1_debug* stash, struct dwarf1_unit* aUnit) { bfd_size_type amt = sizeof (struct dwarf1_func); struct dwarf1_func* x = (struct dwarf1_func *) bfd_zalloc (stash->abfd, amt); if (x) { x->prev = aUnit->func_list; aUnit->func_list = x; } return x; } /* parse_die - parse a Dwarf1 die. Parse the die starting at 'aDiePtr' into 'aDieInfo'. 'abfd' must be the bfd from which the section that 'aDiePtr' points to was pulled from. Return FALSE if the die is invalidly formatted; TRUE otherwise. */ static bfd_boolean parse_die (bfd * abfd, struct die_info * aDieInfo, bfd_byte * aDiePtr, bfd_byte * aDiePtrEnd) { bfd_byte *this_die = aDiePtr; bfd_byte *xptr = this_die; memset (aDieInfo, 0, sizeof (* aDieInfo)); /* First comes the length. */ aDieInfo->length = bfd_get_32 (abfd, (bfd_byte *) xptr); xptr += 4; if (aDieInfo->length == 0 || (this_die + aDieInfo->length) >= aDiePtrEnd) return FALSE; if (aDieInfo->length < 6) { /* Just padding bytes. */ aDieInfo->tag = TAG_padding; return TRUE; } /* Then the tag. */ aDieInfo->tag = bfd_get_16 (abfd, (bfd_byte *) xptr); xptr += 2; /* Then the attributes. */ while (xptr < (this_die + aDieInfo->length)) { unsigned short attr; /* Parse the attribute based on its form. This section must handle all dwarf1 forms, but need only handle the actual attributes that we care about. */ attr = bfd_get_16 (abfd, (bfd_byte *) xptr); xptr += 2; switch (FORM_FROM_ATTR (attr)) { case FORM_DATA2: xptr += 2; break; case FORM_DATA4: case FORM_REF: if (attr == AT_sibling) aDieInfo->sibling = bfd_get_32 (abfd, (bfd_byte *) xptr); else if (attr == AT_stmt_list) { aDieInfo->stmt_list_offset = bfd_get_32 (abfd, (bfd_byte *) xptr); aDieInfo->has_stmt_list = 1; } xptr += 4; break; case FORM_DATA8: xptr += 8; break; case FORM_ADDR: if (attr == AT_low_pc) aDieInfo->low_pc = bfd_get_32 (abfd, (bfd_byte *) xptr); else if (attr == AT_high_pc) aDieInfo->high_pc = bfd_get_32 (abfd, (bfd_byte *) xptr); xptr += 4; break; case FORM_BLOCK2: xptr += 2 + bfd_get_16 (abfd, (bfd_byte *) xptr); break; case FORM_BLOCK4: xptr += 4 + bfd_get_32 (abfd, (bfd_byte *) xptr); break; case FORM_STRING: if (attr == AT_name) aDieInfo->name = (char *) xptr; xptr += strlen ((char *) xptr) + 1; break; } } return TRUE; } /* Parse a dwarf1 line number table for 'aUnit->stmt_list_offset' into 'aUnit->linenumber_table'. Return FALSE if an error occurs; TRUE otherwise. */ static bfd_boolean parse_line_table (struct dwarf1_debug* stash, struct dwarf1_unit* aUnit) { bfd_byte *xptr; /* Load the ".line" section from the bfd if we haven't already. */ if (stash->line_section == 0) { asection *msec; bfd_size_type size; msec = bfd_get_section_by_name (stash->abfd, ".line"); if (! msec) return FALSE; size = msec->rawsize ? msec->rawsize : msec->size; stash->line_section = bfd_simple_get_relocated_section_contents (stash->abfd, msec, NULL, stash->syms); if (! 
stash->line_section) return FALSE; stash->line_section_end = stash->line_section + size; } xptr = stash->line_section + aUnit->stmt_list_offset; if (xptr < stash->line_section_end) { unsigned long eachLine; bfd_byte *tblend; unsigned long base; bfd_size_type amt; /* First comes the length. */ tblend = bfd_get_32 (stash->abfd, (bfd_byte *) xptr) + xptr; xptr += 4; /* Then the base address for each address in the table. */ base = bfd_get_32 (stash->abfd, (bfd_byte *) xptr); xptr += 4; /* How many line entrys? 10 = 4 (line number) + 2 (pos in line) + 4 (address in line). */ aUnit->line_count = (tblend - xptr) / 10; /* Allocate an array for the entries. */ amt = sizeof (struct linenumber) * aUnit->line_count; aUnit->linenumber_table = (struct linenumber *) bfd_alloc (stash->abfd, amt); if (!aUnit->linenumber_table) return FALSE; for (eachLine = 0; eachLine < aUnit->line_count; eachLine++) { /* A line number. */ aUnit->linenumber_table[eachLine].linenumber = bfd_get_32 (stash->abfd, (bfd_byte *) xptr); xptr += 4; /* Skip the position within the line. */ xptr += 2; /* And finally the address. */ aUnit->linenumber_table[eachLine].addr = base + bfd_get_32 (stash->abfd, (bfd_byte *) xptr); xptr += 4; } } return TRUE; } /* Parse each function die in a compilation unit 'aUnit'. The first child die of 'aUnit' should be in 'aUnit->first_child', the result is placed in 'aUnit->func_list'. Return FALSE if error; TRUE otherwise. */ static bfd_boolean parse_functions_in_unit (struct dwarf1_debug* stash, struct dwarf1_unit* aUnit) { bfd_byte *eachDie; if (aUnit->first_child) for (eachDie = aUnit->first_child; eachDie < stash->debug_section_end; ) { struct die_info eachDieInfo; if (! parse_die (stash->abfd, &eachDieInfo, eachDie, stash->debug_section_end)) return FALSE; if (eachDieInfo.tag == TAG_global_subroutine || eachDieInfo.tag == TAG_subroutine || eachDieInfo.tag == TAG_inlined_subroutine || eachDieInfo.tag == TAG_entry_point) { struct dwarf1_func* aFunc = alloc_dwarf1_func (stash,aUnit); if (!aFunc) return FALSE; aFunc->name = eachDieInfo.name; aFunc->low_pc = eachDieInfo.low_pc; aFunc->high_pc = eachDieInfo.high_pc; } /* Move to next sibling, if none, end loop */ if (eachDieInfo.sibling) eachDie = stash->debug_section + eachDieInfo.sibling; else break; } return TRUE; } /* Find the nearest line to 'addr' in 'aUnit'. Return whether we found the line (or a function) without error. */ static bfd_boolean dwarf1_unit_find_nearest_line (struct dwarf1_debug* stash, struct dwarf1_unit* aUnit, unsigned long addr, const char **filename_ptr, const char **functionname_ptr, unsigned int *linenumber_ptr) { int line_p = FALSE; int func_p = FALSE; if (aUnit->low_pc <= addr && addr < aUnit->high_pc) { if (aUnit->has_stmt_list) { unsigned long i; struct dwarf1_func* eachFunc; if (! aUnit->linenumber_table) { if (! parse_line_table (stash, aUnit)) return FALSE; } if (! aUnit->func_list) { if (! parse_functions_in_unit (stash, aUnit)) return FALSE; } for (i = 0; i < aUnit->line_count; i++) { if (aUnit->linenumber_table[i].addr <= addr && addr < aUnit->linenumber_table[i+1].addr) { *filename_ptr = aUnit->name; *linenumber_ptr = aUnit->linenumber_table[i].linenumber; line_p = TRUE; break; } } for (eachFunc = aUnit->func_list; eachFunc; eachFunc = eachFunc->prev) { if (eachFunc->low_pc <= addr && addr < eachFunc->high_pc) { *functionname_ptr = eachFunc->name; func_p = TRUE; break; } } } } return line_p || func_p; } /* The DWARF 1 version of find_nearest line. Return TRUE if the line is found without error. 
*/ bfd_boolean _bfd_dwarf1_find_nearest_line (bfd *abfd, asymbol **symbols, asection *section, bfd_vma offset, const char **filename_ptr, const char **functionname_ptr, unsigned int *linenumber_ptr) { struct dwarf1_debug *stash = elf_tdata (abfd)->dwarf1_find_line_info; struct dwarf1_unit* eachUnit; /* What address are we looking for? */ unsigned long addr = (unsigned long)(offset + section->vma); *filename_ptr = NULL; *functionname_ptr = NULL; *linenumber_ptr = 0; if (! stash) { asection *msec; bfd_size_type size = sizeof (struct dwarf1_debug); stash = elf_tdata (abfd)->dwarf1_find_line_info = (struct dwarf1_debug *) bfd_zalloc (abfd, size); if (! stash) return FALSE; msec = bfd_get_section_by_name (abfd, ".debug"); if (! msec) /* No dwarf1 info. Note that at this point the stash has been allocated, but contains zeros; this lets future calls to this function fail more quickly. */ return FALSE; size = msec->rawsize ? msec->rawsize : msec->size; stash->debug_section = bfd_simple_get_relocated_section_contents (abfd, msec, NULL, symbols); if (! stash->debug_section) return FALSE; stash->debug_section_end = stash->debug_section + size; stash->currentDie = stash->debug_section; stash->abfd = abfd; stash->syms = symbols; } /* A null debug_section indicates that there was no dwarf1 info or that an error occurred while setting up the stash. */ if (! stash->debug_section) return FALSE; /* Look at the previously parsed units to see if any contain the addr. */ for (eachUnit = stash->lastUnit; eachUnit; eachUnit = eachUnit->prev) if (eachUnit->low_pc <= addr && addr < eachUnit->high_pc) return dwarf1_unit_find_nearest_line (stash, eachUnit, addr, filename_ptr, functionname_ptr, linenumber_ptr); while (stash->currentDie < stash->debug_section_end) { struct die_info aDieInfo; if (! parse_die (stash->abfd, &aDieInfo, stash->currentDie, stash->debug_section_end)) return FALSE; if (aDieInfo.tag == TAG_compile_unit) { struct dwarf1_unit* aUnit = alloc_dwarf1_unit (stash); if (!aUnit) return FALSE; aUnit->name = aDieInfo.name; aUnit->low_pc = aDieInfo.low_pc; aUnit->high_pc = aDieInfo.high_pc; aUnit->has_stmt_list = aDieInfo.has_stmt_list; aUnit->stmt_list_offset = aDieInfo.stmt_list_offset; /* A die has a child if it's followed by a die that is not its sibling. */ if (aDieInfo.sibling && stash->currentDie + aDieInfo.length < stash->debug_section_end && stash->currentDie + aDieInfo.length != stash->debug_section + aDieInfo.sibling) aUnit->first_child = stash->currentDie + aDieInfo.length; else aUnit->first_child = 0; if (aUnit->low_pc <= addr && addr < aUnit->high_pc) return dwarf1_unit_find_nearest_line (stash, aUnit, addr, filename_ptr, functionname_ptr, linenumber_ptr); } if (aDieInfo.sibling != 0) stash->currentDie = stash->debug_section + aDieInfo.sibling; else stash->currentDie += aDieInfo.length; } return FALSE; }
gpl-2.0
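A note on the on-disk layout that parse_die walks above: every DWARF 1 die is a 32-bit length, a 16-bit tag, then a run of 16-bit attribute codes whose low four bits name the form of the value that follows. Below is a minimal, self-contained sketch of just the attribute-skipping step; the form constants and the big-endian readers are stand-ins for the dwarf.h definitions and bfd_get_16/bfd_get_32, so treat the numeric values as illustrative assumptions rather than the library's.

#include <stdint.h>
#include <string.h>

/* Assumed DWARF 1 form encoding: the form lives in the low 4 bits of
   the attribute code, as FORM_FROM_ATTR in the file above implies. */
enum { FORM_ADDR = 1, FORM_REF = 2, FORM_BLOCK2 = 3, FORM_BLOCK4 = 4,
       FORM_DATA2 = 5, FORM_DATA4 = 6, FORM_DATA8 = 7, FORM_STRING = 8 };
#define FORM_FROM_ATTR(a) ((a) & 0xf)

static uint16_t get16 (const uint8_t *p) { return (uint16_t) (p[0] << 8 | p[1]); }
static uint32_t get32 (const uint8_t *p) { return (uint32_t) p[0] << 24 | (uint32_t) p[1] << 16 | (uint32_t) p[2] << 8 | p[3]; }

/* Advance past one attribute value, mirroring the switch in parse_die;
   returns NULL on an unknown form instead of silently misparsing. */
static const uint8_t * skip_attr (uint16_t attr, const uint8_t *p)
{
  switch (FORM_FROM_ATTR (attr))
    {
    case FORM_DATA2:  return p + 2;
    case FORM_ADDR:
    case FORM_REF:
    case FORM_DATA4:  return p + 4;
    case FORM_DATA8:  return p + 8;
    case FORM_BLOCK2: return p + 2 + get16 (p);
    case FORM_BLOCK4: return p + 4 + get32 (p);
    case FORM_STRING: return p + strlen ((const char *) p) + 1;
    default:          return NULL;
    }
}

The attributes parse_die actually records (AT_sibling, AT_stmt_list, AT_low_pc, AT_high_pc, AT_name) are picked out inside that same switch; everything else is skipped purely by its form.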
Abhinav1997/kernel_cyanogen_msm8916
drivers/input/touchscreen/bu21150.c
309
26189
/* * Japan Display Inc. BU21150 touch screen driver. * * Copyright (C) 2013-2014 Japan Display Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2, and only version 2, as published by the * Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, * MA 02110-1301, USA. * */ #include <linux/uaccess.h> #include <linux/fs.h> #include <linux/miscdevice.h> #include <linux/module.h> #include <linux/spi/spi.h> #include <linux/gpio.h> #include <linux/input/bu21150.h> #include <linux/regulator/consumer.h> #include <linux/delay.h> #include <linux/of_gpio.h> #include <linux/interrupt.h> #include <asm/byteorder.h> /* define */ #define DEVICE_NAME "jdi-bu21150" #define REG_READ_DATA (0x0400) #define MAX_FRAME_SIZE (8*1024+16) /* byte */ #define SPI_HEADER_SIZE (3) #define FRAME_HEADER_SIZE (16) /* byte */ #define GPIO_LOW (0) #define GPIO_HIGH (1) #define WAITQ_WAIT (0) #define WAITQ_WAKEUP (1) #define BU21150_MIN_VOLTAGE_UV 2700000 #define BU21150_MAX_VOLTAGE_UV 3300000 #define BU21150_VDD_DIG_VOLTAGE_UV 1800000 #define BU21150_MAX_OPS_LOAD_UA 150000 /* struct */ struct bu21150_data { /* system */ struct spi_device *client; struct workqueue_struct *workq; struct work_struct work; struct pinctrl *ts_pinctrl; struct pinctrl_state *gpio_state_active; struct pinctrl_state *gpio_state_suspend; struct pinctrl_state *afe_pwr_state_active; struct pinctrl_state *afe_pwr_state_suspend; struct pinctrl_state *disp_vsn_state_active; struct pinctrl_state *disp_vsn_state_suspend; /* frame */ struct bu21150_ioctl_get_frame_data req_get; u8 frame[MAX_FRAME_SIZE]; struct bu21150_ioctl_get_frame_data frame_get; struct timeval tv; struct mutex mutex_frame; /* frame work */ u8 frame_work[MAX_FRAME_SIZE]; struct bu21150_ioctl_get_frame_data frame_work_get; /* waitq */ u8 frame_waitq_flag; wait_queue_head_t frame_waitq; /* spi */ u8 spi_buf[MAX_FRAME_SIZE]; /* power */ struct regulator *vcc_ana; struct regulator *vcc_dig; /* dtsi */ int irq_gpio; int rst_gpio; int afe_pwr_gpio; int disp_vsn_gpio; }; struct ser_req { struct spi_message msg; struct spi_transfer xfer[2]; u16 sample ____cacheline_aligned; }; /* static function declaration */ static int bu21150_probe(struct spi_device *client); static int bu21150_remove(struct spi_device *client); static int bu21150_open(struct inode *inode, struct file *filp); static int bu21150_release(struct inode *inode, struct file *filp); static long bu21150_ioctl(struct file *filp, unsigned int cmd, unsigned long arg); static long bu21150_ioctl_get_frame(unsigned long arg); static long bu21150_ioctl_reset(unsigned long arg); static long bu21150_ioctl_spi_read(unsigned long arg); static long bu21150_ioctl_spi_write(unsigned long arg); static long bu21150_ioctl_suspend(void); static long bu21150_ioctl_resume(void); static long bu21150_ioctl_unblock(void); static long bu21150_ioctl_unblock_release(void); static irqreturn_t bu21150_irq_handler(int irq, void *dev_id); static void bu21150_irq_work_func(struct work_struct *work); static void swap_2byte(unsigned char *buf, unsigned int size); static int 
bu21150_read_register(u32 addr, u16 size, u8 *data); static int bu21150_write_register(u32 addr, u16 size, u8 *data); static void wake_up_frame_waitq(struct bu21150_data *ts); static long wait_frame_waitq(struct bu21150_data *ts); static int is_same_bu21150_ioctl_get_frame_data( struct bu21150_ioctl_get_frame_data *data1, struct bu21150_ioctl_get_frame_data *data2); static void copy_frame(struct bu21150_data *ts); #ifdef CHECK_SAME_FRAME static void check_same_frame(struct bu21150_data *ts); #endif static bool parse_dtsi(struct device *dev, struct bu21150_data *ts); /* static variables */ static struct spi_device *g_client_bu21150; static int g_io_opened; static const struct of_device_id g_bu21150_psoc_match_table[] = { { .compatible = "jdi,bu21150", }, { }, }; static const struct file_operations g_bu21150_fops = { .owner = THIS_MODULE, .open = bu21150_open, .release = bu21150_release, .unlocked_ioctl = bu21150_ioctl, }; static struct miscdevice g_bu21150_misc_device = { .minor = MISC_DYNAMIC_MINOR, .name = DEVICE_NAME, .fops = &g_bu21150_fops, }; static const struct spi_device_id g_bu21150_device_id[] = { { DEVICE_NAME, 0 }, { } }; MODULE_DEVICE_TABLE(spi, g_bu21150_device_id); static struct spi_driver g_bu21150_spi_driver = { .probe = bu21150_probe, .remove = bu21150_remove, .id_table = g_bu21150_device_id, .driver = { .name = DEVICE_NAME, .owner = THIS_MODULE, .bus = &spi_bus_type, .of_match_table = g_bu21150_psoc_match_table, }, }; static int g_bu21150_ioctl_unblock; module_spi_driver(g_bu21150_spi_driver); MODULE_AUTHOR("Japan Display Inc"); MODULE_DESCRIPTION("JDI BU21150 Device Driver"); MODULE_LICENSE("GPL v2"); MODULE_ALIAS("spi:bu21150"); /* static functions */ static int reg_set_optimum_mode_check(struct regulator *reg, int load_ua) { return (regulator_count_voltages(reg) > 0) ? 
regulator_set_optimum_mode(reg, load_ua) : 0; } static int bu21150_pinctrl_init(struct bu21150_data *data) { int rc; data->ts_pinctrl = devm_pinctrl_get(&(data->client->dev)); if (IS_ERR_OR_NULL(data->ts_pinctrl)) { dev_err(&data->client->dev, "Target does not use pinctrl\n"); rc = PTR_ERR(data->ts_pinctrl); goto error; } data->gpio_state_active = pinctrl_lookup_state(data->ts_pinctrl, "pmx_ts_active"); if (IS_ERR_OR_NULL(data->gpio_state_active)) { dev_dbg(&data->client->dev, "Can not get ts default pinstate\n"); rc = PTR_ERR(data->gpio_state_active); goto error; } data->gpio_state_suspend = pinctrl_lookup_state(data->ts_pinctrl, "pmx_ts_suspend"); if (IS_ERR_OR_NULL(data->gpio_state_suspend)) { dev_dbg(&data->client->dev, "Can not get ts sleep pinstate\n"); rc = PTR_ERR(data->gpio_state_suspend); goto error; } data->afe_pwr_state_active = pinctrl_lookup_state(data->ts_pinctrl, "afe_pwr_active"); if (IS_ERR_OR_NULL(data->afe_pwr_state_active)) { dev_err(&data->client->dev, "Can not get pwr default pinstate\n"); rc = PTR_ERR(data->afe_pwr_state_active); goto error; } data->afe_pwr_state_suspend = pinctrl_lookup_state(data->ts_pinctrl, "afe_pwr_suspend"); if (IS_ERR_OR_NULL(data->afe_pwr_state_suspend)) { dev_err(&data->client->dev, "Can not get pwr sleep pinstate\n"); rc = PTR_ERR(data->afe_pwr_state_suspend); goto error; } data->disp_vsn_state_active = pinctrl_lookup_state(data->ts_pinctrl, "disp_vsn_active"); if (IS_ERR_OR_NULL(data->disp_vsn_state_active)) { dev_err(&data->client->dev, "Can not get disp_vsn default pinstate\n"); rc = PTR_ERR(data->disp_vsn_state_active); goto error; } data->disp_vsn_state_suspend = pinctrl_lookup_state(data->ts_pinctrl, "disp_vsn_suspend"); if (IS_ERR_OR_NULL(data->disp_vsn_state_suspend)) { dev_err(&data->client->dev, "Can not get disp_vsn sleep pinstate\n"); rc = PTR_ERR(data->disp_vsn_state_suspend); goto error; } return 0; error: data->ts_pinctrl = NULL; return rc; } static int bu21150_pinctrl_select(struct bu21150_data *data, bool on) { struct pinctrl_state *pins_state; int ret = 0; pins_state = on ? data->gpio_state_active : data->gpio_state_suspend; if (!IS_ERR_OR_NULL(pins_state)) { ret = pinctrl_select_state(data->ts_pinctrl, pins_state); if (ret) { dev_err(&data->client->dev, "can not set %s pins\n", on ? "pmx_ts_active" : "pmx_ts_suspend"); return ret; } } else { dev_err(&data->client->dev, "not a valid '%s' pinstate\n", on ? 
"pmx_ts_active" : "pmx_ts_suspend"); return -EINVAL; } return ret; } static int bu21150_pinctrl_enable(struct bu21150_data *ts, bool on) { int rc = 0; if (!on) goto pinctrl_suspend; rc = pinctrl_select_state(ts->ts_pinctrl, ts->afe_pwr_state_active); if (rc) { dev_err(&ts->client->dev, "can not set afe pwr pins\n"); return -EINVAL; } usleep(1000); rc = pinctrl_select_state(ts->ts_pinctrl, ts->disp_vsn_state_active); if (rc) { dev_err(&ts->client->dev, "can not set disp vsn pins\n"); goto err_disp_vsn_pinctrl_enable; } usleep(1000); rc = bu21150_pinctrl_select(ts, true); if (rc < 0) goto err_ts_pinctrl_enable; return 0; pinctrl_suspend: bu21150_pinctrl_select(ts, false); err_ts_pinctrl_enable: pinctrl_select_state(ts->ts_pinctrl, ts->disp_vsn_state_suspend); err_disp_vsn_pinctrl_enable: pinctrl_select_state(ts->ts_pinctrl, ts->afe_pwr_state_suspend); return rc; } static int bu21150_gpio_enable(struct bu21150_data *ts, bool on) { int rc = 0; if (!on) goto gpio_disable; /* Panel and AFE Power on sequence */ rc = gpio_request(ts->afe_pwr_gpio, "afe_pwr"); if (rc) { pr_err("%s: afe power gpio request failed\n", __func__); return -EINVAL; } gpio_direction_output(ts->afe_pwr_gpio, 1); gpio_set_value(ts->afe_pwr_gpio, 1); usleep(1000); rc = gpio_request(ts->disp_vsn_gpio, "disp_vsn"); if (rc) { pr_err("%s: disp_vsn gpio request failed\n", __func__); goto err_disp_vsn_gpio_enable; } gpio_direction_output(ts->disp_vsn_gpio, 1); gpio_set_value(ts->disp_vsn_gpio, 1); usleep(1000); rc = gpio_request(ts->irq_gpio, "bu21150_ts_int"); if (rc) { pr_err("%s: IRQ gpio_request failed\n", __func__); goto err_irq_gpio_enable; } gpio_direction_input(ts->irq_gpio); /* set reset */ rc = gpio_request(ts->rst_gpio, "bu21150_ts_reset"); if (rc) { pr_err("%s: reset gpio_request failed\n", __func__); goto err_rst_gpio_enable; } gpio_direction_output(ts->rst_gpio, GPIO_LOW); return 0; gpio_disable: gpio_free(ts->rst_gpio); err_rst_gpio_enable: gpio_free(ts->irq_gpio); err_irq_gpio_enable: gpio_free(ts->disp_vsn_gpio); err_disp_vsn_gpio_enable: gpio_free(ts->afe_pwr_gpio); return rc; } static int bu21150_pin_enable(struct bu21150_data *ts, bool on) { int rc = 0; if (!on) goto pin_disable; if (ts->ts_pinctrl) rc = bu21150_pinctrl_enable(ts, true); else rc = bu21150_gpio_enable(ts, true); return rc; pin_disable: if (ts->ts_pinctrl) bu21150_pinctrl_enable(ts, false); else bu21150_gpio_enable(ts, false); return rc; } static int bu21150_power_enable(struct bu21150_data *ts, bool on) { int rc = 0; if (!on) goto power_disable; if (regulator_count_voltages(ts->vcc_ana) > 0) { rc = regulator_set_voltage(ts->vcc_ana, BU21150_MIN_VOLTAGE_UV, BU21150_MAX_VOLTAGE_UV); if (rc) { dev_err(&ts->client->dev, "regulator vcc_ana set_vtg failed rc=%d\n", rc); return rc; } } rc = reg_set_optimum_mode_check(ts->vcc_ana, BU21150_MAX_OPS_LOAD_UA); if (rc < 0) { dev_err(&ts->client->dev, "Regulator vcc_ana set_opt failed rc=%d\n", rc); goto err_set_vcc_ana_opt_mode; } rc = regulator_enable(ts->vcc_ana); if (rc) { dev_err(&ts->client->dev, "Regulator vcc_ana enable failed rc=%d\n", rc); goto err_enable_vcc_ana; } if (regulator_count_voltages(ts->vcc_dig) > 0) { rc = regulator_set_voltage(ts->vcc_dig, BU21150_VDD_DIG_VOLTAGE_UV, BU21150_VDD_DIG_VOLTAGE_UV); if (rc) { dev_err(&ts->client->dev, "regulator vcc_dig set_vtg failed rc=%d\n", rc); goto err_set_vcc_dig_voltage; } } rc = reg_set_optimum_mode_check(ts->vcc_dig, BU21150_MAX_OPS_LOAD_UA); if (rc < 0) { dev_err(&ts->client->dev, "Regulator vcc_dig set_opt failed rc=%d\n", rc); goto 
err_set_vcc_dig_opt_mode; } rc = regulator_enable(ts->vcc_dig); if (rc) { dev_err(&ts->client->dev, "Regulator vcc_dig enable failed rc=%d\n", rc); goto err_enable_vcc_dig; } return 0; power_disable: regulator_disable(ts->vcc_dig); err_enable_vcc_dig: reg_set_optimum_mode_check(ts->vcc_dig, 0); err_set_vcc_dig_opt_mode: if (regulator_count_voltages(ts->vcc_dig) > 0) regulator_set_voltage(ts->vcc_dig, 0, BU21150_VDD_DIG_VOLTAGE_UV); err_set_vcc_dig_voltage: regulator_disable(ts->vcc_ana); err_enable_vcc_ana: reg_set_optimum_mode_check(ts->vcc_ana, 0); err_set_vcc_ana_opt_mode: if (regulator_count_voltages(ts->vcc_ana) > 0) regulator_set_voltage(ts->vcc_ana, 0, BU21150_MAX_VOLTAGE_UV); return rc; } static int bu21150_regulator_config(struct bu21150_data *ts, bool enable) { int rc = 0; if (!enable) goto regulator_release; ts->vcc_ana = regulator_get(&ts->client->dev, "vdd_ana"); if (IS_ERR_OR_NULL(ts->vcc_ana)) { rc = PTR_ERR(ts->vcc_ana); dev_err(&ts->client->dev, "Regulator get failed vcc_ana rc=%d\n", rc); return rc; } ts->vcc_dig = regulator_get(&ts->client->dev, "vdd_dig"); if (IS_ERR_OR_NULL(ts->vcc_dig)) { rc = PTR_ERR(ts->vcc_dig); dev_err(&ts->client->dev, "Regulator get failed vcc_dig rc=%d\n", rc); goto err_get_vdd_dig; } return 0; regulator_release: regulator_put(ts->vcc_dig); err_get_vdd_dig: regulator_put(ts->vcc_ana); return rc; } static int bu21150_probe(struct spi_device *client) { struct bu21150_data *ts; int rc; ts = kzalloc(sizeof(struct bu21150_data), GFP_KERNEL); if (!ts) { dev_err(&client->dev, "Out of memory\n"); return -ENOMEM; } /* parse dtsi */ if (!parse_dtsi(&client->dev, ts)) { dev_err(&client->dev, "Invalid dtsi\n"); rc = -EINVAL; goto err_parse_dt; } g_client_bu21150 = client; ts->client = client; rc = bu21150_pinctrl_init(ts); if (rc) { dev_err(&client->dev, "Pinctrl init failed\n"); goto err_parse_dt; } rc = bu21150_regulator_config(ts, true); if (rc) { dev_err(&client->dev, "Failed to get power rail\n"); goto err_regulator_config; } rc = bu21150_power_enable(ts, true); if (rc) { dev_err(&client->dev, "Power enablement failed\n"); goto err_power_enable; } rc = bu21150_pin_enable(ts, true); if (rc) { dev_err(&client->dev, "Pin enable failed\n"); goto err_pin_enable; } mutex_init(&ts->mutex_frame); init_waitqueue_head(&(ts->frame_waitq)); ts->workq = create_singlethread_workqueue("bu21150_workq"); if (!ts->workq) { dev_err(&client->dev, "Unable to create workq\n"); rc = -ENOMEM; goto err_create_wq; } INIT_WORK(&ts->work, bu21150_irq_work_func); if (!client->irq) { dev_err(&client->dev, "Bad irq\n"); rc = -EINVAL; goto err_create_wq; } rc = misc_register(&g_bu21150_misc_device); if (rc) { dev_err(&client->dev, "Failed to register misc device\n"); goto err_register_misc; } dev_set_drvdata(&client->dev, ts); return 0; err_register_misc: destroy_workqueue(ts->workq); err_create_wq: bu21150_pin_enable(ts, false); err_pin_enable: bu21150_power_enable(ts, false); err_power_enable: bu21150_regulator_config(ts, false); err_regulator_config: if (ts->ts_pinctrl) devm_pinctrl_put(ts->ts_pinctrl); err_parse_dt: kfree(ts); return rc; } static int bu21150_remove(struct spi_device *client) { struct bu21150_data *ts = spi_get_drvdata(client); misc_deregister(&g_bu21150_misc_device); bu21150_power_enable(ts, false); bu21150_regulator_config(ts, false); destroy_workqueue(ts->workq); free_irq(client->irq, ts); bu21150_pin_enable(ts, false); kfree(ts); return 0; } static int bu21150_open(struct inode *inode, struct file *filp) { struct bu21150_data *ts = 
spi_get_drvdata(g_client_bu21150); struct spi_device *client = ts->client; int error; if (g_io_opened) { pr_err("%s: g_io_opened not zero.\n", __func__); return -EBUSY; } ++g_io_opened; g_bu21150_ioctl_unblock = 0; memset(&(ts->req_get), 0, sizeof(struct bu21150_ioctl_get_frame_data)); /* set default value. */ ts->req_get.size = FRAME_HEADER_SIZE; memset(&(ts->frame_get), 0, sizeof(struct bu21150_ioctl_get_frame_data)); memset(&(ts->frame_work_get), 0, sizeof(struct bu21150_ioctl_get_frame_data)); error = request_irq(client->irq, bu21150_irq_handler, IRQF_TRIGGER_LOW | IRQF_ONESHOT, client->dev.driver->name, ts); if (error) { dev_err(&client->dev, "Failed to register interrupt\n"); return error; } return 0; } static int bu21150_release(struct inode *inode, struct file *filp) { struct bu21150_data *ts = spi_get_drvdata(g_client_bu21150); struct spi_device *client = ts->client; if (!g_io_opened) { pr_err("%s: !g_io_opened\n", __func__); return -ENOTTY; } --g_io_opened; if (g_io_opened < 0) g_io_opened = 0; free_irq(client->irq, ts); return 0; } static long bu21150_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) { long ret; switch (cmd) { case BU21150_IOCTL_CMD_GET_FRAME: ret = bu21150_ioctl_get_frame(arg); return ret; case BU21150_IOCTL_CMD_RESET: ret = bu21150_ioctl_reset(arg); return ret; case BU21150_IOCTL_CMD_SPI_READ: ret = bu21150_ioctl_spi_read(arg); return ret; case BU21150_IOCTL_CMD_SPI_WRITE: ret = bu21150_ioctl_spi_write(arg); return ret; case BU21150_IOCTL_CMD_UNBLOCK: ret = bu21150_ioctl_unblock(); return ret; case BU21150_IOCTL_CMD_UNBLOCK_RELEASE: ret = bu21150_ioctl_unblock_release(); return ret; case BU21150_IOCTL_CMD_SUSPEND: ret = bu21150_ioctl_suspend(); return ret; case BU21150_IOCTL_CMD_RESUME: ret = bu21150_ioctl_resume(); return ret; default: pr_err("%s: cmd unknown.\n", __func__); return -EINVAL; } return 0; } static long bu21150_ioctl_get_frame(unsigned long arg) { long ret; struct bu21150_data *ts = spi_get_drvdata(g_client_bu21150); void __user *argp = (void __user *)arg; struct bu21150_ioctl_get_frame_data data; u32 frame_size; if (arg == 0) { pr_err("%s: arg == 0.\n", __func__); return -EINVAL; } if (copy_from_user(&data, argp, sizeof(struct bu21150_ioctl_get_frame_data))) { pr_err("%s: Failed to copy_from_user().\n", __func__); return -EFAULT; } if (data.buf == 0 || data.size == 0 || MAX_FRAME_SIZE < data.size || data.tv == 0) { pr_err("%s: data.buf == 0 ...\n", __func__); return -EINVAL; } do { ts->req_get = data; ret = wait_frame_waitq(ts); if (ret != 0) return ret; } while (!is_same_bu21150_ioctl_get_frame_data(&data, &(ts->frame_get))); /* copy frame */ mutex_lock(&ts->mutex_frame); frame_size = ts->frame_get.size; if (copy_to_user(data.buf, ts->frame, frame_size)) { mutex_unlock(&ts->mutex_frame); pr_err("%s: Failed to copy_to_user().\n", __func__); return -EFAULT; } if (copy_to_user(data.tv, &(ts->tv), sizeof(struct timeval))) { mutex_unlock(&ts->mutex_frame); pr_err("%s: Failed to copy_to_user().\n", __func__); return -EFAULT; } mutex_unlock(&ts->mutex_frame); return 0; } static long bu21150_ioctl_reset(unsigned long reset) { struct bu21150_data *ts = spi_get_drvdata(g_client_bu21150); if (!(reset == BU21150_RESET_LOW || reset == BU21150_RESET_HIGH)) { pr_err("%s: arg unknown.\n", __func__); return -EINVAL; } if (reset == BU21150_RESET_HIGH) { usleep(1000); } gpio_set_value(ts->rst_gpio, reset); return 0; } static long bu21150_ioctl_spi_read(unsigned long arg) { struct bu21150_data *ts = spi_get_drvdata(g_client_bu21150); void __user *argp 
= (void __user *)arg; struct bu21150_ioctl_spi_data data; if (arg == 0) { pr_err("%s: arg == 0.\n", __func__); return -EINVAL; } if (copy_from_user(&data, argp, sizeof(struct bu21150_ioctl_spi_data))) { pr_err("%s: Failed to copy_from_user().\n", __func__); return -EFAULT; } if (data.buf == 0 || data.count == 0 || MAX_FRAME_SIZE < data.count) { pr_err("%s: data.buf == 0 ...\n", __func__); return -EINVAL; } bu21150_read_register(data.addr, data.count, ts->spi_buf); if (copy_to_user(data.buf, ts->spi_buf, data.count)) { pr_err("%s: Failed to copy_to_user().\n", __func__); return -EFAULT; } return 0; } static long bu21150_ioctl_spi_write(unsigned long arg) { struct bu21150_data *ts = spi_get_drvdata(g_client_bu21150); void __user *argp = (void __user *)arg; struct bu21150_ioctl_spi_data data; if (arg == 0) { pr_err("%s: arg == 0.\n", __func__); return -EINVAL; } if (copy_from_user(&data, argp, sizeof(struct bu21150_ioctl_spi_data))) { pr_err("%s: Failed to copy_from_user().\n", __func__); return -EFAULT; } if (data.buf == 0 || data.count == 0 || MAX_FRAME_SIZE < data.count) { pr_err("%s: data.buf == 0 ...\n", __func__); return -EINVAL; } if (copy_from_user(ts->spi_buf, data.buf, data.count)) { pr_err("%s: Failed to copy_from_user()..\n", __func__); return -EFAULT; } bu21150_write_register(data.addr, data.count, ts->spi_buf); return 0; } static long bu21150_ioctl_unblock(void) { struct bu21150_data *ts = spi_get_drvdata(g_client_bu21150); g_bu21150_ioctl_unblock = 1; /* wake up */ wake_up_frame_waitq(ts); return 0; } static long bu21150_ioctl_unblock_release(void) { g_bu21150_ioctl_unblock = 0; return 0; } static long bu21150_ioctl_suspend(void) { struct bu21150_data *ts = spi_get_drvdata(g_client_bu21150); struct spi_device *client = ts->client; bu21150_ioctl_unblock(); disable_irq(client->irq); bu21150_power_enable(ts, false); return 0; } static long bu21150_ioctl_resume(void) { struct bu21150_data *ts = spi_get_drvdata(g_client_bu21150); struct spi_device *client = ts->client; g_bu21150_ioctl_unblock = 0; bu21150_power_enable(ts, true); enable_irq(client->irq); return 0; } static irqreturn_t bu21150_irq_handler(int irq, void *dev_id) { struct bu21150_data *ts = dev_id; disable_irq_nosync(irq); /* add work to queue */ queue_work(ts->workq, &ts->work); return IRQ_HANDLED; } static void bu21150_irq_work_func(struct work_struct *work) { struct bu21150_data *ts = container_of(work, struct bu21150_data, work); u8 *psbuf = (u8 *)ts->frame_work; struct spi_device *client = ts->client; /* get frame */ ts->frame_work_get = ts->req_get; bu21150_read_register(REG_READ_DATA, ts->frame_work_get.size, psbuf); #ifdef CHECK_SAME_FRAME check_same_frame(ts); #endif copy_frame(ts); wake_up_frame_waitq(ts); enable_irq(client->irq); } static int bu21150_read_register(u32 addr, u16 size, u8 *data) { struct bu21150_data *ts = spi_get_drvdata(g_client_bu21150); struct spi_device *client = ts->client; struct ser_req *req; int ret; u8 *input; u8 *output; input = kzalloc(sizeof(u8)*(size)+SPI_HEADER_SIZE, GFP_KERNEL); output = kzalloc(sizeof(u8)*(size)+SPI_HEADER_SIZE, GFP_KERNEL); req = kzalloc(sizeof(*req), GFP_KERNEL); /* set header */ input[0] = 0x03; /* read command */ input[1] = (addr & 0xFF00) >> 8; /* address hi */ input[2] = (addr & 0x00FF) >> 0; /* address lo */ /* read data */ spi_message_init(&req->msg); req->xfer[0].tx_buf = input; req->xfer[0].rx_buf = output; req->xfer[0].len = size+SPI_HEADER_SIZE; req->xfer[0].cs_change = 0; req->xfer[0].bits_per_word = 8; spi_message_add_tail(&req->xfer[0], 
&req->msg); ret = spi_sync(client, &req->msg); if (ret) pr_err("%s : spi_sync read data error:ret=[%d]", __func__, ret); memcpy(data, output+SPI_HEADER_SIZE, size); swap_2byte(data, size); kfree(req); kfree(input); kfree(output); return ret; } static int bu21150_write_register(u32 addr, u16 size, u8 *data) { struct bu21150_data *ts = spi_get_drvdata(g_client_bu21150); struct spi_device *client = ts->client; struct ser_req *req; int ret; u8 *input; input = kzalloc(sizeof(u8)*(size)+SPI_HEADER_SIZE, GFP_KERNEL); req = kzalloc(sizeof(*req), GFP_KERNEL); /* set header */ input[0] = 0x02; /* write command */ input[1] = (addr & 0xFF00) >> 8; /* address hi */ input[2] = (addr & 0x00FF) >> 0; /* address lo */ /* set data */ memcpy(input+SPI_HEADER_SIZE, data, size); swap_2byte(input+SPI_HEADER_SIZE, size); /* write data */ spi_message_init(&req->msg); req->xfer[0].tx_buf = input; req->xfer[0].rx_buf = NULL; req->xfer[0].len = size+SPI_HEADER_SIZE; req->xfer[0].cs_change = 0; req->xfer[0].bits_per_word = 8; spi_message_add_tail(&req->xfer[0], &req->msg); ret = spi_sync(client, &req->msg); if (ret) pr_err("%s : spi_sync write data error:ret=[%d]", __func__, ret); kfree(req); kfree(input); return ret; } static void wake_up_frame_waitq(struct bu21150_data *ts) { ts->frame_waitq_flag = WAITQ_WAKEUP; wake_up_interruptible(&(ts->frame_waitq)); } static long wait_frame_waitq(struct bu21150_data *ts) { if (g_bu21150_ioctl_unblock == 1) return BU21150_UNBLOCK; /* wait event */ if (wait_event_interruptible(ts->frame_waitq, ts->frame_waitq_flag == WAITQ_WAKEUP)) { pr_err("%s: -ERESTARTSYS\n", __func__); return -ERESTARTSYS; } ts->frame_waitq_flag = WAITQ_WAIT; if (g_bu21150_ioctl_unblock == 1) return BU21150_UNBLOCK; return 0; } static int is_same_bu21150_ioctl_get_frame_data( struct bu21150_ioctl_get_frame_data *data1, struct bu21150_ioctl_get_frame_data *data2) { int i; u8 *p1 = (u8 *)data1; u8 *p2 = (u8 *)data2; for (i = 0; i < sizeof(struct bu21150_ioctl_get_frame_data); i++) { if (p1[i] != p2[i]) return 0; } return 1; } static void copy_frame(struct bu21150_data *ts) { mutex_lock(&(ts->mutex_frame)); ts->frame_get = ts->frame_work_get; memcpy(ts->frame, ts->frame_work, MAX_FRAME_SIZE); do_gettimeofday(&(ts->tv)); mutex_unlock(&(ts->mutex_frame)); } static void swap_2byte(unsigned char *buf, unsigned int size) { int i; u16 *psbuf = (u16 *)buf; if (size%2 == 1) { pr_err("%s: error size is odd. size=[%u]\n", __func__, size); return; } for (i = 0; i < size/2; i++) be16_to_cpus(psbuf+i); } #ifdef CHECK_SAME_FRAME static void check_same_frame(struct bu21150_data *ts) { static int frame_no = -1; u16 *ps = (u16 *)ts->frame; if (ps[2] == frame_no) pr_err("%s:same_frame_no=[%d]\n", __func__, frame_no); frame_no = ps[2]; } #endif static bool parse_dtsi(struct device *dev, struct bu21150_data *ts) { enum of_gpio_flags dummy; struct device_node *np = dev->of_node; ts->irq_gpio = of_get_named_gpio_flags(np, "irq-gpio", 0, &dummy); ts->rst_gpio = of_get_named_gpio_flags(np, "rst-gpio", 0, &dummy); return true; }
gpl-2.0
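For orientation, every register access in the bu21150 driver above uses the same wire format: a 3-byte header (command 0x03 for read or 0x02 for write, then the 16-bit register address high byte first) followed by the payload, with each 16-bit payload word byte-swapped by swap_2byte(). A hedged userspace sketch of the write-side buffer, assuming a little-endian host where be16_to_cpus() swaps bytes (bu21150_build_write is a hypothetical helper name, not part of the driver):

#include <stdint.h>
#include <stddef.h>
#include <string.h>

#define SPI_HEADER_SIZE 3

/* Build the image bu21150_write_register would hand to spi_sync():
   header plus payload in bus byte order.  'out' must hold
   SPI_HEADER_SIZE + size bytes and 'size' must be even. */
static size_t bu21150_build_write(uint8_t *out, uint32_t addr,
				  const uint8_t *data, uint16_t size)
{
	size_t i;

	out[0] = 0x02;			/* write command */
	out[1] = (addr & 0xFF00) >> 8;	/* address hi */
	out[2] = (addr & 0x00FF) >> 0;	/* address lo */
	memcpy(out + SPI_HEADER_SIZE, data, size);
	for (i = 0; i + 1 < size; i += 2) {	/* swap_2byte() equivalent */
		uint8_t t = out[SPI_HEADER_SIZE + i];
		out[SPI_HEADER_SIZE + i] = out[SPI_HEADER_SIZE + i + 1];
		out[SPI_HEADER_SIZE + i + 1] = t;
	}
	return SPI_HEADER_SIZE + size;
}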
nmacs/lm3s-linux
drivers/serial/max3100.c
565
21738
/* * * Copyright (C) 2008 Christian Pellegrin <chripell@evolware.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * * Notes: the MAX3100 doesn't provide an interrupt on CTS so we have * to use polling for flow control. TX empty IRQ is unusable, since * writing conf clears the FIFO buffer and we cannot have this interrupt * always asking us for attention. * * Example platform data: static struct plat_max3100 max3100_plat_data = { .loopback = 0, .crystal = 0, .poll_time = 100, }; static struct spi_board_info spi_board_info[] = { { .modalias = "max3100", .platform_data = &max3100_plat_data, .irq = IRQ_EINT12, .max_speed_hz = 5*1000*1000, .chip_select = 0, }, }; * The initial minor number is 209 in the low-density serial port: * mknod /dev/ttyMAX0 c 204 209 */ #define MAX3100_MAJOR 204 #define MAX3100_MINOR 209 /* 4 MAX3100s should be enough for everyone */ #define MAX_MAX3100 4 #include <linux/delay.h> #include <linux/device.h> #include <linux/serial_core.h> #include <linux/serial.h> #include <linux/spi/spi.h> #include <linux/freezer.h> #include <linux/serial_max3100.h> #define MAX3100_C (1<<14) #define MAX3100_D (0<<14) #define MAX3100_W (1<<15) #define MAX3100_RX (0<<15) #define MAX3100_WC (MAX3100_W | MAX3100_C) #define MAX3100_RC (MAX3100_RX | MAX3100_C) #define MAX3100_WD (MAX3100_W | MAX3100_D) #define MAX3100_RD (MAX3100_RX | MAX3100_D) #define MAX3100_CMD (3 << 14) #define MAX3100_T (1<<14) #define MAX3100_R (1<<15) #define MAX3100_FEN (1<<13) #define MAX3100_SHDN (1<<12) #define MAX3100_TM (1<<11) #define MAX3100_RM (1<<10) #define MAX3100_PM (1<<9) #define MAX3100_RAM (1<<8) #define MAX3100_IR (1<<7) #define MAX3100_ST (1<<6) #define MAX3100_PE (1<<5) #define MAX3100_L (1<<4) #define MAX3100_BAUD (0xf) #define MAX3100_TE (1<<10) #define MAX3100_RAFE (1<<10) #define MAX3100_RTS (1<<9) #define MAX3100_CTS (1<<9) #define MAX3100_PT (1<<8) #define MAX3100_DATA (0xff) #define MAX3100_RT (MAX3100_R | MAX3100_T) #define MAX3100_RTC (MAX3100_RT | MAX3100_CTS | MAX3100_RAFE) /* the following simulate a status reg for ignore_status_mask */ #define MAX3100_STATUS_PE 1 #define MAX3100_STATUS_FE 2 #define MAX3100_STATUS_OE 4 struct max3100_port { struct uart_port port; struct spi_device *spi; int cts; /* last CTS received for flow ctrl */ int tx_empty; /* last TX empty bit */ spinlock_t conf_lock; /* shared data */ int conf_commit; /* need to make changes */ int conf; /* configuration for the MAX3100 * (bits 0-7, bits 8-11 are irqs) */ int rts_commit; /* need to change rts */ int rts; /* rts status */ int baud; /* current baud rate */ int parity; /* keeps track if we should send parity */ #define MAX3100_PARITY_ON 1 #define MAX3100_PARITY_ODD 2 #define MAX3100_7BIT 4 int rx_enabled; /* if we should rx chars */ int irq; /* irq assigned to the max3100 */ int minor; /* minor number */ int crystal; /* 1 if 3.6864 MHz crystal, 0 for 1.8432 MHz */ int loopback; /* 1 if we are in loopback mode */ /* for handling irqs: need workqueue since we do spi_sync */ struct workqueue_struct *workqueue; struct work_struct work; /* set to 1 to make the workhandler exit as soon as possible */ int force_end_work; /* need to know we are suspending to avoid deadlock on workqueue */ int suspending; /* hook for suspending MAX3100 via dedicated pin */ void (*max3100_hw_suspend) (int suspend); /* poll time (in ms) for ctrl
lines */ int poll_time; /* and its timer */ struct timer_list timer; }; static struct max3100_port *max3100s[MAX_MAX3100]; /* the chips */ static DEFINE_MUTEX(max3100s_lock); /* race on probe */ static int max3100_do_parity(struct max3100_port *s, u16 c) { int parity; if (s->parity & MAX3100_PARITY_ODD) parity = 1; else parity = 0; if (s->parity & MAX3100_7BIT) c &= 0x7f; else c &= 0xff; parity = parity ^ (hweight8(c) & 1); return parity; } static int max3100_check_parity(struct max3100_port *s, u16 c) { return max3100_do_parity(s, c) == ((c >> 8) & 1); } static void max3100_calc_parity(struct max3100_port *s, u16 *c) { if (s->parity & MAX3100_7BIT) *c &= 0x7f; else *c &= 0xff; if (s->parity & MAX3100_PARITY_ON) *c |= max3100_do_parity(s, *c) << 8; } static void max3100_work(struct work_struct *w); static void max3100_dowork(struct max3100_port *s) { if (!s->force_end_work && !work_pending(&s->work) && !freezing(current) && !s->suspending) queue_work(s->workqueue, &s->work); } static void max3100_timeout(unsigned long data) { struct max3100_port *s = (struct max3100_port *)data; if (s->port.state) { max3100_dowork(s); mod_timer(&s->timer, jiffies + s->poll_time); } } static int max3100_sr(struct max3100_port *s, u16 tx, u16 *rx) { struct spi_message message; u16 etx, erx; int status; struct spi_transfer tran = { .tx_buf = &etx, .rx_buf = &erx, .len = 2, }; etx = cpu_to_be16(tx); spi_message_init(&message); spi_message_add_tail(&tran, &message); status = spi_sync(s->spi, &message); if (status) { dev_warn(&s->spi->dev, "error while calling spi_sync\n"); return -EIO; } *rx = be16_to_cpu(erx); s->tx_empty = (*rx & MAX3100_T) > 0; dev_dbg(&s->spi->dev, "%04x - %04x\n", tx, *rx); return 0; } static int max3100_handlerx(struct max3100_port *s, u16 rx) { unsigned int ch, flg, status = 0; int ret = 0, cts; if (rx & MAX3100_R && s->rx_enabled) { dev_dbg(&s->spi->dev, "%s\n", __func__); ch = rx & (s->parity & MAX3100_7BIT ? 0x7f : 0xff); if (rx & MAX3100_RAFE) { s->port.icount.frame++; flg = TTY_FRAME; status |= MAX3100_STATUS_FE; } else { if (s->parity & MAX3100_PARITY_ON) { if (max3100_check_parity(s, rx)) { s->port.icount.rx++; flg = TTY_NORMAL; } else { s->port.icount.parity++; flg = TTY_PARITY; status |= MAX3100_STATUS_PE; } } else { s->port.icount.rx++; flg = TTY_NORMAL; } } uart_insert_char(&s->port, status, MAX3100_STATUS_OE, ch, flg); ret = 1; } cts = (rx & MAX3100_CTS) > 0; if (s->cts != cts) { s->cts = cts; uart_handle_cts_change(&s->port, cts ? TIOCM_CTS : 0); } return ret; } static void max3100_work(struct work_struct *w) { struct max3100_port *s = container_of(w, struct max3100_port, work); int rxchars; u16 tx, rx; int conf, cconf, rts, crts; struct circ_buf *xmit = &s->port.state->xmit; dev_dbg(&s->spi->dev, "%s\n", __func__); rxchars = 0; do { spin_lock(&s->conf_lock); conf = s->conf; cconf = s->conf_commit; s->conf_commit = 0; rts = s->rts; crts = s->rts_commit; s->rts_commit = 0; spin_unlock(&s->conf_lock); if (cconf) max3100_sr(s, MAX3100_WC | conf, &rx); if (crts) { max3100_sr(s, MAX3100_WD | MAX3100_TE | (s->rts ? 
MAX3100_RTS : 0), &rx); rxchars += max3100_handlerx(s, rx); } max3100_sr(s, MAX3100_RD, &rx); rxchars += max3100_handlerx(s, rx); if (rx & MAX3100_T) { tx = 0xffff; if (s->port.x_char) { tx = s->port.x_char; s->port.icount.tx++; s->port.x_char = 0; } else if (!uart_circ_empty(xmit) && !uart_tx_stopped(&s->port)) { tx = xmit->buf[xmit->tail]; xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); s->port.icount.tx++; } if (tx != 0xffff) { max3100_calc_parity(s, &tx); tx |= MAX3100_WD | (s->rts ? MAX3100_RTS : 0); max3100_sr(s, tx, &rx); rxchars += max3100_handlerx(s, rx); } } if (rxchars > 16 && s->port.state->port.tty != NULL) { tty_flip_buffer_push(s->port.state->port.tty); rxchars = 0; } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&s->port); } while (!s->force_end_work && !freezing(current) && ((rx & MAX3100_R) || (!uart_circ_empty(xmit) && !uart_tx_stopped(&s->port)))); if (rxchars > 0 && s->port.state->port.tty != NULL) tty_flip_buffer_push(s->port.state->port.tty); } static irqreturn_t max3100_irq(int irqno, void *dev_id) { struct max3100_port *s = dev_id; dev_dbg(&s->spi->dev, "%s\n", __func__); max3100_dowork(s); return IRQ_HANDLED; } static void max3100_enable_ms(struct uart_port *port) { struct max3100_port *s = container_of(port, struct max3100_port, port); if (s->poll_time > 0) mod_timer(&s->timer, jiffies); dev_dbg(&s->spi->dev, "%s\n", __func__); } static void max3100_start_tx(struct uart_port *port) { struct max3100_port *s = container_of(port, struct max3100_port, port); dev_dbg(&s->spi->dev, "%s\n", __func__); max3100_dowork(s); } static void max3100_stop_rx(struct uart_port *port) { struct max3100_port *s = container_of(port, struct max3100_port, port); dev_dbg(&s->spi->dev, "%s\n", __func__); s->rx_enabled = 0; spin_lock(&s->conf_lock); s->conf &= ~MAX3100_RM; s->conf_commit = 1; spin_unlock(&s->conf_lock); max3100_dowork(s); } static unsigned int max3100_tx_empty(struct uart_port *port) { struct max3100_port *s = container_of(port, struct max3100_port, port); dev_dbg(&s->spi->dev, "%s\n", __func__); /* may not be truly up-to-date */ max3100_dowork(s); return s->tx_empty; } static unsigned int max3100_get_mctrl(struct uart_port *port) { struct max3100_port *s = container_of(port, struct max3100_port, port); dev_dbg(&s->spi->dev, "%s\n", __func__); /* may not be truly up-to-date */ max3100_dowork(s); /* always assert DCD and DSR since these lines are not wired */ return (s->cts ? 
TIOCM_CTS : 0) | TIOCM_DSR | TIOCM_CAR; } static void max3100_set_mctrl(struct uart_port *port, unsigned int mctrl) { struct max3100_port *s = container_of(port, struct max3100_port, port); int rts; dev_dbg(&s->spi->dev, "%s\n", __func__); rts = (mctrl & TIOCM_RTS) > 0; spin_lock(&s->conf_lock); if (s->rts != rts) { s->rts = rts; s->rts_commit = 1; max3100_dowork(s); } spin_unlock(&s->conf_lock); } static void max3100_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { struct max3100_port *s = container_of(port, struct max3100_port, port); int baud = 0; unsigned cflag; u32 param_new, param_mask, parity = 0; struct tty_struct *tty = s->port.state->port.tty; dev_dbg(&s->spi->dev, "%s\n", __func__); if (!tty) return; cflag = termios->c_cflag; param_new = 0; param_mask = 0; baud = tty_get_baud_rate(tty); param_new = s->conf & MAX3100_BAUD; switch (baud) { case 300: if (s->crystal) baud = s->baud; else param_new = 15; break; case 600: param_new = 14 + s->crystal; break; case 1200: param_new = 13 + s->crystal; break; case 2400: param_new = 12 + s->crystal; break; case 4800: param_new = 11 + s->crystal; break; case 9600: param_new = 10 + s->crystal; break; case 19200: param_new = 9 + s->crystal; break; case 38400: param_new = 8 + s->crystal; break; case 57600: param_new = 1 + s->crystal; break; case 115200: param_new = 0 + s->crystal; break; case 230400: if (s->crystal) param_new = 0; else baud = s->baud; break; default: baud = s->baud; } tty_encode_baud_rate(tty, baud, baud); s->baud = baud; param_mask |= MAX3100_BAUD; if ((cflag & CSIZE) == CS8) { param_new &= ~MAX3100_L; parity &= ~MAX3100_7BIT; } else { param_new |= MAX3100_L; parity |= MAX3100_7BIT; cflag = (cflag & ~CSIZE) | CS7; } param_mask |= MAX3100_L; if (cflag & CSTOPB) param_new |= MAX3100_ST; else param_new &= ~MAX3100_ST; param_mask |= MAX3100_ST; if (cflag & PARENB) { param_new |= MAX3100_PE; parity |= MAX3100_PARITY_ON; } else { param_new &= ~MAX3100_PE; parity &= ~MAX3100_PARITY_ON; } param_mask |= MAX3100_PE; if (cflag & PARODD) parity |= MAX3100_PARITY_ODD; else parity &= ~MAX3100_PARITY_ODD; /* mask termios capabilities we don't support */ cflag &= ~CMSPAR; termios->c_cflag = cflag; s->port.ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) s->port.ignore_status_mask |= MAX3100_STATUS_PE | MAX3100_STATUS_FE | MAX3100_STATUS_OE; /* we are sending char from a workqueue so enable */ s->port.state->port.tty->low_latency = 1; if (s->poll_time > 0) del_timer_sync(&s->timer); uart_update_timeout(port, termios->c_cflag, baud); spin_lock(&s->conf_lock); s->conf = (s->conf & ~param_mask) | (param_new & param_mask); s->conf_commit = 1; s->parity = parity; spin_unlock(&s->conf_lock); max3100_dowork(s); if (UART_ENABLE_MS(&s->port, termios->c_cflag)) max3100_enable_ms(&s->port); } static void max3100_shutdown(struct uart_port *port) { struct max3100_port *s = container_of(port, struct max3100_port, port); dev_dbg(&s->spi->dev, "%s\n", __func__); if (s->suspending) return; s->force_end_work = 1; if (s->poll_time > 0) del_timer_sync(&s->timer); if (s->workqueue) { flush_workqueue(s->workqueue); destroy_workqueue(s->workqueue); s->workqueue = NULL; } if (s->irq) free_irq(s->irq, s); /* set shutdown mode to save power */ if (s->max3100_hw_suspend) s->max3100_hw_suspend(1); else { u16 tx, rx; tx = MAX3100_WC | MAX3100_SHDN; max3100_sr(s, tx, &rx); } } static int max3100_startup(struct uart_port *port) { struct max3100_port *s = container_of(port, struct max3100_port, port); char b[12]; 
dev_dbg(&s->spi->dev, "%s\n", __func__); s->conf = MAX3100_RM; s->baud = s->crystal ? 230400 : 115200; s->rx_enabled = 1; if (s->suspending) return 0; s->force_end_work = 0; s->parity = 0; s->rts = 0; sprintf(b, "max3100-%d", s->minor); s->workqueue = create_freezeable_workqueue(b); if (!s->workqueue) { dev_warn(&s->spi->dev, "cannot create workqueue\n"); return -EBUSY; } INIT_WORK(&s->work, max3100_work); if (request_irq(s->irq, max3100_irq, IRQF_TRIGGER_FALLING, "max3100", s) < 0) { dev_warn(&s->spi->dev, "cannot allocate irq %d\n", s->irq); s->irq = 0; destroy_workqueue(s->workqueue); s->workqueue = NULL; return -EBUSY; } if (s->loopback) { u16 tx, rx; tx = 0x4001; max3100_sr(s, tx, &rx); } if (s->max3100_hw_suspend) s->max3100_hw_suspend(0); s->conf_commit = 1; max3100_dowork(s); /* wait for clock to settle */ msleep(50); max3100_enable_ms(&s->port); return 0; } static const char *max3100_type(struct uart_port *port) { struct max3100_port *s = container_of(port, struct max3100_port, port); dev_dbg(&s->spi->dev, "%s\n", __func__); return s->port.type == PORT_MAX3100 ? "MAX3100" : NULL; } static void max3100_release_port(struct uart_port *port) { struct max3100_port *s = container_of(port, struct max3100_port, port); dev_dbg(&s->spi->dev, "%s\n", __func__); } static void max3100_config_port(struct uart_port *port, int flags) { struct max3100_port *s = container_of(port, struct max3100_port, port); dev_dbg(&s->spi->dev, "%s\n", __func__); if (flags & UART_CONFIG_TYPE) s->port.type = PORT_MAX3100; } static int max3100_verify_port(struct uart_port *port, struct serial_struct *ser) { struct max3100_port *s = container_of(port, struct max3100_port, port); int ret = -EINVAL; dev_dbg(&s->spi->dev, "%s\n", __func__); if (ser->type == PORT_UNKNOWN || ser->type == PORT_MAX3100) ret = 0; return ret; } static void max3100_stop_tx(struct uart_port *port) { struct max3100_port *s = container_of(port, struct max3100_port, port); dev_dbg(&s->spi->dev, "%s\n", __func__); } static int max3100_request_port(struct uart_port *port) { struct max3100_port *s = container_of(port, struct max3100_port, port); dev_dbg(&s->spi->dev, "%s\n", __func__); return 0; } static void max3100_break_ctl(struct uart_port *port, int break_state) { struct max3100_port *s = container_of(port, struct max3100_port, port); dev_dbg(&s->spi->dev, "%s\n", __func__); } static struct uart_ops max3100_ops = { .tx_empty = max3100_tx_empty, .set_mctrl = max3100_set_mctrl, .get_mctrl = max3100_get_mctrl, .stop_tx = max3100_stop_tx, .start_tx = max3100_start_tx, .stop_rx = max3100_stop_rx, .enable_ms = max3100_enable_ms, .break_ctl = max3100_break_ctl, .startup = max3100_startup, .shutdown = max3100_shutdown, .set_termios = max3100_set_termios, .type = max3100_type, .release_port = max3100_release_port, .request_port = max3100_request_port, .config_port = max3100_config_port, .verify_port = max3100_verify_port, }; static struct uart_driver max3100_uart_driver = { .owner = THIS_MODULE, .driver_name = "ttyMAX", .dev_name = "ttyMAX", .major = MAX3100_MAJOR, .minor = MAX3100_MINOR, .nr = MAX_MAX3100, }; static int uart_driver_registered; static int __devinit max3100_probe(struct spi_device *spi) { int i, retval; struct plat_max3100 *pdata; u16 tx, rx; mutex_lock(&max3100s_lock); if (!uart_driver_registered) { uart_driver_registered = 1; retval = uart_register_driver(&max3100_uart_driver); if (retval) { printk(KERN_ERR "Couldn't register max3100 uart driver\n"); mutex_unlock(&max3100s_lock); return retval; } } for (i = 0; i < MAX_MAX3100; i++) if 
(!max3100s[i]) break; if (i == MAX_MAX3100) { dev_warn(&spi->dev, "too many MAX3100 chips\n"); mutex_unlock(&max3100s_lock); return -ENOMEM; } max3100s[i] = kzalloc(sizeof(struct max3100_port), GFP_KERNEL); if (!max3100s[i]) { dev_warn(&spi->dev, "kmalloc for max3100 structure %d failed!\n", i); mutex_unlock(&max3100s_lock); return -ENOMEM; } max3100s[i]->spi = spi; max3100s[i]->irq = spi->irq; spin_lock_init(&max3100s[i]->conf_lock); dev_set_drvdata(&spi->dev, max3100s[i]); pdata = spi->dev.platform_data; max3100s[i]->crystal = pdata->crystal; max3100s[i]->loopback = pdata->loopback; max3100s[i]->poll_time = pdata->poll_time * HZ / 1000; if (pdata->poll_time > 0 && max3100s[i]->poll_time == 0) max3100s[i]->poll_time = 1; max3100s[i]->max3100_hw_suspend = pdata->max3100_hw_suspend; max3100s[i]->minor = i; init_timer(&max3100s[i]->timer); max3100s[i]->timer.function = max3100_timeout; max3100s[i]->timer.data = (unsigned long) max3100s[i]; dev_dbg(&spi->dev, "%s: adding port %d\n", __func__, i); max3100s[i]->port.irq = max3100s[i]->irq; max3100s[i]->port.uartclk = max3100s[i]->crystal ? 3686400 : 1843200; max3100s[i]->port.fifosize = 16; max3100s[i]->port.ops = &max3100_ops; max3100s[i]->port.flags = UPF_SKIP_TEST | UPF_BOOT_AUTOCONF; max3100s[i]->port.line = i; max3100s[i]->port.type = PORT_MAX3100; max3100s[i]->port.dev = &spi->dev; retval = uart_add_one_port(&max3100_uart_driver, &max3100s[i]->port); if (retval < 0) dev_warn(&spi->dev, "uart_add_one_port failed for line %d with error %d\n", i, retval); /* set shutdown mode to save power. Will be woken-up on open */ if (max3100s[i]->max3100_hw_suspend) max3100s[i]->max3100_hw_suspend(1); else { tx = MAX3100_WC | MAX3100_SHDN; max3100_sr(max3100s[i], tx, &rx); } mutex_unlock(&max3100s_lock); return 0; } static int __devexit max3100_remove(struct spi_device *spi) { struct max3100_port *s = dev_get_drvdata(&spi->dev); int i; mutex_lock(&max3100s_lock); /* find out the index for the chip we are removing */ for (i = 0; i < MAX_MAX3100; i++) if (max3100s[i] == s) break; dev_dbg(&spi->dev, "%s: removing port %d\n", __func__, i); uart_remove_one_port(&max3100_uart_driver, &max3100s[i]->port); kfree(max3100s[i]); max3100s[i] = NULL; /* check if this is the last chip we have */ for (i = 0; i < MAX_MAX3100; i++) if (max3100s[i]) { mutex_unlock(&max3100s_lock); return 0; } pr_debug("removing max3100 driver\n"); uart_unregister_driver(&max3100_uart_driver); mutex_unlock(&max3100s_lock); return 0; } #ifdef CONFIG_PM static int max3100_suspend(struct spi_device *spi, pm_message_t state) { struct max3100_port *s = dev_get_drvdata(&spi->dev); dev_dbg(&s->spi->dev, "%s\n", __func__); disable_irq(s->irq); s->suspending = 1; uart_suspend_port(&max3100_uart_driver, &s->port); if (s->max3100_hw_suspend) s->max3100_hw_suspend(1); else { /* no HW suspend, so do SW one */ u16 tx, rx; tx = MAX3100_WC | MAX3100_SHDN; max3100_sr(s, tx, &rx); } return 0; } static int max3100_resume(struct spi_device *spi) { struct max3100_port *s = dev_get_drvdata(&spi->dev); dev_dbg(&s->spi->dev, "%s\n", __func__); if (s->max3100_hw_suspend) s->max3100_hw_suspend(0); uart_resume_port(&max3100_uart_driver, &s->port); s->suspending = 0; enable_irq(s->irq); s->conf_commit = 1; if (s->workqueue) max3100_dowork(s); return 0; } #else #define max3100_suspend NULL #define max3100_resume NULL #endif static struct spi_driver max3100_driver = { .driver = { .name = "max3100", .bus = &spi_bus_type, .owner = THIS_MODULE, }, .probe = max3100_probe, .remove = __devexit_p(max3100_remove), .suspend = 
max3100_suspend, .resume = max3100_resume, }; static int __init max3100_init(void) { return spi_register_driver(&max3100_driver); } module_init(max3100_init); static void __exit max3100_exit(void) { spi_unregister_driver(&max3100_driver); } module_exit(max3100_exit); MODULE_DESCRIPTION("MAX3100 driver"); MODULE_AUTHOR("Christian Pellegrin <chripell@evolware.org>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("spi:max3100");
gpl-2.0
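Worth spelling out, since it is the whole trick of the max3100 driver above: there are no addressable registers, only single 16-bit SPI exchanges in which bit 15 selects a write, bit 14 selects the configuration register over the data register, bit 9 doubles as RTS on transmit, and bit 8 carries the optional parity bit. A small sketch composing a transmit word from the same masks (max3100_tx_word and even_parity8 are hypothetical helpers; the XOR fold matches max3100_do_parity for the 8-bit, even-parity case):

#include <stdint.h>

#define MAX3100_C   (1 << 14)	/* config register */
#define MAX3100_D   (0 << 14)	/* data register */
#define MAX3100_W   (1 << 15)	/* write */
#define MAX3100_WD  (MAX3100_W | MAX3100_D)
#define MAX3100_RTS (1 << 9)
#define MAX3100_PT  (1 << 8)	/* parity bit slot */

/* Even parity over the 8 data bits: hweight8(c) & 1, done by folding. */
static unsigned int even_parity8(uint16_t c)
{
	c &= 0xff;
	c ^= c >> 4;
	c ^= c >> 2;
	c ^= c >> 1;
	return c & 1;
}

/* Compose the word max3100_work() shifts out for one character. */
static uint16_t max3100_tx_word(uint8_t ch, int rts, int parity_on)
{
	uint16_t w = MAX3100_WD | ch;

	if (parity_on)
		w |= even_parity8(ch) << 8;	/* rides in the MAX3100_PT slot */
	if (rts)
		w |= MAX3100_RTS;
	return w;
}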
EnJens/kernel_tf201_stock
net/ipv4/inetpeer.c
565
16049
/* * INETPEER - A storage for permanent information about peers * * This source is covered by the GNU GPL, the same as all kernel sources. * * Authors: Andrey V. Savochkin <saw@msu.ru> */ #include <linux/module.h> #include <linux/types.h> #include <linux/slab.h> #include <linux/interrupt.h> #include <linux/spinlock.h> #include <linux/random.h> #include <linux/timer.h> #include <linux/time.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/net.h> #include <net/ip.h> #include <net/inetpeer.h> #include <net/secure_seq.h> /* * Theory of operations. * We keep one entry for each peer IP address. The nodes contain long-living * information about the peer which doesn't depend on routes. * At this moment this information consists only of the ID field for the next * outgoing IP packet. This field is incremented with each packet as encoded * in the inet_getid() function (include/net/inetpeer.h). * At the time of writing these notes, the identifier of IP packets is generated * to be unpredictable using this code only for packets subjected * (actually or potentially) to defragmentation. I.e. DF packets less than * the PMTU in size use a constant ID and do not use this code (see * ip_select_ident() in include/net/ip.h). * * Route cache entries hold references to our nodes. * New cache entries get references via lookup by destination IP address in * the AVL tree. The reference is grabbed only when it's needed, i.e. only * when we try to output an IP packet which needs an unpredictable ID (see * __ip_select_ident() in net/ipv4/route.c). * Nodes are removed only when the reference counter goes to 0. * Once that has happened, the node may be removed when a sufficient amount of * time has passed since its last use. The less-recently-used entry can * also be removed if the pool is overloaded, i.e. if the total amount of * entries is greater than or equal to the threshold. * * The node pool is organised as an AVL tree. * Such an implementation has been chosen not just for fun. It's a way to * prevent easy and efficient DoS attacks by creating hash collisions. A huge * amount of long-living nodes in a single hash slot would significantly delay * lookups performed with disabled BHs. * * Serialisation issues. * 1. Nodes may appear in the tree only with the pool lock held. * 2. Nodes may disappear from the tree only with the pool lock held * AND the reference count being 0. * 3. The global variable peer_total is modified under the pool lock. * 4. struct inet_peer fields modification: * avl_left, avl_right, avl_parent, avl_height: pool lock * refcnt: atomically against modifications on other CPUs; * usually under some other lock to prevent the node from disappearing * daddr: unchangeable * ip_id_count: atomic value (no lock needed) */ static struct kmem_cache *peer_cachep __read_mostly; #define node_height(x) x->avl_height #define peer_avl_empty ((struct inet_peer *)&peer_fake_node) #define peer_avl_empty_rcu ((struct inet_peer __rcu __force *)&peer_fake_node) static const struct inet_peer peer_fake_node = { .avl_left = peer_avl_empty_rcu, .avl_right = peer_avl_empty_rcu, .avl_height = 0 }; struct inet_peer_base { struct inet_peer __rcu *root; seqlock_t lock; int total; }; static struct inet_peer_base v4_peers = { .root = peer_avl_empty_rcu, .lock = __SEQLOCK_UNLOCKED(v4_peers.lock), .total = 0, }; static struct inet_peer_base v6_peers = { .root = peer_avl_empty_rcu, .lock = __SEQLOCK_UNLOCKED(v6_peers.lock), .total = 0, }; #define PEER_MAXDEPTH 40 /* sufficient for about 2^27 nodes */ /* Exported for sysctl_net_ipv4.
*/ int inet_peer_threshold __read_mostly = 65536 + 128; /* start to throw entries more * aggressively at this stage */ int inet_peer_minttl __read_mostly = 120 * HZ; /* TTL under high load: 120 sec */ int inet_peer_maxttl __read_mostly = 10 * 60 * HZ; /* usual time to live: 10 min */ /* Called from ip_output.c:ip_init */ void __init inet_initpeers(void) { struct sysinfo si; /* Use the straight interface to information about memory. */ si_meminfo(&si); /* The values below were suggested by Alexey Kuznetsov * <kuznet@ms2.inr.ac.ru>. I don't have any opinion about the values * myself. --SAW */ if (si.totalram <= (32768*1024)/PAGE_SIZE) inet_peer_threshold >>= 1; /* max pool size about 1MB on IA32 */ if (si.totalram <= (16384*1024)/PAGE_SIZE) inet_peer_threshold >>= 1; /* about 512KB */ if (si.totalram <= (8192*1024)/PAGE_SIZE) inet_peer_threshold >>= 2; /* about 128KB */ peer_cachep = kmem_cache_create("inet_peer_cache", sizeof(struct inet_peer), 0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL); } static int addr_compare(const struct inetpeer_addr *a, const struct inetpeer_addr *b) { int i, n = (a->family == AF_INET ? 1 : 4); for (i = 0; i < n; i++) { if (a->addr.a6[i] == b->addr.a6[i]) continue; if (a->addr.a6[i] < b->addr.a6[i]) return -1; return 1; } return 0; } #define rcu_deref_locked(X, BASE) \ rcu_dereference_protected(X, lockdep_is_held(&(BASE)->lock.lock)) /* * Called with local BH disabled and the pool lock held. */ #define lookup(_daddr, _stack, _base) \ ({ \ struct inet_peer *u; \ struct inet_peer __rcu **v; \ \ stackptr = _stack; \ *stackptr++ = &_base->root; \ for (u = rcu_deref_locked(_base->root, _base); \ u != peer_avl_empty; ) { \ int cmp = addr_compare(_daddr, &u->daddr); \ if (cmp == 0) \ break; \ if (cmp == -1) \ v = &u->avl_left; \ else \ v = &u->avl_right; \ *stackptr++ = v; \ u = rcu_deref_locked(*v, _base); \ } \ u; \ }) /* * Called with rcu_read_lock() * Because we hold no lock against a writer, its quite possible we fall * in an endless loop. * But every pointer we follow is guaranteed to be valid thanks to RCU. * We exit from this function if number of links exceeds PEER_MAXDEPTH */ static struct inet_peer *lookup_rcu(const struct inetpeer_addr *daddr, struct inet_peer_base *base) { struct inet_peer *u = rcu_dereference(base->root); int count = 0; while (u != peer_avl_empty) { int cmp = addr_compare(daddr, &u->daddr); if (cmp == 0) { /* Before taking a reference, check if this entry was * deleted (refcnt=-1) */ if (!atomic_add_unless(&u->refcnt, 1, -1)) u = NULL; return u; } if (cmp == -1) u = rcu_dereference(u->avl_left); else u = rcu_dereference(u->avl_right); if (unlikely(++count == PEER_MAXDEPTH)) break; } return NULL; } /* Called with local BH disabled and the pool lock held. */ #define lookup_rightempty(start, base) \ ({ \ struct inet_peer *u; \ struct inet_peer __rcu **v; \ *stackptr++ = &start->avl_left; \ v = &start->avl_left; \ for (u = rcu_deref_locked(*v, base); \ u->avl_right != peer_avl_empty_rcu; ) { \ v = &u->avl_right; \ *stackptr++ = v; \ u = rcu_deref_locked(*v, base); \ } \ u; \ }) /* Called with local BH disabled and the pool lock held. * Variable names are the proof of operation correctness. * Look into mm/map_avl.c for more detail description of the ideas. 
*/ static void peer_avl_rebalance(struct inet_peer __rcu **stack[], struct inet_peer __rcu ***stackend, struct inet_peer_base *base) { struct inet_peer __rcu **nodep; struct inet_peer *node, *l, *r; int lh, rh; while (stackend > stack) { nodep = *--stackend; node = rcu_deref_locked(*nodep, base); l = rcu_deref_locked(node->avl_left, base); r = rcu_deref_locked(node->avl_right, base); lh = node_height(l); rh = node_height(r); if (lh > rh + 1) { /* l: RH+2 */ struct inet_peer *ll, *lr, *lrl, *lrr; int lrh; ll = rcu_deref_locked(l->avl_left, base); lr = rcu_deref_locked(l->avl_right, base); lrh = node_height(lr); if (lrh <= node_height(ll)) { /* ll: RH+1 */ RCU_INIT_POINTER(node->avl_left, lr); /* lr: RH or RH+1 */ RCU_INIT_POINTER(node->avl_right, r); /* r: RH */ node->avl_height = lrh + 1; /* RH+1 or RH+2 */ RCU_INIT_POINTER(l->avl_left, ll); /* ll: RH+1 */ RCU_INIT_POINTER(l->avl_right, node); /* node: RH+1 or RH+2 */ l->avl_height = node->avl_height + 1; RCU_INIT_POINTER(*nodep, l); } else { /* ll: RH, lr: RH+1 */ lrl = rcu_deref_locked(lr->avl_left, base);/* lrl: RH or RH-1 */ lrr = rcu_deref_locked(lr->avl_right, base);/* lrr: RH or RH-1 */ RCU_INIT_POINTER(node->avl_left, lrr); /* lrr: RH or RH-1 */ RCU_INIT_POINTER(node->avl_right, r); /* r: RH */ node->avl_height = rh + 1; /* node: RH+1 */ RCU_INIT_POINTER(l->avl_left, ll); /* ll: RH */ RCU_INIT_POINTER(l->avl_right, lrl); /* lrl: RH or RH-1 */ l->avl_height = rh + 1; /* l: RH+1 */ RCU_INIT_POINTER(lr->avl_left, l); /* l: RH+1 */ RCU_INIT_POINTER(lr->avl_right, node); /* node: RH+1 */ lr->avl_height = rh + 2; RCU_INIT_POINTER(*nodep, lr); } } else if (rh > lh + 1) { /* r: LH+2 */ struct inet_peer *rr, *rl, *rlr, *rll; int rlh; rr = rcu_deref_locked(r->avl_right, base); rl = rcu_deref_locked(r->avl_left, base); rlh = node_height(rl); if (rlh <= node_height(rr)) { /* rr: LH+1 */ RCU_INIT_POINTER(node->avl_right, rl); /* rl: LH or LH+1 */ RCU_INIT_POINTER(node->avl_left, l); /* l: LH */ node->avl_height = rlh + 1; /* LH+1 or LH+2 */ RCU_INIT_POINTER(r->avl_right, rr); /* rr: LH+1 */ RCU_INIT_POINTER(r->avl_left, node); /* node: LH+1 or LH+2 */ r->avl_height = node->avl_height + 1; RCU_INIT_POINTER(*nodep, r); } else { /* rr: RH, rl: RH+1 */ rlr = rcu_deref_locked(rl->avl_right, base);/* rlr: LH or LH-1 */ rll = rcu_deref_locked(rl->avl_left, base);/* rll: LH or LH-1 */ RCU_INIT_POINTER(node->avl_right, rll); /* rll: LH or LH-1 */ RCU_INIT_POINTER(node->avl_left, l); /* l: LH */ node->avl_height = lh + 1; /* node: LH+1 */ RCU_INIT_POINTER(r->avl_right, rr); /* rr: LH */ RCU_INIT_POINTER(r->avl_left, rlr); /* rlr: LH or LH-1 */ r->avl_height = lh + 1; /* r: LH+1 */ RCU_INIT_POINTER(rl->avl_right, r); /* r: LH+1 */ RCU_INIT_POINTER(rl->avl_left, node); /* node: LH+1 */ rl->avl_height = lh + 2; RCU_INIT_POINTER(*nodep, rl); } } else { node->avl_height = (lh > rh ? lh : rh) + 1; } } } /* Called with local BH disabled and the pool lock held. 
*/ #define link_to_pool(n, base) \ do { \ n->avl_height = 1; \ n->avl_left = peer_avl_empty_rcu; \ n->avl_right = peer_avl_empty_rcu; \ /* lockless readers can catch us now */ \ rcu_assign_pointer(**--stackptr, n); \ peer_avl_rebalance(stack, stackptr, base); \ } while (0) static void inetpeer_free_rcu(struct rcu_head *head) { kmem_cache_free(peer_cachep, container_of(head, struct inet_peer, rcu)); } static void unlink_from_pool(struct inet_peer *p, struct inet_peer_base *base, struct inet_peer __rcu **stack[PEER_MAXDEPTH]) { struct inet_peer __rcu ***stackptr, ***delp; if (lookup(&p->daddr, stack, base) != p) BUG(); delp = stackptr - 1; /* *delp[0] == p */ if (p->avl_left == peer_avl_empty_rcu) { *delp[0] = p->avl_right; --stackptr; } else { /* look for a node to insert instead of p */ struct inet_peer *t; t = lookup_rightempty(p, base); BUG_ON(rcu_deref_locked(*stackptr[-1], base) != t); **--stackptr = t->avl_left; /* t is removed, t->daddr > x->daddr for any * x in p->avl_left subtree. * Put t in the old place of p. */ RCU_INIT_POINTER(*delp[0], t); t->avl_left = p->avl_left; t->avl_right = p->avl_right; t->avl_height = p->avl_height; BUG_ON(delp[1] != &p->avl_left); delp[1] = &t->avl_left; /* was &p->avl_left */ } peer_avl_rebalance(stack, stackptr, base); base->total--; call_rcu(&p->rcu, inetpeer_free_rcu); } static struct inet_peer_base *family_to_base(int family) { return family == AF_INET ? &v4_peers : &v6_peers; } /* perform garbage collect on all items stacked during a lookup */ static int inet_peer_gc(struct inet_peer_base *base, struct inet_peer __rcu **stack[PEER_MAXDEPTH], struct inet_peer __rcu ***stackptr) { struct inet_peer *p, *gchead = NULL; __u32 delta, ttl; int cnt = 0; if (base->total >= inet_peer_threshold) ttl = 0; /* be aggressive */ else ttl = inet_peer_maxttl - (inet_peer_maxttl - inet_peer_minttl) / HZ * base->total / inet_peer_threshold * HZ; stackptr--; /* last stack slot is peer_avl_empty */ while (stackptr > stack) { stackptr--; p = rcu_deref_locked(**stackptr, base); if (atomic_read(&p->refcnt) == 0) { smp_rmb(); delta = (__u32)jiffies - p->dtime; if (delta >= ttl && atomic_cmpxchg(&p->refcnt, 0, -1) == 0) { p->gc_next = gchead; gchead = p; } } } while ((p = gchead) != NULL) { gchead = p->gc_next; cnt++; unlink_from_pool(p, base, stack); } return cnt; } struct inet_peer *inet_getpeer(const struct inetpeer_addr *daddr, int create) { struct inet_peer __rcu **stack[PEER_MAXDEPTH], ***stackptr; struct inet_peer_base *base = family_to_base(daddr->family); struct inet_peer *p; unsigned int sequence; int invalidated, gccnt = 0; /* Attempt a lockless lookup first. * Because of a concurrent writer, we might not find an existing entry. */ rcu_read_lock(); sequence = read_seqbegin(&base->lock); p = lookup_rcu(daddr, base); invalidated = read_seqretry(&base->lock, sequence); rcu_read_unlock(); if (p) return p; /* If no writer did a change during our lookup, we can return early. */ if (!create && !invalidated) return NULL; /* retry an exact lookup, taking the lock before. * At least, nodes should be hot in our cache. */ write_seqlock_bh(&base->lock); relookup: p = lookup(daddr, stack, base); if (p != peer_avl_empty) { atomic_inc(&p->refcnt); write_sequnlock_bh(&base->lock); return p; } if (!gccnt) { gccnt = inet_peer_gc(base, stack, stackptr); if (gccnt && create) goto relookup; } p = create ? 
kmem_cache_alloc(peer_cachep, GFP_ATOMIC) : NULL; if (p) { p->daddr = *daddr; atomic_set(&p->refcnt, 1); atomic_set(&p->rid, 0); atomic_set(&p->ip_id_count, (daddr->family == AF_INET) ? secure_ip_id(daddr->addr.a4) : secure_ipv6_id(daddr->addr.a6)); p->tcp_ts_stamp = 0; p->metrics[RTAX_LOCK-1] = INETPEER_METRICS_NEW; p->rate_tokens = 0; p->rate_last = 0; p->pmtu_expires = 0; p->pmtu_orig = 0; memset(&p->redirect_learned, 0, sizeof(p->redirect_learned)); /* Link the node. */ link_to_pool(p, base); base->total++; } write_sequnlock_bh(&base->lock); return p; } EXPORT_SYMBOL_GPL(inet_getpeer); void inet_putpeer(struct inet_peer *p) { p->dtime = (__u32)jiffies; smp_mb__before_atomic_dec(); atomic_dec(&p->refcnt); } EXPORT_SYMBOL_GPL(inet_putpeer); /* * Check transmit rate limitation for given message. * The rate information is held in the inet_peer entries now. * This function is generic and could be used for other purposes * too. It uses a Token bucket filter as suggested by Alexey Kuznetsov. * * Note that the same inet_peer fields are modified by functions in * route.c too, but these work for packet destinations while xrlim_allow * works for icmp destinations. This means the rate limiting information * for one "ip object" is shared - and these ICMPs are twice limited: * by source and by destination. * * RFC 1812: 4.3.2.8 SHOULD be able to limit error message rate * SHOULD allow setting of rate limits * * Shared between ICMPv4 and ICMPv6. */ #define XRLIM_BURST_FACTOR 6 bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout) { unsigned long now, token; bool rc = false; if (!peer) return true; token = peer->rate_tokens; now = jiffies; token += now - peer->rate_last; peer->rate_last = now; if (token > XRLIM_BURST_FACTOR * timeout) token = XRLIM_BURST_FACTOR * timeout; if (token >= timeout) { token -= timeout; rc = true; } peer->rate_tokens = token; return rc; } EXPORT_SYMBOL(inet_peer_xrlim_allow);
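/*
 * Editor's note: an illustrative user-space sketch of the token-bucket
 * logic in inet_peer_xrlim_allow() above, with jiffies replaced by an
 * abstract tick counter. struct tbucket, tb_allow() and main() are
 * invented for the demonstration and are not part of the kernel source.
 */
#include <stdbool.h>
#include <stdio.h>

#define XRLIM_BURST_FACTOR 6

struct tbucket {
	unsigned long rate_tokens;	/* accumulated credit, in ticks */
	unsigned long rate_last;	/* time of last refill, in ticks */
};

/* Allow one event iff at least 'timeout' ticks of credit have accrued. */
static bool tb_allow(struct tbucket *b, unsigned long now,
		     unsigned long timeout)
{
	unsigned long token = b->rate_tokens + (now - b->rate_last);

	b->rate_last = now;
	if (token > XRLIM_BURST_FACTOR * timeout)
		token = XRLIM_BURST_FACTOR * timeout;	/* cap the burst */
	if (token >= timeout) {
		b->rate_tokens = token - timeout;	/* spend one credit */
		return true;
	}
	b->rate_tokens = token;
	return false;
}

int main(void)
{
	struct tbucket b = { 0, 0 };
	unsigned long t;

	/* With timeout = 5, at most every fifth tick is allowed. */
	for (t = 0; t < 16; t++)
		printf("t=%2lu %s\n", t, tb_allow(&b, t, 5) ? "allow" : "drop");
	return 0;
}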
gpl-2.0
pscholl/jennic-usb-zigbee-linux-driver
arch/mips/ar7/clock.c
565
11335
/* * Copyright (C) 2007 Felix Fietkau <nbd@openwrt.org> * Copyright (C) 2007 Eugene Konev <ejka@openwrt.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/gcd.h> #include <linux/io.h> #include <asm/addrspace.h> #include <asm/mach-ar7/ar7.h> #define BOOT_PLL_SOURCE_MASK 0x3 #define CPU_PLL_SOURCE_SHIFT 16 #define BUS_PLL_SOURCE_SHIFT 14 #define USB_PLL_SOURCE_SHIFT 18 #define DSP_PLL_SOURCE_SHIFT 22 #define BOOT_PLL_SOURCE_AFE 0 #define BOOT_PLL_SOURCE_BUS 0 #define BOOT_PLL_SOURCE_REF 1 #define BOOT_PLL_SOURCE_XTAL 2 #define BOOT_PLL_SOURCE_CPU 3 #define BOOT_PLL_BYPASS 0x00000020 #define BOOT_PLL_ASYNC_MODE 0x02000000 #define BOOT_PLL_2TO1_MODE 0x00008000 #define TNETD7200_CLOCK_ID_CPU 0 #define TNETD7200_CLOCK_ID_DSP 1 #define TNETD7200_CLOCK_ID_USB 2 #define TNETD7200_DEF_CPU_CLK 211000000 #define TNETD7200_DEF_DSP_CLK 125000000 #define TNETD7200_DEF_USB_CLK 48000000 struct tnetd7300_clock { u32 ctrl; #define PREDIV_MASK 0x001f0000 #define PREDIV_SHIFT 16 #define POSTDIV_MASK 0x0000001f u32 unused1[3]; u32 pll; #define MUL_MASK 0x0000f000 #define MUL_SHIFT 12 #define PLL_MODE_MASK 0x00000001 #define PLL_NDIV 0x00000800 #define PLL_DIV 0x00000002 #define PLL_STATUS 0x00000001 u32 unused2[3]; }; struct tnetd7300_clocks { struct tnetd7300_clock bus; struct tnetd7300_clock cpu; struct tnetd7300_clock usb; struct tnetd7300_clock dsp; }; struct tnetd7200_clock { u32 ctrl; u32 unused1[3]; #define DIVISOR_ENABLE_MASK 0x00008000 u32 mul; u32 prediv; u32 postdiv; u32 postdiv2; u32 unused2[6]; u32 cmd; u32 status; u32 cmden; u32 padding[15]; }; struct tnetd7200_clocks { struct tnetd7200_clock cpu; struct tnetd7200_clock dsp; struct tnetd7200_clock usb; }; int ar7_cpu_clock = 150000000; EXPORT_SYMBOL(ar7_cpu_clock); int ar7_bus_clock = 125000000; EXPORT_SYMBOL(ar7_bus_clock); int ar7_dsp_clock; EXPORT_SYMBOL(ar7_dsp_clock); static void approximate(int base, int target, int *prediv, int *postdiv, int *mul) { int i, j, k, freq, res = target; for (i = 1; i <= 16; i++) for (j = 1; j <= 32; j++) for (k = 1; k <= 32; k++) { freq = abs(base / j * i / k - target); if (freq < res) { res = freq; *mul = i; *prediv = j; *postdiv = k; } } } static void calculate(int base, int target, int *prediv, int *postdiv, int *mul) { int tmp_gcd, tmp_base, tmp_freq; for (*prediv = 1; *prediv <= 32; (*prediv)++) { tmp_base = base / *prediv; tmp_gcd = gcd(target, tmp_base); *mul = target / tmp_gcd; *postdiv = tmp_base / tmp_gcd; if ((*mul < 1) || (*mul >= 16)) continue; if ((*postdiv > 0) & (*postdiv <= 32)) break; } if (base / *prediv * *mul / *postdiv != target) { approximate(base, target, prediv, postdiv, mul); tmp_freq = base / *prediv * *mul / *postdiv; printk(KERN_WARNING "Adjusted requested frequency %d to %d\n", target, tmp_freq); } 
printk(KERN_DEBUG "Clocks: prediv: %d, postdiv: %d, mul: %d\n", *prediv, *postdiv, *mul); } static int tnetd7300_dsp_clock(void) { u32 didr1, didr2; u8 rev = ar7_chip_rev(); didr1 = readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x18)); didr2 = readl((void *)KSEG1ADDR(AR7_REGS_GPIO + 0x1c)); if (didr2 & (1 << 23)) return 0; if ((rev >= 0x23) && (rev != 0x57)) return 250000000; if ((((didr2 & 0x1fff) << 10) | ((didr1 & 0xffc00000) >> 22)) > 4208000) return 250000000; return 0; } static int tnetd7300_get_clock(u32 shift, struct tnetd7300_clock *clock, u32 *bootcr, u32 bus_clock) { int product; int base_clock = AR7_REF_CLOCK; u32 ctrl = readl(&clock->ctrl); u32 pll = readl(&clock->pll); int prediv = ((ctrl & PREDIV_MASK) >> PREDIV_SHIFT) + 1; int postdiv = (ctrl & POSTDIV_MASK) + 1; int divisor = prediv * postdiv; int mul = ((pll & MUL_MASK) >> MUL_SHIFT) + 1; switch ((*bootcr & (BOOT_PLL_SOURCE_MASK << shift)) >> shift) { case BOOT_PLL_SOURCE_BUS: base_clock = bus_clock; break; case BOOT_PLL_SOURCE_REF: base_clock = AR7_REF_CLOCK; break; case BOOT_PLL_SOURCE_XTAL: base_clock = AR7_XTAL_CLOCK; break; case BOOT_PLL_SOURCE_CPU: base_clock = ar7_cpu_clock; break; } if (*bootcr & BOOT_PLL_BYPASS) return base_clock / divisor; if ((pll & PLL_MODE_MASK) == 0) return (base_clock >> (mul / 16 + 1)) / divisor; if ((pll & (PLL_NDIV | PLL_DIV)) == (PLL_NDIV | PLL_DIV)) { product = (mul & 1) ? (base_clock * mul) >> 1 : (base_clock * (mul - 1)) >> 2; return product / divisor; } if (mul == 16) return base_clock / divisor; return base_clock * mul / divisor; } static void tnetd7300_set_clock(u32 shift, struct tnetd7300_clock *clock, u32 *bootcr, u32 frequency) { int prediv, postdiv, mul; int base_clock = ar7_bus_clock; switch ((*bootcr & (BOOT_PLL_SOURCE_MASK << shift)) >> shift) { case BOOT_PLL_SOURCE_BUS: base_clock = ar7_bus_clock; break; case BOOT_PLL_SOURCE_REF: base_clock = AR7_REF_CLOCK; break; case BOOT_PLL_SOURCE_XTAL: base_clock = AR7_XTAL_CLOCK; break; case BOOT_PLL_SOURCE_CPU: base_clock = ar7_cpu_clock; break; } calculate(base_clock, frequency, &prediv, &postdiv, &mul); writel(((prediv - 1) << PREDIV_SHIFT) | (postdiv - 1), &clock->ctrl); msleep(1); writel(4, &clock->pll); while (readl(&clock->pll) & PLL_STATUS) ; writel(((mul - 1) << MUL_SHIFT) | (0xff << 3) | 0x0e, &clock->pll); msleep(75); } static void __init tnetd7300_init_clocks(void) { u32 *bootcr = (u32 *)ioremap_nocache(AR7_REGS_DCL, 4); struct tnetd7300_clocks *clocks = ioremap_nocache(UR8_REGS_CLOCKS, sizeof(struct tnetd7300_clocks)); ar7_bus_clock = tnetd7300_get_clock(BUS_PLL_SOURCE_SHIFT, &clocks->bus, bootcr, AR7_AFE_CLOCK); if (*bootcr & BOOT_PLL_ASYNC_MODE) ar7_cpu_clock = tnetd7300_get_clock(CPU_PLL_SOURCE_SHIFT, &clocks->cpu, bootcr, AR7_AFE_CLOCK); else ar7_cpu_clock = ar7_bus_clock; if (ar7_dsp_clock == 250000000) tnetd7300_set_clock(DSP_PLL_SOURCE_SHIFT, &clocks->dsp, bootcr, ar7_dsp_clock); iounmap(clocks); iounmap(bootcr); } static void tnetd7200_set_clock(int base, struct tnetd7200_clock *clock, int prediv, int postdiv, int postdiv2, int mul, u32 frequency) { printk(KERN_INFO "Clocks: base = %d, frequency = %u, prediv = %d, " "postdiv = %d, postdiv2 = %d, mul = %d\n", base, frequency, prediv, postdiv, postdiv2, mul); writel(0, &clock->ctrl); writel(DIVISOR_ENABLE_MASK | ((prediv - 1) & 0x1F), &clock->prediv); writel((mul - 1) & 0xF, &clock->mul); while (readl(&clock->status) & 0x1) ; /* nop */ writel(DIVISOR_ENABLE_MASK | ((postdiv - 1) & 0x1F), &clock->postdiv); writel(readl(&clock->cmden) | 1, &clock->cmden); 
writel(readl(&clock->cmd) | 1, &clock->cmd); while (readl(&clock->status) & 0x1) ; /* nop */ writel(DIVISOR_ENABLE_MASK | ((postdiv2 - 1) & 0x1F), &clock->postdiv2); writel(readl(&clock->cmden) | 1, &clock->cmden); writel(readl(&clock->cmd) | 1, &clock->cmd); while (readl(&clock->status) & 0x1) ; /* nop */ writel(readl(&clock->ctrl) | 1, &clock->ctrl); } static int tnetd7200_get_clock_base(int clock_id, u32 *bootcr) { if (*bootcr & BOOT_PLL_ASYNC_MODE) /* Async */ switch (clock_id) { case TNETD7200_CLOCK_ID_DSP: return AR7_REF_CLOCK; default: return AR7_AFE_CLOCK; } else /* Sync */ if (*bootcr & BOOT_PLL_2TO1_MODE) /* 2:1 */ switch (clock_id) { case TNETD7200_CLOCK_ID_DSP: return AR7_REF_CLOCK; default: return AR7_AFE_CLOCK; } else /* 1:1 */ return AR7_REF_CLOCK; } static void __init tnetd7200_init_clocks(void) { u32 *bootcr = (u32 *)ioremap_nocache(AR7_REGS_DCL, 4); struct tnetd7200_clocks *clocks = ioremap_nocache(AR7_REGS_CLOCKS, sizeof(struct tnetd7200_clocks)); int cpu_base, cpu_mul, cpu_prediv, cpu_postdiv; int dsp_base, dsp_mul, dsp_prediv, dsp_postdiv; int usb_base, usb_mul, usb_prediv, usb_postdiv; cpu_base = tnetd7200_get_clock_base(TNETD7200_CLOCK_ID_CPU, bootcr); dsp_base = tnetd7200_get_clock_base(TNETD7200_CLOCK_ID_DSP, bootcr); if (*bootcr & BOOT_PLL_ASYNC_MODE) { printk(KERN_INFO "Clocks: Async mode\n"); printk(KERN_INFO "Clocks: Setting DSP clock\n"); calculate(dsp_base, TNETD7200_DEF_DSP_CLK, &dsp_prediv, &dsp_postdiv, &dsp_mul); ar7_bus_clock = ((dsp_base / dsp_prediv) * dsp_mul) / dsp_postdiv; tnetd7200_set_clock(dsp_base, &clocks->dsp, dsp_prediv, dsp_postdiv * 2, dsp_postdiv, dsp_mul * 2, ar7_bus_clock); printk(KERN_INFO "Clocks: Setting CPU clock\n"); calculate(cpu_base, TNETD7200_DEF_CPU_CLK, &cpu_prediv, &cpu_postdiv, &cpu_mul); ar7_cpu_clock = ((cpu_base / cpu_prediv) * cpu_mul) / cpu_postdiv; tnetd7200_set_clock(cpu_base, &clocks->cpu, cpu_prediv, cpu_postdiv, -1, cpu_mul, ar7_cpu_clock); } else if (*bootcr & BOOT_PLL_2TO1_MODE) { printk(KERN_INFO "Clocks: Sync 2:1 mode\n"); printk(KERN_INFO "Clocks: Setting CPU clock\n"); calculate(cpu_base, TNETD7200_DEF_CPU_CLK, &cpu_prediv, &cpu_postdiv, &cpu_mul); ar7_cpu_clock = ((cpu_base / cpu_prediv) * cpu_mul) / cpu_postdiv; tnetd7200_set_clock(cpu_base, &clocks->cpu, cpu_prediv, cpu_postdiv, -1, cpu_mul, ar7_cpu_clock); printk(KERN_INFO "Clocks: Setting DSP clock\n"); calculate(dsp_base, TNETD7200_DEF_DSP_CLK, &dsp_prediv, &dsp_postdiv, &dsp_mul); ar7_bus_clock = ar7_cpu_clock / 2; tnetd7200_set_clock(dsp_base, &clocks->dsp, dsp_prediv, dsp_postdiv * 2, dsp_postdiv, dsp_mul * 2, ar7_bus_clock); } else { printk(KERN_INFO "Clocks: Sync 1:1 mode\n"); printk(KERN_INFO "Clocks: Setting DSP clock\n"); calculate(dsp_base, TNETD7200_DEF_DSP_CLK, &dsp_prediv, &dsp_postdiv, &dsp_mul); ar7_bus_clock = ((dsp_base / dsp_prediv) * dsp_mul) / dsp_postdiv; tnetd7200_set_clock(dsp_base, &clocks->dsp, dsp_prediv, dsp_postdiv * 2, dsp_postdiv, dsp_mul * 2, ar7_bus_clock); ar7_cpu_clock = ar7_bus_clock; } printk(KERN_INFO "Clocks: Setting USB clock\n"); usb_base = ar7_bus_clock; calculate(usb_base, TNETD7200_DEF_USB_CLK, &usb_prediv, &usb_postdiv, &usb_mul); tnetd7200_set_clock(usb_base, &clocks->usb, usb_prediv, usb_postdiv, -1, usb_mul, TNETD7200_DEF_USB_CLK); ar7_dsp_clock = ar7_cpu_clock; iounmap(clocks); iounmap(bootcr); } int __init ar7_init_clocks(void) { switch (ar7_chip_id()) { case AR7_CHIP_7100: case AR7_CHIP_7200: tnetd7200_init_clocks(); break; case AR7_CHIP_7300: ar7_dsp_clock = tnetd7300_dsp_clock(); 
tnetd7300_init_clocks(); break; default: break; } return 0; } arch_initcall(ar7_init_clocks);
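/*
 * Editor's note: an illustrative user-space sketch of the exact divider
 * search performed by calculate() above (a gcd reduction tried for each
 * prediv), assuming a 25 MHz reference clock. find_dividers() and main()
 * are invented for the demonstration; the kernel falls back to
 * approximate() when no exact solution exists.
 */
#include <stdio.h>

static int gcd(int a, int b)	/* Euclid, as <linux/gcd.h> provides in-kernel */
{
	while (b) {
		int t = a % b;
		a = b;
		b = t;
	}
	return a;
}

/* Search for target == base / prediv * mul / postdiv, mul < 16, divs <= 32. */
static int find_dividers(int base, int target,
			 int *prediv, int *postdiv, int *mul)
{
	for (*prediv = 1; *prediv <= 32; (*prediv)++) {
		int tmp_base = base / *prediv;
		int g = gcd(target, tmp_base);

		*mul = target / g;
		*postdiv = tmp_base / g;
		if (*mul < 1 || *mul >= 16)
			continue;
		if (*postdiv > 0 && *postdiv <= 32)
			return 0;		/* exact match found */
	}
	return -1;				/* would need approximate() */
}

int main(void)
{
	int prediv, postdiv, mul;

	/* 150 MHz CPU clock (the default ar7_cpu_clock) from a 25 MHz base. */
	if (!find_dividers(25000000, 150000000, &prediv, &postdiv, &mul))
		printf("prediv=%d mul=%d postdiv=%d -> %d Hz\n", prediv, mul,
		       postdiv, 25000000 / prediv * mul / postdiv);
	return 0;
}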
gpl-2.0
Jazz-823/semc-kernel-msm7x30
net/wanrouter/wanmain.c
1333
19523
/***************************************************************************** * wanmain.c WAN Multiprotocol Router Module. Main code. * * This module is completely hardware-independent and provides * the following common services for the WAN Link Drivers: * o WAN device management (registering, unregistering) * o Network interface management * o Physical connection management (dial-up, incoming calls) * o Logical connection management (switched virtual circuits) * o Protocol encapsulation/decapsulation * * Author: Gideon Hack * * Copyright: (c) 1995-1999 Sangoma Technologies Inc. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. * ============================================================================ * Nov 24, 2000 Nenad Corbic Updated for 2.4.X kernels * Nov 07, 2000 Nenad Corbic Fixed the Mulit-Port PPP for kernels 2.2.16 and * greater. * Aug 2, 2000 Nenad Corbic Block the Multi-Port PPP from running on * kernels 2.2.16 or greater. The SyncPPP * has changed. * Jul 13, 2000 Nenad Corbic Added SyncPPP support * Added extra debugging in device_setup(). * Oct 01, 1999 Gideon Hack Update for s514 PCI card * Dec 27, 1996 Gene Kozin Initial version (based on Sangoma's WANPIPE) * Jan 16, 1997 Gene Kozin router_devlist made public * Jan 31, 1997 Alan Cox Hacked it about a bit for 2.1 * Jun 27, 1997 Alan Cox realigned with vendor code * Oct 15, 1997 Farhan Thawar changed wan_encapsulate to add a pad byte of 0 * Apr 20, 1998 Alan Cox Fixed 2.1 symbols * May 17, 1998 K. Baranowski Fixed SNAP encapsulation in wan_encapsulate * Dec 15, 1998 Arnaldo Melo support for firmwares of up to 128000 bytes * check wandev->setup return value * Dec 22, 1998 Arnaldo Melo vmalloc/vfree used in device_setup to allocate * kernel memory and copy configuration data to * kernel space (for big firmwares) * Jun 02, 1999 Gideon Hack Updates for Linux 2.0.X and 2.2.X kernels. *****************************************************************************/ #include <linux/stddef.h> /* offsetof(), etc. */ #include <linux/capability.h> #include <linux/errno.h> /* return codes */ #include <linux/kernel.h> #include <linux/module.h> /* support for loadable modules */ #include <linux/slab.h> /* kmalloc(), kfree() */ #include <linux/smp_lock.h> #include <linux/mm.h> #include <linux/string.h> /* inline mem*, str* functions */ #include <asm/byteorder.h> /* htons(), etc. */ #include <linux/wanrouter.h> /* WAN router API definitions */ #include <linux/vmalloc.h> /* vmalloc, vfree */ #include <asm/uaccess.h> /* copy_to/from_user */ #include <linux/init.h> /* __initfunc et al. 
*/ #define KMEM_SAFETYZONE 8 #define DEV_TO_SLAVE(dev) (*((struct net_device **)netdev_priv(dev))) /* * Function Prototypes */ /* * WAN device IOCTL handlers */ static int wanrouter_device_setup(struct wan_device *wandev, wandev_conf_t __user *u_conf); static int wanrouter_device_stat(struct wan_device *wandev, wandev_stat_t __user *u_stat); static int wanrouter_device_shutdown(struct wan_device *wandev); static int wanrouter_device_new_if(struct wan_device *wandev, wanif_conf_t __user *u_conf); static int wanrouter_device_del_if(struct wan_device *wandev, char __user *u_name); /* * Miscellaneous */ static struct wan_device *wanrouter_find_device(char *name); static int wanrouter_delete_interface(struct wan_device *wandev, char *name); static void lock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags) __acquires(lock); static void unlock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags) __releases(lock); /* * Global Data */ static char wanrouter_fullname[] = "Sangoma WANPIPE Router"; static char wanrouter_copyright[] = "(c) 1995-2000 Sangoma Technologies Inc."; static char wanrouter_modname[] = ROUTER_NAME; /* short module name */ struct wan_device* wanrouter_router_devlist; /* list of registered devices */ /* * Organize Unique Identifiers for encapsulation/decapsulation */ #if 0 static unsigned char wanrouter_oui_ether[] = { 0x00, 0x00, 0x00 }; static unsigned char wanrouter_oui_802_2[] = { 0x00, 0x80, 0xC2 }; #endif static int __init wanrouter_init(void) { int err; printk(KERN_INFO "%s v%u.%u %s\n", wanrouter_fullname, ROUTER_VERSION, ROUTER_RELEASE, wanrouter_copyright); err = wanrouter_proc_init(); if (err) printk(KERN_INFO "%s: can't create entry in proc filesystem!\n", wanrouter_modname); return err; } static void __exit wanrouter_cleanup (void) { wanrouter_proc_cleanup(); } /* * This is just plain dumb. We should move the bugger to drivers/net/wan, * slap it first in directory and make it module_init(). The only reason * for subsys_initcall() here is that net goes after drivers (why, BTW?) */ subsys_initcall(wanrouter_init); module_exit(wanrouter_cleanup); /* * Kernel APIs */ /* * Register WAN device. * o verify device credentials * o create an entry for the device in the /proc/net/router directory * o initialize internally maintained fields of the wan_device structure * o link device data space to a singly-linked list * o if it's the first device, then start kernel 'thread' * o increment module use count * * Return: * 0 Ok * < 0 error. * * Context: process */ int register_wan_device(struct wan_device *wandev) { int err, namelen; if ((wandev == NULL) || (wandev->magic != ROUTER_MAGIC) || (wandev->name == NULL)) return -EINVAL; namelen = strlen(wandev->name); if (!namelen || (namelen > WAN_DRVNAME_SZ)) return -EINVAL; if (wanrouter_find_device(wandev->name)) return -EEXIST; #ifdef WANDEBUG printk(KERN_INFO "%s: registering WAN device %s\n", wanrouter_modname, wandev->name); #endif /* * Register /proc directory entry */ err = wanrouter_proc_add(wandev); if (err) { printk(KERN_INFO "%s: can't create /proc/net/router/%s entry!\n", wanrouter_modname, wandev->name); return err; } /* * Initialize fields of the wan_device structure maintained by the * router and update local data. */ wandev->ndev = 0; wandev->dev = NULL; wandev->next = wanrouter_router_devlist; wanrouter_router_devlist = wandev; return 0; } /* * Unregister WAN device. 
* o shut down device * o unlink device data space from the linked list * o delete device entry in the /proc/net/router directory * o decrement module use count * * Return: 0 Ok * <0 error. * Context: process */ int unregister_wan_device(char *name) { struct wan_device *wandev, *prev; if (name == NULL) return -EINVAL; for (wandev = wanrouter_router_devlist, prev = NULL; wandev && strcmp(wandev->name, name); prev = wandev, wandev = wandev->next) ; if (wandev == NULL) return -ENODEV; #ifdef WANDEBUG printk(KERN_INFO "%s: unregistering WAN device %s\n", wanrouter_modname, name); #endif if (wandev->state != WAN_UNCONFIGURED) wanrouter_device_shutdown(wandev); if (prev) prev->next = wandev->next; else wanrouter_router_devlist = wandev->next; wanrouter_proc_delete(wandev); return 0; } #if 0 /* * Encapsulate packet. * * Return: encapsulation header size * < 0 - unsupported Ethertype * * Notes: * 1. This function may be called in interrupt context. */ int wanrouter_encapsulate(struct sk_buff *skb, struct net_device *dev, unsigned short type) { int hdr_len = 0; switch (type) { case ETH_P_IP: /* IP datagram encapsulation */ hdr_len += 1; skb_push(skb, 1); skb->data[0] = NLPID_IP; break; case ETH_P_IPX: /* SNAP encapsulation */ case ETH_P_ARP: hdr_len += 7; skb_push(skb, 7); skb->data[0] = 0; skb->data[1] = NLPID_SNAP; skb_copy_to_linear_data_offset(skb, 2, wanrouter_oui_ether, sizeof(wanrouter_oui_ether)); *((unsigned short*)&skb->data[5]) = htons(type); break; default: /* Unknown packet type */ printk(KERN_INFO "%s: unsupported Ethertype 0x%04X on interface %s!\n", wanrouter_modname, type, dev->name); hdr_len = -EINVAL; } return hdr_len; } /* * Decapsulate packet. * * Return: Ethertype (in network order) * 0 unknown encapsulation * * Notes: * 1. This function may be called in interrupt context. */ __be16 wanrouter_type_trans(struct sk_buff *skb, struct net_device *dev) { int cnt = skb->data[0] ? 0 : 1; /* there may be a pad present */ __be16 ethertype; switch (skb->data[cnt]) { case NLPID_IP: /* IP datagram */ ethertype = htons(ETH_P_IP); cnt += 1; break; case NLPID_SNAP: /* SNAP encapsulation */ if (memcmp(&skb->data[cnt + 1], wanrouter_oui_ether, sizeof(wanrouter_oui_ether))) { printk(KERN_INFO "%s: unsupported SNAP OUI %02X-%02X-%02X " "on interface %s!\n", wanrouter_modname, skb->data[cnt+1], skb->data[cnt+2], skb->data[cnt+3], dev->name); return 0; } ethertype = *((__be16*)&skb->data[cnt+4]); cnt += 6; break; /* add other protocols, e.g. CLNP, ESIS, ISIS, if needed */ default: printk(KERN_INFO "%s: unsupported NLPID 0x%02X on interface %s!\n", wanrouter_modname, skb->data[cnt], dev->name); return 0; } skb->protocol = ethertype; skb->pkt_type = PACKET_HOST; /* Physically point to point */ skb_pull(skb, cnt); skb_reset_mac_header(skb); return ethertype; } #endif /* 0 */ /* * WAN device IOCTL.
* o find WAN device associated with this node * o execute requested action or pass command to the device driver */ long wanrouter_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct inode *inode = file->f_path.dentry->d_inode; int err = 0; struct proc_dir_entry *dent; struct wan_device *wandev; void __user *data = (void __user *)arg; if (!capable(CAP_NET_ADMIN)) return -EPERM; if ((cmd >> 8) != ROUTER_IOCTL) return -EINVAL; dent = PDE(inode); if ((dent == NULL) || (dent->data == NULL)) return -EINVAL; wandev = dent->data; if (wandev->magic != ROUTER_MAGIC) return -EINVAL; lock_kernel(); switch (cmd) { case ROUTER_SETUP: err = wanrouter_device_setup(wandev, data); break; case ROUTER_DOWN: err = wanrouter_device_shutdown(wandev); break; case ROUTER_STAT: err = wanrouter_device_stat(wandev, data); break; case ROUTER_IFNEW: err = wanrouter_device_new_if(wandev, data); break; case ROUTER_IFDEL: err = wanrouter_device_del_if(wandev, data); break; case ROUTER_IFSTAT: break; default: if ((cmd >= ROUTER_USER) && (cmd <= ROUTER_USER_MAX) && wandev->ioctl) err = wandev->ioctl(wandev, cmd, arg); else err = -EINVAL; } unlock_kernel(); return err; } /* * WAN Driver IOCTL Handlers */ /* * Setup WAN link device. * o verify user address space * o allocate kernel memory and copy configuration data to kernel space * o if configuration data includes extension, copy it to kernel space too * o call driver's setup() entry point */ static int wanrouter_device_setup(struct wan_device *wandev, wandev_conf_t __user *u_conf) { void *data = NULL; wandev_conf_t *conf; int err = -EINVAL; if (wandev->setup == NULL) { /* Nothing to do? */ printk(KERN_INFO "%s: ERROR, No setup script: wandev->setup()\n", wandev->name); return 0; } conf = kmalloc(sizeof(wandev_conf_t), GFP_KERNEL); if (conf == NULL) { printk(KERN_INFO "%s: ERROR, Failed to allocate kernel memory !\n", wandev->name); return -ENOBUFS; } if (copy_from_user(conf, u_conf, sizeof(wandev_conf_t))) { printk(KERN_INFO "%s: Failed to copy user config data to kernel space!\n", wandev->name); kfree(conf); return -EFAULT; } if (conf->magic != ROUTER_MAGIC) { kfree(conf); printk(KERN_INFO "%s: ERROR, Invalid MAGIC Number\n", wandev->name); return -EINVAL; } if (conf->data_size && conf->data) { if (conf->data_size > 128000) { printk(KERN_INFO "%s: ERROR, Invalid firmware data size %i !\n", wandev->name, conf->data_size); kfree(conf); return -EINVAL; } data = vmalloc(conf->data_size); if (!data) { printk(KERN_INFO "%s: ERROR, Failed to allocate kernel memory !\n", wandev->name); kfree(conf); return -ENOBUFS; } if (!copy_from_user(data, conf->data, conf->data_size)) { conf->data = data; err = wandev->setup(wandev, conf); } else { printk(KERN_INFO "%s: ERROR, Failed to copy data from user space !\n", wandev->name); err = -EFAULT; } vfree(data); } else { printk(KERN_INFO "%s: ERROR, No firmware found ! Firmware size = %i !\n", wandev->name, conf->data_size); } kfree(conf); return err; } /* * Shutdown WAN device. * o delete all unopened logical channels for this device * o call driver's shutdown() entry point */ static int wanrouter_device_shutdown(struct wan_device *wandev) { struct net_device *dev; int err = 0; if (wandev->state == WAN_UNCONFIGURED) return 0; printk(KERN_INFO "\n%s: Shutting Down!\n", wandev->name); for (dev = wandev->dev; dev;) { err = wanrouter_delete_interface(wandev, dev->name); if (err) return err; /* The above function deallocates the current dev * structure.
Therefore, we cannot use netdev_priv(dev) * as the next element: wandev->dev points to the * next element */ dev = wandev->dev; } if (wandev->ndev) return -EBUSY; /* there are open interfaces */ if (wandev->shutdown) err = wandev->shutdown(wandev); return err; } /* * Get WAN device status & statistics. */ static int wanrouter_device_stat(struct wan_device *wandev, wandev_stat_t __user *u_stat) { wandev_stat_t stat; memset(&stat, 0, sizeof(stat)); /* Ask device driver to update device statistics */ if ((wandev->state != WAN_UNCONFIGURED) && wandev->update) wandev->update(wandev); /* Fill out structure */ stat.ndev = wandev->ndev; stat.state = wandev->state; if (copy_to_user(u_stat, &stat, sizeof(stat))) return -EFAULT; return 0; } /* * Create new WAN interface. * o verify user address space * o copy configuration data to kernel address space * o allocate network interface data space * o call driver's new_if() entry point * o make sure there is no interface name conflict * o register network interface */ static int wanrouter_device_new_if(struct wan_device *wandev, wanif_conf_t __user *u_conf) { wanif_conf_t *cnf; struct net_device *dev = NULL; int err; if ((wandev->state == WAN_UNCONFIGURED) || (wandev->new_if == NULL)) return -ENODEV; cnf = kmalloc(sizeof(wanif_conf_t), GFP_KERNEL); if (!cnf) return -ENOBUFS; err = -EFAULT; if (copy_from_user(cnf, u_conf, sizeof(wanif_conf_t))) goto out; err = -EINVAL; if (cnf->magic != ROUTER_MAGIC) goto out; if (cnf->config_id == WANCONFIG_MPPP) { printk(KERN_INFO "%s: Wanpipe Multi-Port PPP support has not been compiled in!\n", wandev->name); err = -EPROTONOSUPPORT; goto out; } else { err = wandev->new_if(wandev, dev, cnf); } if (!err) { /* Register network interface. This will invoke init() * function supplied by the driver. If device registered * successfully, add it to the interface list. */ if (dev->name == NULL) { err = -EINVAL; } else { #ifdef WANDEBUG printk(KERN_INFO "%s: registering interface %s...\n", wanrouter_modname, dev->name); #endif err = register_netdev(dev); if (!err) { struct net_device *slave = NULL; unsigned long smp_flags = 0; lock_adapter_irq(&wandev->lock, &smp_flags); if (wandev->dev == NULL) { wandev->dev = dev; } else { for (slave = wandev->dev; DEV_TO_SLAVE(slave); slave = DEV_TO_SLAVE(slave)) DEV_TO_SLAVE(slave) = dev; } ++wandev->ndev; unlock_adapter_irq(&wandev->lock, &smp_flags); err = 0; /* done !!! */ goto out; } } if (wandev->del_if) wandev->del_if(wandev, dev); free_netdev(dev); } out: kfree(cnf); return err; } /* * Delete WAN logical channel. * o verify user address space * o copy configuration data to kernel address space */ static int wanrouter_device_del_if(struct wan_device *wandev, char __user *u_name) { char name[WAN_IFNAME_SZ + 1]; int err = 0; if (wandev->state == WAN_UNCONFIGURED) return -ENODEV; memset(name, 0, sizeof(name)); if (copy_from_user(name, u_name, WAN_IFNAME_SZ)) return -EFAULT; err = wanrouter_delete_interface(wandev, name); if (err) return err; /* If the last interface is being deleted, shut down the card. * This helps with administration at leaf nodes * (You can tell if the person at the other end of the phone * has an interface configured) and avoids DoS vulnerabilities * in binary driver files - this fixes a problem with the current * Sangoma driver going into strange states when all the network * interfaces are deleted and the link irrecoverably disconnected. */ if (!wandev->ndev && wandev->shutdown) err = wandev->shutdown(wandev); return err; } /* * Miscellaneous Functions */ /* * Find WAN device by name.
* Return pointer to the WAN device data space or NULL if device not found. */ static struct wan_device *wanrouter_find_device(char *name) { struct wan_device *wandev; for (wandev = wanrouter_router_devlist; wandev && strcmp(wandev->name, name); wandev = wandev->next); return wandev; } /* * Delete WAN logical channel identified by its name. * o find logical channel by its name * o call driver's del_if() entry point * o unregister network interface * o unlink channel data space from linked list of channels * o release channel data space * * Return: 0 success * -ENODEV channel not found. * -EBUSY interface is open * * Note: If (force != 0), then device will be destroyed even if interface * associated with it is open. It's caller's responsibility to make * sure that opened interfaces are not removed! */ static int wanrouter_delete_interface(struct wan_device *wandev, char *name) { struct net_device *dev = NULL, *prev = NULL; unsigned long smp_flags=0; lock_adapter_irq(&wandev->lock, &smp_flags); dev = wandev->dev; prev = NULL; while (dev && strcmp(name, dev->name)) { struct net_device **slave = netdev_priv(dev); prev = dev; dev = *slave; } unlock_adapter_irq(&wandev->lock, &smp_flags); if (dev == NULL) return -ENODEV; /* interface not found */ if (netif_running(dev)) return -EBUSY; /* interface in use */ if (wandev->del_if) wandev->del_if(wandev, dev); lock_adapter_irq(&wandev->lock, &smp_flags); if (prev) { struct net_device **prev_slave = netdev_priv(prev); struct net_device **slave = netdev_priv(dev); *prev_slave = *slave; } else { struct net_device **slave = netdev_priv(dev); wandev->dev = *slave; } --wandev->ndev; unlock_adapter_irq(&wandev->lock, &smp_flags); printk(KERN_INFO "%s: unregistering '%s'\n", wandev->name, dev->name); unregister_netdev(dev); free_netdev(dev); return 0; } static void lock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags) __acquires(lock) { spin_lock_irqsave(lock, *smp_flags); } static void unlock_adapter_irq(spinlock_t *lock, unsigned long *smp_flags) __releases(lock) { spin_unlock_irqrestore(lock, *smp_flags); } EXPORT_SYMBOL(register_wan_device); EXPORT_SYMBOL(unregister_wan_device); MODULE_LICENSE("GPL"); /* * End */
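/*
 * Editor's note: an illustrative user-space sketch of the singly-linked
 * device-list discipline used by register_wan_device() and
 * unregister_wan_device() above: insert at the head, unlink by walking
 * with a trailing 'prev' pointer. struct dev and the function names are
 * invented for the demonstration.
 */
#include <stdio.h>
#include <string.h>

struct dev {
	const char *name;
	struct dev *next;
};

static struct dev *devlist;		/* head of registered devices */

static void dev_register(struct dev *d)
{
	d->next = devlist;		/* head insertion, as in the router */
	devlist = d;
}

static int dev_unregister(const char *name)
{
	struct dev *d, *prev;

	for (d = devlist, prev = NULL; d && strcmp(d->name, name);
	     prev = d, d = d->next)
		;
	if (!d)
		return -1;		/* -ENODEV in the kernel */
	if (prev)
		prev->next = d->next;	/* unlink from the middle/tail */
	else
		devlist = d->next;	/* unlink the head */
	return 0;
}

int main(void)
{
	struct dev a = { "wanpipe1", NULL }, b = { "wanpipe2", NULL };

	dev_register(&a);
	dev_register(&b);
	dev_unregister("wanpipe1");
	printf("head: %s\n", devlist ? devlist->name : "(empty)");
	return 0;
}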
gpl-2.0
MoKee/android_kernel_yu_msm8916
drivers/clk/spear/spear3xx_clock.c
1333
23043
/* * SPEAr3xx machines clock framework source file * * Copyright (C) 2012 ST Microelectronics * Viresh Kumar <viresh.linux@gmail.com> * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/clk.h> #include <linux/clkdev.h> #include <linux/err.h> #include <linux/io.h> #include <linux/of_platform.h> #include <linux/spinlock_types.h> #include "clk.h" static DEFINE_SPINLOCK(_lock); #define PLL1_CTR (misc_base + 0x008) #define PLL1_FRQ (misc_base + 0x00C) #define PLL2_CTR (misc_base + 0x014) #define PLL2_FRQ (misc_base + 0x018) #define PLL_CLK_CFG (misc_base + 0x020) /* PLL_CLK_CFG register masks */ #define MCTR_CLK_SHIFT 28 #define MCTR_CLK_MASK 3 #define CORE_CLK_CFG (misc_base + 0x024) /* CORE CLK CFG register masks */ #define GEN_SYNTH2_3_CLK_SHIFT 18 #define GEN_SYNTH2_3_CLK_MASK 1 #define HCLK_RATIO_SHIFT 10 #define HCLK_RATIO_MASK 2 #define PCLK_RATIO_SHIFT 8 #define PCLK_RATIO_MASK 2 #define PERIP_CLK_CFG (misc_base + 0x028) /* PERIP_CLK_CFG register masks */ #define UART_CLK_SHIFT 4 #define UART_CLK_MASK 1 #define FIRDA_CLK_SHIFT 5 #define FIRDA_CLK_MASK 2 #define GPT0_CLK_SHIFT 8 #define GPT1_CLK_SHIFT 11 #define GPT2_CLK_SHIFT 12 #define GPT_CLK_MASK 1 #define PERIP1_CLK_ENB (misc_base + 0x02C) /* PERIP1_CLK_ENB register masks */ #define UART_CLK_ENB 3 #define SSP_CLK_ENB 5 #define I2C_CLK_ENB 7 #define JPEG_CLK_ENB 8 #define FIRDA_CLK_ENB 10 #define GPT1_CLK_ENB 11 #define GPT2_CLK_ENB 12 #define ADC_CLK_ENB 15 #define RTC_CLK_ENB 17 #define GPIO_CLK_ENB 18 #define DMA_CLK_ENB 19 #define SMI_CLK_ENB 21 #define GMAC_CLK_ENB 23 #define USBD_CLK_ENB 24 #define USBH_CLK_ENB 25 #define C3_CLK_ENB 31 #define RAS_CLK_ENB (misc_base + 0x034) #define RAS_AHB_CLK_ENB 0 #define RAS_PLL1_CLK_ENB 1 #define RAS_APB_CLK_ENB 2 #define RAS_32K_CLK_ENB 3 #define RAS_24M_CLK_ENB 4 #define RAS_48M_CLK_ENB 5 #define RAS_PLL2_CLK_ENB 7 #define RAS_SYNT0_CLK_ENB 8 #define RAS_SYNT1_CLK_ENB 9 #define RAS_SYNT2_CLK_ENB 10 #define RAS_SYNT3_CLK_ENB 11 #define PRSC0_CLK_CFG (misc_base + 0x044) #define PRSC1_CLK_CFG (misc_base + 0x048) #define PRSC2_CLK_CFG (misc_base + 0x04C) #define AMEM_CLK_CFG (misc_base + 0x050) #define AMEM_CLK_ENB 0 #define CLCD_CLK_SYNT (misc_base + 0x05C) #define FIRDA_CLK_SYNT (misc_base + 0x060) #define UART_CLK_SYNT (misc_base + 0x064) #define GMAC_CLK_SYNT (misc_base + 0x068) #define GEN0_CLK_SYNT (misc_base + 0x06C) #define GEN1_CLK_SYNT (misc_base + 0x070) #define GEN2_CLK_SYNT (misc_base + 0x074) #define GEN3_CLK_SYNT (misc_base + 0x078) /* pll rate configuration table, in ascending order of rates */ static struct pll_rate_tbl pll_rtbl[] = { {.mode = 0, .m = 0x53, .n = 0x0C, .p = 0x1}, /* vco 332 & pll 166 MHz */ {.mode = 0, .m = 0x85, .n = 0x0C, .p = 0x1}, /* vco 532 & pll 266 MHz */ {.mode = 0, .m = 0xA6, .n = 0x0C, .p = 0x1}, /* vco 664 & pll 332 MHz */ }; /* aux rate configuration table, in ascending order of rates */ static struct aux_rate_tbl aux_rtbl[] = { /* For PLL1 = 332 MHz */ {.xscale = 1, .yscale = 81, .eq = 0}, /* 2.049 MHz */ {.xscale = 1, .yscale = 59, .eq = 0}, /* 2.822 MHz */ {.xscale = 2, .yscale = 81, .eq = 0}, /* 4.098 MHz */ {.xscale = 3, .yscale = 89, .eq = 0}, /* 5.644 MHz */ {.xscale = 4, .yscale = 81, .eq = 0}, /* 8.197 MHz */ {.xscale = 4, .yscale = 59, .eq = 0}, /* 11.254 MHz */ {.xscale = 2, .yscale = 27, .eq = 0}, /* 12.296 MHz */ {.xscale = 2, .yscale = 8, .eq = 0}, /* 41.5 MHz */ {.xscale = 2, 
.yscale = 4, .eq = 0}, /* 83 MHz */ {.xscale = 1, .yscale = 2, .eq = 1}, /* 166 MHz */ }; /* gpt rate configuration table, in ascending order of rates */ static struct gpt_rate_tbl gpt_rtbl[] = { /* For pll1 = 332 MHz */ {.mscale = 4, .nscale = 0}, /* 41.5 MHz */ {.mscale = 2, .nscale = 0}, /* 55.3 MHz */ {.mscale = 1, .nscale = 0}, /* 83 MHz */ }; /* clock parents */ static const char *uart0_parents[] = { "pll3_clk", "uart_syn_gclk", }; static const char *firda_parents[] = { "pll3_clk", "firda_syn_gclk", }; static const char *gpt0_parents[] = { "pll3_clk", "gpt0_syn_clk", }; static const char *gpt1_parents[] = { "pll3_clk", "gpt1_syn_clk", }; static const char *gpt2_parents[] = { "pll3_clk", "gpt2_syn_clk", }; static const char *gen2_3_parents[] = { "pll1_clk", "pll2_clk", }; static const char *ddr_parents[] = { "ahb_clk", "ahbmult2_clk", "none", "pll2_clk", }; #ifdef CONFIG_MACH_SPEAR300 static void __init spear300_clk_init(void) { struct clk *clk; clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "60000000.clcd"); clk = clk_register_fixed_factor(NULL, "fsmc_clk", "ras_ahb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "94000000.flash"); clk = clk_register_fixed_factor(NULL, "sdhci_clk", "ras_ahb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "70000000.sdhci"); clk = clk_register_fixed_factor(NULL, "gpio1_clk", "ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "a9000000.gpio"); clk = clk_register_fixed_factor(NULL, "kbd_clk", "ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "a0000000.kbd"); } #else static inline void spear300_clk_init(void) { } #endif /* array of all spear 310 clock lookups */ #ifdef CONFIG_MACH_SPEAR310 static void __init spear310_clk_init(void) { struct clk *clk; clk = clk_register_fixed_factor(NULL, "emi_clk", "ras_ahb_clk", 0, 1, 1); clk_register_clkdev(clk, "emi", NULL); clk = clk_register_fixed_factor(NULL, "fsmc_clk", "ras_ahb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "44000000.flash"); clk = clk_register_fixed_factor(NULL, "tdm_clk", "ras_ahb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "tdm"); clk = clk_register_fixed_factor(NULL, "uart1_clk", "ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "b2000000.serial"); clk = clk_register_fixed_factor(NULL, "uart2_clk", "ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "b2080000.serial"); clk = clk_register_fixed_factor(NULL, "uart3_clk", "ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "b2100000.serial"); clk = clk_register_fixed_factor(NULL, "uart4_clk", "ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "b2180000.serial"); clk = clk_register_fixed_factor(NULL, "uart5_clk", "ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "b2200000.serial"); } #else static inline void spear310_clk_init(void) { } #endif /* array of all spear 320 clock lookups */ #ifdef CONFIG_MACH_SPEAR320 #define SPEAR320_CONTROL_REG (soc_config_base + 0x0010) #define SPEAR320_EXT_CTRL_REG (soc_config_base + 0x0018) #define SPEAR320_UARTX_PCLK_MASK 0x1 #define SPEAR320_UART2_PCLK_SHIFT 8 #define SPEAR320_UART3_PCLK_SHIFT 9 #define SPEAR320_UART4_PCLK_SHIFT 10 #define SPEAR320_UART5_PCLK_SHIFT 11 #define SPEAR320_UART6_PCLK_SHIFT 12 #define SPEAR320_RS485_PCLK_SHIFT 13 #define SMII_PCLK_SHIFT 18 #define SMII_PCLK_MASK 2 #define SMII_PCLK_VAL_PAD 0x0 #define SMII_PCLK_VAL_PLL2 0x1 #define SMII_PCLK_VAL_SYNTH0 0x2 #define SDHCI_PCLK_SHIFT 15 #define SDHCI_PCLK_MASK 1 #define SDHCI_PCLK_VAL_48M 0x0 #define SDHCI_PCLK_VAL_SYNTH3 0x1 #define 
I2S_REF_PCLK_SHIFT 8 #define I2S_REF_PCLK_MASK 1 #define I2S_REF_PCLK_SYNTH_VAL 0x1 #define I2S_REF_PCLK_PLL2_VAL 0x0 #define UART1_PCLK_SHIFT 6 #define UART1_PCLK_MASK 1 #define SPEAR320_UARTX_PCLK_VAL_SYNTH1 0x0 #define SPEAR320_UARTX_PCLK_VAL_APB 0x1 static const char *i2s_ref_parents[] = { "ras_pll2_clk", "ras_syn2_gclk", }; static const char *sdhci_parents[] = { "ras_pll3_clk", "ras_syn3_gclk", }; static const char *smii0_parents[] = { "smii_125m_pad", "ras_pll2_clk", "ras_syn0_gclk", }; static const char *uartx_parents[] = { "ras_syn1_gclk", "ras_apb_clk", }; static void __init spear320_clk_init(void __iomem *soc_config_base) { struct clk *clk; clk = clk_register_fixed_rate(NULL, "smii_125m_pad_clk", NULL, CLK_IS_ROOT, 125000000); clk_register_clkdev(clk, "smii_125m_pad", NULL); clk = clk_register_fixed_factor(NULL, "clcd_clk", "ras_pll3_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "90000000.clcd"); clk = clk_register_fixed_factor(NULL, "emi_clk", "ras_ahb_clk", 0, 1, 1); clk_register_clkdev(clk, "emi", NULL); clk = clk_register_fixed_factor(NULL, "fsmc_clk", "ras_ahb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "4c000000.flash"); clk = clk_register_fixed_factor(NULL, "i2c1_clk", "ras_ahb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "a7000000.i2c"); clk = clk_register_fixed_factor(NULL, "pwm_clk", "ras_ahb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "a8000000.pwm"); clk = clk_register_fixed_factor(NULL, "ssp1_clk", "ras_ahb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "a5000000.spi"); clk = clk_register_fixed_factor(NULL, "ssp2_clk", "ras_ahb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "a6000000.spi"); clk = clk_register_fixed_factor(NULL, "can0_clk", "ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "c_can_platform.0"); clk = clk_register_fixed_factor(NULL, "can1_clk", "ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "c_can_platform.1"); clk = clk_register_fixed_factor(NULL, "i2s_clk", "ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "a9400000.i2s"); clk = clk_register_mux(NULL, "i2s_ref_clk", i2s_ref_parents, ARRAY_SIZE(i2s_ref_parents), CLK_SET_RATE_PARENT, SPEAR320_CONTROL_REG, I2S_REF_PCLK_SHIFT, I2S_REF_PCLK_MASK, 0, &_lock); clk_register_clkdev(clk, "i2s_ref_clk", NULL); clk = clk_register_fixed_factor(NULL, "i2s_sclk", "i2s_ref_clk", CLK_SET_RATE_PARENT, 1, 4); clk_register_clkdev(clk, "i2s_sclk", NULL); clk = clk_register_fixed_factor(NULL, "macb1_clk", "ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, "hclk", "aa000000.eth"); clk = clk_register_fixed_factor(NULL, "macb2_clk", "ras_apb_clk", 0, 1, 1); clk_register_clkdev(clk, "hclk", "ab000000.eth"); clk = clk_register_mux(NULL, "rs485_clk", uartx_parents, ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT, SPEAR320_EXT_CTRL_REG, SPEAR320_RS485_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "a9300000.serial"); clk = clk_register_mux(NULL, "sdhci_clk", sdhci_parents, ARRAY_SIZE(sdhci_parents), CLK_SET_RATE_PARENT, SPEAR320_CONTROL_REG, SDHCI_PCLK_SHIFT, SDHCI_PCLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "70000000.sdhci"); clk = clk_register_mux(NULL, "smii_pclk", smii0_parents, ARRAY_SIZE(smii0_parents), 0, SPEAR320_CONTROL_REG, SMII_PCLK_SHIFT, SMII_PCLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "smii_pclk"); clk = clk_register_fixed_factor(NULL, "smii_clk", "smii_pclk", 0, 1, 1); clk_register_clkdev(clk, NULL, "smii"); clk = clk_register_mux(NULL, "uart1_clk", uartx_parents, ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT, SPEAR320_CONTROL_REG, 
UART1_PCLK_SHIFT, UART1_PCLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "a3000000.serial"); clk = clk_register_mux(NULL, "uart2_clk", uartx_parents, ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT, SPEAR320_EXT_CTRL_REG, SPEAR320_UART2_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "a4000000.serial"); clk = clk_register_mux(NULL, "uart3_clk", uartx_parents, ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT, SPEAR320_EXT_CTRL_REG, SPEAR320_UART3_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "a9100000.serial"); clk = clk_register_mux(NULL, "uart4_clk", uartx_parents, ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT, SPEAR320_EXT_CTRL_REG, SPEAR320_UART4_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "a9200000.serial"); clk = clk_register_mux(NULL, "uart5_clk", uartx_parents, ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT, SPEAR320_EXT_CTRL_REG, SPEAR320_UART5_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "60000000.serial"); clk = clk_register_mux(NULL, "uart6_clk", uartx_parents, ARRAY_SIZE(uartx_parents), CLK_SET_RATE_PARENT, SPEAR320_EXT_CTRL_REG, SPEAR320_UART6_PCLK_SHIFT, SPEAR320_UARTX_PCLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "60100000.serial"); } #else static inline void spear320_clk_init(void __iomem *soc_config_base) { } #endif void __init spear3xx_clk_init(void __iomem *misc_base, void __iomem *soc_config_base) { struct clk *clk, *clk1; clk = clk_register_fixed_rate(NULL, "osc_32k_clk", NULL, CLK_IS_ROOT, 32000); clk_register_clkdev(clk, "osc_32k_clk", NULL); clk = clk_register_fixed_rate(NULL, "osc_24m_clk", NULL, CLK_IS_ROOT, 24000000); clk_register_clkdev(clk, "osc_24m_clk", NULL); /* clock derived from 32 KHz osc clk */ clk = clk_register_gate(NULL, "rtc-spear", "osc_32k_clk", 0, PERIP1_CLK_ENB, RTC_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "fc900000.rtc"); /* clock derived from 24 MHz osc clk */ clk = clk_register_fixed_rate(NULL, "pll3_clk", "osc_24m_clk", 0, 48000000); clk_register_clkdev(clk, "pll3_clk", NULL); clk = clk_register_fixed_factor(NULL, "wdt_clk", "osc_24m_clk", 0, 1, 1); clk_register_clkdev(clk, NULL, "fc880000.wdt"); clk = clk_register_vco_pll("vco1_clk", "pll1_clk", NULL, "osc_24m_clk", 0, PLL1_CTR, PLL1_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL); clk_register_clkdev(clk, "vco1_clk", NULL); clk_register_clkdev(clk1, "pll1_clk", NULL); clk = clk_register_vco_pll("vco2_clk", "pll2_clk", NULL, "osc_24m_clk", 0, PLL2_CTR, PLL2_FRQ, pll_rtbl, ARRAY_SIZE(pll_rtbl), &_lock, &clk1, NULL); clk_register_clkdev(clk, "vco2_clk", NULL); clk_register_clkdev(clk1, "pll2_clk", NULL); /* clock derived from pll1 clk */ clk = clk_register_fixed_factor(NULL, "cpu_clk", "pll1_clk", CLK_SET_RATE_PARENT, 1, 1); clk_register_clkdev(clk, "cpu_clk", NULL); clk = clk_register_divider(NULL, "ahb_clk", "pll1_clk", CLK_SET_RATE_PARENT, CORE_CLK_CFG, HCLK_RATIO_SHIFT, HCLK_RATIO_MASK, 0, &_lock); clk_register_clkdev(clk, "ahb_clk", NULL); clk = clk_register_aux("uart_syn_clk", "uart_syn_gclk", "pll1_clk", 0, UART_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); clk_register_clkdev(clk, "uart_syn_clk", NULL); clk_register_clkdev(clk1, "uart_syn_gclk", NULL); clk = clk_register_mux(NULL, "uart0_mclk", uart0_parents, ARRAY_SIZE(uart0_parents), CLK_SET_RATE_PARENT, PERIP_CLK_CFG, UART_CLK_SHIFT, UART_CLK_MASK, 0, &_lock); clk_register_clkdev(clk, "uart0_mclk", NULL); clk = clk_register_gate(NULL, "uart0", 
"uart0_mclk", CLK_SET_RATE_PARENT, PERIP1_CLK_ENB, UART_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "d0000000.serial"); clk = clk_register_aux("firda_syn_clk", "firda_syn_gclk", "pll1_clk", 0, FIRDA_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); clk_register_clkdev(clk, "firda_syn_clk", NULL); clk_register_clkdev(clk1, "firda_syn_gclk", NULL); clk = clk_register_mux(NULL, "firda_mclk", firda_parents, ARRAY_SIZE(firda_parents), CLK_SET_RATE_PARENT, PERIP_CLK_CFG, FIRDA_CLK_SHIFT, FIRDA_CLK_MASK, 0, &_lock); clk_register_clkdev(clk, "firda_mclk", NULL); clk = clk_register_gate(NULL, "firda_clk", "firda_mclk", CLK_SET_RATE_PARENT, PERIP1_CLK_ENB, FIRDA_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "firda"); /* gpt clocks */ clk_register_gpt("gpt0_syn_clk", "pll1_clk", 0, PRSC0_CLK_CFG, gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock); clk = clk_register_mux(NULL, "gpt0_clk", gpt0_parents, ARRAY_SIZE(gpt0_parents), CLK_SET_RATE_PARENT, PERIP_CLK_CFG, GPT0_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock); clk_register_clkdev(clk, NULL, "gpt0"); clk_register_gpt("gpt1_syn_clk", "pll1_clk", 0, PRSC1_CLK_CFG, gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock); clk = clk_register_mux(NULL, "gpt1_mclk", gpt1_parents, ARRAY_SIZE(gpt1_parents), CLK_SET_RATE_PARENT, PERIP_CLK_CFG, GPT1_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock); clk_register_clkdev(clk, "gpt1_mclk", NULL); clk = clk_register_gate(NULL, "gpt1_clk", "gpt1_mclk", CLK_SET_RATE_PARENT, PERIP1_CLK_ENB, GPT1_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "gpt1"); clk_register_gpt("gpt2_syn_clk", "pll1_clk", 0, PRSC2_CLK_CFG, gpt_rtbl, ARRAY_SIZE(gpt_rtbl), &_lock); clk = clk_register_mux(NULL, "gpt2_mclk", gpt2_parents, ARRAY_SIZE(gpt2_parents), CLK_SET_RATE_PARENT, PERIP_CLK_CFG, GPT2_CLK_SHIFT, GPT_CLK_MASK, 0, &_lock); clk_register_clkdev(clk, "gpt2_mclk", NULL); clk = clk_register_gate(NULL, "gpt2_clk", "gpt2_mclk", CLK_SET_RATE_PARENT, PERIP1_CLK_ENB, GPT2_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "gpt2"); /* general synths clocks */ clk = clk_register_aux("gen0_syn_clk", "gen0_syn_gclk", "pll1_clk", 0, GEN0_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); clk_register_clkdev(clk, "gen0_syn_clk", NULL); clk_register_clkdev(clk1, "gen0_syn_gclk", NULL); clk = clk_register_aux("gen1_syn_clk", "gen1_syn_gclk", "pll1_clk", 0, GEN1_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); clk_register_clkdev(clk, "gen1_syn_clk", NULL); clk_register_clkdev(clk1, "gen1_syn_gclk", NULL); clk = clk_register_mux(NULL, "gen2_3_par_clk", gen2_3_parents, ARRAY_SIZE(gen2_3_parents), 0, CORE_CLK_CFG, GEN_SYNTH2_3_CLK_SHIFT, GEN_SYNTH2_3_CLK_MASK, 0, &_lock); clk_register_clkdev(clk, "gen2_3_par_clk", NULL); clk = clk_register_aux("gen2_syn_clk", "gen2_syn_gclk", "gen2_3_par_clk", 0, GEN2_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); clk_register_clkdev(clk, "gen2_syn_clk", NULL); clk_register_clkdev(clk1, "gen2_syn_gclk", NULL); clk = clk_register_aux("gen3_syn_clk", "gen3_syn_gclk", "gen2_3_par_clk", 0, GEN3_CLK_SYNT, NULL, aux_rtbl, ARRAY_SIZE(aux_rtbl), &_lock, &clk1); clk_register_clkdev(clk, "gen3_syn_clk", NULL); clk_register_clkdev(clk1, "gen3_syn_gclk", NULL); /* clock derived from pll3 clk */ clk = clk_register_gate(NULL, "usbh_clk", "pll3_clk", 0, PERIP1_CLK_ENB, USBH_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "e1800000.ehci"); clk_register_clkdev(clk, NULL, "e1900000.ohci"); clk_register_clkdev(clk, NULL, "e2100000.ohci"); clk = clk_register_fixed_factor(NULL, "usbh.0_clk", "usbh_clk", 0, 
1, 1); clk_register_clkdev(clk, "usbh.0_clk", NULL); clk = clk_register_fixed_factor(NULL, "usbh.1_clk", "usbh_clk", 0, 1, 1); clk_register_clkdev(clk, "usbh.1_clk", NULL); clk = clk_register_gate(NULL, "usbd_clk", "pll3_clk", 0, PERIP1_CLK_ENB, USBD_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "e1100000.usbd"); /* clock derived from ahb clk */ clk = clk_register_fixed_factor(NULL, "ahbmult2_clk", "ahb_clk", 0, 2, 1); clk_register_clkdev(clk, "ahbmult2_clk", NULL); clk = clk_register_mux(NULL, "ddr_clk", ddr_parents, ARRAY_SIZE(ddr_parents), 0, PLL_CLK_CFG, MCTR_CLK_SHIFT, MCTR_CLK_MASK, 0, &_lock); clk_register_clkdev(clk, "ddr_clk", NULL); clk = clk_register_divider(NULL, "apb_clk", "ahb_clk", CLK_SET_RATE_PARENT, CORE_CLK_CFG, PCLK_RATIO_SHIFT, PCLK_RATIO_MASK, 0, &_lock); clk_register_clkdev(clk, "apb_clk", NULL); clk = clk_register_gate(NULL, "amem_clk", "ahb_clk", 0, AMEM_CLK_CFG, AMEM_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "amem_clk", NULL); clk = clk_register_gate(NULL, "c3_clk", "ahb_clk", 0, PERIP1_CLK_ENB, C3_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "c3_clk"); clk = clk_register_gate(NULL, "dma_clk", "ahb_clk", 0, PERIP1_CLK_ENB, DMA_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "fc400000.dma"); clk = clk_register_gate(NULL, "gmac_clk", "ahb_clk", 0, PERIP1_CLK_ENB, GMAC_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "e0800000.eth"); clk = clk_register_gate(NULL, "i2c0_clk", "ahb_clk", 0, PERIP1_CLK_ENB, I2C_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "d0180000.i2c"); clk = clk_register_gate(NULL, "jpeg_clk", "ahb_clk", 0, PERIP1_CLK_ENB, JPEG_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "jpeg"); clk = clk_register_gate(NULL, "smi_clk", "ahb_clk", 0, PERIP1_CLK_ENB, SMI_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "fc000000.flash"); /* clock derived from apb clk */ clk = clk_register_gate(NULL, "adc_clk", "apb_clk", 0, PERIP1_CLK_ENB, ADC_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "d0080000.adc"); clk = clk_register_gate(NULL, "gpio0_clk", "apb_clk", 0, PERIP1_CLK_ENB, GPIO_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "fc980000.gpio"); clk = clk_register_gate(NULL, "ssp0_clk", "apb_clk", 0, PERIP1_CLK_ENB, SSP_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, NULL, "d0100000.spi"); /* RAS clk enable */ clk = clk_register_gate(NULL, "ras_ahb_clk", "ahb_clk", 0, RAS_CLK_ENB, RAS_AHB_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_ahb_clk", NULL); clk = clk_register_gate(NULL, "ras_apb_clk", "apb_clk", 0, RAS_CLK_ENB, RAS_APB_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_apb_clk", NULL); clk = clk_register_gate(NULL, "ras_32k_clk", "osc_32k_clk", 0, RAS_CLK_ENB, RAS_32K_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_32k_clk", NULL); clk = clk_register_gate(NULL, "ras_24m_clk", "osc_24m_clk", 0, RAS_CLK_ENB, RAS_24M_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_24m_clk", NULL); clk = clk_register_gate(NULL, "ras_pll1_clk", "pll1_clk", 0, RAS_CLK_ENB, RAS_PLL1_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_pll1_clk", NULL); clk = clk_register_gate(NULL, "ras_pll2_clk", "pll2_clk", 0, RAS_CLK_ENB, RAS_PLL2_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_pll2_clk", NULL); clk = clk_register_gate(NULL, "ras_pll3_clk", "pll3_clk", 0, RAS_CLK_ENB, RAS_48M_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_pll3_clk", NULL); clk = clk_register_gate(NULL, "ras_syn0_gclk", "gen0_syn_gclk", CLK_SET_RATE_PARENT, RAS_CLK_ENB, RAS_SYNT0_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_syn0_gclk", 
NULL); clk = clk_register_gate(NULL, "ras_syn1_gclk", "gen1_syn_gclk", CLK_SET_RATE_PARENT, RAS_CLK_ENB, RAS_SYNT1_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_syn1_gclk", NULL); clk = clk_register_gate(NULL, "ras_syn2_gclk", "gen2_syn_gclk", CLK_SET_RATE_PARENT, RAS_CLK_ENB, RAS_SYNT2_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_syn2_gclk", NULL); clk = clk_register_gate(NULL, "ras_syn3_gclk", "gen3_syn_gclk", CLK_SET_RATE_PARENT, RAS_CLK_ENB, RAS_SYNT3_CLK_ENB, 0, &_lock); clk_register_clkdev(clk, "ras_syn3_gclk", NULL); if (of_machine_is_compatible("st,spear300")) spear300_clk_init(); else if (of_machine_is_compatible("st,spear310")) spear310_clk_init(); else if (of_machine_is_compatible("st,spear320")) spear320_clk_init(soc_config_base); }
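/*
 * Editor's note: an illustrative user-space sketch of how rates propagate
 * through the fixed-rate and fixed-factor parts of a clock tree like the
 * one registered above (osc -> pll -> ahb -> apb). struct fclk and the
 * sample mult/div ratios are invented for the demonstration; in the real
 * driver the ratios come from the misc registers read at boot.
 */
#include <stdio.h>

struct fclk {
	const char *name;
	const struct fclk *parent;	/* NULL for a root oscillator */
	unsigned long rate;		/* used only when parent == NULL */
	unsigned int mult, div;		/* fixed-factor scaling of parent */
};

/* Walk up to the root oscillator, then scale back down the chain. */
static unsigned long fclk_rate(const struct fclk *c)
{
	if (!c->parent)
		return c->rate;
	return fclk_rate(c->parent) / c->div * c->mult;
}

int main(void)
{
	/* Sample tree: 24 MHz osc, x11 PLL, /2 AHB divider, /2 APB divider. */
	struct fclk osc  = { "osc_24m_clk", NULL,   24000000, 1, 1 };
	struct fclk pll1 = { "pll1_clk",    &osc,   0, 11, 1 };
	struct fclk ahb  = { "ahb_clk",     &pll1,  0, 1, 2 };
	struct fclk apb  = { "apb_clk",     &ahb,   0, 1, 2 };

	printf("%s = %lu Hz\n", apb.name, fclk_rate(&apb));
	return 0;
}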
gpl-2.0
loongson-community/preempt-rt-linux
drivers/leds/leds-h1940.c
1845
3771
/* * drivers/leds/leds-h1940.c * Copyright (c) Arnaud Patard <arnaud.patard@rtp-net.org> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * * H1940 leds driver * */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/ctype.h> #include <linux/leds.h> #include <linux/gpio.h> #include <mach/regs-gpio.h> #include <mach/hardware.h> #include <mach/h1940-latch.h> /* * Green led. */ static void h1940_greenled_set(struct led_classdev *led_dev, enum led_brightness value) { switch (value) { case LED_HALF: h1940_latch_control(0, H1940_LATCH_LED_FLASH); s3c2410_gpio_setpin(S3C2410_GPA7, 1); break; case LED_FULL: h1940_latch_control(0, H1940_LATCH_LED_GREEN); s3c2410_gpio_setpin(S3C2410_GPA7, 1); break; default: case LED_OFF: h1940_latch_control(H1940_LATCH_LED_FLASH, 0); h1940_latch_control(H1940_LATCH_LED_GREEN, 0); s3c2410_gpio_setpin(S3C2410_GPA7, 0); break; } } static struct led_classdev h1940_greenled = { .name = "h1940:green", .brightness_set = h1940_greenled_set, .default_trigger = "h1940-charger", }; /* * Red led. */ static void h1940_redled_set(struct led_classdev *led_dev, enum led_brightness value) { switch (value) { case LED_HALF: h1940_latch_control(0, H1940_LATCH_LED_FLASH); s3c2410_gpio_setpin(S3C2410_GPA1, 1); break; case LED_FULL: h1940_latch_control(0, H1940_LATCH_LED_RED); s3c2410_gpio_setpin(S3C2410_GPA1, 1); break; default: case LED_OFF: h1940_latch_control(H1940_LATCH_LED_FLASH, 0); h1940_latch_control(H1940_LATCH_LED_RED, 0); s3c2410_gpio_setpin(S3C2410_GPA1, 0); break; } } static struct led_classdev h1940_redled = { .name = "h1940:red", .brightness_set = h1940_redled_set, .default_trigger = "h1940-charger", }; /* * Blue led. 
* (it can only be blue flashing led) */ static void h1940_blueled_set(struct led_classdev *led_dev, enum led_brightness value) { if (value) { /* flashing Blue */ h1940_latch_control(0, H1940_LATCH_LED_FLASH); s3c2410_gpio_setpin(S3C2410_GPA3, 1); } else { h1940_latch_control(H1940_LATCH_LED_FLASH, 0); s3c2410_gpio_setpin(S3C2410_GPA3, 0); } } static struct led_classdev h1940_blueled = { .name = "h1940:blue", .brightness_set = h1940_blueled_set, .default_trigger = "h1940-bluetooth", }; static int __devinit h1940leds_probe(struct platform_device *pdev) { int ret; ret = led_classdev_register(&pdev->dev, &h1940_greenled); if (ret) goto err_green; ret = led_classdev_register(&pdev->dev, &h1940_redled); if (ret) goto err_red; ret = led_classdev_register(&pdev->dev, &h1940_blueled); if (ret) goto err_blue; return 0; err_blue: led_classdev_unregister(&h1940_redled); err_red: led_classdev_unregister(&h1940_greenled); err_green: return ret; } static int h1940leds_remove(struct platform_device *pdev) { led_classdev_unregister(&h1940_greenled); led_classdev_unregister(&h1940_redled); led_classdev_unregister(&h1940_blueled); return 0; } static struct platform_driver h1940leds_driver = { .driver = { .name = "h1940-leds", .owner = THIS_MODULE, }, .probe = h1940leds_probe, .remove = h1940leds_remove, }; static int __init h1940leds_init(void) { return platform_driver_register(&h1940leds_driver); } static void __exit h1940leds_exit(void) { platform_driver_unregister(&h1940leds_driver); } module_init(h1940leds_init); module_exit(h1940leds_exit); MODULE_AUTHOR("Arnaud Patard <arnaud.patard@rtp-net.org>"); MODULE_DESCRIPTION("LED driver for the iPAQ H1940"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:h1940-leds");
gpl-2.0
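The classdevs above only name their triggers (.default_trigger = "h1940-charger" and "h1940-bluetooth"); the trigger itself has to be registered by whichever driver owns the event source. Below is a minimal sketch of that provider side using the simple-trigger API from <linux/leds.h>. The surrounding charger driver and both function names are hypothetical; only the trigger string matches the driver above.

#include <linux/leds.h>
#include <linux/types.h>

DEFINE_LED_TRIGGER(h1940_charger_trig);

/* Registering this name is what lets "h1940:green" and "h1940:red"
 * above attach to the trigger by default. */
static void h1940_charger_trigger_init(void)
{
	led_trigger_register_simple("h1940-charger", &h1940_charger_trig);
}

/* Called from the (assumed) charging-state event path: LED_FULL drives the
 * brightness_set callbacks above at full brightness, LED_OFF turns them off. */
static void h1940_charger_state_changed(bool charging)
{
	led_trigger_event(h1940_charger_trig, charging ? LED_FULL : LED_OFF);
}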
jrfastab/Linux-Kernel-QOS
drivers/hwmon/tmp102.c
2101
8179
/* Texas Instruments TMP102 SMBus temperature sensor driver * * Copyright (C) 2010 Steven King <sfking@fdwdc.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/err.h> #include <linux/mutex.h> #include <linux/device.h> #include <linux/jiffies.h> #define DRIVER_NAME "tmp102" #define TMP102_TEMP_REG 0x00 #define TMP102_CONF_REG 0x01 /* note: these bit definitions are byte swapped */ #define TMP102_CONF_SD 0x0100 #define TMP102_CONF_TM 0x0200 #define TMP102_CONF_POL 0x0400 #define TMP102_CONF_F0 0x0800 #define TMP102_CONF_F1 0x1000 #define TMP102_CONF_R0 0x2000 #define TMP102_CONF_R1 0x4000 #define TMP102_CONF_OS 0x8000 #define TMP102_CONF_EM 0x0010 #define TMP102_CONF_AL 0x0020 #define TMP102_CONF_CR0 0x0040 #define TMP102_CONF_CR1 0x0080 #define TMP102_TLOW_REG 0x02 #define TMP102_THIGH_REG 0x03 struct tmp102 { struct device *hwmon_dev; struct mutex lock; u16 config_orig; unsigned long last_update; int temp[3]; }; /* convert left adjusted 13-bit TMP102 register value to milliCelsius */ static inline int tmp102_reg_to_mC(s16 val) { return ((val & ~0x01) * 1000) / 128; } /* convert milliCelsius to left adjusted 13-bit TMP102 register value */ static inline u16 tmp102_mC_to_reg(int val) { return (val * 128) / 1000; } static const u8 tmp102_reg[] = { TMP102_TEMP_REG, TMP102_TLOW_REG, TMP102_THIGH_REG, }; static struct tmp102 *tmp102_update_device(struct i2c_client *client) { struct tmp102 *tmp102 = i2c_get_clientdata(client); mutex_lock(&tmp102->lock); if (time_after(jiffies, tmp102->last_update + HZ / 3)) { int i; for (i = 0; i < ARRAY_SIZE(tmp102->temp); ++i) { int status = i2c_smbus_read_word_swapped(client, tmp102_reg[i]); if (status > -1) tmp102->temp[i] = tmp102_reg_to_mC(status); } tmp102->last_update = jiffies; } mutex_unlock(&tmp102->lock); return tmp102; } static ssize_t tmp102_show_temp(struct device *dev, struct device_attribute *attr, char *buf) { struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); struct tmp102 *tmp102 = tmp102_update_device(to_i2c_client(dev)); return sprintf(buf, "%d\n", tmp102->temp[sda->index]); } static ssize_t tmp102_set_temp(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct sensor_device_attribute *sda = to_sensor_dev_attr(attr); struct i2c_client *client = to_i2c_client(dev); struct tmp102 *tmp102 = i2c_get_clientdata(client); long val; int status; if (kstrtol(buf, 10, &val) < 0) return -EINVAL; val = clamp_val(val, -256000, 255000); mutex_lock(&tmp102->lock); tmp102->temp[sda->index] = val; status = i2c_smbus_write_word_swapped(client, tmp102_reg[sda->index], tmp102_mC_to_reg(val)); mutex_unlock(&tmp102->lock); return status ? 
: count; } static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, tmp102_show_temp, NULL , 0); static SENSOR_DEVICE_ATTR(temp1_max_hyst, S_IWUSR | S_IRUGO, tmp102_show_temp, tmp102_set_temp, 1); static SENSOR_DEVICE_ATTR(temp1_max, S_IWUSR | S_IRUGO, tmp102_show_temp, tmp102_set_temp, 2); static struct attribute *tmp102_attributes[] = { &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_max_hyst.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, NULL }; static const struct attribute_group tmp102_attr_group = { .attrs = tmp102_attributes, }; #define TMP102_CONFIG (TMP102_CONF_TM | TMP102_CONF_EM | TMP102_CONF_CR1) #define TMP102_CONFIG_RD_ONLY (TMP102_CONF_R0 | TMP102_CONF_R1 | TMP102_CONF_AL) static int tmp102_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct tmp102 *tmp102; int status; if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_WORD_DATA)) { dev_err(&client->dev, "adapter doesn't support SMBus word transactions\n"); return -ENODEV; } tmp102 = devm_kzalloc(&client->dev, sizeof(*tmp102), GFP_KERNEL); if (!tmp102) return -ENOMEM; i2c_set_clientdata(client, tmp102); status = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG); if (status < 0) { dev_err(&client->dev, "error reading config register\n"); return status; } tmp102->config_orig = status; status = i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, TMP102_CONFIG); if (status < 0) { dev_err(&client->dev, "error writing config register\n"); goto fail_restore_config; } status = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG); if (status < 0) { dev_err(&client->dev, "error reading config register\n"); goto fail_restore_config; } status &= ~TMP102_CONFIG_RD_ONLY; if (status != TMP102_CONFIG) { dev_err(&client->dev, "config settings did not stick\n"); status = -ENODEV; goto fail_restore_config; } tmp102->last_update = jiffies - HZ; mutex_init(&tmp102->lock); status = sysfs_create_group(&client->dev.kobj, &tmp102_attr_group); if (status) { dev_dbg(&client->dev, "could not create sysfs files\n"); goto fail_restore_config; } tmp102->hwmon_dev = hwmon_device_register(&client->dev); if (IS_ERR(tmp102->hwmon_dev)) { dev_dbg(&client->dev, "unable to register hwmon device\n"); status = PTR_ERR(tmp102->hwmon_dev); goto fail_remove_sysfs; } dev_info(&client->dev, "initialized\n"); return 0; fail_remove_sysfs: sysfs_remove_group(&client->dev.kobj, &tmp102_attr_group); fail_restore_config: i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, tmp102->config_orig); return status; } static int tmp102_remove(struct i2c_client *client) { struct tmp102 *tmp102 = i2c_get_clientdata(client); hwmon_device_unregister(tmp102->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &tmp102_attr_group); /* Stop monitoring if device was stopped originally */ if (tmp102->config_orig & TMP102_CONF_SD) { int config; config = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG); if (config >= 0) i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config | TMP102_CONF_SD); } return 0; } #ifdef CONFIG_PM static int tmp102_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); int config; config = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG); if (config < 0) return config; config |= TMP102_CONF_SD; return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config); } static int tmp102_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); int config; config = i2c_smbus_read_word_swapped(client, TMP102_CONF_REG); if (config < 0) return config; config &= 
~TMP102_CONF_SD; return i2c_smbus_write_word_swapped(client, TMP102_CONF_REG, config); } static const struct dev_pm_ops tmp102_dev_pm_ops = { .suspend = tmp102_suspend, .resume = tmp102_resume, }; #define TMP102_DEV_PM_OPS (&tmp102_dev_pm_ops) #else #define TMP102_DEV_PM_OPS NULL #endif /* CONFIG_PM */ static const struct i2c_device_id tmp102_id[] = { { "tmp102", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, tmp102_id); static struct i2c_driver tmp102_driver = { .driver.name = DRIVER_NAME, .driver.pm = TMP102_DEV_PM_OPS, .probe = tmp102_probe, .remove = tmp102_remove, .id_table = tmp102_id, }; module_i2c_driver(tmp102_driver); MODULE_AUTHOR("Steven King <sfking@fdwdc.com>"); MODULE_DESCRIPTION("Texas Instruments TMP102 temperature sensor driver"); MODULE_LICENSE("GPL");
gpl-2.0
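tmp102_reg_to_mC() and tmp102_mC_to_reg() above reduce the TMP102's left-adjusted 13-bit register format (0.0625 °C per LSB, extended mode enabled via TMP102_CONF_EM) to a single *1000/128 scaling. Below is a small userspace round-trip check of that arithmetic; the helpers are copied from the driver and the test values are illustrative.

#include <assert.h>
#include <stdint.h>

typedef int16_t s16;
typedef uint16_t u16;

/* Same math as the driver's conversion helpers above. */
static int reg_to_mC(s16 val) { return ((val & ~0x01) * 1000) / 128; }
static u16 mC_to_reg(int val) { return (val * 128) / 1000; }

int main(void)
{
	/* 25 C = 400 steps of 0.0625 C; left-adjusted by 3 bits -> 0x0c80 */
	assert(mC_to_reg(25000) == 0x0c80);
	assert(reg_to_mC(0x0c80) == 25000);
	/* negative readings rely on s16 sign extension */
	assert(reg_to_mC(-3200) == -25000);
	return 0;
}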
ResurrectionRemix-Devices/android_kernel_samsung_smdk4412
drivers/media/dvb/frontends/dib8000.c
2357
86997
/* * Linux-DVB Driver for DiBcom's DiB8000 chip (ISDB-T). * * Copyright (C) 2009 DiBcom (http://www.dibcom.fr/) * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation, version 2. */ #include <linux/kernel.h> #include <linux/slab.h> #include <linux/i2c.h> #include <linux/mutex.h> #include "dvb_math.h" #include "dvb_frontend.h" #include "dib8000.h" #define LAYER_ALL -1 #define LAYER_A 1 #define LAYER_B 2 #define LAYER_C 3 #define FE_CALLBACK_TIME_NEVER 0xffffffff #define MAX_NUMBER_OF_FRONTENDS 6 static int debug; module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "turn on debugging (default: 0)"); #define dprintk(args...) do { if (debug) { printk(KERN_DEBUG "DiB8000: "); printk(args); printk("\n"); } } while (0) #define FE_STATUS_TUNE_FAILED 0 struct i2c_device { struct i2c_adapter *adap; u8 addr; u8 *i2c_write_buffer; u8 *i2c_read_buffer; struct mutex *i2c_buffer_lock; }; struct dib8000_state { struct dib8000_config cfg; struct i2c_device i2c; struct dibx000_i2c_master i2c_master; u16 wbd_ref; u8 current_band; u32 current_bandwidth; struct dibx000_agc_config *current_agc; u32 timf; u32 timf_default; u8 div_force_off:1; u8 div_state:1; u16 div_sync_wait; u8 agc_state; u8 differential_constellation; u8 diversity_onoff; s16 ber_monitored_layer; u16 gpio_dir; u16 gpio_val; u16 revision; u8 isdbt_cfg_loaded; enum frontend_tune_state tune_state; u32 status; struct dvb_frontend *fe[MAX_NUMBER_OF_FRONTENDS]; /* for the I2C transfer */ struct i2c_msg msg[2]; u8 i2c_write_buffer[4]; u8 i2c_read_buffer[2]; struct mutex i2c_buffer_lock; }; enum dib8000_power_mode { DIB8000M_POWER_ALL = 0, DIB8000M_POWER_INTERFACE_ONLY, }; static u16 dib8000_i2c_read16(struct i2c_device *i2c, u16 reg) { u16 ret; struct i2c_msg msg[2] = { {.addr = i2c->addr >> 1, .flags = 0, .len = 2}, {.addr = i2c->addr >> 1, .flags = I2C_M_RD, .len = 2}, }; if (mutex_lock_interruptible(i2c->i2c_buffer_lock) < 0) { dprintk("could not acquire lock"); return 0; } msg[0].buf = i2c->i2c_write_buffer; msg[0].buf[0] = reg >> 8; msg[0].buf[1] = reg & 0xff; msg[1].buf = i2c->i2c_read_buffer; if (i2c_transfer(i2c->adap, msg, 2) != 2) dprintk("i2c read error on %d", reg); ret = (msg[1].buf[0] << 8) | msg[1].buf[1]; mutex_unlock(i2c->i2c_buffer_lock); return ret; } static u16 dib8000_read_word(struct dib8000_state *state, u16 reg) { u16 ret; if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) { dprintk("could not acquire lock"); return 0; } state->i2c_write_buffer[0] = reg >> 8; state->i2c_write_buffer[1] = reg & 0xff; memset(state->msg, 0, 2 * sizeof(struct i2c_msg)); state->msg[0].addr = state->i2c.addr >> 1; state->msg[0].flags = 0; state->msg[0].buf = state->i2c_write_buffer; state->msg[0].len = 2; state->msg[1].addr = state->i2c.addr >> 1; state->msg[1].flags = I2C_M_RD; state->msg[1].buf = state->i2c_read_buffer; state->msg[1].len = 2; if (i2c_transfer(state->i2c.adap, state->msg, 2) != 2) dprintk("i2c read error on %d", reg); ret = (state->i2c_read_buffer[0] << 8) | state->i2c_read_buffer[1]; mutex_unlock(&state->i2c_buffer_lock); return ret; } static u32 dib8000_read32(struct dib8000_state *state, u16 reg) { u16 rw[2]; rw[0] = dib8000_read_word(state, reg + 0); rw[1] = dib8000_read_word(state, reg + 1); return ((rw[0] << 16) | (rw[1])); } static int dib8000_i2c_write16(struct i2c_device *i2c, u16 reg, u16 val) { struct i2c_msg msg = {.addr = i2c->addr >> 1, .flags = 0, .len = 4}; int ret = 0; if 
(mutex_lock_interruptible(i2c->i2c_buffer_lock) < 0) { dprintk("could not acquire lock"); return -EINVAL; } msg.buf = i2c->i2c_write_buffer; msg.buf[0] = (reg >> 8) & 0xff; msg.buf[1] = reg & 0xff; msg.buf[2] = (val >> 8) & 0xff; msg.buf[3] = val & 0xff; ret = i2c_transfer(i2c->adap, &msg, 1) != 1 ? -EREMOTEIO : 0; mutex_unlock(i2c->i2c_buffer_lock); return ret; } static int dib8000_write_word(struct dib8000_state *state, u16 reg, u16 val) { int ret; if (mutex_lock_interruptible(&state->i2c_buffer_lock) < 0) { dprintk("could not acquire lock"); return -EINVAL; } state->i2c_write_buffer[0] = (reg >> 8) & 0xff; state->i2c_write_buffer[1] = reg & 0xff; state->i2c_write_buffer[2] = (val >> 8) & 0xff; state->i2c_write_buffer[3] = val & 0xff; memset(&state->msg[0], 0, sizeof(struct i2c_msg)); state->msg[0].addr = state->i2c.addr >> 1; state->msg[0].flags = 0; state->msg[0].buf = state->i2c_write_buffer; state->msg[0].len = 4; ret = (i2c_transfer(state->i2c.adap, state->msg, 1) != 1 ? -EREMOTEIO : 0); mutex_unlock(&state->i2c_buffer_lock); return ret; } static const s16 coeff_2k_sb_1seg_dqpsk[8] = { (769 << 5) | 0x0a, (745 << 5) | 0x03, (595 << 5) | 0x0d, (769 << 5) | 0x0a, (920 << 5) | 0x09, (784 << 5) | 0x02, (519 << 5) | 0x0c, (920 << 5) | 0x09 }; static const s16 coeff_2k_sb_1seg[8] = { (692 << 5) | 0x0b, (683 << 5) | 0x01, (519 << 5) | 0x09, (692 << 5) | 0x0b, 0 | 0x1f, 0 | 0x1f, 0 | 0x1f, 0 | 0x1f }; static const s16 coeff_2k_sb_3seg_0dqpsk_1dqpsk[8] = { (832 << 5) | 0x10, (912 << 5) | 0x05, (900 << 5) | 0x12, (832 << 5) | 0x10, (-931 << 5) | 0x0f, (912 << 5) | 0x04, (807 << 5) | 0x11, (-931 << 5) | 0x0f }; static const s16 coeff_2k_sb_3seg_0dqpsk[8] = { (622 << 5) | 0x0c, (941 << 5) | 0x04, (796 << 5) | 0x10, (622 << 5) | 0x0c, (982 << 5) | 0x0c, (519 << 5) | 0x02, (572 << 5) | 0x0e, (982 << 5) | 0x0c }; static const s16 coeff_2k_sb_3seg_1dqpsk[8] = { (699 << 5) | 0x14, (607 << 5) | 0x04, (944 << 5) | 0x13, (699 << 5) | 0x14, (-720 << 5) | 0x0d, (640 << 5) | 0x03, (866 << 5) | 0x12, (-720 << 5) | 0x0d }; static const s16 coeff_2k_sb_3seg[8] = { (664 << 5) | 0x0c, (925 << 5) | 0x03, (937 << 5) | 0x10, (664 << 5) | 0x0c, (-610 << 5) | 0x0a, (697 << 5) | 0x01, (836 << 5) | 0x0e, (-610 << 5) | 0x0a }; static const s16 coeff_4k_sb_1seg_dqpsk[8] = { (-955 << 5) | 0x0e, (687 << 5) | 0x04, (818 << 5) | 0x10, (-955 << 5) | 0x0e, (-922 << 5) | 0x0d, (750 << 5) | 0x03, (665 << 5) | 0x0f, (-922 << 5) | 0x0d }; static const s16 coeff_4k_sb_1seg[8] = { (638 << 5) | 0x0d, (683 << 5) | 0x02, (638 << 5) | 0x0d, (638 << 5) | 0x0d, (-655 << 5) | 0x0a, (517 << 5) | 0x00, (698 << 5) | 0x0d, (-655 << 5) | 0x0a }; static const s16 coeff_4k_sb_3seg_0dqpsk_1dqpsk[8] = { (-707 << 5) | 0x14, (910 << 5) | 0x06, (889 << 5) | 0x16, (-707 << 5) | 0x14, (-958 << 5) | 0x13, (993 << 5) | 0x05, (523 << 5) | 0x14, (-958 << 5) | 0x13 }; static const s16 coeff_4k_sb_3seg_0dqpsk[8] = { (-723 << 5) | 0x13, (910 << 5) | 0x05, (777 << 5) | 0x14, (-723 << 5) | 0x13, (-568 << 5) | 0x0f, (547 << 5) | 0x03, (696 << 5) | 0x12, (-568 << 5) | 0x0f }; static const s16 coeff_4k_sb_3seg_1dqpsk[8] = { (-940 << 5) | 0x15, (607 << 5) | 0x05, (915 << 5) | 0x16, (-940 << 5) | 0x15, (-848 << 5) | 0x13, (683 << 5) | 0x04, (543 << 5) | 0x14, (-848 << 5) | 0x13 }; static const s16 coeff_4k_sb_3seg[8] = { (612 << 5) | 0x12, (910 << 5) | 0x04, (864 << 5) | 0x14, (612 << 5) | 0x12, (-869 << 5) | 0x13, (683 << 5) | 0x02, (869 << 5) | 0x12, (-869 << 5) | 0x13 }; static const s16 coeff_8k_sb_1seg_dqpsk[8] = { (-835 << 5) | 0x12, (684 << 5) | 0x05, (735 << 
5) | 0x14, (-835 << 5) | 0x12, (-598 << 5) | 0x10, (781 << 5) | 0x04, (739 << 5) | 0x13, (-598 << 5) | 0x10 }; static const s16 coeff_8k_sb_1seg[8] = { (673 << 5) | 0x0f, (683 << 5) | 0x03, (808 << 5) | 0x12, (673 << 5) | 0x0f, (585 << 5) | 0x0f, (512 << 5) | 0x01, (780 << 5) | 0x0f, (585 << 5) | 0x0f }; static const s16 coeff_8k_sb_3seg_0dqpsk_1dqpsk[8] = { (863 << 5) | 0x17, (930 << 5) | 0x07, (878 << 5) | 0x19, (863 << 5) | 0x17, (0 << 5) | 0x14, (521 << 5) | 0x05, (980 << 5) | 0x18, (0 << 5) | 0x14 }; static const s16 coeff_8k_sb_3seg_0dqpsk[8] = { (-924 << 5) | 0x17, (910 << 5) | 0x06, (774 << 5) | 0x17, (-924 << 5) | 0x17, (-877 << 5) | 0x15, (565 << 5) | 0x04, (553 << 5) | 0x15, (-877 << 5) | 0x15 }; static const s16 coeff_8k_sb_3seg_1dqpsk[8] = { (-921 << 5) | 0x19, (607 << 5) | 0x06, (881 << 5) | 0x19, (-921 << 5) | 0x19, (-921 << 5) | 0x14, (713 << 5) | 0x05, (1018 << 5) | 0x18, (-921 << 5) | 0x14 }; static const s16 coeff_8k_sb_3seg[8] = { (514 << 5) | 0x14, (910 << 5) | 0x05, (861 << 5) | 0x17, (514 << 5) | 0x14, (690 << 5) | 0x14, (683 << 5) | 0x03, (662 << 5) | 0x15, (690 << 5) | 0x14 }; static const s16 ana_fe_coeff_3seg[24] = { 81, 80, 78, 74, 68, 61, 54, 45, 37, 28, 19, 11, 4, 1022, 1017, 1013, 1010, 1008, 1008, 1008, 1008, 1010, 1014, 1017 }; static const s16 ana_fe_coeff_1seg[24] = { 249, 226, 164, 82, 5, 981, 970, 988, 1018, 20, 31, 26, 8, 1012, 1000, 1018, 1012, 8, 15, 14, 9, 3, 1017, 1003 }; static const s16 ana_fe_coeff_13seg[24] = { 396, 305, 105, -51, -77, -12, 41, 31, -11, -30, -11, 14, 15, -2, -13, -7, 5, 8, 1, -6, -7, -3, 0, 1 }; static u16 fft_to_mode(struct dib8000_state *state) { u16 mode; switch (state->fe[0]->dtv_property_cache.transmission_mode) { case TRANSMISSION_MODE_2K: mode = 1; break; case TRANSMISSION_MODE_4K: mode = 2; break; default: case TRANSMISSION_MODE_AUTO: case TRANSMISSION_MODE_8K: mode = 3; break; } return mode; } static void dib8000_set_acquisition_mode(struct dib8000_state *state) { u16 nud = dib8000_read_word(state, 298); nud |= (1 << 3) | (1 << 0); dprintk("acquisition mode activated"); dib8000_write_word(state, 298, nud); } static int dib8000_set_output_mode(struct dvb_frontend *fe, int mode) { struct dib8000_state *state = fe->demodulator_priv; u16 outreg, fifo_threshold, smo_mode, sram = 0x0205; /* by default SDRAM deintlv is enabled */ outreg = 0; fifo_threshold = 1792; smo_mode = (dib8000_read_word(state, 299) & 0x0050) | (1 << 1); dprintk("-I- Setting output mode for demod %p to %d", &state->fe[0], mode); switch (mode) { case OUTMODE_MPEG2_PAR_GATED_CLK: // STBs with parallel gated clock outreg = (1 << 10); /* 0x0400 */ break; case OUTMODE_MPEG2_PAR_CONT_CLK: // STBs with parallel continues clock outreg = (1 << 10) | (1 << 6); /* 0x0440 */ break; case OUTMODE_MPEG2_SERIAL: // STBs with serial input outreg = (1 << 10) | (2 << 6) | (0 << 1); /* 0x0482 */ break; case OUTMODE_DIVERSITY: if (state->cfg.hostbus_diversity) { outreg = (1 << 10) | (4 << 6); /* 0x0500 */ sram &= 0xfdff; } else sram |= 0x0c00; break; case OUTMODE_MPEG2_FIFO: // e.g. 
USB feeding smo_mode |= (3 << 1); fifo_threshold = 512; outreg = (1 << 10) | (5 << 6); break; case OUTMODE_HIGH_Z: // disable outreg = 0; break; case OUTMODE_ANALOG_ADC: outreg = (1 << 10) | (3 << 6); dib8000_set_acquisition_mode(state); break; default: dprintk("Unhandled output_mode passed to be set for demod %p", &state->fe[0]); return -EINVAL; } if (state->cfg.output_mpeg2_in_188_bytes) smo_mode |= (1 << 5); dib8000_write_word(state, 299, smo_mode); dib8000_write_word(state, 300, fifo_threshold); /* synchronous fread */ dib8000_write_word(state, 1286, outreg); dib8000_write_word(state, 1291, sram); return 0; } static int dib8000_set_diversity_in(struct dvb_frontend *fe, int onoff) { struct dib8000_state *state = fe->demodulator_priv; u16 sync_wait = dib8000_read_word(state, 273) & 0xfff0; if (!state->differential_constellation) { dib8000_write_word(state, 272, 1 << 9); //dvsy_off_lmod4 = 1 dib8000_write_word(state, 273, sync_wait | (1 << 2) | 2); // sync_enable = 1; comb_mode = 2 } else { dib8000_write_word(state, 272, 0); //dvsy_off_lmod4 = 0 dib8000_write_word(state, 273, sync_wait); // sync_enable = 0; comb_mode = 0 } state->diversity_onoff = onoff; switch (onoff) { case 0: /* only use the internal way - not the diversity input */ dib8000_write_word(state, 270, 1); dib8000_write_word(state, 271, 0); break; case 1: /* both ways */ dib8000_write_word(state, 270, 6); dib8000_write_word(state, 271, 6); break; case 2: /* only the diversity input */ dib8000_write_word(state, 270, 0); dib8000_write_word(state, 271, 1); break; } return 0; } static void dib8000_set_power_mode(struct dib8000_state *state, enum dib8000_power_mode mode) { /* by default everything is going to be powered off */ u16 reg_774 = 0x3fff, reg_775 = 0xffff, reg_776 = 0xffff, reg_900 = (dib8000_read_word(state, 900) & 0xfffc) | 0x3, reg_1280 = (dib8000_read_word(state, 1280) & 0x00ff) | 0xff00; /* now, depending on the requested mode, we power on */ switch (mode) { /* power up everything in the demod */ case DIB8000M_POWER_ALL: reg_774 = 0x0000; reg_775 = 0x0000; reg_776 = 0x0000; reg_900 &= 0xfffc; reg_1280 &= 0x00ff; break; case DIB8000M_POWER_INTERFACE_ONLY: reg_1280 &= 0x00ff; break; } dprintk("powermode : 774 : %x ; 775 : %x; 776 : %x ; 900 : %x; 1280 : %x", reg_774, reg_775, reg_776, reg_900, reg_1280); dib8000_write_word(state, 774, reg_774); dib8000_write_word(state, 775, reg_775); dib8000_write_word(state, 776, reg_776); dib8000_write_word(state, 900, reg_900); dib8000_write_word(state, 1280, reg_1280); } static int dib8000_set_adc_state(struct dib8000_state *state, enum dibx000_adc_states no) { int ret = 0; u16 reg_907 = dib8000_read_word(state, 907), reg_908 = dib8000_read_word(state, 908); switch (no) { case DIBX000_SLOW_ADC_ON: reg_908 |= (1 << 1) | (1 << 0); ret |= dib8000_write_word(state, 908, reg_908); reg_908 &= ~(1 << 1); break; case DIBX000_SLOW_ADC_OFF: reg_908 |= (1 << 1) | (1 << 0); break; case DIBX000_ADC_ON: reg_907 &= 0x0fff; reg_908 &= 0x0003; break; case DIBX000_ADC_OFF: // leave the VBG voltage on reg_907 |= (1 << 14) | (1 << 13) | (1 << 12); reg_908 |= (1 << 5) | (1 << 4) | (1 << 3) | (1 << 2); break; case DIBX000_VBG_ENABLE: reg_907 &= ~(1 << 15); break; case DIBX000_VBG_DISABLE: reg_907 |= (1 << 15); break; default: break; } ret |= dib8000_write_word(state, 907, reg_907); ret |= dib8000_write_word(state, 908, reg_908); return ret; } static int dib8000_set_bandwidth(struct dvb_frontend *fe, u32 bw) { struct dib8000_state *state = fe->demodulator_priv; u32 timf; if (bw == 0) bw = 6000; if 
(state->timf == 0) { dprintk("using default timf"); timf = state->timf_default; } else { dprintk("using updated timf"); timf = state->timf; } dib8000_write_word(state, 29, (u16) ((timf >> 16) & 0xffff)); dib8000_write_word(state, 30, (u16) ((timf) & 0xffff)); return 0; } static int dib8000_sad_calib(struct dib8000_state *state) { /* internal */ dib8000_write_word(state, 923, (0 << 1) | (0 << 0)); dib8000_write_word(state, 924, 776); // 0.625*3.3 / 4096 /* do the calibration */ dib8000_write_word(state, 923, (1 << 0)); dib8000_write_word(state, 923, (0 << 0)); msleep(1); return 0; } int dib8000_set_wbd_ref(struct dvb_frontend *fe, u16 value) { struct dib8000_state *state = fe->demodulator_priv; if (value > 4095) value = 4095; state->wbd_ref = value; return dib8000_write_word(state, 106, value); } EXPORT_SYMBOL(dib8000_set_wbd_ref); static void dib8000_reset_pll_common(struct dib8000_state *state, const struct dibx000_bandwidth_config *bw) { dprintk("ifreq: %d %x, inversion: %d", bw->ifreq, bw->ifreq, bw->ifreq >> 25); dib8000_write_word(state, 23, (u16) (((bw->internal * 1000) >> 16) & 0xffff)); /* P_sec_len */ dib8000_write_word(state, 24, (u16) ((bw->internal * 1000) & 0xffff)); dib8000_write_word(state, 27, (u16) ((bw->ifreq >> 16) & 0x01ff)); dib8000_write_word(state, 28, (u16) (bw->ifreq & 0xffff)); dib8000_write_word(state, 26, (u16) ((bw->ifreq >> 25) & 0x0003)); dib8000_write_word(state, 922, bw->sad_cfg); } static void dib8000_reset_pll(struct dib8000_state *state) { const struct dibx000_bandwidth_config *pll = state->cfg.pll; u16 clk_cfg1; // clk_cfg0 dib8000_write_word(state, 901, (pll->pll_prediv << 8) | (pll->pll_ratio << 0)); // clk_cfg1 clk_cfg1 = (1 << 10) | (0 << 9) | (pll->IO_CLK_en_core << 8) | (pll->bypclk_div << 5) | (pll->enable_refdiv << 4) | (1 << 3) | (pll->pll_range << 1) | (pll->pll_reset << 0); dib8000_write_word(state, 902, clk_cfg1); clk_cfg1 = (clk_cfg1 & 0xfff7) | (pll->pll_bypass << 3); dib8000_write_word(state, 902, clk_cfg1); dprintk("clk_cfg1: 0x%04x", clk_cfg1); /* 0x507 1 0 1 000 0 0 11 1 */ /* smpl_cfg: P_refclksel=2, P_ensmplsel=1 nodivsmpl=1 */ if (state->cfg.pll->ADClkSrc == 0) dib8000_write_word(state, 904, (0 << 15) | (0 << 12) | (0 << 10) | (pll->modulo << 8) | (pll->ADClkSrc << 7) | (0 << 1)); else if (state->cfg.refclksel != 0) dib8000_write_word(state, 904, (0 << 15) | (1 << 12) | ((state->cfg.refclksel & 0x3) << 10) | (pll->modulo << 8) | (pll->ADClkSrc << 7) | (0 << 1)); else dib8000_write_word(state, 904, (0 << 15) | (1 << 12) | (3 << 10) | (pll->modulo << 8) | (pll->ADClkSrc << 7) | (0 << 1)); dib8000_reset_pll_common(state, pll); } static int dib8000_reset_gpio(struct dib8000_state *st) { /* reset the GPIOs */ dib8000_write_word(st, 1029, st->cfg.gpio_dir); dib8000_write_word(st, 1030, st->cfg.gpio_val); /* TODO 782 is P_gpio_od */ dib8000_write_word(st, 1032, st->cfg.gpio_pwm_pos); dib8000_write_word(st, 1037, st->cfg.pwm_freq_div); return 0; } static int dib8000_cfg_gpio(struct dib8000_state *st, u8 num, u8 dir, u8 val) { st->cfg.gpio_dir = dib8000_read_word(st, 1029); st->cfg.gpio_dir &= ~(1 << num); /* reset the direction bit */ st->cfg.gpio_dir |= (dir & 0x1) << num; /* set the new direction */ dib8000_write_word(st, 1029, st->cfg.gpio_dir); st->cfg.gpio_val = dib8000_read_word(st, 1030); st->cfg.gpio_val &= ~(1 << num); /* reset the direction bit */ st->cfg.gpio_val |= (val & 0x01) << num; /* set the new value */ dib8000_write_word(st, 1030, st->cfg.gpio_val); dprintk("gpio dir: %x: gpio val: %x", st->cfg.gpio_dir, 
st->cfg.gpio_val); return 0; } int dib8000_set_gpio(struct dvb_frontend *fe, u8 num, u8 dir, u8 val) { struct dib8000_state *state = fe->demodulator_priv; return dib8000_cfg_gpio(state, num, dir, val); } EXPORT_SYMBOL(dib8000_set_gpio); static const u16 dib8000_defaults[] = { /* auto search configuration - lock0 by default waiting * for cpil_lock; lock1 cpil_lock; lock2 tmcc_sync_lock */ 3, 7, 0x0004, 0x0400, 0x0814, 12, 11, 0x001b, 0x7740, 0x005b, 0x8d80, 0x01c9, 0xc380, 0x0000, 0x0080, 0x0000, 0x0090, 0x0001, 0xd4c0, /*1, 32, 0x6680 // P_corm_thres Lock algorithms configuration */ 11, 80, /* set ADC level to -16 */ (1 << 13) - 825 - 117, (1 << 13) - 837 - 117, (1 << 13) - 811 - 117, (1 << 13) - 766 - 117, (1 << 13) - 737 - 117, (1 << 13) - 693 - 117, (1 << 13) - 648 - 117, (1 << 13) - 619 - 117, (1 << 13) - 575 - 117, (1 << 13) - 531 - 117, (1 << 13) - 501 - 117, 4, 108, 0, 0, 0, 0, 1, 175, 0x0410, 1, 179, 8192, // P_fft_nb_to_cut 6, 181, 0x2800, // P_coff_corthres_ ( 2k 4k 8k ) 0x2800 0x2800, 0x2800, 0x2800, // P_coff_cpilthres_ ( 2k 4k 8k ) 0x2800 0x2800, 0x2800, 2, 193, 0x0666, // P_pha3_thres 0x0000, // P_cti_use_cpe, P_cti_use_prog 2, 205, 0x200f, // P_cspu_regul, P_cspu_win_cut 0x000f, // P_des_shift_work 5, 215, 0x023d, // P_adp_regul_cnt 0x00a4, // P_adp_noise_cnt 0x00a4, // P_adp_regul_ext 0x7ff0, // P_adp_noise_ext 0x3ccc, // P_adp_fil 1, 230, 0x0000, // P_2d_byp_ti_num 1, 263, 0x800, //P_equal_thres_wgn 1, 268, (2 << 9) | 39, // P_equal_ctrl_synchro, P_equal_speedmode 1, 270, 0x0001, // P_div_lock0_wait 1, 285, 0x0020, //p_fec_ 1, 299, 0x0062, /* P_smo_mode, P_smo_rs_discard, P_smo_fifo_flush, P_smo_pid_parse, P_smo_error_discard */ 1, 338, (1 << 12) | // P_ctrl_corm_thres4pre_freq_inh=1 (1 << 10) | (0 << 9) | /* P_ctrl_pre_freq_inh=0 */ (3 << 5) | /* P_ctrl_pre_freq_step=3 */ (1 << 0), /* P_pre_freq_win_len=1 */ 1, 903, (0 << 4) | 2, // P_divclksel=0 P_divbitsel=2 (was clk=3,bit=1 for MPW) 0, }; static u16 dib8000_identify(struct i2c_device *client) { u16 value; //because of glitches sometimes value = dib8000_i2c_read16(client, 896); if ((value = dib8000_i2c_read16(client, 896)) != 0x01b3) { dprintk("wrong Vendor ID (read=0x%x)", value); return 0; } value = dib8000_i2c_read16(client, 897); if (value != 0x8000 && value != 0x8001 && value != 0x8002) { dprintk("wrong Device ID (%x)", value); return 0; } switch (value) { case 0x8000: dprintk("found DiB8000A"); break; case 0x8001: dprintk("found DiB8000B"); break; case 0x8002: dprintk("found DiB8000C"); break; } return value; } static int dib8000_reset(struct dvb_frontend *fe) { struct dib8000_state *state = fe->demodulator_priv; dib8000_write_word(state, 1287, 0x0003); /* sram lead in, rdy */ if ((state->revision = dib8000_identify(&state->i2c)) == 0) return -EINVAL; if (state->revision == 0x8000) dprintk("error : dib8000 MA not supported"); dibx000_reset_i2c_master(&state->i2c_master); dib8000_set_power_mode(state, DIB8000M_POWER_ALL); /* always leave the VBG voltage on - it consumes almost nothing but takes a long time to start */ dib8000_set_adc_state(state, DIBX000_VBG_ENABLE); /* restart all parts */ dib8000_write_word(state, 770, 0xffff); dib8000_write_word(state, 771, 0xffff); dib8000_write_word(state, 772, 0xfffc); dib8000_write_word(state, 898, 0x000c); // sad dib8000_write_word(state, 1280, 0x004d); dib8000_write_word(state, 1281, 0x000c); dib8000_write_word(state, 770, 0x0000); dib8000_write_word(state, 771, 0x0000); dib8000_write_word(state, 772, 0x0000); dib8000_write_word(state, 898, 0x0004); // sad 
dib8000_write_word(state, 1280, 0x0000); dib8000_write_word(state, 1281, 0x0000); /* drives */ if (state->cfg.drives) dib8000_write_word(state, 906, state->cfg.drives); else { dprintk("using standard PAD-drive-settings, please adjust settings in config-struct to be optimal."); dib8000_write_word(state, 906, 0x2d98); // min drive SDRAM - not optimal - adjust } dib8000_reset_pll(state); if (dib8000_reset_gpio(state) != 0) dprintk("GPIO reset was not successful."); if (dib8000_set_output_mode(fe, OUTMODE_HIGH_Z) != 0) dprintk("OUTPUT_MODE could not be reset."); state->current_agc = NULL; // P_iqc_alpha_pha, P_iqc_alpha_amp, P_iqc_dcc_alpha, ... /* P_iqc_ca2 = 0; P_iqc_impnc_on = 0; P_iqc_mode = 0; */ if (state->cfg.pll->ifreq == 0) dib8000_write_word(state, 40, 0x0755); /* P_iqc_corr_inh = 0 enable IQcorr block */ else dib8000_write_word(state, 40, 0x1f55); /* P_iqc_corr_inh = 1 disable IQcorr block */ { u16 l = 0, r; const u16 *n; n = dib8000_defaults; l = *n++; while (l) { r = *n++; do { dib8000_write_word(state, r, *n++); r++; } while (--l); l = *n++; } } state->isdbt_cfg_loaded = 0; //div_cfg override for special configs if (state->cfg.div_cfg != 0) dib8000_write_word(state, 903, state->cfg.div_cfg); /* unforce divstr regardless of whether i2c enumeration was done or not */ dib8000_write_word(state, 1285, dib8000_read_word(state, 1285) & ~(1 << 1)); dib8000_set_bandwidth(fe, 6000); dib8000_set_adc_state(state, DIBX000_SLOW_ADC_ON); dib8000_sad_calib(state); dib8000_set_adc_state(state, DIBX000_SLOW_ADC_OFF); dib8000_set_power_mode(state, DIB8000M_POWER_INTERFACE_ONLY); return 0; } static void dib8000_restart_agc(struct dib8000_state *state) { // P_restart_iqc & P_restart_agc dib8000_write_word(state, 770, 0x0a00); dib8000_write_word(state, 770, 0x0000); } static int dib8000_update_lna(struct dib8000_state *state) { u16 dyn_gain; if (state->cfg.update_lna) { // read dyn_gain here (because it is demod-dependent and not tuner) dyn_gain = dib8000_read_word(state, 390); if (state->cfg.update_lna(state->fe[0], dyn_gain)) { dib8000_restart_agc(state); return 1; } } return 0; } static int dib8000_set_agc_config(struct dib8000_state *state, u8 band) { struct dibx000_agc_config *agc = NULL; int i; if (state->current_band == band && state->current_agc != NULL) return 0; state->current_band = band; for (i = 0; i < state->cfg.agc_config_count; i++) if (state->cfg.agc[i].band_caps & band) { agc = &state->cfg.agc[i]; break; } if (agc == NULL) { dprintk("no valid AGC configuration found for band 0x%02x", band); return -EINVAL; } state->current_agc = agc; /* AGC */ dib8000_write_word(state, 76, agc->setup); dib8000_write_word(state, 77, agc->inv_gain); dib8000_write_word(state, 78, agc->time_stabiliz); dib8000_write_word(state, 101, (agc->alpha_level << 12) | agc->thlock); // Demod AGC loop configuration dib8000_write_word(state, 102, (agc->alpha_mant << 5) | agc->alpha_exp); dib8000_write_word(state, 103, (agc->beta_mant << 6) | agc->beta_exp); dprintk("WBD: ref: %d, sel: %d, active: %d, alpha: %d", state->wbd_ref != 0 ?
state->wbd_ref : agc->wbd_ref, agc->wbd_sel, !agc->perform_agc_softsplit, agc->wbd_alpha); /* AGC continued */ if (state->wbd_ref != 0) dib8000_write_word(state, 106, state->wbd_ref); else // use default dib8000_write_word(state, 106, agc->wbd_ref); dib8000_write_word(state, 107, (agc->wbd_alpha << 9) | (agc->perform_agc_softsplit << 8)); dib8000_write_word(state, 108, agc->agc1_max); dib8000_write_word(state, 109, agc->agc1_min); dib8000_write_word(state, 110, agc->agc2_max); dib8000_write_word(state, 111, agc->agc2_min); dib8000_write_word(state, 112, (agc->agc1_pt1 << 8) | agc->agc1_pt2); dib8000_write_word(state, 113, (agc->agc1_slope1 << 8) | agc->agc1_slope2); dib8000_write_word(state, 114, (agc->agc2_pt1 << 8) | agc->agc2_pt2); dib8000_write_word(state, 115, (agc->agc2_slope1 << 8) | agc->agc2_slope2); dib8000_write_word(state, 75, agc->agc1_pt3); dib8000_write_word(state, 923, (dib8000_read_word(state, 923) & 0xffe3) | (agc->wbd_inv << 4) | (agc->wbd_sel << 2)); /*LB : 929 -> 923 */ return 0; } void dib8000_pwm_agc_reset(struct dvb_frontend *fe) { struct dib8000_state *state = fe->demodulator_priv; dib8000_set_adc_state(state, DIBX000_ADC_ON); dib8000_set_agc_config(state, (unsigned char)(BAND_OF_FREQUENCY(fe->dtv_property_cache.frequency / 1000))); } EXPORT_SYMBOL(dib8000_pwm_agc_reset); static int dib8000_agc_soft_split(struct dib8000_state *state) { u16 agc, split_offset; if (!state->current_agc || !state->current_agc->perform_agc_softsplit || state->current_agc->split.max == 0) return FE_CALLBACK_TIME_NEVER; // n_agc_global agc = dib8000_read_word(state, 390); if (agc > state->current_agc->split.min_thres) split_offset = state->current_agc->split.min; else if (agc < state->current_agc->split.max_thres) split_offset = state->current_agc->split.max; else split_offset = state->current_agc->split.max * (agc - state->current_agc->split.min_thres) / (state->current_agc->split.max_thres - state->current_agc->split.min_thres); dprintk("AGC split_offset: %d", split_offset); // P_agc_force_split and P_agc_split_offset dib8000_write_word(state, 107, (dib8000_read_word(state, 107) & 0xff00) | split_offset); return 5000; } static int dib8000_agc_startup(struct dvb_frontend *fe) { struct dib8000_state *state = fe->demodulator_priv; enum frontend_tune_state *tune_state = &state->tune_state; int ret = 0; switch (*tune_state) { case CT_AGC_START: // set power-up level: interf+analog+AGC dib8000_set_adc_state(state, DIBX000_ADC_ON); if (dib8000_set_agc_config(state, (unsigned char)(BAND_OF_FREQUENCY(fe->dtv_property_cache.frequency / 1000))) != 0) { *tune_state = CT_AGC_STOP; state->status = FE_STATUS_TUNE_FAILED; break; } ret = 70; *tune_state = CT_AGC_STEP_0; break; case CT_AGC_STEP_0: //AGC initialization if (state->cfg.agc_control) state->cfg.agc_control(fe, 1); dib8000_restart_agc(state); // wait AGC rough lock time ret = 50; *tune_state = CT_AGC_STEP_1; break; case CT_AGC_STEP_1: // wait AGC accurate lock time ret = 70; if (dib8000_update_lna(state)) // wait only AGC rough lock time ret = 50; else *tune_state = CT_AGC_STEP_2; break; case CT_AGC_STEP_2: dib8000_agc_soft_split(state); if (state->cfg.agc_control) state->cfg.agc_control(fe, 0); *tune_state = CT_AGC_STOP; break; default: ret = dib8000_agc_soft_split(state); break; } return ret; } static const s32 lut_1000ln_mant[] = { 6908, 7003, 7090, 7170, 7244, 7313, 7377, 7438, 7495, 7549, 7600 }; s32 dib8000_get_adc_power(struct dvb_frontend *fe, u8 mode) { struct dib8000_state *state = fe->demodulator_priv; u32 ix = 0, tmp_val = 0, exp = 0,
mant = 0; s32 val; val = dib8000_read32(state, 384); if (mode) { tmp_val = val; while (tmp_val >>= 1) exp++; mant = (val * 1000 / (1<<exp)); ix = (u8)((mant-1000)/100); /* index of the LUT */ val = (lut_1000ln_mant[ix] + 693*(exp-20) - 6908); val = (val*256)/1000; } return val; } EXPORT_SYMBOL(dib8000_get_adc_power); static void dib8000_update_timf(struct dib8000_state *state) { u32 timf = state->timf = dib8000_read32(state, 435); dib8000_write_word(state, 29, (u16) (timf >> 16)); dib8000_write_word(state, 30, (u16) (timf & 0xffff)); dprintk("Updated timing frequency: %d (default: %d)", state->timf, state->timf_default); } static const u16 adc_target_16dB[11] = { (1 << 13) - 825 - 117, (1 << 13) - 837 - 117, (1 << 13) - 811 - 117, (1 << 13) - 766 - 117, (1 << 13) - 737 - 117, (1 << 13) - 693 - 117, (1 << 13) - 648 - 117, (1 << 13) - 619 - 117, (1 << 13) - 575 - 117, (1 << 13) - 531 - 117, (1 << 13) - 501 - 117 }; static const u8 permu_seg[] = { 6, 5, 7, 4, 8, 3, 9, 2, 10, 1, 11, 0, 12 }; static void dib8000_set_channel(struct dib8000_state *state, u8 seq, u8 autosearching) { u16 mode, max_constellation, seg_diff_mask = 0, nbseg_diff = 0; u8 guard, crate, constellation, timeI; u16 i, coeff[4], P_cfr_left_edge = 0, P_cfr_right_edge = 0, seg_mask13 = 0x1fff; // All 13 segments enabled const s16 *ncoeff = NULL, *ana_fe; u16 tmcc_pow = 0; u16 coff_pow = 0x2800; u16 init_prbs = 0xfff; u16 ana_gain = 0; if (state->ber_monitored_layer != LAYER_ALL) dib8000_write_word(state, 285, (dib8000_read_word(state, 285) & 0x60) | state->ber_monitored_layer); else dib8000_write_word(state, 285, dib8000_read_word(state, 285) & 0x60); i = dib8000_read_word(state, 26) & 1; // P_dds_invspec dib8000_write_word(state, 26, state->fe[0]->dtv_property_cache.inversion^i); if (state->fe[0]->dtv_property_cache.isdbt_sb_mode) { //compute new dds_freq for the seg and adjust prbs int seg_offset = state->fe[0]->dtv_property_cache.isdbt_sb_segment_idx - (state->fe[0]->dtv_property_cache.isdbt_sb_segment_count / 2) - (state->fe[0]->dtv_property_cache.isdbt_sb_segment_count % 2); int clk = state->cfg.pll->internal; u32 segtodds = ((u32) (430 << 23) / clk) << 3; // segtodds = SegBW / Fclk * pow(2,26) int dds_offset = seg_offset * segtodds; int new_dds, sub_channel; if ((state->fe[0]->dtv_property_cache.isdbt_sb_segment_count % 2) == 0) dds_offset -= (int)(segtodds / 2); if (state->cfg.pll->ifreq == 0) { if ((state->fe[0]->dtv_property_cache.inversion ^ i) == 0) { dib8000_write_word(state, 26, dib8000_read_word(state, 26) | 1); new_dds = dds_offset; } else new_dds = dds_offset; // We shift tuning frequency if the wanted segment is : // - the segment of center frequency with an odd total number of segments // - the segment to the left of center frequency with an even total number of segments // - the segment to the right of center frequency with an even total number of segments if ((state->fe[0]->dtv_property_cache.delivery_system == SYS_ISDBT) && (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1) && (((state->fe[0]->dtv_property_cache.isdbt_sb_segment_count % 2) && (state->fe[0]->dtv_property_cache.isdbt_sb_segment_idx == ((state->fe[0]->dtv_property_cache.isdbt_sb_segment_count / 2) + 1))) || (((state->fe[0]->dtv_property_cache.isdbt_sb_segment_count % 2) == 0) && (state->fe[0]->dtv_property_cache.isdbt_sb_segment_idx == (state->fe[0]->dtv_property_cache.isdbt_sb_segment_count / 2))) || (((state->fe[0]->dtv_property_cache.isdbt_sb_segment_count % 2) == 0) && (state->fe[0]->dtv_property_cache.isdbt_sb_segment_idx == 
((state->fe[0]->dtv_property_cache.isdbt_sb_segment_count / 2) + 1))) )) { new_dds -= ((u32) (850 << 22) / clk) << 4; // new_dds = 850 (freq shift in KHz) / Fclk * pow(2,26) } } else { if ((state->fe[0]->dtv_property_cache.inversion ^ i) == 0) new_dds = state->cfg.pll->ifreq - dds_offset; else new_dds = state->cfg.pll->ifreq + dds_offset; } dib8000_write_word(state, 27, (u16) ((new_dds >> 16) & 0x01ff)); dib8000_write_word(state, 28, (u16) (new_dds & 0xffff)); if (state->fe[0]->dtv_property_cache.isdbt_sb_segment_count % 2) sub_channel = ((state->fe[0]->dtv_property_cache.isdbt_sb_subchannel + (3 * seg_offset) + 1) % 41) / 3; else sub_channel = ((state->fe[0]->dtv_property_cache.isdbt_sb_subchannel + (3 * seg_offset)) % 41) / 3; sub_channel -= 6; if (state->fe[0]->dtv_property_cache.transmission_mode == TRANSMISSION_MODE_2K || state->fe[0]->dtv_property_cache.transmission_mode == TRANSMISSION_MODE_4K) { dib8000_write_word(state, 219, dib8000_read_word(state, 219) | 0x1); //adp_pass =1 dib8000_write_word(state, 190, dib8000_read_word(state, 190) | (0x1 << 14)); //pha3_force_pha_shift = 1 } else { dib8000_write_word(state, 219, dib8000_read_word(state, 219) & 0xfffe); //adp_pass =0 dib8000_write_word(state, 190, dib8000_read_word(state, 190) & 0xbfff); //pha3_force_pha_shift = 0 } switch (state->fe[0]->dtv_property_cache.transmission_mode) { case TRANSMISSION_MODE_2K: switch (sub_channel) { case -6: init_prbs = 0x0; break; // 41, 0, 1 case -5: init_prbs = 0x423; break; // 02~04 case -4: init_prbs = 0x9; break; // 05~07 case -3: init_prbs = 0x5C7; break; // 08~10 case -2: init_prbs = 0x7A6; break; // 11~13 case -1: init_prbs = 0x3D8; break; // 14~16 case 0: init_prbs = 0x527; break; // 17~19 case 1: init_prbs = 0x7FF; break; // 20~22 case 2: init_prbs = 0x79B; break; // 23~25 case 3: init_prbs = 0x3D6; break; // 26~28 case 4: init_prbs = 0x3A2; break; // 29~31 case 5: init_prbs = 0x53B; break; // 32~34 case 6: init_prbs = 0x2F4; break; // 35~37 default: case 7: init_prbs = 0x213; break; // 38~40 } break; case TRANSMISSION_MODE_4K: switch (sub_channel) { case -6: init_prbs = 0x0; break; // 41, 0, 1 case -5: init_prbs = 0x208; break; // 02~04 case -4: init_prbs = 0xC3; break; // 05~07 case -3: init_prbs = 0x7B9; break; // 08~10 case -2: init_prbs = 0x423; break; // 11~13 case -1: init_prbs = 0x5C7; break; // 14~16 case 0: init_prbs = 0x3D8; break; // 17~19 case 1: init_prbs = 0x7FF; break; // 20~22 case 2: init_prbs = 0x3D6; break; // 23~25 case 3: init_prbs = 0x53B; break; // 26~28 case 4: init_prbs = 0x213; break; // 29~31 case 5: init_prbs = 0x29; break; // 32~34 case 6: init_prbs = 0xD0; break; // 35~37 default: case 7: init_prbs = 0x48E; break; // 38~40 } break; default: case TRANSMISSION_MODE_8K: switch (sub_channel) { case -6: init_prbs = 0x0; break; // 41, 0, 1 case -5: init_prbs = 0x740; break; // 02~04 case -4: init_prbs = 0x069; break; // 05~07 case -3: init_prbs = 0x7DD; break; // 08~10 case -2: init_prbs = 0x208; break; // 11~13 case -1: init_prbs = 0x7B9; break; // 14~16 case 0: init_prbs = 0x5C7; break; // 17~19 case 1: init_prbs = 0x7FF; break; // 20~22 case 2: init_prbs = 0x53B; break; // 23~25 case 3: init_prbs = 0x29; break; // 26~28 case 4: init_prbs = 0x48E; break; // 29~31 case 5: init_prbs = 0x4C4; break; // 32~34 case 6: init_prbs = 0x367; break; // 33~37 default: case 7: init_prbs = 0x684; break; // 38~40 } break; } } else { dib8000_write_word(state, 27, (u16) ((state->cfg.pll->ifreq >> 16) & 0x01ff)); dib8000_write_word(state, 28, (u16) (state->cfg.pll->ifreq & 
0xffff)); dib8000_write_word(state, 26, (u16) ((state->cfg.pll->ifreq >> 25) & 0x0003)); } /*P_mode == ?? */ dib8000_write_word(state, 10, (seq << 4)); // dib8000_write_word(state, 287, (dib8000_read_word(state, 287) & 0xe000) | 0x1000); switch (state->fe[0]->dtv_property_cache.guard_interval) { case GUARD_INTERVAL_1_32: guard = 0; break; case GUARD_INTERVAL_1_16: guard = 1; break; case GUARD_INTERVAL_1_8: guard = 2; break; case GUARD_INTERVAL_1_4: default: guard = 3; break; } dib8000_write_word(state, 1, (init_prbs << 2) | (guard & 0x3)); // ADDR 1 max_constellation = DQPSK; for (i = 0; i < 3; i++) { switch (state->fe[0]->dtv_property_cache.layer[i].modulation) { case DQPSK: constellation = 0; break; case QPSK: constellation = 1; break; case QAM_16: constellation = 2; break; case QAM_64: default: constellation = 3; break; } switch (state->fe[0]->dtv_property_cache.layer[i].fec) { case FEC_1_2: crate = 1; break; case FEC_2_3: crate = 2; break; case FEC_3_4: crate = 3; break; case FEC_5_6: crate = 5; break; case FEC_7_8: default: crate = 7; break; } if ((state->fe[0]->dtv_property_cache.layer[i].interleaving > 0) && ((state->fe[0]->dtv_property_cache.layer[i].interleaving <= 3) || (state->fe[0]->dtv_property_cache.layer[i].interleaving == 4 && state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1)) ) timeI = state->fe[0]->dtv_property_cache.layer[i].interleaving; else timeI = 0; dib8000_write_word(state, 2 + i, (constellation << 10) | ((state->fe[0]->dtv_property_cache.layer[i].segment_count & 0xf) << 6) | (crate << 3) | timeI); if (state->fe[0]->dtv_property_cache.layer[i].segment_count > 0) { switch (max_constellation) { case DQPSK: case QPSK: if (state->fe[0]->dtv_property_cache.layer[i].modulation == QAM_16 || state->fe[0]->dtv_property_cache.layer[i].modulation == QAM_64) max_constellation = state->fe[0]->dtv_property_cache.layer[i].modulation; break; case QAM_16: if (state->fe[0]->dtv_property_cache.layer[i].modulation == QAM_64) max_constellation = state->fe[0]->dtv_property_cache.layer[i].modulation; break; } } } mode = fft_to_mode(state); //dib8000_write_word(state, 5, 13); /*p_last_seg = 13*/ dib8000_write_word(state, 274, (dib8000_read_word(state, 274) & 0xffcf) | ((state->fe[0]->dtv_property_cache.isdbt_partial_reception & 1) << 5) | ((state->fe[0]->dtv_property_cache. 
isdbt_sb_mode & 1) << 4)); dprintk("mode = %d ; guard = %d", mode, state->fe[0]->dtv_property_cache.guard_interval); /* signal optimization parameter */ if (state->fe[0]->dtv_property_cache.isdbt_partial_reception) { seg_diff_mask = (state->fe[0]->dtv_property_cache.layer[0].modulation == DQPSK) << permu_seg[0]; for (i = 1; i < 3; i++) nbseg_diff += (state->fe[0]->dtv_property_cache.layer[i].modulation == DQPSK) * state->fe[0]->dtv_property_cache.layer[i].segment_count; for (i = 0; i < nbseg_diff; i++) seg_diff_mask |= 1 << permu_seg[i + 1]; } else { for (i = 0; i < 3; i++) nbseg_diff += (state->fe[0]->dtv_property_cache.layer[i].modulation == DQPSK) * state->fe[0]->dtv_property_cache.layer[i].segment_count; for (i = 0; i < nbseg_diff; i++) seg_diff_mask |= 1 << permu_seg[i]; } dprintk("nbseg_diff = %X (%d)", seg_diff_mask, seg_diff_mask); state->differential_constellation = (seg_diff_mask != 0); dib8000_set_diversity_in(state->fe[0], state->diversity_onoff); if (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1) { if (state->fe[0]->dtv_property_cache.isdbt_partial_reception == 1) seg_mask13 = 0x00E0; else // 1-segment seg_mask13 = 0x0040; } else seg_mask13 = 0x1fff; // WRITE: Mode & Diff mask dib8000_write_word(state, 0, (mode << 13) | seg_diff_mask); if ((seg_diff_mask) || (state->fe[0]->dtv_property_cache.isdbt_sb_mode)) dib8000_write_word(state, 268, (dib8000_read_word(state, 268) & 0xF9FF) | 0x0200); else dib8000_write_word(state, 268, (2 << 9) | 39); //init value // ---- SMALL ---- // P_small_seg_diff dib8000_write_word(state, 352, seg_diff_mask); // ADDR 352 dib8000_write_word(state, 353, seg_mask13); // ADDR 353 /* // P_small_narrow_band=0, P_small_last_seg=13, P_small_offset_num_car=5 */ // ---- SMALL ---- if (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1) { switch (state->fe[0]->dtv_property_cache.transmission_mode) { case TRANSMISSION_MODE_2K: if (state->fe[0]->dtv_property_cache.isdbt_partial_reception == 0) { if (state->fe[0]->dtv_property_cache.layer[0].modulation == DQPSK) ncoeff = coeff_2k_sb_1seg_dqpsk; else // QPSK or QAM ncoeff = coeff_2k_sb_1seg; } else { // 3-segments if (state->fe[0]->dtv_property_cache.layer[0].modulation == DQPSK) { if (state->fe[0]->dtv_property_cache.layer[1].modulation == DQPSK) ncoeff = coeff_2k_sb_3seg_0dqpsk_1dqpsk; else // QPSK or QAM on external segments ncoeff = coeff_2k_sb_3seg_0dqpsk; } else { // QPSK or QAM on central segment if (state->fe[0]->dtv_property_cache.layer[1].modulation == DQPSK) ncoeff = coeff_2k_sb_3seg_1dqpsk; else // QPSK or QAM on external segments ncoeff = coeff_2k_sb_3seg; } } break; case TRANSMISSION_MODE_4K: if (state->fe[0]->dtv_property_cache.isdbt_partial_reception == 0) { if (state->fe[0]->dtv_property_cache.layer[0].modulation == DQPSK) ncoeff = coeff_4k_sb_1seg_dqpsk; else // QPSK or QAM ncoeff = coeff_4k_sb_1seg; } else { // 3-segments if (state->fe[0]->dtv_property_cache.layer[0].modulation == DQPSK) { if (state->fe[0]->dtv_property_cache.layer[1].modulation == DQPSK) { ncoeff = coeff_4k_sb_3seg_0dqpsk_1dqpsk; } else { // QPSK or QAM on external segments ncoeff = coeff_4k_sb_3seg_0dqpsk; } } else { // QPSK or QAM on central segment if (state->fe[0]->dtv_property_cache.layer[1].modulation == DQPSK) { ncoeff = coeff_4k_sb_3seg_1dqpsk; } else // QPSK or QAM on external segments ncoeff = coeff_4k_sb_3seg; } } break; case TRANSMISSION_MODE_AUTO: case TRANSMISSION_MODE_8K: default: if (state->fe[0]->dtv_property_cache.isdbt_partial_reception == 0) { if (state->fe[0]->dtv_property_cache.layer[0].modulation 
== DQPSK) ncoeff = coeff_8k_sb_1seg_dqpsk; else // QPSK or QAM ncoeff = coeff_8k_sb_1seg; } else { // 3-segments if (state->fe[0]->dtv_property_cache.layer[0].modulation == DQPSK) { if (state->fe[0]->dtv_property_cache.layer[1].modulation == DQPSK) { ncoeff = coeff_8k_sb_3seg_0dqpsk_1dqpsk; } else { // QPSK or QAM on external segments ncoeff = coeff_8k_sb_3seg_0dqpsk; } } else { // QPSK or QAM on central segment if (state->fe[0]->dtv_property_cache.layer[1].modulation == DQPSK) { ncoeff = coeff_8k_sb_3seg_1dqpsk; } else // QPSK or QAM on external segments ncoeff = coeff_8k_sb_3seg; } } break; } for (i = 0; i < 8; i++) dib8000_write_word(state, 343 + i, ncoeff[i]); } // P_small_coef_ext_enable=ISDB-Tsb, P_small_narrow_band=ISDB-Tsb, P_small_last_seg=13, P_small_offset_num_car=5 dib8000_write_word(state, 351, (state->fe[0]->dtv_property_cache.isdbt_sb_mode << 9) | (state->fe[0]->dtv_property_cache.isdbt_sb_mode << 8) | (13 << 4) | 5); // ---- COFF ---- // Carloff, the most robust if (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1) { // P_coff_cpil_alpha=4, P_coff_inh=0, P_coff_cpil_winlen=64 // P_coff_narrow_band=1, P_coff_square_val=1, P_coff_one_seg=~partial_rcpt, P_coff_use_tmcc=1, P_coff_use_ac=1 dib8000_write_word(state, 187, (4 << 12) | (0 << 11) | (63 << 5) | (0x3 << 3) | ((~state->fe[0]->dtv_property_cache.isdbt_partial_reception & 1) << 2) | 0x3); /* // P_small_coef_ext_enable = 1 */ /* dib8000_write_word(state, 351, dib8000_read_word(state, 351) | 0x200); */ if (state->fe[0]->dtv_property_cache.isdbt_partial_reception == 0) { // P_coff_winlen=63, P_coff_thres_lock=15, P_coff_one_seg_width= (P_mode == 3) , P_coff_one_seg_sym= (P_mode-1) if (mode == 3) dib8000_write_word(state, 180, 0x1fcf | ((mode - 1) << 14)); else dib8000_write_word(state, 180, 0x0fcf | ((mode - 1) << 14)); // P_ctrl_corm_thres4pre_freq_inh=1,P_ctrl_pre_freq_mode_sat=1, // P_ctrl_pre_freq_inh=0, P_ctrl_pre_freq_step = 5, P_pre_freq_win_len=4 dib8000_write_word(state, 338, (1 << 12) | (1 << 10) | (0 << 9) | (5 << 5) | 4); // P_ctrl_pre_freq_win_len=16, P_ctrl_pre_freq_thres_lockin=8 dib8000_write_word(state, 340, (16 << 6) | (8 << 0)); // P_ctrl_pre_freq_thres_lockout=6, P_small_use_tmcc/ac/cp=1 dib8000_write_word(state, 341, (6 << 3) | (1 << 2) | (1 << 1) | (1 << 0)); // P_coff_corthres_8k, 4k, 2k and P_coff_cpilthres_8k, 4k, 2k dib8000_write_word(state, 181, 300); dib8000_write_word(state, 182, 150); dib8000_write_word(state, 183, 80); dib8000_write_word(state, 184, 300); dib8000_write_word(state, 185, 150); dib8000_write_word(state, 186, 80); } else { // Sound Broadcasting mode 3 seg // P_coff_one_seg_sym= 1, P_coff_one_seg_width= 1, P_coff_winlen=63, P_coff_thres_lock=15 /* if (mode == 3) */ /* dib8000_write_word(state, 180, 0x2fca | ((0) << 14)); */ /* else */ /* dib8000_write_word(state, 180, 0x2fca | ((1) << 14)); */ dib8000_write_word(state, 180, 0x1fcf | (1 << 14)); // P_ctrl_corm_thres4pre_freq_inh = 1, P_ctrl_pre_freq_mode_sat=1, // P_ctrl_pre_freq_inh=0, P_ctrl_pre_freq_step = 4, P_pre_freq_win_len=4 dib8000_write_word(state, 338, (1 << 12) | (1 << 10) | (0 << 9) | (4 << 5) | 4); // P_ctrl_pre_freq_win_len=16, P_ctrl_pre_freq_thres_lockin=8 dib8000_write_word(state, 340, (16 << 6) | (8 << 0)); //P_ctrl_pre_freq_thres_lockout=6, P_small_use_tmcc/ac/cp=1 dib8000_write_word(state, 341, (6 << 3) | (1 << 2) | (1 << 1) | (1 << 0)); // P_coff_corthres_8k, 4k, 2k and P_coff_cpilthres_8k, 4k, 2k dib8000_write_word(state, 181, 350); dib8000_write_word(state, 182, 300); dib8000_write_word(state, 183, 250); 
dib8000_write_word(state, 184, 350); dib8000_write_word(state, 185, 300); dib8000_write_word(state, 186, 250); } } else if (state->isdbt_cfg_loaded == 0) { // if not Sound Broadcasting mode : put default values for 13 segments dib8000_write_word(state, 180, (16 << 6) | 9); dib8000_write_word(state, 187, (4 << 12) | (8 << 5) | 0x2); coff_pow = 0x2800; for (i = 0; i < 6; i++) dib8000_write_word(state, 181 + i, coff_pow); // P_ctrl_corm_thres4pre_freq_inh=1, P_ctrl_pre_freq_mode_sat=1, // P_ctrl_pre_freq_mode_sat=1, P_ctrl_pre_freq_inh=0, P_ctrl_pre_freq_step = 3, P_pre_freq_win_len=1 dib8000_write_word(state, 338, (1 << 12) | (1 << 10) | (0 << 9) | (3 << 5) | 1); // P_ctrl_pre_freq_win_len=8, P_ctrl_pre_freq_thres_lockin=6 dib8000_write_word(state, 340, (8 << 6) | (6 << 0)); // P_ctrl_pre_freq_thres_lockout=4, P_small_use_tmcc/ac/cp=1 dib8000_write_word(state, 341, (4 << 3) | (1 << 2) | (1 << 1) | (1 << 0)); } // ---- FFT ---- if (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1 && state->fe[0]->dtv_property_cache.isdbt_partial_reception == 0) dib8000_write_word(state, 178, 64); // P_fft_powrange=64 else dib8000_write_word(state, 178, 32); // P_fft_powrange=32 /* make the cpil_coff_lock more robust but slower p_coff_winlen * 6bits; p_coff_thres_lock 6bits (for coff lock if needed) */ /* if ( ( nbseg_diff>0)&&(nbseg_diff<13)) dib8000_write_word(state, 187, (dib8000_read_word(state, 187) & 0xfffb) | (1 << 3)); */ dib8000_write_word(state, 189, ~seg_mask13 | seg_diff_mask); /* P_lmod4_seg_inh */ dib8000_write_word(state, 192, ~seg_mask13 | seg_diff_mask); /* P_pha3_seg_inh */ dib8000_write_word(state, 225, ~seg_mask13 | seg_diff_mask); /* P_tac_seg_inh */ if ((!state->fe[0]->dtv_property_cache.isdbt_sb_mode) && (state->cfg.pll->ifreq == 0)) dib8000_write_word(state, 266, ~seg_mask13 | seg_diff_mask | 0x40); /* P_equal_noise_seg_inh */ else dib8000_write_word(state, 266, ~seg_mask13 | seg_diff_mask); /* P_equal_noise_seg_inh */ dib8000_write_word(state, 287, ~seg_mask13 | 0x1000); /* P_tmcc_seg_inh */ //dib8000_write_word(state, 288, ~seg_mask13 | seg_diff_mask); /* P_tmcc_seg_eq_inh */ if (!autosearching) dib8000_write_word(state, 288, (~seg_mask13 | seg_diff_mask) & 0x1fff); /* P_tmcc_seg_eq_inh */ else dib8000_write_word(state, 288, 0x1fff); //disable equalisation of the tmcc when autosearch to be able to find the DQPSK channels. dprintk("287 = %X (%d)", ~seg_mask13 | 0x1000, ~seg_mask13 | 0x1000); dib8000_write_word(state, 211, seg_mask13 & (~seg_diff_mask)); /* P_des_seg_enabled */ /* offset loop parameters */ if (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1) { if (state->fe[0]->dtv_property_cache.isdbt_partial_reception == 0) /* P_timf_alpha = (11-P_mode), P_corm_alpha=6, P_corm_thres=0x80 */ dib8000_write_word(state, 32, ((11 - mode) << 12) | (6 << 8) | 0x40); else // Sound Broadcasting mode 3 seg /* P_timf_alpha = (10-P_mode), P_corm_alpha=6, P_corm_thres=0x80 */ dib8000_write_word(state, 32, ((10 - mode) << 12) | (6 << 8) | 0x60); } else // TODO in 13 seg, timf_alpha can always be the same or not ? 
/* P_timf_alpha = (9-P_mode, P_corm_alpha=6, P_corm_thres=0x80 */ dib8000_write_word(state, 32, ((9 - mode) << 12) | (6 << 8) | 0x80); if (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1) { if (state->fe[0]->dtv_property_cache.isdbt_partial_reception == 0) /* P_ctrl_pha_off_max=3 P_ctrl_sfreq_inh =0 P_ctrl_sfreq_step = (11-P_mode) */ dib8000_write_word(state, 37, (3 << 5) | (0 << 4) | (10 - mode)); else // Sound Broadcasting mode 3 seg /* P_ctrl_pha_off_max=3 P_ctrl_sfreq_inh =0 P_ctrl_sfreq_step = (10-P_mode) */ dib8000_write_word(state, 37, (3 << 5) | (0 << 4) | (9 - mode)); } else /* P_ctrl_pha_off_max=3 P_ctrl_sfreq_inh =0 P_ctrl_sfreq_step = 9 */ dib8000_write_word(state, 37, (3 << 5) | (0 << 4) | (8 - mode)); /* P_dvsy_sync_wait - reuse mode */ switch (state->fe[0]->dtv_property_cache.transmission_mode) { case TRANSMISSION_MODE_8K: mode = 256; break; case TRANSMISSION_MODE_4K: mode = 128; break; default: case TRANSMISSION_MODE_2K: mode = 64; break; } if (state->cfg.diversity_delay == 0) mode = (mode * (1 << (guard)) * 3) / 2 + 48; // add 50% SFN margin + compensate for one DVSY-fifo else mode = (mode * (1 << (guard)) * 3) / 2 + state->cfg.diversity_delay; // add 50% SFN margin + compensate for DVSY-fifo mode <<= 4; dib8000_write_word(state, 273, (dib8000_read_word(state, 273) & 0x000f) | mode); /* channel estimation fine configuration */ switch (max_constellation) { case QAM_64: ana_gain = 0x7; // -1 : avoid def_est saturation when ADC target is -16dB coeff[0] = 0x0148; /* P_adp_regul_cnt 0.04 */ coeff[1] = 0xfff0; /* P_adp_noise_cnt -0.002 */ coeff[2] = 0x00a4; /* P_adp_regul_ext 0.02 */ coeff[3] = 0xfff8; /* P_adp_noise_ext -0.001 */ //if (!state->cfg.hostbus_diversity) //if diversity, we should prehaps use the configuration of the max_constallation -1 break; case QAM_16: ana_gain = 0x7; // -1 : avoid def_est saturation when ADC target is -16dB coeff[0] = 0x023d; /* P_adp_regul_cnt 0.07 */ coeff[1] = 0xffdf; /* P_adp_noise_cnt -0.004 */ coeff[2] = 0x00a4; /* P_adp_regul_ext 0.02 */ coeff[3] = 0xfff0; /* P_adp_noise_ext -0.002 */ //if (!((state->cfg.hostbus_diversity) && (max_constellation == QAM_16))) break; default: ana_gain = 0; // 0 : goes along with ADC target at -22dB to keep good mobile performance and lock at sensitivity level coeff[0] = 0x099a; /* P_adp_regul_cnt 0.3 */ coeff[1] = 0xffae; /* P_adp_noise_cnt -0.01 */ coeff[2] = 0x0333; /* P_adp_regul_ext 0.1 */ coeff[3] = 0xfff8; /* P_adp_noise_ext -0.002 */ break; } for (mode = 0; mode < 4; mode++) dib8000_write_word(state, 215 + mode, coeff[mode]); // update ana_gain depending on max constellation dib8000_write_word(state, 116, ana_gain); // update ADC target depending on ana_gain if (ana_gain) { // set -16dB ADC target for ana_gain=-1 for (i = 0; i < 10; i++) dib8000_write_word(state, 80 + i, adc_target_16dB[i]); } else { // set -22dB ADC target for ana_gain=0 for (i = 0; i < 10; i++) dib8000_write_word(state, 80 + i, adc_target_16dB[i] - 355); } // ---- ANA_FE ---- if (state->fe[0]->dtv_property_cache.isdbt_sb_mode) { if (state->fe[0]->dtv_property_cache.isdbt_partial_reception == 1) ana_fe = ana_fe_coeff_3seg; else // 1-segment ana_fe = ana_fe_coeff_1seg; } else ana_fe = ana_fe_coeff_13seg; if (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1 || state->isdbt_cfg_loaded == 0) for (mode = 0; mode < 24; mode++) dib8000_write_word(state, 117 + mode, ana_fe[mode]); // ---- CHAN_BLK ---- for (i = 0; i < 13; i++) { if ((((~seg_diff_mask) >> i) & 1) == 1) { P_cfr_left_edge += (1 << i) * ((i == 0) || ((((seg_mask13 & 
(~seg_diff_mask)) >> (i - 1)) & 1) == 0)); P_cfr_right_edge += (1 << i) * ((i == 12) || ((((seg_mask13 & (~seg_diff_mask)) >> (i + 1)) & 1) == 0)); } } dib8000_write_word(state, 222, P_cfr_left_edge); // P_cfr_left_edge dib8000_write_word(state, 223, P_cfr_right_edge); // P_cfr_right_edge // "P_cspu_left_edge" not used => do not care // "P_cspu_right_edge" not used => do not care if (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1) { dib8000_write_word(state, 228, 1); // P_2d_mode_byp=1 dib8000_write_word(state, 205, dib8000_read_word(state, 205) & 0xfff0); // P_cspu_win_cut = 0 if (state->fe[0]->dtv_property_cache.isdbt_partial_reception == 0 && state->fe[0]->dtv_property_cache.transmission_mode == TRANSMISSION_MODE_2K) { //dib8000_write_word(state, 219, dib8000_read_word(state, 219) & 0xfffe); // P_adp_pass = 0 dib8000_write_word(state, 265, 15); // P_equal_noise_sel = 15 } } else if (state->isdbt_cfg_loaded == 0) { dib8000_write_word(state, 228, 0); // default value dib8000_write_word(state, 265, 31); // default value dib8000_write_word(state, 205, 0x200f); // init value } // ---- TMCC ---- for (i = 0; i < 3; i++) tmcc_pow += (((state->fe[0]->dtv_property_cache.layer[i].modulation == DQPSK) * 4 + 1) * state->fe[0]->dtv_property_cache.layer[i].segment_count); // Quantization of "P_tmcc_dec_thres_?k" is (0, 5+mode, 9); // Threshold is set at 1/4 of max power. tmcc_pow *= (1 << (9 - 2)); dib8000_write_word(state, 290, tmcc_pow); // P_tmcc_dec_thres_2k dib8000_write_word(state, 291, tmcc_pow); // P_tmcc_dec_thres_4k dib8000_write_word(state, 292, tmcc_pow); // P_tmcc_dec_thres_8k //dib8000_write_word(state, 287, (1 << 13) | 0x1000 ); // ---- PHA3 ---- if (state->isdbt_cfg_loaded == 0) dib8000_write_word(state, 250, 3285); /* p_2d_hspeed_thr0 */ if (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1) state->isdbt_cfg_loaded = 0; else state->isdbt_cfg_loaded = 1; } static int dib8000_autosearch_start(struct dvb_frontend *fe) { u8 factor; u32 value; struct dib8000_state *state = fe->demodulator_priv; int slist = 0; state->fe[0]->dtv_property_cache.inversion = 0; if (!state->fe[0]->dtv_property_cache.isdbt_sb_mode) state->fe[0]->dtv_property_cache.layer[0].segment_count = 13; state->fe[0]->dtv_property_cache.layer[0].modulation = QAM_64; state->fe[0]->dtv_property_cache.layer[0].fec = FEC_2_3; state->fe[0]->dtv_property_cache.layer[0].interleaving = 0; // choose the right list; in SB mode, always scan everything if (state->fe[0]->dtv_property_cache.isdbt_sb_mode) { state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_8K; state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_8; slist = 7; dib8000_write_word(state, 0, (dib8000_read_word(state, 0) & 0x9fff) | (1 << 13)); } else { if (state->fe[0]->dtv_property_cache.guard_interval == GUARD_INTERVAL_AUTO) { if (state->fe[0]->dtv_property_cache.transmission_mode == TRANSMISSION_MODE_AUTO) { slist = 7; dib8000_write_word(state, 0, (dib8000_read_word(state, 0) & 0x9fff) | (1 << 13)); // P_mode = 1 to have autosearch start ok with mode2 } else slist = 3; } else { if (state->fe[0]->dtv_property_cache.transmission_mode == TRANSMISSION_MODE_AUTO) { slist = 2; dib8000_write_word(state, 0, (dib8000_read_word(state, 0) & 0x9fff) | (1 << 13)); // P_mode = 1 } else slist = 0; } if (state->fe[0]->dtv_property_cache.transmission_mode == TRANSMISSION_MODE_AUTO) state->fe[0]->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_8K; if (state->fe[0]->dtv_property_cache.guard_interval == GUARD_INTERVAL_AUTO)
state->fe[0]->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_8; dprintk("using list for autosearch : %d", slist); dib8000_set_channel(state, (unsigned char)slist, 1); //dib8000_write_word(state, 0, (dib8000_read_word(state, 0) & 0x9fff) | (1 << 13)); // P_mode = 1 factor = 1; //set lock_mask values dib8000_write_word(state, 6, 0x4); dib8000_write_word(state, 7, 0x8); dib8000_write_word(state, 8, 0x1000); //set lock_mask wait time values value = 50 * state->cfg.pll->internal * factor; dib8000_write_word(state, 11, (u16) ((value >> 16) & 0xffff)); // lock0 wait time dib8000_write_word(state, 12, (u16) (value & 0xffff)); // lock0 wait time value = 100 * state->cfg.pll->internal * factor; dib8000_write_word(state, 13, (u16) ((value >> 16) & 0xffff)); // lock1 wait time dib8000_write_word(state, 14, (u16) (value & 0xffff)); // lock1 wait time value = 1000 * state->cfg.pll->internal * factor; dib8000_write_word(state, 15, (u16) ((value >> 16) & 0xffff)); // lock2 wait time dib8000_write_word(state, 16, (u16) (value & 0xffff)); // lock2 wait time value = dib8000_read_word(state, 0); dib8000_write_word(state, 0, (u16) ((1 << 15) | value)); dib8000_read_word(state, 1284); // reset the INT. n_irq_pending dib8000_write_word(state, 0, (u16) value); } return 0; } static int dib8000_autosearch_irq(struct dvb_frontend *fe) { struct dib8000_state *state = fe->demodulator_priv; u16 irq_pending = dib8000_read_word(state, 1284); if (irq_pending & 0x1) { // failed dprintk("dib8000_autosearch_irq failed"); return 1; } if (irq_pending & 0x2) { // succeeded dprintk("dib8000_autosearch_irq succeeded"); return 2; } return 0; // still pending } static int dib8000_tune(struct dvb_frontend *fe) { struct dib8000_state *state = fe->demodulator_priv; int ret = 0; u16 value, mode; // we are already tuned - just resuming from suspend if (state == NULL) return -EINVAL; mode = fft_to_mode(state); dib8000_set_bandwidth(fe, state->fe[0]->dtv_property_cache.bandwidth_hz / 1000); dib8000_set_channel(state, 0, 0); // restart demod ret |= dib8000_write_word(state, 770, 0x4000); ret |= dib8000_write_word(state, 770, 0x0000); msleep(45); /* P_ctrl_inh_cor=0, P_ctrl_alpha_cor=4, P_ctrl_inh_isi=0, P_ctrl_alpha_isi=3 */ /* ret |= dib8000_write_word(state, 29, (0 << 9) | (4 << 5) | (0 << 4) | (3 << 0) ); workaround inh_isi stays at 1 */ // never achieved a lock before - wait for timfreq to update if (state->timf == 0) { if (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1) { if (state->fe[0]->dtv_property_cache.isdbt_partial_reception == 0) msleep(300); else // Sound Broadcasting mode 3 seg msleep(500); } else // 13 seg msleep(200); } if (state->fe[0]->dtv_property_cache.isdbt_sb_mode == 1) { if (state->fe[0]->dtv_property_cache.isdbt_partial_reception == 0) { /* P_timf_alpha = (13-P_mode) , P_corm_alpha=6, P_corm_thres=0x40 alpha to check on board */ dib8000_write_word(state, 32, ((13 - mode) << 12) | (6 << 8) | 0x40); //dib8000_write_word(state, 32, (8 << 12) | (6 << 8) | 0x80); /* P_ctrl_sfreq_step= (12-P_mode) P_ctrl_sfreq_inh =0 P_ctrl_pha_off_max */ ret |= dib8000_write_word(state, 37, (12 - mode) | ((5 + mode) << 5)); } else { // Sound Broadcasting mode 3 seg /* P_timf_alpha = (12-P_mode) , P_corm_alpha=6, P_corm_thres=0x60 alpha to check on board */ dib8000_write_word(state, 32, ((12 - mode) << 12) | (6 << 8) | 0x60); ret |= dib8000_write_word(state, 37, (11 - mode) | ((5 + mode) << 5)); } } else { // 13 seg /* P_timf_alpha = 8 , P_corm_alpha=6, P_corm_thres=0x80 alpha to check on board */ dib8000_write_word(state, 32, ((11
- mode) << 12) | (6 << 8) | 0x80); ret |= dib8000_write_word(state, 37, (10 - mode) | ((5 + mode) << 5)); } // we achieved a coff_cpil_lock - it's time to update the timf if ((dib8000_read_word(state, 568) >> 11) & 0x1) dib8000_update_timf(state); //now that tune is finished, lock0 should lock on fec_mpeg to output this lock on MP_LOCK. It's changed in autosearch start dib8000_write_word(state, 6, 0x200); if (state->revision == 0x8002) { value = dib8000_read_word(state, 903); dib8000_write_word(state, 903, value & ~(1 << 3)); msleep(1); dib8000_write_word(state, 903, value | (1 << 3)); } return ret; } static int dib8000_wakeup(struct dvb_frontend *fe) { struct dib8000_state *state = fe->demodulator_priv; u8 index_frontend; int ret; dib8000_set_power_mode(state, DIB8000M_POWER_ALL); dib8000_set_adc_state(state, DIBX000_ADC_ON); if (dib8000_set_adc_state(state, DIBX000_SLOW_ADC_ON) != 0) dprintk("could not start Slow ADC"); for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) { ret = state->fe[index_frontend]->ops.init(state->fe[index_frontend]); if (ret < 0) return ret; } return 0; } static int dib8000_sleep(struct dvb_frontend *fe) { struct dib8000_state *state = fe->demodulator_priv; u8 index_frontend; int ret; for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) { ret = state->fe[index_frontend]->ops.sleep(state->fe[index_frontend]); if (ret < 0) return ret; } dib8000_set_output_mode(fe, OUTMODE_HIGH_Z); dib8000_set_power_mode(state, DIB8000M_POWER_INTERFACE_ONLY); return dib8000_set_adc_state(state, DIBX000_SLOW_ADC_OFF) | dib8000_set_adc_state(state, DIBX000_ADC_OFF); } enum frontend_tune_state dib8000_get_tune_state(struct dvb_frontend *fe) { struct dib8000_state *state = fe->demodulator_priv; return state->tune_state; } EXPORT_SYMBOL(dib8000_get_tune_state); int dib8000_set_tune_state(struct dvb_frontend *fe, enum frontend_tune_state tune_state) { struct dib8000_state *state = fe->demodulator_priv; state->tune_state = tune_state; return 0; } EXPORT_SYMBOL(dib8000_set_tune_state); static int dib8000_get_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep) { struct dib8000_state *state = fe->demodulator_priv; u16 i, val = 0; fe_status_t stat; u8 index_frontend, sub_index_frontend; fe->dtv_property_cache.bandwidth_hz = 6000000; for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) { state->fe[index_frontend]->ops.read_status(state->fe[index_frontend], &stat); if (stat&FE_HAS_SYNC) { dprintk("TMCC lock on the slave%i", index_frontend); /* synchronize the cache with the other frontends */ state->fe[index_frontend]->ops.get_frontend(state->fe[index_frontend], fep); for (sub_index_frontend = 0; (sub_index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[sub_index_frontend] != NULL); sub_index_frontend++) { if (sub_index_frontend != index_frontend) { state->fe[sub_index_frontend]->dtv_property_cache.isdbt_sb_mode = state->fe[index_frontend]->dtv_property_cache.isdbt_sb_mode; state->fe[sub_index_frontend]->dtv_property_cache.inversion = state->fe[index_frontend]->dtv_property_cache.inversion; state->fe[sub_index_frontend]->dtv_property_cache.transmission_mode = state->fe[index_frontend]->dtv_property_cache.transmission_mode; state->fe[sub_index_frontend]->dtv_property_cache.guard_interval = state->fe[index_frontend]->dtv_property_cache.guard_interval; 
state->fe[sub_index_frontend]->dtv_property_cache.isdbt_partial_reception = state->fe[index_frontend]->dtv_property_cache.isdbt_partial_reception; for (i = 0; i < 3; i++) { state->fe[sub_index_frontend]->dtv_property_cache.layer[i].segment_count = state->fe[index_frontend]->dtv_property_cache.layer[i].segment_count; state->fe[sub_index_frontend]->dtv_property_cache.layer[i].interleaving = state->fe[index_frontend]->dtv_property_cache.layer[i].interleaving; state->fe[sub_index_frontend]->dtv_property_cache.layer[i].fec = state->fe[index_frontend]->dtv_property_cache.layer[i].fec; state->fe[sub_index_frontend]->dtv_property_cache.layer[i].modulation = state->fe[index_frontend]->dtv_property_cache.layer[i].modulation; } } } return 0; } } fe->dtv_property_cache.isdbt_sb_mode = dib8000_read_word(state, 508) & 0x1; val = dib8000_read_word(state, 570); fe->dtv_property_cache.inversion = (val & 0x40) >> 6; switch ((val & 0x30) >> 4) { case 1: fe->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_2K; break; case 3: default: fe->dtv_property_cache.transmission_mode = TRANSMISSION_MODE_8K; break; } switch (val & 0x3) { case 0: fe->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_32; dprintk("dib8000_get_frontend GI = 1/32 "); break; case 1: fe->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_16; dprintk("dib8000_get_frontend GI = 1/16 "); break; case 2: dprintk("dib8000_get_frontend GI = 1/8 "); fe->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_8; break; case 3: dprintk("dib8000_get_frontend GI = 1/4 "); fe->dtv_property_cache.guard_interval = GUARD_INTERVAL_1_4; break; } val = dib8000_read_word(state, 505); fe->dtv_property_cache.isdbt_partial_reception = val & 1; dprintk("dib8000_get_frontend : partial_reception = %d ", fe->dtv_property_cache.isdbt_partial_reception); for (i = 0; i < 3; i++) { val = dib8000_read_word(state, 493 + i); fe->dtv_property_cache.layer[i].segment_count = val & 0x0F; dprintk("dib8000_get_frontend : Layer %d segments = %d ", i, fe->dtv_property_cache.layer[i].segment_count); val = dib8000_read_word(state, 499 + i); fe->dtv_property_cache.layer[i].interleaving = val & 0x3; dprintk("dib8000_get_frontend : Layer %d time_intlv = %d ", i, fe->dtv_property_cache.layer[i].interleaving); val = dib8000_read_word(state, 481 + i); switch (val & 0x7) { case 1: fe->dtv_property_cache.layer[i].fec = FEC_1_2; dprintk("dib8000_get_frontend : Layer %d Code Rate = 1/2 ", i); break; case 2: fe->dtv_property_cache.layer[i].fec = FEC_2_3; dprintk("dib8000_get_frontend : Layer %d Code Rate = 2/3 ", i); break; case 3: fe->dtv_property_cache.layer[i].fec = FEC_3_4; dprintk("dib8000_get_frontend : Layer %d Code Rate = 3/4 ", i); break; case 5: fe->dtv_property_cache.layer[i].fec = FEC_5_6; dprintk("dib8000_get_frontend : Layer %d Code Rate = 5/6 ", i); break; default: fe->dtv_property_cache.layer[i].fec = FEC_7_8; dprintk("dib8000_get_frontend : Layer %d Code Rate = 7/8 ", i); break; } val = dib8000_read_word(state, 487 + i); switch (val & 0x3) { case 0: dprintk("dib8000_get_frontend : Layer %d DQPSK ", i); fe->dtv_property_cache.layer[i].modulation = DQPSK; break; case 1: fe->dtv_property_cache.layer[i].modulation = QPSK; dprintk("dib8000_get_frontend : Layer %d QPSK ", i); break; case 2: fe->dtv_property_cache.layer[i].modulation = QAM_16; dprintk("dib8000_get_frontend : Layer %d QAM16 ", i); break; case 3: default: dprintk("dib8000_get_frontend : Layer %d QAM64 ", i); fe->dtv_property_cache.layer[i].modulation = QAM_64; break; } } /* synchronize the cache with the other 
frontends */ for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) { state->fe[index_frontend]->dtv_property_cache.isdbt_sb_mode = fe->dtv_property_cache.isdbt_sb_mode; state->fe[index_frontend]->dtv_property_cache.inversion = fe->dtv_property_cache.inversion; state->fe[index_frontend]->dtv_property_cache.transmission_mode = fe->dtv_property_cache.transmission_mode; state->fe[index_frontend]->dtv_property_cache.guard_interval = fe->dtv_property_cache.guard_interval; state->fe[index_frontend]->dtv_property_cache.isdbt_partial_reception = fe->dtv_property_cache.isdbt_partial_reception; for (i = 0; i < 3; i++) { state->fe[index_frontend]->dtv_property_cache.layer[i].segment_count = fe->dtv_property_cache.layer[i].segment_count; state->fe[index_frontend]->dtv_property_cache.layer[i].interleaving = fe->dtv_property_cache.layer[i].interleaving; state->fe[index_frontend]->dtv_property_cache.layer[i].fec = fe->dtv_property_cache.layer[i].fec; state->fe[index_frontend]->dtv_property_cache.layer[i].modulation = fe->dtv_property_cache.layer[i].modulation; } } return 0; } static int dib8000_set_frontend(struct dvb_frontend *fe, struct dvb_frontend_parameters *fep) { struct dib8000_state *state = fe->demodulator_priv; u8 nbr_pending, exit_condition, index_frontend; s8 index_frontend_success = -1; int time, ret; int time_slave = FE_CALLBACK_TIME_NEVER; if (state->fe[0]->dtv_property_cache.frequency == 0) { dprintk("dib8000: must at least specify frequency "); return 0; } if (state->fe[0]->dtv_property_cache.bandwidth_hz == 0) { dprintk("dib8000: no bandwidth specified, set to default "); state->fe[0]->dtv_property_cache.bandwidth_hz = 6000000; } for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) { /* synchronization of the cache */ state->fe[index_frontend]->dtv_property_cache.delivery_system = SYS_ISDBT; memcpy(&state->fe[index_frontend]->dtv_property_cache, &fe->dtv_property_cache, sizeof(struct dtv_frontend_properties)); dib8000_set_output_mode(state->fe[index_frontend], OUTMODE_HIGH_Z); if (state->fe[index_frontend]->ops.tuner_ops.set_params) state->fe[index_frontend]->ops.tuner_ops.set_params(state->fe[index_frontend], fep); dib8000_set_tune_state(state->fe[index_frontend], CT_AGC_START); } /* start up the AGC */ do { time = dib8000_agc_startup(state->fe[0]); for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) { time_slave = dib8000_agc_startup(state->fe[index_frontend]); if (time == FE_CALLBACK_TIME_NEVER) time = time_slave; else if ((time_slave != FE_CALLBACK_TIME_NEVER) && (time_slave > time)) time = time_slave; } if (time != FE_CALLBACK_TIME_NEVER) msleep(time / 10); else break; exit_condition = 1; for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) { if (dib8000_get_tune_state(state->fe[index_frontend]) != CT_AGC_STOP) { exit_condition = 0; break; } } } while (exit_condition == 0); for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) dib8000_set_tune_state(state->fe[index_frontend], CT_DEMOD_START); if ((state->fe[0]->dtv_property_cache.delivery_system != SYS_ISDBT) || (state->fe[0]->dtv_property_cache.inversion == INVERSION_AUTO) || (state->fe[0]->dtv_property_cache.transmission_mode == TRANSMISSION_MODE_AUTO) || 
(state->fe[0]->dtv_property_cache.guard_interval == GUARD_INTERVAL_AUTO) || (((state->fe[0]->dtv_property_cache.isdbt_layer_enabled & (1 << 0)) != 0) && (state->fe[0]->dtv_property_cache.layer[0].segment_count != 0xff) && (state->fe[0]->dtv_property_cache.layer[0].segment_count != 0) && ((state->fe[0]->dtv_property_cache.layer[0].modulation == QAM_AUTO) || (state->fe[0]->dtv_property_cache.layer[0].fec == FEC_AUTO))) || (((state->fe[0]->dtv_property_cache.isdbt_layer_enabled & (1 << 1)) != 0) && (state->fe[0]->dtv_property_cache.layer[1].segment_count != 0xff) && (state->fe[0]->dtv_property_cache.layer[1].segment_count != 0) && ((state->fe[0]->dtv_property_cache.layer[1].modulation == QAM_AUTO) || (state->fe[0]->dtv_property_cache.layer[1].fec == FEC_AUTO))) || (((state->fe[0]->dtv_property_cache.isdbt_layer_enabled & (1 << 2)) != 0) && (state->fe[0]->dtv_property_cache.layer[2].segment_count != 0xff) && (state->fe[0]->dtv_property_cache.layer[2].segment_count != 0) && ((state->fe[0]->dtv_property_cache.layer[2].modulation == QAM_AUTO) || (state->fe[0]->dtv_property_cache.layer[2].fec == FEC_AUTO))) || (((state->fe[0]->dtv_property_cache.layer[0].segment_count == 0) || ((state->fe[0]->dtv_property_cache.isdbt_layer_enabled & (1 << 0)) == 0)) && ((state->fe[0]->dtv_property_cache.layer[1].segment_count == 0) || ((state->fe[0]->dtv_property_cache.isdbt_layer_enabled & (1 << 1)) == 0)) && ((state->fe[0]->dtv_property_cache.layer[2].segment_count == 0) || ((state->fe[0]->dtv_property_cache.isdbt_layer_enabled & (1 << 2)) == 0)))) { int i = 80000; u8 found = 0; u8 tune_failed = 0; for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) { dib8000_set_bandwidth(state->fe[index_frontend], fe->dtv_property_cache.bandwidth_hz / 1000); dib8000_autosearch_start(state->fe[index_frontend]); } do { msleep(20); nbr_pending = 0; exit_condition = 0; /* 0: tune pending; 1: tune failed; 2: tune success */ for (index_frontend = 0; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) { if (((tune_failed >> index_frontend) & 0x1) == 0) { found = dib8000_autosearch_irq(state->fe[index_frontend]); switch (found) { case 0: /* tune pending */ nbr_pending++; break; case 2: dprintk("autosearch succeeded on the frontend%i", index_frontend); exit_condition = 2; index_frontend_success = index_frontend; break; default: dprintk("unhandled autosearch result"); case 1: dprintk("autosearch failed for the frontend%i", index_frontend); break; } } } /* if all tunes are done and none succeeded, exit: tune failed */ if ((nbr_pending == 0) && (exit_condition == 0)) exit_condition = 1; } while ((exit_condition == 0) && i--); if (exit_condition == 1) { /* tune failed */ dprintk("tune failed"); return 0; } dprintk("tune success on frontend%i", index_frontend_success); dib8000_get_frontend(fe, fep); } for (index_frontend = 0, ret = 0; (ret >= 0) && (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) ret = dib8000_tune(state->fe[index_frontend]); /* set output mode and diversity input */ dib8000_set_output_mode(state->fe[0], state->cfg.output_mode); for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) { dib8000_set_output_mode(state->fe[index_frontend], OUTMODE_DIVERSITY); dib8000_set_diversity_in(state->fe[index_frontend-1], 1); } /* turn off the diversity of the last chip */
dib8000_set_diversity_in(state->fe[index_frontend-1], 0); return ret; } static u16 dib8000_read_lock(struct dvb_frontend *fe) { struct dib8000_state *state = fe->demodulator_priv; return dib8000_read_word(state, 568); } static int dib8000_read_status(struct dvb_frontend *fe, fe_status_t * stat) { struct dib8000_state *state = fe->demodulator_priv; u16 lock_slave = 0, lock = dib8000_read_word(state, 568); u8 index_frontend; for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) lock_slave |= dib8000_read_lock(state->fe[index_frontend]); *stat = 0; if (((lock >> 13) & 1) || ((lock_slave >> 13) & 1)) *stat |= FE_HAS_SIGNAL; if (((lock >> 8) & 1) || ((lock_slave >> 8) & 1)) /* Equal */ *stat |= FE_HAS_CARRIER; if ((((lock >> 1) & 0xf) == 0xf) || (((lock_slave >> 1) & 0xf) == 0xf)) /* TMCC_SYNC */ *stat |= FE_HAS_SYNC; if ((((lock >> 12) & 1) || ((lock_slave >> 12) & 1)) && ((lock >> 5) & 7)) /* FEC MPEG */ *stat |= FE_HAS_LOCK; if (((lock >> 12) & 1) || ((lock_slave >> 12) & 1)) { lock = dib8000_read_word(state, 554); /* Viterbi Layer A */ if (lock & 0x01) *stat |= FE_HAS_VITERBI; lock = dib8000_read_word(state, 555); /* Viterbi Layer B */ if (lock & 0x01) *stat |= FE_HAS_VITERBI; lock = dib8000_read_word(state, 556); /* Viterbi Layer C */ if (lock & 0x01) *stat |= FE_HAS_VITERBI; } return 0; } static int dib8000_read_ber(struct dvb_frontend *fe, u32 * ber) { struct dib8000_state *state = fe->demodulator_priv; *ber = (dib8000_read_word(state, 560) << 16) | dib8000_read_word(state, 561); // 13 segments return 0; } static int dib8000_read_unc_blocks(struct dvb_frontend *fe, u32 * unc) { struct dib8000_state *state = fe->demodulator_priv; *unc = dib8000_read_word(state, 565); // packet error on 13 seg return 0; } static int dib8000_read_signal_strength(struct dvb_frontend *fe, u16 * strength) { struct dib8000_state *state = fe->demodulator_priv; u8 index_frontend; u16 val; *strength = 0; for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) { state->fe[index_frontend]->ops.read_signal_strength(state->fe[index_frontend], &val); if (val > 65535 - *strength) *strength = 65535; else *strength += val; } val = 65535 - dib8000_read_word(state, 390); if (val > 65535 - *strength) *strength = 65535; else *strength += val; return 0; } static u32 dib8000_get_snr(struct dvb_frontend *fe) { struct dib8000_state *state = fe->demodulator_priv; u32 n, s, exp; u16 val; val = dib8000_read_word(state, 542); n = (val >> 6) & 0xff; exp = (val & 0x3f); if ((exp & 0x20) != 0) exp -= 0x40; n <<= exp+16; val = dib8000_read_word(state, 543); s = (val >> 6) & 0xff; exp = (val & 0x3f); if ((exp & 0x20) != 0) exp -= 0x40; s <<= exp+16; if (n > 0) { u32 t = (s/n) << 16; return t + ((s << 16) - n*t) / n; } return 0xffffffff; } static int dib8000_read_snr(struct dvb_frontend *fe, u16 * snr) { struct dib8000_state *state = fe->demodulator_priv; u8 index_frontend; u32 snr_master; snr_master = dib8000_get_snr(fe); for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL); index_frontend++) snr_master += dib8000_get_snr(state->fe[index_frontend]); if (snr_master != 0) { snr_master = 10*intlog10(snr_master>>16); *snr = snr_master / ((1 << 24) / 10); } else *snr = 0; return 0; } int dib8000_set_slave_frontend(struct dvb_frontend *fe, struct dvb_frontend *fe_slave) { struct dib8000_state *state = fe->demodulator_priv; u8 index_frontend = 1; while 
((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL)) index_frontend++; if (index_frontend < MAX_NUMBER_OF_FRONTENDS) { dprintk("set slave fe %p to index %i", fe_slave, index_frontend); state->fe[index_frontend] = fe_slave; return 0; } dprintk("too many slave frontends"); return -ENOMEM; } EXPORT_SYMBOL(dib8000_set_slave_frontend); int dib8000_remove_slave_frontend(struct dvb_frontend *fe) { struct dib8000_state *state = fe->demodulator_priv; u8 index_frontend = 1; while ((index_frontend < MAX_NUMBER_OF_FRONTENDS) && (state->fe[index_frontend] != NULL)) index_frontend++; if (index_frontend != 1) { dprintk("remove slave fe %p (index %i)", state->fe[index_frontend-1], index_frontend-1); state->fe[index_frontend-1] = NULL; return 0; } dprintk("no frontend to be removed"); return -ENODEV; } EXPORT_SYMBOL(dib8000_remove_slave_frontend); struct dvb_frontend *dib8000_get_slave_frontend(struct dvb_frontend *fe, int slave_index) { struct dib8000_state *state = fe->demodulator_priv; if (slave_index >= MAX_NUMBER_OF_FRONTENDS) return NULL; return state->fe[slave_index]; } EXPORT_SYMBOL(dib8000_get_slave_frontend); int dib8000_i2c_enumeration(struct i2c_adapter *host, int no_of_demods, u8 default_addr, u8 first_addr) { int k = 0, ret = 0; u8 new_addr = 0; struct i2c_device client = {.adap = host }; client.i2c_write_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL); if (!client.i2c_write_buffer) { dprintk("%s: not enough memory", __func__); return -ENOMEM; } client.i2c_read_buffer = kzalloc(4 * sizeof(u8), GFP_KERNEL); if (!client.i2c_read_buffer) { dprintk("%s: not enough memory", __func__); ret = -ENOMEM; goto error_memory_read; } client.i2c_buffer_lock = kzalloc(sizeof(struct mutex), GFP_KERNEL); if (!client.i2c_buffer_lock) { dprintk("%s: not enough memory", __func__); ret = -ENOMEM; goto error_memory_lock; } mutex_init(client.i2c_buffer_lock); for (k = no_of_demods - 1; k >= 0; k--) { /* designated i2c address */ new_addr = first_addr + (k << 1); client.addr = new_addr; dib8000_i2c_write16(&client, 1287, 0x0003); /* sram lead in, rdy */ if (dib8000_identify(&client) == 0) { dib8000_i2c_write16(&client, 1287, 0x0003); /* sram lead in, rdy */ client.addr = default_addr; if (dib8000_identify(&client) == 0) { dprintk("#%d: not identified", k); ret = -EINVAL; goto error; } } /* start diversity to pull_down div_str - just for i2c-enumeration */ dib8000_i2c_write16(&client, 1286, (1 << 10) | (4 << 6)); /* set new i2c address and force divstart */ dib8000_i2c_write16(&client, 1285, (new_addr << 2) | 0x2); client.addr = new_addr; dib8000_identify(&client); dprintk("IC %d initialized (to i2c_address 0x%x)", k, new_addr); } for (k = 0; k < no_of_demods; k++) { new_addr = first_addr | (k << 1); client.addr = new_addr; // unforce divstr dib8000_i2c_write16(&client, 1285, new_addr << 2); /* deactivate div - it was just for i2c-enumeration */ dib8000_i2c_write16(&client, 1286, 0); } error: kfree(client.i2c_buffer_lock); error_memory_lock: kfree(client.i2c_read_buffer); error_memory_read: kfree(client.i2c_write_buffer); return ret; } EXPORT_SYMBOL(dib8000_i2c_enumeration); static int dib8000_fe_get_tune_settings(struct dvb_frontend *fe, struct dvb_frontend_tune_settings *tune) { tune->min_delay_ms = 1000; tune->step_size = 0; tune->max_drift = 0; return 0; } static void dib8000_release(struct dvb_frontend *fe) { struct dib8000_state *st = fe->demodulator_priv; u8 index_frontend; for (index_frontend = 1; (index_frontend < MAX_NUMBER_OF_FRONTENDS) && (st->fe[index_frontend] != NULL);
index_frontend++) dvb_frontend_detach(st->fe[index_frontend]); dibx000_exit_i2c_master(&st->i2c_master); kfree(st->fe[0]); kfree(st); } struct i2c_adapter *dib8000_get_i2c_master(struct dvb_frontend *fe, enum dibx000_i2c_interface intf, int gating) { struct dib8000_state *st = fe->demodulator_priv; return dibx000_get_i2c_adapter(&st->i2c_master, intf, gating); } EXPORT_SYMBOL(dib8000_get_i2c_master); int dib8000_pid_filter_ctrl(struct dvb_frontend *fe, u8 onoff) { struct dib8000_state *st = fe->demodulator_priv; u16 val = dib8000_read_word(st, 299) & 0xffef; val |= (onoff & 0x1) << 4; dprintk("pid filter enabled %d", onoff); return dib8000_write_word(st, 299, val); } EXPORT_SYMBOL(dib8000_pid_filter_ctrl); int dib8000_pid_filter(struct dvb_frontend *fe, u8 id, u16 pid, u8 onoff) { struct dib8000_state *st = fe->demodulator_priv; dprintk("Index %x, PID %d, OnOff %d", id, pid, onoff); return dib8000_write_word(st, 305 + id, onoff ? (1 << 13) | pid : 0); } EXPORT_SYMBOL(dib8000_pid_filter); static const struct dvb_frontend_ops dib8000_ops = { .info = { .name = "DiBcom 8000 ISDB-T", .type = FE_OFDM, .frequency_min = 44250000, .frequency_max = 867250000, .frequency_stepsize = 62500, .caps = FE_CAN_INVERSION_AUTO | FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_FEC_AUTO | FE_CAN_QPSK | FE_CAN_QAM_16 | FE_CAN_QAM_64 | FE_CAN_QAM_AUTO | FE_CAN_TRANSMISSION_MODE_AUTO | FE_CAN_GUARD_INTERVAL_AUTO | FE_CAN_RECOVER | FE_CAN_HIERARCHY_AUTO, }, .release = dib8000_release, .init = dib8000_wakeup, .sleep = dib8000_sleep, .set_frontend = dib8000_set_frontend, .get_tune_settings = dib8000_fe_get_tune_settings, .get_frontend = dib8000_get_frontend, .read_status = dib8000_read_status, .read_ber = dib8000_read_ber, .read_signal_strength = dib8000_read_signal_strength, .read_snr = dib8000_read_snr, .read_ucblocks = dib8000_read_unc_blocks, }; struct dvb_frontend *dib8000_attach(struct i2c_adapter *i2c_adap, u8 i2c_addr, struct dib8000_config *cfg) { struct dvb_frontend *fe; struct dib8000_state *state; dprintk("dib8000_attach"); state = kzalloc(sizeof(struct dib8000_state), GFP_KERNEL); if (state == NULL) return NULL; fe = kzalloc(sizeof(struct dvb_frontend), GFP_KERNEL); if (fe == NULL) goto error; memcpy(&state->cfg, cfg, sizeof(struct dib8000_config)); state->i2c.adap = i2c_adap; state->i2c.addr = i2c_addr; state->i2c.i2c_write_buffer = state->i2c_write_buffer; state->i2c.i2c_read_buffer = state->i2c_read_buffer; mutex_init(&state->i2c_buffer_lock); state->i2c.i2c_buffer_lock = &state->i2c_buffer_lock; state->gpio_val = cfg->gpio_val; state->gpio_dir = cfg->gpio_dir; /* Ensure the output mode remains at the previous default if it's * not specifically set by the caller. 
*/ if ((state->cfg.output_mode != OUTMODE_MPEG2_SERIAL) && (state->cfg.output_mode != OUTMODE_MPEG2_PAR_GATED_CLK)) state->cfg.output_mode = OUTMODE_MPEG2_FIFO; state->fe[0] = fe; fe->demodulator_priv = state; memcpy(&state->fe[0]->ops, &dib8000_ops, sizeof(struct dvb_frontend_ops)); state->timf_default = cfg->pll->timf; if (dib8000_identify(&state->i2c) == 0) goto error; dibx000_init_i2c_master(&state->i2c_master, DIB8000, state->i2c.adap, state->i2c.addr); dib8000_reset(fe); dib8000_write_word(state, 285, (dib8000_read_word(state, 285) & ~0x60) | (3 << 5)); /* ber_rs_len = 3 */ return fe; error: kfree(fe); /* kfree(NULL) is safe; also free fe when identify fails */ kfree(state); return NULL; } EXPORT_SYMBOL(dib8000_attach); MODULE_AUTHOR("Olivier Grenie <Olivier.Grenie@dibcom.fr>, " "Patrick Boettcher <pboettcher@dibcom.fr>"); MODULE_DESCRIPTION("Driver for the DiBcom 8000 ISDB-T demodulator"); MODULE_LICENSE("GPL");
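/*
 * Usage sketch (illustrative only, not part of the driver): a bridge/board
 * driver would typically call dib8000_i2c_enumeration() once to move the
 * demods off their default I2C address, then dib8000_attach() per chip,
 * chaining any extra chip as a diversity slave with
 * dib8000_set_slave_frontend(). The I2C addresses, the board_cfg contents
 * and the board_attach() helper below are hypothetical placeholders; only
 * the exported dib8000_* calls come from this file.
 *
 *	static struct dib8000_config board_cfg = {
 *		.output_mode = OUTMODE_MPEG2_SERIAL,
 *		// remaining fields (pll, agc, gpio_*, ...) are board-specific
 *	};
 *
 *	static struct dvb_frontend *board_attach(struct i2c_adapter *i2c)
 *	{
 *		struct dvb_frontend *master, *slave;
 *
 *		// give two chips answering at 0x90 the addresses 0x80/0x82
 *		if (dib8000_i2c_enumeration(i2c, 2, 0x90, 0x80) != 0)
 *			return NULL;
 *
 *		master = dib8000_attach(i2c, 0x80, &board_cfg);
 *		if (master == NULL)
 *			return NULL;
 *
 *		slave = dib8000_attach(i2c, 0x82, &board_cfg);
 *		if (slave != NULL && dib8000_set_slave_frontend(master, slave) != 0)
 *			dvb_frontend_detach(slave);
 *
 *		return master;
 *	}
 */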
gpl-2.0
FennyFatal/SGS4-M919-FennyKernel
net/sunrpc/svc_xprt.c
3125
34682
/* * linux/net/sunrpc/svc_xprt.c * * Author: Tom Tucker <tom@opengridcomputing.com> */ #include <linux/sched.h> #include <linux/errno.h> #include <linux/freezer.h> #include <linux/kthread.h> #include <linux/slab.h> #include <net/sock.h> #include <linux/sunrpc/stats.h> #include <linux/sunrpc/svc_xprt.h> #include <linux/sunrpc/svcsock.h> #include <linux/sunrpc/xprt.h> #include <linux/module.h> #define RPCDBG_FACILITY RPCDBG_SVCXPRT static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt); static int svc_deferred_recv(struct svc_rqst *rqstp); static struct cache_deferred_req *svc_defer(struct cache_req *req); static void svc_age_temp_xprts(unsigned long closure); static void svc_delete_xprt(struct svc_xprt *xprt); /* apparently the "standard" is that clients close * idle connections after 5 minutes, servers after * 6 minutes * http://www.connectathon.org/talks96/nfstcp.pdf */ static int svc_conn_age_period = 6*60; /* List of registered transport classes */ static DEFINE_SPINLOCK(svc_xprt_class_lock); static LIST_HEAD(svc_xprt_class_list); /* SMP locking strategy: * * svc_pool->sp_lock protects most of the fields of that pool. * svc_serv->sv_lock protects sv_tempsocks, sv_permsocks, sv_tmpcnt. * when both need to be taken (rare), svc_serv->sv_lock is first. * BKL protects svc_serv->sv_nrthread. * svc_sock->sk_lock protects the svc_sock->sk_deferred list * and the ->sk_info_authunix cache. * * The XPT_BUSY bit in xprt->xpt_flags prevents a transport being * enqueued more than once. During normal transport processing this bit * is set by svc_xprt_enqueue and cleared by svc_xprt_received. * Providers should not manipulate this bit directly. * * Some flags can be set to certain values at any time * providing that certain rules are followed: * * XPT_CONN, XPT_DATA: * - Can be set or cleared at any time. * - After a set, svc_xprt_enqueue must be called to enqueue * the transport for processing. * - After a clear, the transport must be read/accepted. * If this succeeds, it must be set again. * XPT_CLOSE: * - Can be set at any time. It is never cleared. * XPT_DEAD: * - Can only be set while XPT_BUSY is held which ensures * that no other thread will be using the transport or will * try to set XPT_DEAD.
*/ int svc_reg_xprt_class(struct svc_xprt_class *xcl) { struct svc_xprt_class *cl; int res = -EEXIST; dprintk("svc: Adding svc transport class '%s'\n", xcl->xcl_name); INIT_LIST_HEAD(&xcl->xcl_list); spin_lock(&svc_xprt_class_lock); /* Make sure there isn't already a class with the same name */ list_for_each_entry(cl, &svc_xprt_class_list, xcl_list) { if (strcmp(xcl->xcl_name, cl->xcl_name) == 0) goto out; } list_add_tail(&xcl->xcl_list, &svc_xprt_class_list); res = 0; out: spin_unlock(&svc_xprt_class_lock); return res; } EXPORT_SYMBOL_GPL(svc_reg_xprt_class); void svc_unreg_xprt_class(struct svc_xprt_class *xcl) { dprintk("svc: Removing svc transport class '%s'\n", xcl->xcl_name); spin_lock(&svc_xprt_class_lock); list_del_init(&xcl->xcl_list); spin_unlock(&svc_xprt_class_lock); } EXPORT_SYMBOL_GPL(svc_unreg_xprt_class); /* * Format the transport list for printing */ int svc_print_xprts(char *buf, int maxlen) { struct svc_xprt_class *xcl; char tmpstr[80]; int len = 0; buf[0] = '\0'; spin_lock(&svc_xprt_class_lock); list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) { int slen; sprintf(tmpstr, "%s %d\n", xcl->xcl_name, xcl->xcl_max_payload); slen = strlen(tmpstr); if (len + slen > maxlen) break; len += slen; strcat(buf, tmpstr); } spin_unlock(&svc_xprt_class_lock); return len; } static void svc_xprt_free(struct kref *kref) { struct svc_xprt *xprt = container_of(kref, struct svc_xprt, xpt_ref); struct module *owner = xprt->xpt_class->xcl_owner; if (test_bit(XPT_CACHE_AUTH, &xprt->xpt_flags)) svcauth_unix_info_release(xprt); put_net(xprt->xpt_net); /* See comment on corresponding get in xs_setup_bc_tcp(): */ if (xprt->xpt_bc_xprt) xprt_put(xprt->xpt_bc_xprt); xprt->xpt_ops->xpo_free(xprt); module_put(owner); } void svc_xprt_put(struct svc_xprt *xprt) { kref_put(&xprt->xpt_ref, svc_xprt_free); } EXPORT_SYMBOL_GPL(svc_xprt_put); /* * Called by transport drivers to initialize the transport independent * portion of the transport instance. 
*/ void svc_xprt_init(struct net *net, struct svc_xprt_class *xcl, struct svc_xprt *xprt, struct svc_serv *serv) { memset(xprt, 0, sizeof(*xprt)); xprt->xpt_class = xcl; xprt->xpt_ops = xcl->xcl_ops; kref_init(&xprt->xpt_ref); xprt->xpt_server = serv; INIT_LIST_HEAD(&xprt->xpt_list); INIT_LIST_HEAD(&xprt->xpt_ready); INIT_LIST_HEAD(&xprt->xpt_deferred); INIT_LIST_HEAD(&xprt->xpt_users); mutex_init(&xprt->xpt_mutex); spin_lock_init(&xprt->xpt_lock); set_bit(XPT_BUSY, &xprt->xpt_flags); rpc_init_wait_queue(&xprt->xpt_bc_pending, "xpt_bc_pending"); xprt->xpt_net = get_net(net); } EXPORT_SYMBOL_GPL(svc_xprt_init); static struct svc_xprt *__svc_xpo_create(struct svc_xprt_class *xcl, struct svc_serv *serv, struct net *net, const int family, const unsigned short port, int flags) { struct sockaddr_in sin = { .sin_family = AF_INET, .sin_addr.s_addr = htonl(INADDR_ANY), .sin_port = htons(port), }; #if IS_ENABLED(CONFIG_IPV6) struct sockaddr_in6 sin6 = { .sin6_family = AF_INET6, .sin6_addr = IN6ADDR_ANY_INIT, .sin6_port = htons(port), }; #endif struct sockaddr *sap; size_t len; switch (family) { case PF_INET: sap = (struct sockaddr *)&sin; len = sizeof(sin); break; #if IS_ENABLED(CONFIG_IPV6) case PF_INET6: sap = (struct sockaddr *)&sin6; len = sizeof(sin6); break; #endif default: return ERR_PTR(-EAFNOSUPPORT); } return xcl->xcl_ops->xpo_create(serv, net, sap, len, flags); } int svc_create_xprt(struct svc_serv *serv, const char *xprt_name, struct net *net, const int family, const unsigned short port, int flags) { struct svc_xprt_class *xcl; dprintk("svc: creating transport %s[%d]\n", xprt_name, port); spin_lock(&svc_xprt_class_lock); list_for_each_entry(xcl, &svc_xprt_class_list, xcl_list) { struct svc_xprt *newxprt; unsigned short newport; if (strcmp(xprt_name, xcl->xcl_name)) continue; if (!try_module_get(xcl->xcl_owner)) goto err; spin_unlock(&svc_xprt_class_lock); newxprt = __svc_xpo_create(xcl, serv, net, family, port, flags); if (IS_ERR(newxprt)) { module_put(xcl->xcl_owner); return PTR_ERR(newxprt); } clear_bit(XPT_TEMP, &newxprt->xpt_flags); spin_lock_bh(&serv->sv_lock); list_add(&newxprt->xpt_list, &serv->sv_permsocks); spin_unlock_bh(&serv->sv_lock); newport = svc_xprt_local_port(newxprt); clear_bit(XPT_BUSY, &newxprt->xpt_flags); return newport; } err: spin_unlock(&svc_xprt_class_lock); dprintk("svc: transport %s not found\n", xprt_name); /* This errno is exposed to user space. Provide a reasonable * perror msg for a bad transport. */ return -EPROTONOSUPPORT; } EXPORT_SYMBOL_GPL(svc_create_xprt); /* * Copy the local and remote xprt addresses to the rqstp structure */ void svc_xprt_copy_addrs(struct svc_rqst *rqstp, struct svc_xprt *xprt) { memcpy(&rqstp->rq_addr, &xprt->xpt_remote, xprt->xpt_remotelen); rqstp->rq_addrlen = xprt->xpt_remotelen; /* * Destination address in request is needed for binding the * source address in RPC replies/callbacks later. */ memcpy(&rqstp->rq_daddr, &xprt->xpt_local, xprt->xpt_locallen); rqstp->rq_daddrlen = xprt->xpt_locallen; } EXPORT_SYMBOL_GPL(svc_xprt_copy_addrs); /** * svc_print_addr - Format rq_addr field for printing * @rqstp: svc_rqst struct containing address to print * @buf: target buffer for formatted address * @len: length of target buffer * */ char *svc_print_addr(struct svc_rqst *rqstp, char *buf, size_t len) { return __svc_print_addr(svc_addr(rqstp), buf, len); } EXPORT_SYMBOL_GPL(svc_print_addr); /* * Queue up an idle server thread. Must have pool->sp_lock held. 
* Note: this is really a stack rather than a queue, so that we only * use as many different threads as we need, and the rest don't pollute * the cache. */ static void svc_thread_enqueue(struct svc_pool *pool, struct svc_rqst *rqstp) { list_add(&rqstp->rq_list, &pool->sp_threads); } /* * Dequeue an nfsd thread. Must have pool->sp_lock held. */ static void svc_thread_dequeue(struct svc_pool *pool, struct svc_rqst *rqstp) { list_del(&rqstp->rq_list); } static bool svc_xprt_has_something_to_do(struct svc_xprt *xprt) { if (xprt->xpt_flags & ((1<<XPT_CONN)|(1<<XPT_CLOSE))) return true; if (xprt->xpt_flags & ((1<<XPT_DATA)|(1<<XPT_DEFERRED))) return xprt->xpt_ops->xpo_has_wspace(xprt); return false; } /* * Queue up a transport with data pending. If there are idle nfsd * processes, wake 'em up. * */ void svc_xprt_enqueue(struct svc_xprt *xprt) { struct svc_serv *serv = xprt->xpt_server; struct svc_pool *pool; struct svc_rqst *rqstp; int cpu; if (!svc_xprt_has_something_to_do(xprt)) return; cpu = get_cpu(); pool = svc_pool_for_cpu(xprt->xpt_server, cpu); put_cpu(); spin_lock_bh(&pool->sp_lock); if (!list_empty(&pool->sp_threads) && !list_empty(&pool->sp_sockets)) printk(KERN_ERR "svc_xprt_enqueue: " "threads and transports both waiting??\n"); pool->sp_stats.packets++; /* Mark transport as busy. It will remain in this state until * the provider calls svc_xprt_received. We update XPT_BUSY * atomically because it also guards against trying to enqueue * the transport twice. */ if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) { /* Don't enqueue transport while already enqueued */ dprintk("svc: transport %p busy, not enqueued\n", xprt); goto out_unlock; } if (!list_empty(&pool->sp_threads)) { rqstp = list_entry(pool->sp_threads.next, struct svc_rqst, rq_list); dprintk("svc: transport %p served by daemon %p\n", xprt, rqstp); svc_thread_dequeue(pool, rqstp); if (rqstp->rq_xprt) printk(KERN_ERR "svc_xprt_enqueue: server %p, rq_xprt=%p!\n", rqstp, rqstp->rq_xprt); rqstp->rq_xprt = xprt; svc_xprt_get(xprt); rqstp->rq_reserved = serv->sv_max_mesg; atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); pool->sp_stats.threads_woken++; wake_up(&rqstp->rq_wait); } else { dprintk("svc: transport %p put into queue\n", xprt); list_add_tail(&xprt->xpt_ready, &pool->sp_sockets); pool->sp_stats.sockets_queued++; } out_unlock: spin_unlock_bh(&pool->sp_lock); } EXPORT_SYMBOL_GPL(svc_xprt_enqueue); /* * Dequeue the first transport. Must be called with the pool->sp_lock held. */ static struct svc_xprt *svc_xprt_dequeue(struct svc_pool *pool) { struct svc_xprt *xprt; if (list_empty(&pool->sp_sockets)) return NULL; xprt = list_entry(pool->sp_sockets.next, struct svc_xprt, xpt_ready); list_del_init(&xprt->xpt_ready); dprintk("svc: transport %p dequeued, inuse=%d\n", xprt, atomic_read(&xprt->xpt_ref.refcount)); return xprt; } /* * svc_xprt_received conditionally queues the transport for processing * by another thread. The caller must hold the XPT_BUSY bit and must * not thereafter touch transport data. * * Note: XPT_DATA only gets cleared when a read-attempt finds no (or * insufficient) data. */ void svc_xprt_received(struct svc_xprt *xprt) { BUG_ON(!test_bit(XPT_BUSY, &xprt->xpt_flags)); /* As soon as we clear busy, the xprt could be closed and * 'put', so we need a reference to call svc_xprt_enqueue with: */ svc_xprt_get(xprt); clear_bit(XPT_BUSY, &xprt->xpt_flags); svc_xprt_enqueue(xprt); svc_xprt_put(xprt); } EXPORT_SYMBOL_GPL(svc_xprt_received); /** * svc_reserve - change the space reserved for the reply to a request. 
* @rqstp: The request in question * @space: new max space to reserve * * Each request reserves some space on the output queue of the transport * to make sure the reply fits. This function reduces that reserved * space to be the amount of space used already, plus @space. * */ void svc_reserve(struct svc_rqst *rqstp, int space) { space += rqstp->rq_res.head[0].iov_len; if (space < rqstp->rq_reserved) { struct svc_xprt *xprt = rqstp->rq_xprt; atomic_sub((rqstp->rq_reserved - space), &xprt->xpt_reserved); rqstp->rq_reserved = space; svc_xprt_enqueue(xprt); } } EXPORT_SYMBOL_GPL(svc_reserve); static void svc_xprt_release(struct svc_rqst *rqstp) { struct svc_xprt *xprt = rqstp->rq_xprt; rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp); kfree(rqstp->rq_deferred); rqstp->rq_deferred = NULL; svc_free_res_pages(rqstp); rqstp->rq_res.page_len = 0; rqstp->rq_res.page_base = 0; /* Reset response buffer and release * the reservation. * But first, check that enough space was reserved * for the reply, otherwise we have a bug! */ if ((rqstp->rq_res.len) > rqstp->rq_reserved) printk(KERN_ERR "RPC request reserved %d but used %d\n", rqstp->rq_reserved, rqstp->rq_res.len); rqstp->rq_res.head[0].iov_len = 0; svc_reserve(rqstp, 0); rqstp->rq_xprt = NULL; svc_xprt_put(xprt); } /* * External function to wake up a server waiting for data * This really only makes sense for services like lockd * which have exactly one thread anyway. */ void svc_wake_up(struct svc_serv *serv) { struct svc_rqst *rqstp; unsigned int i; struct svc_pool *pool; for (i = 0; i < serv->sv_nrpools; i++) { pool = &serv->sv_pools[i]; spin_lock_bh(&pool->sp_lock); if (!list_empty(&pool->sp_threads)) { rqstp = list_entry(pool->sp_threads.next, struct svc_rqst, rq_list); dprintk("svc: daemon %p woken up.\n", rqstp); /* svc_thread_dequeue(pool, rqstp); rqstp->rq_xprt = NULL; */ wake_up(&rqstp->rq_wait); } spin_unlock_bh(&pool->sp_lock); } } EXPORT_SYMBOL_GPL(svc_wake_up); int svc_port_is_privileged(struct sockaddr *sin) { switch (sin->sa_family) { case AF_INET: return ntohs(((struct sockaddr_in *)sin)->sin_port) < PROT_SOCK; case AF_INET6: return ntohs(((struct sockaddr_in6 *)sin)->sin6_port) < PROT_SOCK; default: return 0; } } /* * Make sure that we don't have too many active connections. If we do, * something must be dropped. It's not clear what will happen if we allow * "too many" connections, but when dealing with network-facing software, * we have to code defensively. Here we do that by imposing hard limits. * * There's no point in trying to do random drop here for DoS * prevention. The NFS client does 1 reconnect in 15 seconds. An * attacker can easily beat that. * * The only somewhat efficient mechanism would be to drop old * connections from the same IP first. But right now we don't even * record the client IP in svc_sock. * * Single-threaded services that expect a lot of clients will probably * need to set sv_maxconn to override the default value, which is based * on the number of threads. */ static void svc_check_conn_limits(struct svc_serv *serv) { unsigned int limit = serv->sv_maxconn ? serv->sv_maxconn : (serv->sv_nrthreads+3) * 20; if (serv->sv_tmpcnt > limit) { struct svc_xprt *xprt = NULL; spin_lock_bh(&serv->sv_lock); if (!list_empty(&serv->sv_tempsocks)) { if (net_ratelimit()) { /* Try to help the admin */ printk(KERN_NOTICE "%s: too many open " "connections, consider increasing %s\n", serv->sv_name, serv->sv_maxconn ? "the max number of connections." : "the number of threads."); } /* * Always select the oldest connection.
It's not fair, * but so is life */ xprt = list_entry(serv->sv_tempsocks.prev, struct svc_xprt, xpt_list); set_bit(XPT_CLOSE, &xprt->xpt_flags); svc_xprt_get(xprt); } spin_unlock_bh(&serv->sv_lock); if (xprt) { svc_xprt_enqueue(xprt); svc_xprt_put(xprt); } } } /* * Receive the next request on any transport. This code is carefully * organised not to touch any cachelines in the shared svc_serv * structure, only cachelines in the local svc_pool. */ int svc_recv(struct svc_rqst *rqstp, long timeout) { struct svc_xprt *xprt = NULL; struct svc_serv *serv = rqstp->rq_server; struct svc_pool *pool = rqstp->rq_pool; int len, i; int pages; struct xdr_buf *arg; DECLARE_WAITQUEUE(wait, current); long time_left; dprintk("svc: server %p waiting for data (to = %ld)\n", rqstp, timeout); if (rqstp->rq_xprt) printk(KERN_ERR "svc_recv: service %p, transport not NULL!\n", rqstp); if (waitqueue_active(&rqstp->rq_wait)) printk(KERN_ERR "svc_recv: service %p, wait queue active!\n", rqstp); /* now allocate needed pages. If we get a failure, sleep briefly */ pages = (serv->sv_max_mesg + PAGE_SIZE) / PAGE_SIZE; for (i = 0; i < pages ; i++) while (rqstp->rq_pages[i] == NULL) { struct page *p = alloc_page(GFP_KERNEL); if (!p) { set_current_state(TASK_INTERRUPTIBLE); if (signalled() || kthread_should_stop()) { set_current_state(TASK_RUNNING); return -EINTR; } schedule_timeout(msecs_to_jiffies(500)); } rqstp->rq_pages[i] = p; } rqstp->rq_pages[i++] = NULL; /* this might be seen in nfs_read_actor */ BUG_ON(pages >= RPCSVC_MAXPAGES); /* Make arg->head point to first page and arg->pages point to rest */ arg = &rqstp->rq_arg; arg->head[0].iov_base = page_address(rqstp->rq_pages[0]); arg->head[0].iov_len = PAGE_SIZE; arg->pages = rqstp->rq_pages + 1; arg->page_base = 0; /* save at least one page for response */ arg->page_len = (pages-2)*PAGE_SIZE; arg->len = (pages-1)*PAGE_SIZE; arg->tail[0].iov_len = 0; try_to_freeze(); cond_resched(); if (signalled() || kthread_should_stop()) return -EINTR; /* Normally we will wait up to 5 seconds for any required * cache information to be provided. */ rqstp->rq_chandle.thread_wait = 5*HZ; spin_lock_bh(&pool->sp_lock); xprt = svc_xprt_dequeue(pool); if (xprt) { rqstp->rq_xprt = xprt; svc_xprt_get(xprt); rqstp->rq_reserved = serv->sv_max_mesg; atomic_add(rqstp->rq_reserved, &xprt->xpt_reserved); /* As there is a shortage of threads and this request * had to be queued, don't allow the thread to wait so * long for cache updates. */ rqstp->rq_chandle.thread_wait = 1*HZ; } else { /* No data pending. Go to sleep */ svc_thread_enqueue(pool, rqstp); /* * We have to be able to interrupt this wait * to bring down the daemons ... */ set_current_state(TASK_INTERRUPTIBLE); /* * checking kthread_should_stop() here allows us to avoid * locking and signalling when stopping kthreads that call * svc_recv. If the thread has already been woken up, then * we can exit here without sleeping. 
If not, then it * will be woken up quickly during the schedule_timeout. */ if (kthread_should_stop()) { set_current_state(TASK_RUNNING); spin_unlock_bh(&pool->sp_lock); return -EINTR; } add_wait_queue(&rqstp->rq_wait, &wait); spin_unlock_bh(&pool->sp_lock); time_left = schedule_timeout(timeout); try_to_freeze(); spin_lock_bh(&pool->sp_lock); remove_wait_queue(&rqstp->rq_wait, &wait); if (!time_left) pool->sp_stats.threads_timedout++; xprt = rqstp->rq_xprt; if (!xprt) { svc_thread_dequeue(pool, rqstp); spin_unlock_bh(&pool->sp_lock); dprintk("svc: server %p, no data yet\n", rqstp); if (signalled() || kthread_should_stop()) return -EINTR; else return -EAGAIN; } } spin_unlock_bh(&pool->sp_lock); len = 0; if (test_bit(XPT_CLOSE, &xprt->xpt_flags)) { dprintk("svc_recv: found XPT_CLOSE\n"); svc_delete_xprt(xprt); /* Leave XPT_BUSY set on the dead xprt: */ goto out; } if (test_bit(XPT_LISTENER, &xprt->xpt_flags)) { struct svc_xprt *newxpt; newxpt = xprt->xpt_ops->xpo_accept(xprt); if (newxpt) { /* * We know this module_get will succeed because the * listener holds a reference too */ __module_get(newxpt->xpt_class->xcl_owner); svc_check_conn_limits(xprt->xpt_server); spin_lock_bh(&serv->sv_lock); set_bit(XPT_TEMP, &newxpt->xpt_flags); list_add(&newxpt->xpt_list, &serv->sv_tempsocks); serv->sv_tmpcnt++; if (serv->sv_temptimer.function == NULL) { /* setup timer to age temp transports */ setup_timer(&serv->sv_temptimer, svc_age_temp_xprts, (unsigned long)serv); mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ); } spin_unlock_bh(&serv->sv_lock); svc_xprt_received(newxpt); } } else if (xprt->xpt_ops->xpo_has_wspace(xprt)) { dprintk("svc: server %p, pool %u, transport %p, inuse=%d\n", rqstp, pool->sp_id, xprt, atomic_read(&xprt->xpt_ref.refcount)); rqstp->rq_deferred = svc_deferred_dequeue(xprt); if (rqstp->rq_deferred) len = svc_deferred_recv(rqstp); else len = xprt->xpt_ops->xpo_recvfrom(rqstp); dprintk("svc: got len=%d\n", len); } svc_xprt_received(xprt); /* No data, incomplete (TCP) read, or accept() */ if (len == 0 || len == -EAGAIN) goto out; clear_bit(XPT_OLD, &xprt->xpt_flags); rqstp->rq_secure = svc_port_is_privileged(svc_addr(rqstp)); rqstp->rq_chandle.defer = svc_defer; if (serv->sv_stats) serv->sv_stats->netcnt++; return len; out: rqstp->rq_res.len = 0; svc_xprt_release(rqstp); return -EAGAIN; } EXPORT_SYMBOL_GPL(svc_recv); /* * Drop request */ void svc_drop(struct svc_rqst *rqstp) { dprintk("svc: xprt %p dropped request\n", rqstp->rq_xprt); svc_xprt_release(rqstp); } EXPORT_SYMBOL_GPL(svc_drop); /* * Return reply to client. */ int svc_send(struct svc_rqst *rqstp) { struct svc_xprt *xprt; int len; struct xdr_buf *xb; xprt = rqstp->rq_xprt; if (!xprt) return -EFAULT; /* release the receive skb before sending the reply */ rqstp->rq_xprt->xpt_ops->xpo_release_rqst(rqstp); /* calculate over-all length */ xb = &rqstp->rq_res; xb->len = xb->head[0].iov_len + xb->page_len + xb->tail[0].iov_len; /* Grab mutex to serialize outgoing data. */ mutex_lock(&xprt->xpt_mutex); if (test_bit(XPT_DEAD, &xprt->xpt_flags)) len = -ENOTCONN; else len = xprt->xpt_ops->xpo_sendto(rqstp); mutex_unlock(&xprt->xpt_mutex); rpc_wake_up(&xprt->xpt_bc_pending); svc_xprt_release(rqstp); if (len == -ECONNREFUSED || len == -ENOTCONN || len == -EAGAIN) return 0; return len; } /* * Timer function to close old temporary transports, using * a mark-and-sweep algorithm.
*/ static void svc_age_temp_xprts(unsigned long closure) { struct svc_serv *serv = (struct svc_serv *)closure; struct svc_xprt *xprt; struct list_head *le, *next; LIST_HEAD(to_be_aged); dprintk("svc_age_temp_xprts\n"); if (!spin_trylock_bh(&serv->sv_lock)) { /* busy, try again 1 sec later */ dprintk("svc_age_temp_xprts: busy\n"); mod_timer(&serv->sv_temptimer, jiffies + HZ); return; } list_for_each_safe(le, next, &serv->sv_tempsocks) { xprt = list_entry(le, struct svc_xprt, xpt_list); /* First time through, just mark it OLD. Second time * through, close it. */ if (!test_and_set_bit(XPT_OLD, &xprt->xpt_flags)) continue; if (atomic_read(&xprt->xpt_ref.refcount) > 1 || test_bit(XPT_BUSY, &xprt->xpt_flags)) continue; svc_xprt_get(xprt); list_move(le, &to_be_aged); set_bit(XPT_CLOSE, &xprt->xpt_flags); set_bit(XPT_DETACHED, &xprt->xpt_flags); } spin_unlock_bh(&serv->sv_lock); while (!list_empty(&to_be_aged)) { le = to_be_aged.next; /* fiddling the xpt_list node is safe 'cos we're XPT_DETACHED */ list_del_init(le); xprt = list_entry(le, struct svc_xprt, xpt_list); dprintk("queuing xprt %p for closing\n", xprt); /* a thread will dequeue and close it soon */ svc_xprt_enqueue(xprt); svc_xprt_put(xprt); } mod_timer(&serv->sv_temptimer, jiffies + svc_conn_age_period * HZ); } static void call_xpt_users(struct svc_xprt *xprt) { struct svc_xpt_user *u; spin_lock(&xprt->xpt_lock); while (!list_empty(&xprt->xpt_users)) { u = list_first_entry(&xprt->xpt_users, struct svc_xpt_user, list); list_del(&u->list); u->callback(u); } spin_unlock(&xprt->xpt_lock); } /* * Remove a dead transport */ static void svc_delete_xprt(struct svc_xprt *xprt) { struct svc_serv *serv = xprt->xpt_server; struct svc_deferred_req *dr; /* Only do this once */ if (test_and_set_bit(XPT_DEAD, &xprt->xpt_flags)) BUG(); dprintk("svc: svc_delete_xprt(%p)\n", xprt); xprt->xpt_ops->xpo_detach(xprt); spin_lock_bh(&serv->sv_lock); if (!test_and_set_bit(XPT_DETACHED, &xprt->xpt_flags)) list_del_init(&xprt->xpt_list); BUG_ON(!list_empty(&xprt->xpt_ready)); if (test_bit(XPT_TEMP, &xprt->xpt_flags)) serv->sv_tmpcnt--; spin_unlock_bh(&serv->sv_lock); while ((dr = svc_deferred_dequeue(xprt)) != NULL) kfree(dr); call_xpt_users(xprt); svc_xprt_put(xprt); } void svc_close_xprt(struct svc_xprt *xprt) { set_bit(XPT_CLOSE, &xprt->xpt_flags); if (test_and_set_bit(XPT_BUSY, &xprt->xpt_flags)) /* someone else will have to effect the close */ return; /* * We expect svc_close_xprt() to work even when no threads are * running (e.g., while configuring the server before starting * any threads), so if the transport isn't busy, we delete * it ourself: */ svc_delete_xprt(xprt); } EXPORT_SYMBOL_GPL(svc_close_xprt); static void svc_close_list(struct list_head *xprt_list, struct net *net) { struct svc_xprt *xprt; list_for_each_entry(xprt, xprt_list, xpt_list) { if (xprt->xpt_net != net) continue; set_bit(XPT_CLOSE, &xprt->xpt_flags); set_bit(XPT_BUSY, &xprt->xpt_flags); } } static void svc_clear_pools(struct svc_serv *serv, struct net *net) { struct svc_pool *pool; struct svc_xprt *xprt; struct svc_xprt *tmp; int i; for (i = 0; i < serv->sv_nrpools; i++) { pool = &serv->sv_pools[i]; spin_lock_bh(&pool->sp_lock); list_for_each_entry_safe(xprt, tmp, &pool->sp_sockets, xpt_ready) { if (xprt->xpt_net != net) continue; list_del_init(&xprt->xpt_ready); } spin_unlock_bh(&pool->sp_lock); } } static void svc_clear_list(struct list_head *xprt_list, struct net *net) { struct svc_xprt *xprt; struct svc_xprt *tmp; list_for_each_entry_safe(xprt, tmp, xprt_list, xpt_list) { if 
(xprt->xpt_net != net) continue; svc_delete_xprt(xprt); } list_for_each_entry(xprt, xprt_list, xpt_list) BUG_ON(xprt->xpt_net == net); } void svc_close_net(struct svc_serv *serv, struct net *net) { svc_close_list(&serv->sv_tempsocks, net); svc_close_list(&serv->sv_permsocks, net); svc_clear_pools(serv, net); /* * At this point the sp_sockets lists will stay empty, since * svc_xprt_enqueue will not add new entries without taking the * sp_lock and checking XPT_BUSY. */ svc_clear_list(&serv->sv_tempsocks, net); svc_clear_list(&serv->sv_permsocks, net); } /* * Handle defer and revisit of requests */ static void svc_revisit(struct cache_deferred_req *dreq, int too_many) { struct svc_deferred_req *dr = container_of(dreq, struct svc_deferred_req, handle); struct svc_xprt *xprt = dr->xprt; spin_lock(&xprt->xpt_lock); set_bit(XPT_DEFERRED, &xprt->xpt_flags); if (too_many || test_bit(XPT_DEAD, &xprt->xpt_flags)) { spin_unlock(&xprt->xpt_lock); dprintk("revisit canceled\n"); svc_xprt_put(xprt); kfree(dr); return; } dprintk("revisit queued\n"); dr->xprt = NULL; list_add(&dr->handle.recent, &xprt->xpt_deferred); spin_unlock(&xprt->xpt_lock); svc_xprt_enqueue(xprt); svc_xprt_put(xprt); } /* * Save the request off for later processing. The request buffer looks * like this: * * <xprt-header><rpc-header><rpc-pagelist><rpc-tail> * * This code can only handle requests that consist of an xprt-header * and rpc-header. */ static struct cache_deferred_req *svc_defer(struct cache_req *req) { struct svc_rqst *rqstp = container_of(req, struct svc_rqst, rq_chandle); struct svc_deferred_req *dr; if (rqstp->rq_arg.page_len || !rqstp->rq_usedeferral) return NULL; /* if more than a page, give up FIXME */ if (rqstp->rq_deferred) { dr = rqstp->rq_deferred; rqstp->rq_deferred = NULL; } else { size_t skip; size_t size; /* FIXME maybe discard if size too large */ size = sizeof(struct svc_deferred_req) + rqstp->rq_arg.len; dr = kmalloc(size, GFP_KERNEL); if (dr == NULL) return NULL; dr->handle.owner = rqstp->rq_server; dr->prot = rqstp->rq_prot; memcpy(&dr->addr, &rqstp->rq_addr, rqstp->rq_addrlen); dr->addrlen = rqstp->rq_addrlen; dr->daddr = rqstp->rq_daddr; dr->argslen = rqstp->rq_arg.len >> 2; dr->xprt_hlen = rqstp->rq_xprt_hlen; /* back up head to the start of the buffer and copy */ skip = rqstp->rq_arg.len - rqstp->rq_arg.head[0].iov_len; memcpy(dr->args, rqstp->rq_arg.head[0].iov_base - skip, dr->argslen << 2); } svc_xprt_get(rqstp->rq_xprt); dr->xprt = rqstp->rq_xprt; rqstp->rq_dropme = true; dr->handle.revisit = svc_revisit; return &dr->handle; } /* * recv data from a deferred request into an active one */ static int svc_deferred_recv(struct svc_rqst *rqstp) { struct svc_deferred_req *dr = rqstp->rq_deferred; /* setup iov_base past transport header */ rqstp->rq_arg.head[0].iov_base = dr->args + (dr->xprt_hlen>>2); /* The iov_len does not include the transport header bytes */ rqstp->rq_arg.head[0].iov_len = (dr->argslen<<2) - dr->xprt_hlen; rqstp->rq_arg.page_len = 0; /* The rq_arg.len includes the transport header bytes */ rqstp->rq_arg.len = dr->argslen<<2; rqstp->rq_prot = dr->prot; memcpy(&rqstp->rq_addr, &dr->addr, dr->addrlen); rqstp->rq_addrlen = dr->addrlen; /* Save off transport header len in case we get deferred again */ rqstp->rq_xprt_hlen = dr->xprt_hlen; rqstp->rq_daddr = dr->daddr; rqstp->rq_respages = rqstp->rq_pages; return (dr->argslen<<2) - dr->xprt_hlen; } static struct svc_deferred_req *svc_deferred_dequeue(struct svc_xprt *xprt) { struct svc_deferred_req *dr = NULL; if (!test_bit(XPT_DEFERRED,
&xprt->xpt_flags)) return NULL; spin_lock(&xprt->xpt_lock); if (!list_empty(&xprt->xpt_deferred)) { dr = list_entry(xprt->xpt_deferred.next, struct svc_deferred_req, handle.recent); list_del_init(&dr->handle.recent); } else clear_bit(XPT_DEFERRED, &xprt->xpt_flags); spin_unlock(&xprt->xpt_lock); return dr; } /** * svc_find_xprt - find an RPC transport instance * @serv: pointer to svc_serv to search * @xcl_name: C string containing transport's class name * @net: owner net pointer * @af: Address family of transport's local address * @port: transport's IP port number * * Return the transport instance pointer for the endpoint accepting * connections/peer traffic from the specified transport class, * address family and port. * * Specifying 0 for the address family or port is effectively a * wild-card, and will result in matching the first transport in the * service's list that has a matching class name. */ struct svc_xprt *svc_find_xprt(struct svc_serv *serv, const char *xcl_name, struct net *net, const sa_family_t af, const unsigned short port) { struct svc_xprt *xprt; struct svc_xprt *found = NULL; /* Sanity check the args */ if (serv == NULL || xcl_name == NULL) return found; spin_lock_bh(&serv->sv_lock); list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) { if (xprt->xpt_net != net) continue; if (strcmp(xprt->xpt_class->xcl_name, xcl_name)) continue; if (af != AF_UNSPEC && af != xprt->xpt_local.ss_family) continue; if (port != 0 && port != svc_xprt_local_port(xprt)) continue; found = xprt; svc_xprt_get(xprt); break; } spin_unlock_bh(&serv->sv_lock); return found; } EXPORT_SYMBOL_GPL(svc_find_xprt); static int svc_one_xprt_name(const struct svc_xprt *xprt, char *pos, int remaining) { int len; len = snprintf(pos, remaining, "%s %u\n", xprt->xpt_class->xcl_name, svc_xprt_local_port(xprt)); if (len >= remaining) return -ENAMETOOLONG; return len; } /** * svc_xprt_names - format a buffer with a list of transport names * @serv: pointer to an RPC service * @buf: pointer to a buffer to be filled in * @buflen: length of buffer to be filled in * * Fills in @buf with a string containing a list of transport names, * each name terminated with '\n'. * * Returns positive length of the filled-in string on success; otherwise * a negative errno value is returned if an error occurs. */ int svc_xprt_names(struct svc_serv *serv, char *buf, const int buflen) { struct svc_xprt *xprt; int len, totlen; char *pos; /* Sanity check args */ if (!serv) return 0; spin_lock_bh(&serv->sv_lock); pos = buf; totlen = 0; list_for_each_entry(xprt, &serv->sv_permsocks, xpt_list) { len = svc_one_xprt_name(xprt, pos, buflen - totlen); if (len < 0) { *buf = '\0'; totlen = len; } if (len <= 0) break; pos += len; totlen += len; } spin_unlock_bh(&serv->sv_lock); return totlen; } EXPORT_SYMBOL_GPL(svc_xprt_names); /*----------------------------------------------------------------------------*/ static void *svc_pool_stats_start(struct seq_file *m, loff_t *pos) { unsigned int pidx = (unsigned int)*pos; struct svc_serv *serv = m->private; dprintk("svc_pool_stats_start, *pidx=%u\n", pidx); if (!pidx) return SEQ_START_TOKEN; return (pidx > serv->sv_nrpools ? 
NULL : &serv->sv_pools[pidx-1]); } static void *svc_pool_stats_next(struct seq_file *m, void *p, loff_t *pos) { struct svc_pool *pool = p; struct svc_serv *serv = m->private; dprintk("svc_pool_stats_next, *pos=%llu\n", *pos); if (p == SEQ_START_TOKEN) { pool = &serv->sv_pools[0]; } else { unsigned int pidx = (pool - &serv->sv_pools[0]); if (pidx < serv->sv_nrpools-1) pool = &serv->sv_pools[pidx+1]; else pool = NULL; } ++*pos; return pool; } static void svc_pool_stats_stop(struct seq_file *m, void *p) { } static int svc_pool_stats_show(struct seq_file *m, void *p) { struct svc_pool *pool = p; if (p == SEQ_START_TOKEN) { seq_puts(m, "# pool packets-arrived sockets-enqueued threads-woken threads-timedout\n"); return 0; } seq_printf(m, "%u %lu %lu %lu %lu\n", pool->sp_id, pool->sp_stats.packets, pool->sp_stats.sockets_queued, pool->sp_stats.threads_woken, pool->sp_stats.threads_timedout); return 0; } static const struct seq_operations svc_pool_stats_seq_ops = { .start = svc_pool_stats_start, .next = svc_pool_stats_next, .stop = svc_pool_stats_stop, .show = svc_pool_stats_show, }; int svc_pool_stats_open(struct svc_serv *serv, struct file *file) { int err; err = seq_open(file, &svc_pool_stats_seq_ops); if (!err) ((struct seq_file *) file->private_data)->private = serv; return err; } EXPORT_SYMBOL(svc_pool_stats_open); /*----------------------------------------------------------------------------*/
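The svc_age_temp_xprts() timer above uses a two-pass mark-and-sweep: a transport is marked XPT_OLD on the first sweep and closed on a later sweep only if it is still marked (no activity cleared the flag) and unreferenced. A minimal stand-alone C sketch of that idiom, with hypothetical types and names and none of the kernel's locking or reference counting:

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical stand-in for a temporary transport. */
struct conn {
	const char *name;
	int refcount;	/* > 1 means a request is still in flight */
	bool old;	/* analogue of XPT_OLD */
	bool closed;	/* analogue of having queued XPT_CLOSE */
};

/*
 * One sweep of the aging timer: mark an entry old on its first visit,
 * close it on a later visit only if nothing cleared the mark in
 * between and it is otherwise idle.
 */
static void age_sweep(struct conn *conns, int n)
{
	for (int i = 0; i < n; i++) {
		struct conn *c = &conns[i];

		if (c->closed)
			continue;
		if (!c->old) {		/* first time through: mark */
			c->old = true;
			continue;
		}
		if (c->refcount > 1)	/* busy: leave it alone */
			continue;
		c->closed = true;	/* second time through: close */
		printf("closing idle connection %s\n", c->name);
	}
}

int main(void)
{
	struct conn conns[] = {
		{ "idle", 1, false, false },
		{ "busy", 2, false, false },
	};

	age_sweep(conns, 2);	/* marks both entries old */
	age_sweep(conns, 2);	/* closes "idle"; "busy" survives */
	return 0;
}

In the real code, request handling clears XPT_OLD, so only transports that were idle for a full timer period ever reach the close path.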
gpl-2.0
dasago13/android_kernel_lenovo_s650
drivers/net/bonding/bond_sysfs.c
3381
45435
/* * Copyright(c) 2004-2005 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * * The full GNU General Public License is included in this distribution in the * file called LICENSE. * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/device.h> #include <linux/sched.h> #include <linux/fs.h> #include <linux/types.h> #include <linux/string.h> #include <linux/netdevice.h> #include <linux/inetdevice.h> #include <linux/in.h> #include <linux/sysfs.h> #include <linux/ctype.h> #include <linux/inet.h> #include <linux/rtnetlink.h> #include <linux/etherdevice.h> #include <net/net_namespace.h> #include <net/netns/generic.h> #include <linux/nsproxy.h> #include "bonding.h" #define to_dev(obj) container_of(obj, struct device, kobj) #define to_bond(cd) ((struct bonding *)(netdev_priv(to_net_dev(cd)))) /* * "show" function for the bond_masters attribute. * The class parameter is ignored. */ static ssize_t bonding_show_bonds(struct class *cls, struct class_attribute *attr, char *buf) { struct bond_net *bn = container_of(attr, struct bond_net, class_attr_bonding_masters); int res = 0; struct bonding *bond; rtnl_lock(); list_for_each_entry(bond, &bn->dev_list, bond_list) { if (res > (PAGE_SIZE - IFNAMSIZ)) { /* not enough space for another interface name */ if ((PAGE_SIZE - res) > 10) res = PAGE_SIZE - 10; res += sprintf(buf + res, "++more++ "); break; } res += sprintf(buf + res, "%s ", bond->dev->name); } if (res) buf[res-1] = '\n'; /* eat the leftover space */ rtnl_unlock(); return res; } static struct net_device *bond_get_by_name(struct bond_net *bn, const char *ifname) { struct bonding *bond; list_for_each_entry(bond, &bn->dev_list, bond_list) { if (strncmp(bond->dev->name, ifname, IFNAMSIZ) == 0) return bond->dev; } return NULL; } /* * "store" function for the bond_masters attribute. This is what * creates and deletes entire bonds. * * The class parameter is ignored. 
* */ static ssize_t bonding_store_bonds(struct class *cls, struct class_attribute *attr, const char *buffer, size_t count) { struct bond_net *bn = container_of(attr, struct bond_net, class_attr_bonding_masters); char command[IFNAMSIZ + 1] = {0, }; char *ifname; int rv, res = count; sscanf(buffer, "%16s", command); /* IFNAMSIZ*/ ifname = command + 1; if ((strlen(command) <= 1) || !dev_valid_name(ifname)) goto err_no_cmd; if (command[0] == '+') { pr_info("%s is being created...\n", ifname); rv = bond_create(bn->net, ifname); if (rv) { if (rv == -EEXIST) pr_info("%s already exists.\n", ifname); else pr_info("%s creation failed.\n", ifname); res = rv; } } else if (command[0] == '-') { struct net_device *bond_dev; rtnl_lock(); bond_dev = bond_get_by_name(bn, ifname); if (bond_dev) { pr_info("%s is being deleted...\n", ifname); unregister_netdevice(bond_dev); } else { pr_err("unable to delete non-existent %s\n", ifname); res = -ENODEV; } rtnl_unlock(); } else goto err_no_cmd; /* Always return either count or an error. If you return 0, you'll * get called forever, which is bad. */ return res; err_no_cmd: pr_err("no command found in bonding_masters. Use +ifname or -ifname.\n"); return -EPERM; } static const void *bonding_namespace(struct class *cls, const struct class_attribute *attr) { const struct bond_net *bn = container_of(attr, struct bond_net, class_attr_bonding_masters); return bn->net; } /* class attribute for bond_masters file. This ends up in /sys/class/net */ static const struct class_attribute class_attr_bonding_masters = { .attr = { .name = "bonding_masters", .mode = S_IWUSR | S_IRUGO, }, .show = bonding_show_bonds, .store = bonding_store_bonds, .namespace = bonding_namespace, }; int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave) { char linkname[IFNAMSIZ+7]; int ret = 0; /* first, create a link from the slave back to the master */ ret = sysfs_create_link(&(slave->dev.kobj), &(master->dev.kobj), "master"); if (ret) return ret; /* next, create a link from the master to the slave */ sprintf(linkname, "slave_%s", slave->name); ret = sysfs_create_link(&(master->dev.kobj), &(slave->dev.kobj), linkname); return ret; } void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave) { char linkname[IFNAMSIZ+7]; sysfs_remove_link(&(slave->dev.kobj), "master"); sprintf(linkname, "slave_%s", slave->name); sysfs_remove_link(&(master->dev.kobj), linkname); } /* * Show the slaves in the current bond. */ static ssize_t bonding_show_slaves(struct device *d, struct device_attribute *attr, char *buf) { struct slave *slave; int i, res = 0; struct bonding *bond = to_bond(d); read_lock(&bond->lock); bond_for_each_slave(bond, slave, i) { if (res > (PAGE_SIZE - IFNAMSIZ)) { /* not enough space for another interface name */ if ((PAGE_SIZE - res) > 10) res = PAGE_SIZE - 10; res += sprintf(buf + res, "++more++ "); break; } res += sprintf(buf + res, "%s ", slave->dev->name); } read_unlock(&bond->lock); if (res) buf[res-1] = '\n'; /* eat the leftover space */ return res; } /* * Set the slaves in the current bond. The bond interface must be * up for this to succeed. * This is supposed to be only a thin wrapper for bond_enslave and bond_release. * All hard work should be done there. 
*/ static ssize_t bonding_store_slaves(struct device *d, struct device_attribute *attr, const char *buffer, size_t count) { char command[IFNAMSIZ + 1] = { 0, }; char *ifname; int res, ret = count; struct net_device *dev; struct bonding *bond = to_bond(d); if (!rtnl_trylock()) return restart_syscall(); sscanf(buffer, "%16s", command); /* IFNAMSIZ*/ ifname = command + 1; if ((strlen(command) <= 1) || !dev_valid_name(ifname)) goto err_no_cmd; dev = __dev_get_by_name(dev_net(bond->dev), ifname); if (!dev) { pr_info("%s: Interface %s does not exist!\n", bond->dev->name, ifname); ret = -ENODEV; goto out; } switch (command[0]) { case '+': pr_info("%s: Adding slave %s.\n", bond->dev->name, dev->name); res = bond_enslave(bond->dev, dev); break; case '-': pr_info("%s: Removing slave %s.\n", bond->dev->name, dev->name); res = bond_release(bond->dev, dev); break; default: goto err_no_cmd; } if (res) ret = res; goto out; err_no_cmd: pr_err("no command found in slaves file for bond %s. Use +ifname or -ifname.\n", bond->dev->name); ret = -EPERM; out: rtnl_unlock(); return ret; } static DEVICE_ATTR(slaves, S_IRUGO | S_IWUSR, bonding_show_slaves, bonding_store_slaves); /* * Show and set the bonding mode. The bond interface must be down to * change the mode. */ static ssize_t bonding_show_mode(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sprintf(buf, "%s %d\n", bond_mode_tbl[bond->params.mode].modename, bond->params.mode); } static ssize_t bonding_store_mode(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { int new_value, ret = count; struct bonding *bond = to_bond(d); if (bond->dev->flags & IFF_UP) { pr_err("unable to update mode of %s because interface is up.\n", bond->dev->name); ret = -EPERM; goto out; } if (bond->slave_cnt > 0) { pr_err("unable to update mode of %s because it has slaves.\n", bond->dev->name); ret = -EPERM; goto out; } new_value = bond_parse_parm(buf, bond_mode_tbl); if (new_value < 0) { pr_err("%s: Ignoring invalid mode value %.*s.\n", bond->dev->name, (int)strlen(buf) - 1, buf); ret = -EINVAL; goto out; } if ((new_value == BOND_MODE_ALB || new_value == BOND_MODE_TLB) && bond->params.arp_interval) { pr_err("%s: %s mode is incompatible with arp monitoring.\n", bond->dev->name, bond_mode_tbl[new_value].modename); ret = -EINVAL; goto out; } bond->params.mode = new_value; bond_set_mode_ops(bond, bond->params.mode); pr_info("%s: setting mode to %s (%d).\n", bond->dev->name, bond_mode_tbl[new_value].modename, new_value); out: return ret; } static DEVICE_ATTR(mode, S_IRUGO | S_IWUSR, bonding_show_mode, bonding_store_mode); /* * Show and set the bonding transmit hash method. * The bond interface must be down to change the xmit hash policy. */ static ssize_t bonding_show_xmit_hash(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sprintf(buf, "%s %d\n", xmit_hashtype_tbl[bond->params.xmit_policy].modename, bond->params.xmit_policy); } static ssize_t bonding_store_xmit_hash(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { int new_value, ret = count; struct bonding *bond = to_bond(d); if (bond->dev->flags & IFF_UP) { pr_err("%s: Interface is up. 
Unable to update xmit policy.\n", bond->dev->name); ret = -EPERM; goto out; } new_value = bond_parse_parm(buf, xmit_hashtype_tbl); if (new_value < 0) { pr_err("%s: Ignoring invalid xmit hash policy value %.*s.\n", bond->dev->name, (int)strlen(buf) - 1, buf); ret = -EINVAL; goto out; } else { bond->params.xmit_policy = new_value; bond_set_mode_ops(bond, bond->params.mode); pr_info("%s: setting xmit hash policy to %s (%d).\n", bond->dev->name, xmit_hashtype_tbl[new_value].modename, new_value); } out: return ret; } static DEVICE_ATTR(xmit_hash_policy, S_IRUGO | S_IWUSR, bonding_show_xmit_hash, bonding_store_xmit_hash); /* * Show and set arp_validate. */ static ssize_t bonding_show_arp_validate(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sprintf(buf, "%s %d\n", arp_validate_tbl[bond->params.arp_validate].modename, bond->params.arp_validate); } static ssize_t bonding_store_arp_validate(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { int new_value; struct bonding *bond = to_bond(d); new_value = bond_parse_parm(buf, arp_validate_tbl); if (new_value < 0) { pr_err("%s: Ignoring invalid arp_validate value %s\n", bond->dev->name, buf); return -EINVAL; } if (new_value && (bond->params.mode != BOND_MODE_ACTIVEBACKUP)) { pr_err("%s: arp_validate only supported in active-backup mode.\n", bond->dev->name); return -EINVAL; } pr_info("%s: setting arp_validate to %s (%d).\n", bond->dev->name, arp_validate_tbl[new_value].modename, new_value); bond->params.arp_validate = new_value; return count; } static DEVICE_ATTR(arp_validate, S_IRUGO | S_IWUSR, bonding_show_arp_validate, bonding_store_arp_validate); /* * Show and store fail_over_mac. User only allowed to change the * value when there are no slaves. */ static ssize_t bonding_show_fail_over_mac(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sprintf(buf, "%s %d\n", fail_over_mac_tbl[bond->params.fail_over_mac].modename, bond->params.fail_over_mac); } static ssize_t bonding_store_fail_over_mac(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { int new_value; struct bonding *bond = to_bond(d); if (bond->slave_cnt != 0) { pr_err("%s: Can't alter fail_over_mac with slaves in bond.\n", bond->dev->name); return -EPERM; } new_value = bond_parse_parm(buf, fail_over_mac_tbl); if (new_value < 0) { pr_err("%s: Ignoring invalid fail_over_mac value %s.\n", bond->dev->name, buf); return -EINVAL; } bond->params.fail_over_mac = new_value; pr_info("%s: Setting fail_over_mac to %s (%d).\n", bond->dev->name, fail_over_mac_tbl[new_value].modename, new_value); return count; } static DEVICE_ATTR(fail_over_mac, S_IRUGO | S_IWUSR, bonding_show_fail_over_mac, bonding_store_fail_over_mac); /* * Show and set the arp timer interval. There are two tricky bits * here. First, if ARP monitoring is activated, then we must disable * MII monitoring. Second, if the ARP timer isn't running, we must * start it. 
*/ static ssize_t bonding_show_arp_interval(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sprintf(buf, "%d\n", bond->params.arp_interval); } static ssize_t bonding_store_arp_interval(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { int new_value, ret = count; struct bonding *bond = to_bond(d); if (sscanf(buf, "%d", &new_value) != 1) { pr_err("%s: no arp_interval value specified.\n", bond->dev->name); ret = -EINVAL; goto out; } if (new_value < 0) { pr_err("%s: Invalid arp_interval value %d not in range 1-%d; rejected.\n", bond->dev->name, new_value, INT_MAX); ret = -EINVAL; goto out; } if (bond->params.mode == BOND_MODE_ALB || bond->params.mode == BOND_MODE_TLB) { pr_info("%s: ARP monitoring cannot be used with ALB/TLB. Only MII monitoring is supported on %s.\n", bond->dev->name, bond->dev->name); ret = -EINVAL; goto out; } pr_info("%s: Setting ARP monitoring interval to %d.\n", bond->dev->name, new_value); bond->params.arp_interval = new_value; if (bond->params.miimon) { pr_info("%s: ARP monitoring cannot be used with MII monitoring. %s Disabling MII monitoring.\n", bond->dev->name, bond->dev->name); bond->params.miimon = 0; if (delayed_work_pending(&bond->mii_work)) { cancel_delayed_work(&bond->mii_work); flush_workqueue(bond->wq); } } if (!bond->params.arp_targets[0]) { pr_info("%s: ARP monitoring has been set up, but no ARP targets have been specified.\n", bond->dev->name); } if (bond->dev->flags & IFF_UP) { /* If the interface is up, we may need to fire off * the ARP timer. If the interface is down, the * timer will get fired off when the open function * is called. */ if (!delayed_work_pending(&bond->arp_work)) { if (bond->params.mode == BOND_MODE_ACTIVEBACKUP) INIT_DELAYED_WORK(&bond->arp_work, bond_activebackup_arp_mon); else INIT_DELAYED_WORK(&bond->arp_work, bond_loadbalance_arp_mon); queue_delayed_work(bond->wq, &bond->arp_work, 0); } } out: return ret; } static DEVICE_ATTR(arp_interval, S_IRUGO | S_IWUSR, bonding_show_arp_interval, bonding_store_arp_interval); /* * Show and set the arp targets. 
*/ static ssize_t bonding_show_arp_targets(struct device *d, struct device_attribute *attr, char *buf) { int i, res = 0; struct bonding *bond = to_bond(d); for (i = 0; i < BOND_MAX_ARP_TARGETS; i++) { if (bond->params.arp_targets[i]) res += sprintf(buf + res, "%pI4 ", &bond->params.arp_targets[i]); } if (res) buf[res-1] = '\n'; /* eat the leftover space */ return res; } static ssize_t bonding_store_arp_targets(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { __be32 newtarget; int i = 0, done = 0, ret = count; struct bonding *bond = to_bond(d); __be32 *targets; targets = bond->params.arp_targets; newtarget = in_aton(buf + 1); /* look for adds */ if (buf[0] == '+') { if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) { pr_err("%s: invalid ARP target %pI4 specified for addition\n", bond->dev->name, &newtarget); ret = -EINVAL; goto out; } /* look for an empty slot to put the target in, and check for dupes */ for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) { if (targets[i] == newtarget) { /* duplicate */ pr_err("%s: ARP target %pI4 is already present\n", bond->dev->name, &newtarget); ret = -EINVAL; goto out; } if (targets[i] == 0) { pr_info("%s: adding ARP target %pI4.\n", bond->dev->name, &newtarget); done = 1; targets[i] = newtarget; } } if (!done) { pr_err("%s: ARP target table is full!\n", bond->dev->name); ret = -EINVAL; goto out; } } else if (buf[0] == '-') { if ((newtarget == 0) || (newtarget == htonl(INADDR_BROADCAST))) { pr_err("%s: invalid ARP target %pI4 specified for removal\n", bond->dev->name, &newtarget); ret = -EINVAL; goto out; } for (i = 0; (i < BOND_MAX_ARP_TARGETS) && !done; i++) { if (targets[i] == newtarget) { int j; pr_info("%s: removing ARP target %pI4.\n", bond->dev->name, &newtarget); for (j = i; (j < (BOND_MAX_ARP_TARGETS-1)) && targets[j+1]; j++) targets[j] = targets[j+1]; targets[j] = 0; done = 1; } } if (!done) { pr_info("%s: unable to remove nonexistent ARP target %pI4.\n", bond->dev->name, &newtarget); ret = -EINVAL; goto out; } } else { pr_err("no command found in arp_ip_targets file for bond %s. Use +<addr> or -<addr>.\n", bond->dev->name); ret = -EPERM; goto out; } out: return ret; } static DEVICE_ATTR(arp_ip_target, S_IRUGO | S_IWUSR , bonding_show_arp_targets, bonding_store_arp_targets); /* * Show and set the up and down delays. These must be multiples of the * MII monitoring value, and are stored internally as the multiplier. * Thus, we must translate to MS for the real world. 
*/ static ssize_t bonding_show_downdelay(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sprintf(buf, "%d\n", bond->params.downdelay * bond->params.miimon); } static ssize_t bonding_store_downdelay(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { int new_value, ret = count; struct bonding *bond = to_bond(d); if (!(bond->params.miimon)) { pr_err("%s: Unable to set down delay as MII monitoring is disabled\n", bond->dev->name); ret = -EPERM; goto out; } if (sscanf(buf, "%d", &new_value) != 1) { pr_err("%s: no down delay value specified.\n", bond->dev->name); ret = -EINVAL; goto out; } if (new_value < 0) { pr_err("%s: Invalid down delay value %d not in range %d-%d; rejected.\n", bond->dev->name, new_value, 1, INT_MAX); ret = -EINVAL; goto out; } else { if ((new_value % bond->params.miimon) != 0) { pr_warning("%s: Warning: down delay (%d) is not a multiple of miimon (%d), delay rounded to %d ms\n", bond->dev->name, new_value, bond->params.miimon, (new_value / bond->params.miimon) * bond->params.miimon); } bond->params.downdelay = new_value / bond->params.miimon; pr_info("%s: Setting down delay to %d.\n", bond->dev->name, bond->params.downdelay * bond->params.miimon); } out: return ret; } static DEVICE_ATTR(downdelay, S_IRUGO | S_IWUSR, bonding_show_downdelay, bonding_store_downdelay); static ssize_t bonding_show_updelay(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sprintf(buf, "%d\n", bond->params.updelay * bond->params.miimon); } static ssize_t bonding_store_updelay(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { int new_value, ret = count; struct bonding *bond = to_bond(d); if (!(bond->params.miimon)) { pr_err("%s: Unable to set up delay as MII monitoring is disabled\n", bond->dev->name); ret = -EPERM; goto out; } if (sscanf(buf, "%d", &new_value) != 1) { pr_err("%s: no up delay value specified.\n", bond->dev->name); ret = -EINVAL; goto out; } if (new_value < 0) { pr_err("%s: Invalid up delay value %d not in range %d-%d; rejected.\n", bond->dev->name, new_value, 1, INT_MAX); ret = -EINVAL; goto out; } else { if ((new_value % bond->params.miimon) != 0) { pr_warning("%s: Warning: up delay (%d) is not a multiple of miimon (%d), updelay rounded to %d ms\n", bond->dev->name, new_value, bond->params.miimon, (new_value / bond->params.miimon) * bond->params.miimon); } bond->params.updelay = new_value / bond->params.miimon; pr_info("%s: Setting up delay to %d.\n", bond->dev->name, bond->params.updelay * bond->params.miimon); } out: return ret; } static DEVICE_ATTR(updelay, S_IRUGO | S_IWUSR, bonding_show_updelay, bonding_store_updelay); /* * Show and set the LACP interval. Interface must be down, and the mode * must be set to 802.3ad mode. 
*/ static ssize_t bonding_show_lacp(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sprintf(buf, "%s %d\n", bond_lacp_tbl[bond->params.lacp_fast].modename, bond->params.lacp_fast); } static ssize_t bonding_store_lacp(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { int new_value, ret = count; struct bonding *bond = to_bond(d); if (bond->dev->flags & IFF_UP) { pr_err("%s: Unable to update LACP rate because interface is up.\n", bond->dev->name); ret = -EPERM; goto out; } if (bond->params.mode != BOND_MODE_8023AD) { pr_err("%s: Unable to update LACP rate because bond is not in 802.3ad mode.\n", bond->dev->name); ret = -EPERM; goto out; } new_value = bond_parse_parm(buf, bond_lacp_tbl); if ((new_value == 1) || (new_value == 0)) { bond->params.lacp_fast = new_value; bond_3ad_update_lacp_rate(bond); pr_info("%s: Setting LACP rate to %s (%d).\n", bond->dev->name, bond_lacp_tbl[new_value].modename, new_value); } else { pr_err("%s: Ignoring invalid LACP rate value %.*s.\n", bond->dev->name, (int)strlen(buf) - 1, buf); ret = -EINVAL; } out: return ret; } static DEVICE_ATTR(lacp_rate, S_IRUGO | S_IWUSR, bonding_show_lacp, bonding_store_lacp); static ssize_t bonding_show_min_links(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sprintf(buf, "%d\n", bond->params.min_links); } static ssize_t bonding_store_min_links(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct bonding *bond = to_bond(d); int ret; unsigned int new_value; ret = kstrtouint(buf, 0, &new_value); if (ret < 0) { pr_err("%s: Ignoring invalid min links value %s.\n", bond->dev->name, buf); return ret; } pr_info("%s: Setting min links value to %u\n", bond->dev->name, new_value); bond->params.min_links = new_value; return count; } static DEVICE_ATTR(min_links, S_IRUGO | S_IWUSR, bonding_show_min_links, bonding_store_min_links); static ssize_t bonding_show_ad_select(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sprintf(buf, "%s %d\n", ad_select_tbl[bond->params.ad_select].modename, bond->params.ad_select); } static ssize_t bonding_store_ad_select(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { int new_value, ret = count; struct bonding *bond = to_bond(d); if (bond->dev->flags & IFF_UP) { pr_err("%s: Unable to update ad_select because interface is up.\n", bond->dev->name); ret = -EPERM; goto out; } new_value = bond_parse_parm(buf, ad_select_tbl); if (new_value != -1) { bond->params.ad_select = new_value; pr_info("%s: Setting ad_select to %s (%d).\n", bond->dev->name, ad_select_tbl[new_value].modename, new_value); } else { pr_err("%s: Ignoring invalid ad_select value %.*s.\n", bond->dev->name, (int)strlen(buf) - 1, buf); ret = -EINVAL; } out: return ret; } static DEVICE_ATTR(ad_select, S_IRUGO | S_IWUSR, bonding_show_ad_select, bonding_store_ad_select); /* * Show and set the number of peer notifications to send after a failover event. */ static ssize_t bonding_show_num_peer_notif(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sprintf(buf, "%d\n", bond->params.num_peer_notif); } static ssize_t bonding_store_num_peer_notif(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { struct bonding *bond = to_bond(d); int err = kstrtou8(buf, 10, &bond->params.num_peer_notif); return err ? 
err : count; } static DEVICE_ATTR(num_grat_arp, S_IRUGO | S_IWUSR, bonding_show_num_peer_notif, bonding_store_num_peer_notif); static DEVICE_ATTR(num_unsol_na, S_IRUGO | S_IWUSR, bonding_show_num_peer_notif, bonding_store_num_peer_notif); /* * Show and set the MII monitor interval. There are two tricky bits * here. First, if MII monitoring is activated, then we must disable * ARP monitoring. Second, if the timer isn't running, we must * start it. */ static ssize_t bonding_show_miimon(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sprintf(buf, "%d\n", bond->params.miimon); } static ssize_t bonding_store_miimon(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { int new_value, ret = count; struct bonding *bond = to_bond(d); if (sscanf(buf, "%d", &new_value) != 1) { pr_err("%s: no miimon value specified.\n", bond->dev->name); ret = -EINVAL; goto out; } if (new_value < 0) { pr_err("%s: Invalid miimon value %d not in range %d-%d; rejected.\n", bond->dev->name, new_value, 1, INT_MAX); ret = -EINVAL; goto out; } else { pr_info("%s: Setting MII monitoring interval to %d.\n", bond->dev->name, new_value); bond->params.miimon = new_value; if (bond->params.updelay) pr_info("%s: Note: Updating updelay (to %d) since it is a multiple of the miimon value.\n", bond->dev->name, bond->params.updelay * bond->params.miimon); if (bond->params.downdelay) pr_info("%s: Note: Updating downdelay (to %d) since it is a multiple of the miimon value.\n", bond->dev->name, bond->params.downdelay * bond->params.miimon); if (bond->params.arp_interval) { pr_info("%s: MII monitoring cannot be used with ARP monitoring. Disabling ARP monitoring...\n", bond->dev->name); bond->params.arp_interval = 0; if (bond->params.arp_validate) { bond->params.arp_validate = BOND_ARP_VALIDATE_NONE; } if (delayed_work_pending(&bond->arp_work)) { cancel_delayed_work(&bond->arp_work); flush_workqueue(bond->wq); } } if (bond->dev->flags & IFF_UP) { /* If the interface is up, we may need to fire off * the MII timer. If the interface is down, the * timer will get fired off when the open function * is called. */ if (!delayed_work_pending(&bond->mii_work)) { INIT_DELAYED_WORK(&bond->mii_work, bond_mii_monitor); queue_delayed_work(bond->wq, &bond->mii_work, 0); } } } out: return ret; } static DEVICE_ATTR(miimon, S_IRUGO | S_IWUSR, bonding_show_miimon, bonding_store_miimon); /* * Show and set the primary slave. The store function is much * simpler than the bonding_store_slaves function because it only needs to * handle one interface name. * The bond must be a mode that supports a primary for this to be * set. 
*/ static ssize_t bonding_show_primary(struct device *d, struct device_attribute *attr, char *buf) { int count = 0; struct bonding *bond = to_bond(d); if (bond->primary_slave) count = sprintf(buf, "%s\n", bond->primary_slave->dev->name); return count; } static ssize_t bonding_store_primary(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { int i; struct slave *slave; struct bonding *bond = to_bond(d); char ifname[IFNAMSIZ]; if (!rtnl_trylock()) return restart_syscall(); block_netpoll_tx(); read_lock(&bond->lock); write_lock_bh(&bond->curr_slave_lock); if (!USES_PRIMARY(bond->params.mode)) { pr_info("%s: Unable to set primary slave; %s is in mode %d\n", bond->dev->name, bond->dev->name, bond->params.mode); goto out; } sscanf(buf, "%16s", ifname); /* IFNAMSIZ */ /* check to see if we are clearing primary */ if (!strlen(ifname) || buf[0] == '\n') { pr_info("%s: Setting primary slave to None.\n", bond->dev->name); bond->primary_slave = NULL; bond_select_active_slave(bond); goto out; } bond_for_each_slave(bond, slave, i) { if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) { pr_info("%s: Setting %s as primary slave.\n", bond->dev->name, slave->dev->name); bond->primary_slave = slave; strcpy(bond->params.primary, slave->dev->name); bond_select_active_slave(bond); goto out; } } pr_info("%s: Unable to set %.*s as primary slave.\n", bond->dev->name, (int)strlen(buf) - 1, buf); out: write_unlock_bh(&bond->curr_slave_lock); read_unlock(&bond->lock); unblock_netpoll_tx(); rtnl_unlock(); return count; } static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR, bonding_show_primary, bonding_store_primary); /* * Show and set the primary_reselect flag. */ static ssize_t bonding_show_primary_reselect(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sprintf(buf, "%s %d\n", pri_reselect_tbl[bond->params.primary_reselect].modename, bond->params.primary_reselect); } static ssize_t bonding_store_primary_reselect(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { int new_value, ret = count; struct bonding *bond = to_bond(d); if (!rtnl_trylock()) return restart_syscall(); new_value = bond_parse_parm(buf, pri_reselect_tbl); if (new_value < 0) { pr_err("%s: Ignoring invalid primary_reselect value %.*s.\n", bond->dev->name, (int) strlen(buf) - 1, buf); ret = -EINVAL; goto out; } bond->params.primary_reselect = new_value; pr_info("%s: setting primary_reselect to %s (%d).\n", bond->dev->name, pri_reselect_tbl[new_value].modename, new_value); block_netpoll_tx(); read_lock(&bond->lock); write_lock_bh(&bond->curr_slave_lock); bond_select_active_slave(bond); write_unlock_bh(&bond->curr_slave_lock); read_unlock(&bond->lock); unblock_netpoll_tx(); out: rtnl_unlock(); return ret; } static DEVICE_ATTR(primary_reselect, S_IRUGO | S_IWUSR, bonding_show_primary_reselect, bonding_store_primary_reselect); /* * Show and set the use_carrier flag. 
*/ static ssize_t bonding_show_carrier(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sprintf(buf, "%d\n", bond->params.use_carrier); } static ssize_t bonding_store_carrier(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { int new_value, ret = count; struct bonding *bond = to_bond(d); if (sscanf(buf, "%d", &new_value) != 1) { pr_err("%s: no use_carrier value specified.\n", bond->dev->name); ret = -EINVAL; goto out; } if ((new_value == 0) || (new_value == 1)) { bond->params.use_carrier = new_value; pr_info("%s: Setting use_carrier to %d.\n", bond->dev->name, new_value); } else { pr_info("%s: Ignoring invalid use_carrier value %d.\n", bond->dev->name, new_value); } out: return ret; } static DEVICE_ATTR(use_carrier, S_IRUGO | S_IWUSR, bonding_show_carrier, bonding_store_carrier); /* * Show and set currently active_slave. */ static ssize_t bonding_show_active_slave(struct device *d, struct device_attribute *attr, char *buf) { struct slave *curr; struct bonding *bond = to_bond(d); int count = 0; read_lock(&bond->curr_slave_lock); curr = bond->curr_active_slave; read_unlock(&bond->curr_slave_lock); if (USES_PRIMARY(bond->params.mode) && curr) count = sprintf(buf, "%s\n", curr->dev->name); return count; } static ssize_t bonding_store_active_slave(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { int i; struct slave *slave; struct slave *old_active = NULL; struct slave *new_active = NULL; struct bonding *bond = to_bond(d); char ifname[IFNAMSIZ]; if (!rtnl_trylock()) return restart_syscall(); block_netpoll_tx(); read_lock(&bond->lock); write_lock_bh(&bond->curr_slave_lock); if (!USES_PRIMARY(bond->params.mode)) { pr_info("%s: Unable to change active slave; %s is in mode %d\n", bond->dev->name, bond->dev->name, bond->params.mode); goto out; } sscanf(buf, "%16s", ifname); /* IFNAMSIZ */ /* check to see if we are clearing active */ if (!strlen(ifname) || buf[0] == '\n') { pr_info("%s: Clearing current active slave.\n", bond->dev->name); bond->curr_active_slave = NULL; bond_select_active_slave(bond); goto out; } bond_for_each_slave(bond, slave, i) { if (strncmp(slave->dev->name, ifname, IFNAMSIZ) == 0) { old_active = bond->curr_active_slave; new_active = slave; if (new_active == old_active) { /* do nothing */ pr_info("%s: %s is already the current" " active slave.\n", bond->dev->name, slave->dev->name); goto out; } else { if ((new_active) && (old_active) && (new_active->link == BOND_LINK_UP) && IS_UP(new_active->dev)) { pr_info("%s: Setting %s as active" " slave.\n", bond->dev->name, slave->dev->name); bond_change_active_slave(bond, new_active); } else { pr_info("%s: Could not set %s as" " active slave; either %s is" " down or the link is down.\n", bond->dev->name, slave->dev->name, slave->dev->name); } goto out; } } } pr_info("%s: Unable to set %.*s as active slave.\n", bond->dev->name, (int)strlen(buf) - 1, buf); out: write_unlock_bh(&bond->curr_slave_lock); read_unlock(&bond->lock); unblock_netpoll_tx(); rtnl_unlock(); return count; } static DEVICE_ATTR(active_slave, S_IRUGO | S_IWUSR, bonding_show_active_slave, bonding_store_active_slave); /* * Show link status of the bond interface. */ static ssize_t bonding_show_mii_status(struct device *d, struct device_attribute *attr, char *buf) { struct slave *curr; struct bonding *bond = to_bond(d); read_lock(&bond->curr_slave_lock); curr = bond->curr_active_slave; read_unlock(&bond->curr_slave_lock); return sprintf(buf, "%s\n", curr ? 
"up" : "down"); } static DEVICE_ATTR(mii_status, S_IRUGO, bonding_show_mii_status, NULL); /* * Show current 802.3ad aggregator ID. */ static ssize_t bonding_show_ad_aggregator(struct device *d, struct device_attribute *attr, char *buf) { int count = 0; struct bonding *bond = to_bond(d); if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.aggregator_id); } return count; } static DEVICE_ATTR(ad_aggregator, S_IRUGO, bonding_show_ad_aggregator, NULL); /* * Show number of active 802.3ad ports. */ static ssize_t bonding_show_ad_num_ports(struct device *d, struct device_attribute *attr, char *buf) { int count = 0; struct bonding *bond = to_bond(d); if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.ports); } return count; } static DEVICE_ATTR(ad_num_ports, S_IRUGO, bonding_show_ad_num_ports, NULL); /* * Show current 802.3ad actor key. */ static ssize_t bonding_show_ad_actor_key(struct device *d, struct device_attribute *attr, char *buf) { int count = 0; struct bonding *bond = to_bond(d); if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.actor_key); } return count; } static DEVICE_ATTR(ad_actor_key, S_IRUGO, bonding_show_ad_actor_key, NULL); /* * Show current 802.3ad partner key. */ static ssize_t bonding_show_ad_partner_key(struct device *d, struct device_attribute *attr, char *buf) { int count = 0; struct bonding *bond = to_bond(d); if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; count = sprintf(buf, "%d\n", (bond_3ad_get_active_agg_info(bond, &ad_info)) ? 0 : ad_info.partner_key); } return count; } static DEVICE_ATTR(ad_partner_key, S_IRUGO, bonding_show_ad_partner_key, NULL); /* * Show current 802.3ad partner mac. */ static ssize_t bonding_show_ad_partner_mac(struct device *d, struct device_attribute *attr, char *buf) { int count = 0; struct bonding *bond = to_bond(d); if (bond->params.mode == BOND_MODE_8023AD) { struct ad_info ad_info; if (!bond_3ad_get_active_agg_info(bond, &ad_info)) count = sprintf(buf, "%pM\n", ad_info.partner_system); } return count; } static DEVICE_ATTR(ad_partner_mac, S_IRUGO, bonding_show_ad_partner_mac, NULL); /* * Show the queue_ids of the slaves in the current bond. */ static ssize_t bonding_show_queue_id(struct device *d, struct device_attribute *attr, char *buf) { struct slave *slave; int i, res = 0; struct bonding *bond = to_bond(d); if (!rtnl_trylock()) return restart_syscall(); read_lock(&bond->lock); bond_for_each_slave(bond, slave, i) { if (res > (PAGE_SIZE - IFNAMSIZ - 6)) { /* not enough space for another interface_name:queue_id pair */ if ((PAGE_SIZE - res) > 10) res = PAGE_SIZE - 10; res += sprintf(buf + res, "++more++ "); break; } res += sprintf(buf + res, "%s:%d ", slave->dev->name, slave->queue_id); } read_unlock(&bond->lock); if (res) buf[res-1] = '\n'; /* eat the leftover space */ rtnl_unlock(); return res; } /* * Set the queue_ids of the slaves in the current bond. The bond * interface must be enslaved for this to work. 
*/ static ssize_t bonding_store_queue_id(struct device *d, struct device_attribute *attr, const char *buffer, size_t count) { struct slave *slave, *update_slave; struct bonding *bond = to_bond(d); u16 qid; int i, ret = count; char *delim; struct net_device *sdev = NULL; if (!rtnl_trylock()) return restart_syscall(); /* delim will point to queue id if successful */ delim = strchr(buffer, ':'); if (!delim) goto err_no_cmd; /* * Terminate string that points to device name and bump it * up one, so we can read the queue id there. */ *delim = '\0'; if (sscanf(++delim, "%hu\n", &qid) != 1) goto err_no_cmd; /* Check buffer length, valid ifname and queue id */ if (strlen(buffer) > IFNAMSIZ || !dev_valid_name(buffer) || qid > bond->params.tx_queues) goto err_no_cmd; /* Get the pointer to that interface if it exists */ sdev = __dev_get_by_name(dev_net(bond->dev), buffer); if (!sdev) goto err_no_cmd; read_lock(&bond->lock); /* Search for the slave and check for duplicate qids */ update_slave = NULL; bond_for_each_slave(bond, slave, i) { if (sdev == slave->dev) /* * We don't need to check the matching * slave for dups, since we're overwriting it */ update_slave = slave; else if (qid && qid == slave->queue_id) { goto err_no_cmd_unlock; } } if (!update_slave) goto err_no_cmd_unlock; /* Actually set the qid for the slave */ update_slave->queue_id = qid; read_unlock(&bond->lock); out: rtnl_unlock(); return ret; err_no_cmd_unlock: read_unlock(&bond->lock); err_no_cmd: pr_info("invalid input for queue_id set for %s.\n", bond->dev->name); ret = -EPERM; goto out; } static DEVICE_ATTR(queue_id, S_IRUGO | S_IWUSR, bonding_show_queue_id, bonding_store_queue_id); /* * Show and set the all_slaves_active flag. */ static ssize_t bonding_show_slaves_active(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sprintf(buf, "%d\n", bond->params.all_slaves_active); } static ssize_t bonding_store_slaves_active(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { int i, new_value, ret = count; struct bonding *bond = to_bond(d); struct slave *slave; if (sscanf(buf, "%d", &new_value) != 1) { pr_err("%s: no all_slaves_active value specified.\n", bond->dev->name); ret = -EINVAL; goto out; } if (new_value == bond->params.all_slaves_active) goto out; if ((new_value == 0) || (new_value == 1)) { bond->params.all_slaves_active = new_value; } else { pr_info("%s: Ignoring invalid all_slaves_active value %d.\n", bond->dev->name, new_value); ret = -EINVAL; goto out; } bond_for_each_slave(bond, slave, i) { if (!bond_is_active_slave(slave)) { if (new_value) slave->inactive = 0; else slave->inactive = 1; } } out: return ret; } static DEVICE_ATTR(all_slaves_active, S_IRUGO | S_IWUSR, bonding_show_slaves_active, bonding_store_slaves_active); /* * Show and set the number of IGMP membership reports to send on link failure */ static ssize_t bonding_show_resend_igmp(struct device *d, struct device_attribute *attr, char *buf) { struct bonding *bond = to_bond(d); return sprintf(buf, "%d\n", bond->params.resend_igmp); } static ssize_t bonding_store_resend_igmp(struct device *d, struct device_attribute *attr, const char *buf, size_t count) { int new_value, ret = count; struct bonding *bond = to_bond(d); if (sscanf(buf, "%d", &new_value) != 1) { pr_err("%s: no resend_igmp value specified.\n", bond->dev->name); ret = -EINVAL; goto out; } if (new_value < 0 || new_value > 255) { pr_err("%s: Invalid resend_igmp value %d not in range 0-255; rejected.\n", bond->dev->name, 
new_value); ret = -EINVAL; goto out; } pr_info("%s: Setting resend_igmp to %d.\n", bond->dev->name, new_value); bond->params.resend_igmp = new_value; out: return ret; } static DEVICE_ATTR(resend_igmp, S_IRUGO | S_IWUSR, bonding_show_resend_igmp, bonding_store_resend_igmp); static struct attribute *per_bond_attrs[] = { &dev_attr_slaves.attr, &dev_attr_mode.attr, &dev_attr_fail_over_mac.attr, &dev_attr_arp_validate.attr, &dev_attr_arp_interval.attr, &dev_attr_arp_ip_target.attr, &dev_attr_downdelay.attr, &dev_attr_updelay.attr, &dev_attr_lacp_rate.attr, &dev_attr_ad_select.attr, &dev_attr_xmit_hash_policy.attr, &dev_attr_num_grat_arp.attr, &dev_attr_num_unsol_na.attr, &dev_attr_miimon.attr, &dev_attr_primary.attr, &dev_attr_primary_reselect.attr, &dev_attr_use_carrier.attr, &dev_attr_active_slave.attr, &dev_attr_mii_status.attr, &dev_attr_ad_aggregator.attr, &dev_attr_ad_num_ports.attr, &dev_attr_ad_actor_key.attr, &dev_attr_ad_partner_key.attr, &dev_attr_ad_partner_mac.attr, &dev_attr_queue_id.attr, &dev_attr_all_slaves_active.attr, &dev_attr_resend_igmp.attr, &dev_attr_min_links.attr, NULL, }; static struct attribute_group bonding_group = { .name = "bonding", .attrs = per_bond_attrs, }; /* * Initialize sysfs. This sets up the bonding_masters file in * /sys/class/net. */ int bond_create_sysfs(struct bond_net *bn) { int ret; bn->class_attr_bonding_masters = class_attr_bonding_masters; sysfs_attr_init(&bn->class_attr_bonding_masters.attr); ret = netdev_class_create_file(&bn->class_attr_bonding_masters); /* * Permit multiple loads of the module by ignoring failures to * create the bonding_masters sysfs file. Bonding devices * created by second or subsequent loads of the module will * not be listed in, or controllable by, bonding_masters, but * will have the usual "bonding" sysfs directory. * * This is done to preserve backwards compatibility for * initscripts/sysconfig, which load bonding multiple times to * configure multiple bonding devices. */ if (ret == -EEXIST) { /* Is someone being kinky and naming a device bonding_master? */ if (__dev_get_by_name(bn->net, class_attr_bonding_masters.attr.name)) pr_err("network device named %s already exists in sysfs\n", class_attr_bonding_masters.attr.name); ret = 0; } return ret; } /* * Remove /sys/class/net/bonding_masters. */ void bond_destroy_sysfs(struct bond_net *bn) { netdev_class_remove_file(&bn->class_attr_bonding_masters); } /* * Initialize sysfs for each bond. This sets up and registers * the 'bonding' directory for each individual bond under /sys/class/net. */ void bond_prepare_sysfs_group(struct bonding *bond) { bond->dev->sysfs_groups[0] = &bonding_group; }
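The updelay/downdelay handlers above store the delay internally as a multiple of miimon, rounding user input (in ms) down to the nearest multiple; the show handlers multiply back to milliseconds. A minimal stand-alone sketch of that arithmetic, with hypothetical names and without the sysfs plumbing:

#include <stdio.h>

/*
 * Convert a user-supplied delay in ms to the stored multiplier,
 * warning when the value is rounded, as bonding_store_downdelay()
 * and bonding_store_updelay() do.
 */
static int delay_to_multiplier(int delay_ms, int miimon_ms)
{
	if (miimon_ms <= 0 || delay_ms < 0)
		return -1;	/* rejected, like the -EPERM/-EINVAL paths */
	if (delay_ms % miimon_ms)
		printf("warning: %d ms is not a multiple of %d ms, rounded to %d ms\n",
		       delay_ms, miimon_ms,
		       (delay_ms / miimon_ms) * miimon_ms);
	return delay_ms / miimon_ms;	/* what gets stored */
}

int main(void)
{
	int miimon = 100;
	int mult = delay_to_multiplier(250, miimon);	/* warns; stores 2 */

	/* the "show" side multiplies back: reports 200 ms, not 250 */
	printf("stored multiplier %d -> reported %d ms\n", mult, mult * miimon);
	return 0;
}

So writing 250 to downdelay with miimon=100 silently becomes 200 ms, which is why the handlers print the rounding warning.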
gpl-2.0
crseanpaul/muon-catalyzed-fusion
arch/mips/kernel/rtlx.c
4661
12517
/* * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. * Copyright (C) 2005, 06 Ralf Baechle (ralf@linux-mips.org) * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * */ #include <linux/device.h> #include <linux/kernel.h> #include <linux/fs.h> #include <linux/init.h> #include <asm/uaccess.h> #include <linux/list.h> #include <linux/vmalloc.h> #include <linux/elf.h> #include <linux/seq_file.h> #include <linux/syscalls.h> #include <linux/moduleloader.h> #include <linux/interrupt.h> #include <linux/poll.h> #include <linux/sched.h> #include <linux/wait.h> #include <asm/mipsmtregs.h> #include <asm/mips_mt.h> #include <asm/cacheflush.h> #include <linux/atomic.h> #include <asm/cpu.h> #include <asm/processor.h> #include <asm/vpe.h> #include <asm/rtlx.h> static struct rtlx_info *rtlx; static int major; static char module_name[] = "rtlx"; static struct chan_waitqueues { wait_queue_head_t rt_queue; wait_queue_head_t lx_queue; atomic_t in_open; struct mutex mutex; } channel_wqs[RTLX_CHANNELS]; static struct vpe_notifications notify; static int sp_stopping; extern void *vpe_get_shared(int index); static void rtlx_dispatch(void) { do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ); } /* Interrupt handler may be called before rtlx_init has otherwise had a chance to run. */ static irqreturn_t rtlx_interrupt(int irq, void *dev_id) { unsigned int vpeflags; unsigned long flags; int i; /* Ought not to be strictly necessary for SMTC builds */ local_irq_save(flags); vpeflags = dvpe(); set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ); irq_enable_hazard(); evpe(vpeflags); local_irq_restore(flags); for (i = 0; i < RTLX_CHANNELS; i++) { wake_up(&channel_wqs[i].lx_queue); wake_up(&channel_wqs[i].rt_queue); } return IRQ_HANDLED; } static void __used dump_rtlx(void) { int i; printk("id 0x%lx state %d\n", rtlx->id, rtlx->state); for (i = 0; i < RTLX_CHANNELS; i++) { struct rtlx_channel *chan = &rtlx->channel[i]; printk(" rt_state %d lx_state %d buffer_size %d\n", chan->rt_state, chan->lx_state, chan->buffer_size); printk(" rt_read %d rt_write %d\n", chan->rt_read, chan->rt_write); printk(" lx_read %d lx_write %d\n", chan->lx_read, chan->lx_write); printk(" rt_buffer <%s>\n", chan->rt_buffer); printk(" lx_buffer <%s>\n", chan->lx_buffer); } } /* call when we have the address of the shared structure from the SP side. 
*/ static int rtlx_init(struct rtlx_info *rtlxi) { if (rtlxi->id != RTLX_ID) { printk(KERN_ERR "no valid RTLX id at 0x%p 0x%lx\n", rtlxi, rtlxi->id); return -ENOEXEC; } rtlx = rtlxi; return 0; } /* notifications */ static void starting(int vpe) { int i; sp_stopping = 0; /* force a reload of rtlx */ rtlx=NULL; /* wake up any sleeping rtlx_open's */ for (i = 0; i < RTLX_CHANNELS; i++) wake_up_interruptible(&channel_wqs[i].lx_queue); } static void stopping(int vpe) { int i; sp_stopping = 1; for (i = 0; i < RTLX_CHANNELS; i++) wake_up_interruptible(&channel_wqs[i].lx_queue); } int rtlx_open(int index, int can_sleep) { struct rtlx_info **p; struct rtlx_channel *chan; enum rtlx_state state; int ret = 0; if (index >= RTLX_CHANNELS) { printk(KERN_DEBUG "rtlx_open index out of range\n"); return -ENOSYS; } if (atomic_inc_return(&channel_wqs[index].in_open) > 1) { printk(KERN_DEBUG "rtlx_open channel %d already opened\n", index); ret = -EBUSY; goto out_fail; } if (rtlx == NULL) { if( (p = vpe_get_shared(tclimit)) == NULL) { if (can_sleep) { __wait_event_interruptible(channel_wqs[index].lx_queue, (p = vpe_get_shared(tclimit)), ret); if (ret) goto out_fail; } else { printk(KERN_DEBUG "No SP program loaded, and device " "opened with O_NONBLOCK\n"); ret = -ENOSYS; goto out_fail; } } smp_rmb(); if (*p == NULL) { if (can_sleep) { DEFINE_WAIT(wait); for (;;) { prepare_to_wait( &channel_wqs[index].lx_queue, &wait, TASK_INTERRUPTIBLE); smp_rmb(); if (*p != NULL) break; if (!signal_pending(current)) { schedule(); continue; } ret = -ERESTARTSYS; goto out_fail; } finish_wait(&channel_wqs[index].lx_queue, &wait); } else { pr_err(" *vpe_get_shared is NULL. " "Has an SP program been loaded?\n"); ret = -ENOSYS; goto out_fail; } } if ((unsigned int)*p < KSEG0) { printk(KERN_WARNING "vpe_get_shared returned an " "invalid pointer maybe an error code %d\n", (int)*p); ret = -ENOSYS; goto out_fail; } if ((ret = rtlx_init(*p)) < 0) goto out_ret; } chan = &rtlx->channel[index]; state = xchg(&chan->lx_state, RTLX_STATE_OPENED); if (state == RTLX_STATE_OPENED) { ret = -EBUSY; goto out_fail; } out_fail: smp_mb(); atomic_dec(&channel_wqs[index].in_open); smp_mb(); out_ret: return ret; } int rtlx_release(int index) { if (rtlx == NULL) { pr_err("rtlx_release() with null rtlx\n"); return 0; } rtlx->channel[index].lx_state = RTLX_STATE_UNUSED; return 0; } unsigned int rtlx_read_poll(int index, int can_sleep) { struct rtlx_channel *chan; if (rtlx == NULL) return 0; chan = &rtlx->channel[index]; /* data available to read? 
*/ if (chan->lx_read == chan->lx_write) { if (can_sleep) { int ret = 0; __wait_event_interruptible(channel_wqs[index].lx_queue, (chan->lx_read != chan->lx_write) || sp_stopping, ret); if (ret) return ret; if (sp_stopping) return 0; } else return 0; } return (chan->lx_write + chan->buffer_size - chan->lx_read) % chan->buffer_size; } static inline int write_spacefree(int read, int write, int size) { if (read == write) { /* * Never fill the buffer completely, so indexes are always * equal if empty and only empty, or !equal if data available */ return size - 1; } return ((read + size - write) % size) - 1; } unsigned int rtlx_write_poll(int index) { struct rtlx_channel *chan = &rtlx->channel[index]; return write_spacefree(chan->rt_read, chan->rt_write, chan->buffer_size); } ssize_t rtlx_read(int index, void __user *buff, size_t count) { size_t lx_write, fl = 0L; struct rtlx_channel *lx; unsigned long failed; if (rtlx == NULL) return -ENOSYS; lx = &rtlx->channel[index]; mutex_lock(&channel_wqs[index].mutex); smp_rmb(); lx_write = lx->lx_write; /* find out how much in total */ count = min(count, (size_t)(lx_write + lx->buffer_size - lx->lx_read) % lx->buffer_size); /* then how much from the read pointer onwards */ fl = min(count, (size_t)lx->buffer_size - lx->lx_read); failed = copy_to_user(buff, lx->lx_buffer + lx->lx_read, fl); if (failed) goto out; /* and if there is anything left at the beginning of the buffer */ if (count - fl) failed = copy_to_user(buff + fl, lx->lx_buffer, count - fl); out: count -= failed; smp_wmb(); lx->lx_read = (lx->lx_read + count) % lx->buffer_size; smp_wmb(); mutex_unlock(&channel_wqs[index].mutex); return count; } ssize_t rtlx_write(int index, const void __user *buffer, size_t count) { struct rtlx_channel *rt; unsigned long failed; size_t rt_read; size_t fl; if (rtlx == NULL) return(-ENOSYS); rt = &rtlx->channel[index]; mutex_lock(&channel_wqs[index].mutex); smp_rmb(); rt_read = rt->rt_read; /* total number of bytes to copy */ count = min(count, (size_t)write_spacefree(rt_read, rt->rt_write, rt->buffer_size)); /* first bit from write pointer to the end of the buffer, or count */ fl = min(count, (size_t) rt->buffer_size - rt->rt_write); failed = copy_from_user(rt->rt_buffer + rt->rt_write, buffer, fl); if (failed) goto out; /* if there's any left copy to the beginning of the buffer */ if (count - fl) { failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl); } out: count -= failed; smp_wmb(); rt->rt_write = (rt->rt_write + count) % rt->buffer_size; smp_wmb(); mutex_unlock(&channel_wqs[index].mutex); return count; } static int file_open(struct inode *inode, struct file *filp) { return rtlx_open(iminor(inode), (filp->f_flags & O_NONBLOCK) ? 0 : 1); } static int file_release(struct inode *inode, struct file *filp) { return rtlx_release(iminor(inode)); } static unsigned int file_poll(struct file *file, poll_table * wait) { int minor; unsigned int mask = 0; minor = iminor(file->f_path.dentry->d_inode); poll_wait(file, &channel_wqs[minor].rt_queue, wait); poll_wait(file, &channel_wqs[minor].lx_queue, wait); if (rtlx == NULL) return 0; /* data available to read? */ if (rtlx_read_poll(minor, 0)) mask |= POLLIN | POLLRDNORM; /* space to write */ if (rtlx_write_poll(minor)) mask |= POLLOUT | POLLWRNORM; return mask; } static ssize_t file_read(struct file *file, char __user * buffer, size_t count, loff_t * ppos) { int minor = iminor(file->f_path.dentry->d_inode); /* data available? */ if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 
0 : 1)) { return 0; // -EAGAIN makes cat whinge } return rtlx_read(minor, buffer, count); } static ssize_t file_write(struct file *file, const char __user * buffer, size_t count, loff_t * ppos) { int minor; struct rtlx_channel *rt; minor = iminor(file->f_path.dentry->d_inode); rt = &rtlx->channel[minor]; /* any space left... */ if (!rtlx_write_poll(minor)) { int ret = 0; if (file->f_flags & O_NONBLOCK) return -EAGAIN; __wait_event_interruptible(channel_wqs[minor].rt_queue, rtlx_write_poll(minor), ret); if (ret) return ret; } return rtlx_write(minor, buffer, count); } static const struct file_operations rtlx_fops = { .owner = THIS_MODULE, .open = file_open, .release = file_release, .write = file_write, .read = file_read, .poll = file_poll, .llseek = noop_llseek, }; static struct irqaction rtlx_irq = { .handler = rtlx_interrupt, .name = "RTLX", }; static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ; static char register_chrdev_failed[] __initdata = KERN_ERR "rtlx_module_init: unable to register device\n"; static int __init rtlx_module_init(void) { struct device *dev; int i, err; if (!cpu_has_mipsmt) { printk("VPE loader: not a MIPS MT capable processor\n"); return -ENODEV; } if (tclimit == 0) { printk(KERN_WARNING "No TCs reserved for AP/SP, not " "initializing RTLX.\nPass maxtcs=<n> argument as kernel " "argument\n"); return -ENODEV; } major = register_chrdev(0, module_name, &rtlx_fops); if (major < 0) { printk(register_chrdev_failed); return major; } /* initialise the wait queues */ for (i = 0; i < RTLX_CHANNELS; i++) { init_waitqueue_head(&channel_wqs[i].rt_queue); init_waitqueue_head(&channel_wqs[i].lx_queue); atomic_set(&channel_wqs[i].in_open, 0); mutex_init(&channel_wqs[i].mutex); dev = device_create(mt_class, NULL, MKDEV(major, i), NULL, "%s%d", module_name, i); if (IS_ERR(dev)) { err = PTR_ERR(dev); goto out_chrdev; } } /* set up notifiers */ notify.start = starting; notify.stop = stopping; vpe_notify(tclimit, &notify); if (cpu_has_vint) set_vi_handler(MIPS_CPU_RTLX_IRQ, rtlx_dispatch); else { pr_err("APRP RTLX init on non-vectored-interrupt processor\n"); err = -ENODEV; goto out_chrdev; } rtlx_irq.dev_id = rtlx; setup_irq(rtlx_irq_num, &rtlx_irq); return 0; out_chrdev: for (i = 0; i < RTLX_CHANNELS; i++) device_destroy(mt_class, MKDEV(major, i)); return err; } static void __exit rtlx_module_exit(void) { int i; for (i = 0; i < RTLX_CHANNELS; i++) device_destroy(mt_class, MKDEV(major, i)); unregister_chrdev(major, module_name); } module_init(rtlx_module_init); module_exit(rtlx_module_exit); MODULE_DESCRIPTION("MIPS RTLX"); MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc."); MODULE_LICENSE("GPL");
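The write_spacefree() helper above encodes the classic single-producer/single-consumer ring-buffer rule: one slot is always left empty, so read == write can only ever mean "empty" and no separate count or full flag is needed. A small stand-alone C sketch of the same formula (same math, hypothetical harness):

#include <assert.h>
#include <stdio.h>

/*
 * Same free-space rule as write_spacefree(): never fill the buffer
 * completely, so indexes are equal if and only if the buffer is empty.
 */
static int spacefree(int read, int write, int size)
{
	if (read == write)
		return size - 1;
	return ((read + size - write) % size) - 1;
}

int main(void)
{
	int size = 8, read = 0, write = 0;

	assert(spacefree(read, write, size) == 7);	/* empty: size - 1 usable */

	write = (write + 7) % size;			/* producer writes 7 bytes */
	assert(spacefree(read, write, size) == 0);	/* "full", one slot spare */

	read = (read + 3) % size;			/* consumer takes 3 bytes */
	assert(spacefree(read, write, size) == 3);

	printf("ring-buffer invariants hold\n");
	return 0;
}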
gpl-2.0
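/* A standalone sketch of the one-slot-reserved ring buffer arithmetic that
 * write_spacefree() and the read path above rely on; the indexes here are
 * plain local values rather than the driver's shared rtlx_channel fields,
 * and the assertions just illustrate the invariants.
 */
#include <assert.h>
#include <stdio.h>

/* Bytes available to read: distance from read to write, modulo size. */
static int bytes_readable(int read, int write, int size)
{
	return (write + size - read) % size;
}

/* Bytes that may be written: one slot always stays empty so that
 * read == write unambiguously means "empty" rather than "full".
 */
static int bytes_writable(int read, int write, int size)
{
	if (read == write)
		return size - 1;
	return ((read + size - write) % size) - 1;
}

int main(void)
{
	int size = 8;

	assert(bytes_readable(0, 0, size) == 0);	/* empty */
	assert(bytes_writable(0, 0, size) == 7);	/* size - 1, never size */
	assert(bytes_readable(6, 2, size) == 4);	/* wrapped data: 6,7,0,1 */
	assert(bytes_writable(6, 2, size) == 3);	/* 2,3,4; slot 5 reserved */
	printf("ring buffer invariants hold\n");
	return 0;
}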
NamelessRom/android_kernel_samsung_jf
drivers/mmc/host/sdhci-pxav2.c
4917
6065
/* * Copyright (C) 2010 Marvell International Ltd. * Zhangfei Gao <zhangfei.gao@marvell.com> * Kevin Wang <dwang4@marvell.com> * Jun Nie <njun@marvell.com> * Qiming Wu <wuqm@marvell.com> * Philip Rakity <prakity@marvell.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/err.h> #include <linux/init.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/module.h> #include <linux/io.h> #include <linux/gpio.h> #include <linux/mmc/card.h> #include <linux/mmc/host.h> #include <linux/platform_data/pxa_sdhci.h> #include <linux/slab.h> #include "sdhci.h" #include "sdhci-pltfm.h" #define SD_FIFO_PARAM 0xe0 #define DIS_PAD_SD_CLK_GATE 0x0400 /* Turn on/off Dynamic SD Clock Gating */ #define CLK_GATE_ON 0x0200 /* Disable/enable Clock Gate */ #define CLK_GATE_CTL 0x0100 /* Clock Gate Control */ #define CLK_GATE_SETTING_BITS (DIS_PAD_SD_CLK_GATE | \ CLK_GATE_ON | CLK_GATE_CTL) #define SD_CLOCK_BURST_SIZE_SETUP 0xe6 #define SDCLK_SEL_SHIFT 8 #define SDCLK_SEL_MASK 0x3 #define SDCLK_DELAY_SHIFT 10 #define SDCLK_DELAY_MASK 0x3c #define SD_CE_ATA_2 0xea #define MMC_CARD 0x1000 #define MMC_WIDTH 0x0100 static void pxav2_set_private_registers(struct sdhci_host *host, u8 mask) { struct platform_device *pdev = to_platform_device(mmc_dev(host->mmc)); struct sdhci_pxa_platdata *pdata = pdev->dev.platform_data; if (mask == SDHCI_RESET_ALL) { u16 tmp = 0; /* * tune the timing of read data/command when a CRC error happens; * no performance impact */ if (pdata && pdata->clk_delay_sel == 1) { tmp = readw(host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP); tmp &= ~(SDCLK_DELAY_MASK << SDCLK_DELAY_SHIFT); tmp |= (pdata->clk_delay_cycles & SDCLK_DELAY_MASK) << SDCLK_DELAY_SHIFT; tmp &= ~(SDCLK_SEL_MASK << SDCLK_SEL_SHIFT); tmp |= (1 & SDCLK_SEL_MASK) << SDCLK_SEL_SHIFT; writew(tmp, host->ioaddr + SD_CLOCK_BURST_SIZE_SETUP); } if (pdata && (pdata->flags & PXA_FLAG_ENABLE_CLOCK_GATING)) { tmp = readw(host->ioaddr + SD_FIFO_PARAM); tmp &= ~CLK_GATE_SETTING_BITS; writew(tmp, host->ioaddr + SD_FIFO_PARAM); } else { tmp = readw(host->ioaddr + SD_FIFO_PARAM); tmp &= ~CLK_GATE_SETTING_BITS; tmp |= CLK_GATE_SETTING_BITS; writew(tmp, host->ioaddr + SD_FIFO_PARAM); } } } static int pxav2_mmc_set_width(struct sdhci_host *host, int width) { u8 ctrl; u16 tmp; ctrl = readb(host->ioaddr + SDHCI_HOST_CONTROL); tmp = readw(host->ioaddr + SD_CE_ATA_2); if (width == MMC_BUS_WIDTH_8) { ctrl &= ~SDHCI_CTRL_4BITBUS; tmp |= MMC_CARD | MMC_WIDTH; } else { tmp &= ~(MMC_CARD | MMC_WIDTH); if (width == MMC_BUS_WIDTH_4) ctrl |= SDHCI_CTRL_4BITBUS; else ctrl &= ~SDHCI_CTRL_4BITBUS; } writew(tmp, host->ioaddr + SD_CE_ATA_2); writeb(ctrl, host->ioaddr + SDHCI_HOST_CONTROL); return 0; } static u32 pxav2_get_max_clock(struct sdhci_host *host) { struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); return clk_get_rate(pltfm_host->clk); } static struct sdhci_ops pxav2_sdhci_ops = { .get_max_clock = pxav2_get_max_clock, .platform_reset_exit = pxav2_set_private_registers, .platform_8bit_width = pxav2_mmc_set_width, }; static int __devinit sdhci_pxav2_probe(struct platform_device *pdev) { struct sdhci_pltfm_host *pltfm_host; struct
sdhci_pxa_platdata *pdata = pdev->dev.platform_data; struct device *dev = &pdev->dev; struct sdhci_host *host = NULL; struct sdhci_pxa *pxa = NULL; int ret; struct clk *clk; pxa = kzalloc(sizeof(struct sdhci_pxa), GFP_KERNEL); if (!pxa) return -ENOMEM; host = sdhci_pltfm_init(pdev, NULL); if (IS_ERR(host)) { kfree(pxa); return PTR_ERR(host); } pltfm_host = sdhci_priv(host); pltfm_host->priv = pxa; clk = clk_get(dev, "PXA-SDHCLK"); if (IS_ERR(clk)) { dev_err(dev, "failed to get io clock\n"); ret = PTR_ERR(clk); goto err_clk_get; } pltfm_host->clk = clk; clk_enable(clk); host->quirks = SDHCI_QUIRK_BROKEN_ADMA | SDHCI_QUIRK_BROKEN_TIMEOUT_VAL | SDHCI_QUIRK_CAP_CLOCK_BASE_BROKEN; if (pdata) { if (pdata->flags & PXA_FLAG_CARD_PERMANENT) { /* on-chip device */ host->quirks |= SDHCI_QUIRK_BROKEN_CARD_DETECTION; host->mmc->caps |= MMC_CAP_NONREMOVABLE; } /* If slot design supports 8 bit data, indicate this to MMC. */ if (pdata->flags & PXA_FLAG_SD_8_BIT_CAPABLE_SLOT) host->mmc->caps |= MMC_CAP_8_BIT_DATA; if (pdata->quirks) host->quirks |= pdata->quirks; if (pdata->host_caps) host->mmc->caps |= pdata->host_caps; if (pdata->pm_caps) host->mmc->pm_caps |= pdata->pm_caps; } host->ops = &pxav2_sdhci_ops; ret = sdhci_add_host(host); if (ret) { dev_err(&pdev->dev, "failed to add host\n"); goto err_add_host; } platform_set_drvdata(pdev, host); return 0; err_add_host: clk_disable(clk); clk_put(clk); err_clk_get: sdhci_pltfm_free(pdev); kfree(pxa); return ret; } static int __devexit sdhci_pxav2_remove(struct platform_device *pdev) { struct sdhci_host *host = platform_get_drvdata(pdev); struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); struct sdhci_pxa *pxa = pltfm_host->priv; sdhci_remove_host(host, 1); clk_disable(pltfm_host->clk); clk_put(pltfm_host->clk); sdhci_pltfm_free(pdev); kfree(pxa); platform_set_drvdata(pdev, NULL); return 0; } static struct platform_driver sdhci_pxav2_driver = { .driver = { .name = "sdhci-pxav2", .owner = THIS_MODULE, .pm = SDHCI_PLTFM_PMOPS, }, .probe = sdhci_pxav2_probe, .remove = __devexit_p(sdhci_pxav2_remove), }; module_platform_driver(sdhci_pxav2_driver); MODULE_DESCRIPTION("SDHCI driver for pxav2"); MODULE_AUTHOR("Marvell International Ltd."); MODULE_LICENSE("GPL v2");
gpl-2.0
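/* The clk_delay programming in pxav2_set_private_registers() above is a
 * read-modify-write of multi-bit fields in a 16-bit register. This sketch
 * repeats the pattern against a plain variable standing in for the
 * memory-mapped SD_CLOCK_BURST_SIZE_SETUP register; the mask and shift
 * values are the driver's, everything else here is simulated.
 */
#include <assert.h>
#include <stdint.h>

#define SDCLK_SEL_SHIFT		8
#define SDCLK_SEL_MASK		0x3
#define SDCLK_DELAY_SHIFT	10
#define SDCLK_DELAY_MASK	0x3c

static uint16_t set_field(uint16_t reg, uint16_t mask, int shift, uint16_t val)
{
	reg &= ~(mask << shift);	/* clear the old field */
	reg |= (val & mask) << shift;	/* insert the new value */
	return reg;
}

int main(void)
{
	uint16_t reg = 0xffff;		/* pretend readw() returned this */

	reg = set_field(reg, SDCLK_DELAY_MASK, SDCLK_DELAY_SHIFT, 0x1f);
	reg = set_field(reg, SDCLK_SEL_MASK, SDCLK_SEL_SHIFT, 1);

	/* only the two fields changed; surrounding bits are preserved */
	assert(((reg >> SDCLK_SEL_SHIFT) & SDCLK_SEL_MASK) == 1);
	assert(((reg >> SDCLK_DELAY_SHIFT) & SDCLK_DELAY_MASK) == 0x1c);
	return 0;
}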
falaze/nexus5n
drivers/staging/wlags49_h2/wl_enc.c
5173
6467
/******************************************************************************* * Agere Systems Inc. * Wireless device driver for Linux (wlags49). * * Copyright (c) 1998-2003 Agere Systems Inc. * All rights reserved. * http://www.agere.com * * Initially developed by TriplePoint, Inc. * http://www.triplepoint.com * *------------------------------------------------------------------------------ * * This file defines functions related to WEP key coding/decoding. * *------------------------------------------------------------------------------ * * SOFTWARE LICENSE * * This software is provided subject to the following terms and conditions, * which you should read carefully before using the software. Using this * software indicates your acceptance of these terms and conditions. If you do * not agree with these terms and conditions, do not use the software. * * Copyright © 2003 Agere Systems Inc. * All rights reserved. * * Redistribution and use in source or binary forms, with or without * modifications, are permitted provided that the following conditions are met: * * . Redistributions of source code must retain the above copyright notice, this * list of conditions and the following Disclaimer as comments in the code as * well as in the documentation and/or other materials provided with the * distribution. * * . Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following Disclaimer in the documentation * and/or other materials provided with the distribution. * * . Neither the name of Agere Systems Inc. nor the names of the contributors * may be used to endorse or promote products derived from this software * without specific prior written permission. * * Disclaimer * * THIS SOFTWARE IS PROVIDED “AS IS” AND ANY EXPRESS OR IMPLIED WARRANTIES, * INCLUDING, BUT NOT LIMITED TO, INFRINGEMENT AND THE IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. ANY * USE, MODIFICATION OR DISTRIBUTION OF THIS SOFTWARE IS SOLELY AT THE USERS OWN * RISK. IN NO EVENT SHALL AGERE SYSTEMS INC. OR CONTRIBUTORS BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, INCLUDING, BUT NOT LIMITED TO, CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH * DAMAGE. 
* ******************************************************************************/ /******************************************************************************* * include files ******************************************************************************/ #include <linux/string.h> #include <wl_version.h> #include <debug.h> #include <hcf.h> #include <wl_enc.h> /******************************************************************************* * global definitions ******************************************************************************/ #if DBG extern dbg_info_t *DbgInfo; #endif /* DBG */ /******************************************************************************* * wl_wep_code() ******************************************************************************* * * DESCRIPTION: * * This function encodes a set of wep keys for privacy * * PARAMETERS: * * szCrypt - * szDest - * Data - * nLen - * * RETURNS: * * OK * ******************************************************************************/ int wl_wep_code( char *szCrypt, char *szDest, void *Data, int nLen ) { int i; int t; int k ; char bits; char *szData = (char *) Data; /*------------------------------------------------------------------------*/ for( i = bits = 0 ; i < MACADDRESS_STR_LEN; i++ ) { bits ^= szCrypt[i]; bits += szCrypt[i]; } for( i = t = *szDest = 0; i < nLen; i++, t++ ) { k = szData[i] ^ ( bits + i ); switch( i % 3 ) { case 0 : szDest[t] = ((k & 0xFC) >> 2) + CH_START ; szDest[t+1] = ((k & 0x03) << 4) + CH_START ; szDest[t+2] = '\0'; break; case 1 : szDest[t] += (( k & 0xF0 ) >> 4 ); szDest[t+1] = (( k & 0x0F ) << 2 ) + CH_START ; szDest[t+2] = '\0'; break; case 2 : szDest[t] += (( k & 0xC0 ) >> 6 ); szDest[t+1] = ( k & 0x3F ) + CH_START ; szDest[t+2] = '\0'; t++; break; } } return( strlen( szDest )) ; } /*============================================================================*/ /******************************************************************************* * wl_wep_decode() ******************************************************************************* * * DESCRIPTION: * * This function decodes a set of WEP keys for use by the card. * * PARAMETERS: * * szCrypt - * szDest - * Data - * * RETURNS: * * OK * ******************************************************************************/ int wl_wep_decode( char *szCrypt, void *Dest, char *szData ) { int i; int t; int nLen; char bits; char *szDest = Dest; /*------------------------------------------------------------------------*/ for( i = bits = 0 ; i < 12; i++ ) { bits ^= szCrypt[i] ; bits += szCrypt[i] ; } nLen = ( strlen( szData ) * 3) / 4 ; for( i = t = 0; i < nLen; i++, t++ ) { switch( i % 3 ) { case 0 : szDest[i] = ((( szData[t]-CH_START ) & 0x3f ) << 2 ) + ((( szData[t+1]-CH_START ) & 0x30 ) >> 4 ); break; case 1 : szDest[i] = ((( szData[t]-CH_START ) & 0x0f ) << 4 ) + ((( szData[t+1]-CH_START ) & 0x3c ) >> 2 ); break; case 2 : szDest[i] = ((( szData[t]-CH_START ) & 0x03 ) << 6 ) + (( szData[t+1]-CH_START ) & 0x3f ); t++; break; } szDest[i] ^= ( bits + i ) ; } return( i ) ; } /*============================================================================*/
gpl-2.0
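/* wl_wep_code() above is, at heart, a base64-style repack: every 3 key
 * bytes become 4 printable 6-bit symbols offset by CH_START, mixed with a
 * rolling XOR. This sketch keeps only the 6-bit repacking and handles
 * whole 3-byte groups; OFFSET is a hypothetical printable base standing
 * in for CH_START, and the XOR obfuscation step is omitted.
 */
#include <stdio.h>

#define OFFSET '0'	/* assumed stand-in, not the driver's CH_START */

static unsigned int pack_6bit(const unsigned char *in, unsigned int len,
			      char *out)
{
	unsigned int i, t = 0;

	for (i = 0; i + 3 <= len; i += 3) {
		out[t++] = (in[i] >> 2) + OFFSET;
		out[t++] = (((in[i] & 0x03) << 4) | (in[i + 1] >> 4)) + OFFSET;
		out[t++] = (((in[i + 1] & 0x0f) << 2) | (in[i + 2] >> 6)) + OFFSET;
		out[t++] = (in[i + 2] & 0x3f) + OFFSET;
	}
	out[t] = '\0';
	return t;	/* (len * 4) / 3, inverse of the decoder's nLen math */
}

int main(void)
{
	const unsigned char key[3] = { 0xab, 0xcd, 0xef };
	char out[8];

	pack_6bit(key, sizeof(key), out);
	printf("%s\n", out);	/* 4 printable symbols for 3 key bytes */
	return 0;
}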
onealtom/MYD-C335X-Linux-Kernel
drivers/xen/xen-pciback/passthrough.c
8501
4666
/* * PCI Backend - Provides restricted access to the real PCI bus topology * to the frontend * * Author: Ryan Wilson <hap9@epoch.ncsc.mil> */ #include <linux/list.h> #include <linux/pci.h> #include <linux/mutex.h> #include "pciback.h" struct passthrough_dev_data { /* Access to dev_list must be protected by lock */ struct list_head dev_list; struct mutex lock; }; static struct pci_dev *__xen_pcibk_get_pci_dev(struct xen_pcibk_device *pdev, unsigned int domain, unsigned int bus, unsigned int devfn) { struct passthrough_dev_data *dev_data = pdev->pci_dev_data; struct pci_dev_entry *dev_entry; struct pci_dev *dev = NULL; mutex_lock(&dev_data->lock); list_for_each_entry(dev_entry, &dev_data->dev_list, list) { if (domain == (unsigned int)pci_domain_nr(dev_entry->dev->bus) && bus == (unsigned int)dev_entry->dev->bus->number && devfn == dev_entry->dev->devfn) { dev = dev_entry->dev; break; } } mutex_unlock(&dev_data->lock); return dev; } static int __xen_pcibk_add_pci_dev(struct xen_pcibk_device *pdev, struct pci_dev *dev, int devid, publish_pci_dev_cb publish_cb) { struct passthrough_dev_data *dev_data = pdev->pci_dev_data; struct pci_dev_entry *dev_entry; unsigned int domain, bus, devfn; int err; dev_entry = kmalloc(sizeof(*dev_entry), GFP_KERNEL); if (!dev_entry) return -ENOMEM; dev_entry->dev = dev; mutex_lock(&dev_data->lock); list_add_tail(&dev_entry->list, &dev_data->dev_list); mutex_unlock(&dev_data->lock); /* Publish this device. */ domain = (unsigned int)pci_domain_nr(dev->bus); bus = (unsigned int)dev->bus->number; devfn = dev->devfn; err = publish_cb(pdev, domain, bus, devfn, devid); return err; } static void __xen_pcibk_release_pci_dev(struct xen_pcibk_device *pdev, struct pci_dev *dev) { struct passthrough_dev_data *dev_data = pdev->pci_dev_data; struct pci_dev_entry *dev_entry, *t; struct pci_dev *found_dev = NULL; mutex_lock(&dev_data->lock); list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) { if (dev_entry->dev == dev) { list_del(&dev_entry->list); found_dev = dev_entry->dev; kfree(dev_entry); } } mutex_unlock(&dev_data->lock); if (found_dev) pcistub_put_pci_dev(found_dev); } static int __xen_pcibk_init_devices(struct xen_pcibk_device *pdev) { struct passthrough_dev_data *dev_data; dev_data = kmalloc(sizeof(*dev_data), GFP_KERNEL); if (!dev_data) return -ENOMEM; mutex_init(&dev_data->lock); INIT_LIST_HEAD(&dev_data->dev_list); pdev->pci_dev_data = dev_data; return 0; } static int __xen_pcibk_publish_pci_roots(struct xen_pcibk_device *pdev, publish_pci_root_cb publish_root_cb) { int err = 0; struct passthrough_dev_data *dev_data = pdev->pci_dev_data; struct pci_dev_entry *dev_entry, *e; struct pci_dev *dev; int found; unsigned int domain, bus; mutex_lock(&dev_data->lock); list_for_each_entry(dev_entry, &dev_data->dev_list, list) { /* Only publish this device as a root if none of its * parent bridges are exported */ found = 0; dev = dev_entry->dev->bus->self; for (; !found && dev != NULL; dev = dev->bus->self) { list_for_each_entry(e, &dev_data->dev_list, list) { if (dev == e->dev) { found = 1; break; } } } domain = (unsigned int)pci_domain_nr(dev_entry->dev->bus); bus = (unsigned int)dev_entry->dev->bus->number; if (!found) { err = publish_root_cb(pdev, domain, bus); if (err) break; } } mutex_unlock(&dev_data->lock); return err; } static void __xen_pcibk_release_devices(struct xen_pcibk_device *pdev) { struct passthrough_dev_data *dev_data = pdev->pci_dev_data; struct pci_dev_entry *dev_entry, *t; list_for_each_entry_safe(dev_entry, t, &dev_data->dev_list, list) { 
list_del(&dev_entry->list); pcistub_put_pci_dev(dev_entry->dev); kfree(dev_entry); } kfree(dev_data); pdev->pci_dev_data = NULL; } static int __xen_pcibk_get_pcifront_dev(struct pci_dev *pcidev, struct xen_pcibk_device *pdev, unsigned int *domain, unsigned int *bus, unsigned int *devfn) { *domain = pci_domain_nr(pcidev->bus); *bus = pcidev->bus->number; *devfn = pcidev->devfn; return 1; } const struct xen_pcibk_backend xen_pcibk_passthrough_backend = { .name = "passthrough", .init = __xen_pcibk_init_devices, .free = __xen_pcibk_release_devices, .find = __xen_pcibk_get_pcifront_dev, .publish = __xen_pcibk_publish_pci_roots, .release = __xen_pcibk_release_pci_dev, .add = __xen_pcibk_add_pci_dev, .get = __xen_pcibk_get_pci_dev, };
gpl-2.0
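/* The passthrough backend above keeps exported devices on a linked list
 * and takes dev_data->lock around every traversal. The same discipline in
 * a self-contained sketch: a pthread mutex stands in for the kernel
 * mutex, a hand-rolled singly linked list for list_head, and integer
 * triples for the pci_dev lookup keys.
 */
#include <pthread.h>
#include <stdlib.h>

struct dev_entry {
	int domain, bus, devfn;
	struct dev_entry *next;
};

static struct dev_entry *dev_list;
static pthread_mutex_t dev_lock = PTHREAD_MUTEX_INITIALIZER;

/* Mirrors __xen_pcibk_get_pci_dev(): match on (domain, bus, devfn). */
static struct dev_entry *find_dev(int domain, int bus, int devfn)
{
	struct dev_entry *e, *found = NULL;

	pthread_mutex_lock(&dev_lock);
	for (e = dev_list; e; e = e->next) {
		if (e->domain == domain && e->bus == bus &&
		    e->devfn == devfn) {
			found = e;
			break;
		}
	}
	pthread_mutex_unlock(&dev_lock);
	return found;
}

/* Mirrors __xen_pcibk_add_pci_dev(): allocate outside the lock, link
 * under it, so the critical section stays short.
 */
static int add_dev(int domain, int bus, int devfn)
{
	struct dev_entry *e = malloc(sizeof(*e));

	if (!e)
		return -1;
	e->domain = domain;
	e->bus = bus;
	e->devfn = devfn;
	pthread_mutex_lock(&dev_lock);
	e->next = dev_list;
	dev_list = e;
	pthread_mutex_unlock(&dev_lock);
	return 0;
}

int main(void)
{
	if (add_dev(0, 1, 0x28))
		return 1;
	return find_dev(0, 1, 0x28) ? 0 : 1;
}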
huiyiqun/kernel_flo
drivers/tty/serial/8250/8250_hub6.c
12341
1184
/* * Copyright (C) 2005 Russell King. * Data taken from include/asm-i386/serial.h * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/init.h> #include <linux/serial_8250.h> #define HUB6(card,port) \ { \ .iobase = 0x302, \ .irq = 3, \ .uartclk = 1843200, \ .iotype = UPIO_HUB6, \ .flags = UPF_BOOT_AUTOCONF, \ .hub6 = (card) << 6 | (port) << 3 | 1, \ } static struct plat_serial8250_port hub6_data[] = { HUB6(0, 0), HUB6(0, 1), HUB6(0, 2), HUB6(0, 3), HUB6(0, 4), HUB6(0, 5), HUB6(1, 0), HUB6(1, 1), HUB6(1, 2), HUB6(1, 3), HUB6(1, 4), HUB6(1, 5), { }, }; static struct platform_device hub6_device = { .name = "serial8250", .id = PLAT8250_DEV_HUB6, .dev = { .platform_data = hub6_data, }, }; static int __init hub6_init(void) { return platform_device_register(&hub6_device); } module_init(hub6_init); MODULE_AUTHOR("Russell King"); MODULE_DESCRIPTION("8250 serial probe module for Hub6 cards"); MODULE_LICENSE("GPL");
gpl-2.0
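/* The HUB6() initializer above packs a card/port pair into the single
 * hub6 selector byte: card in bits 6 and up, port in bits 3-5, enable in
 * bit 0. A round-trip check of that encoding for the twelve ports the
 * table declares:
 */
#include <assert.h>

#define HUB6_SEL(card, port)	((card) << 6 | (port) << 3 | 1)

int main(void)
{
	int card, port;

	for (card = 0; card <= 1; card++)
		for (port = 0; port <= 5; port++) {
			unsigned char sel = HUB6_SEL(card, port);

			assert((sel >> 6) == card);		/* bits 6-7 */
			assert(((sel >> 3) & 0x7) == port);	/* bits 3-5 */
			assert(sel & 1);			/* enable */
		}
	return 0;
}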
ShinySide/SM-530T
drivers/misc/dmverity_query.c
54
2933
#include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/types.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> #include <linux/mm.h> #include <linux/highmem.h> #include <soc/qcom/scm.h> #define OEM_SVC_CALLS 0x03000000 #define MAKE_OEM_SCM_CMD(svc_id, cmd_id) ((((svc_id << 8) | (cmd_id)) & 0xFFFF) | OEM_SVC_CALLS) #define SCM_DMVERITY_CMD_ID 0x1 #define SCM_SVC_DMVERITY 245 uint32_t dmverity_resp = 4; static int verity_scm_call(void) { int ret; struct scm_desc descrp = {0}; descrp.arginfo = SCM_ARGS(4, SCM_VAL, SCM_VAL, SCM_RW, SCM_VAL); descrp.args[0] = 3; /* command: read */ descrp.args[1] = 0; descrp.args[2] = virt_to_phys((void *)&dmverity_resp); /* response */ descrp.args[3] = 4; ret = scm_call2(MAKE_OEM_SCM_CMD(SCM_SVC_DMVERITY, SCM_DMVERITY_CMD_ID), &descrp); return dmverity_resp; } #define DRIVER_DESC "Read whether odin flash succeeded" ssize_t dmverity_read(struct file *filep, char __user *buf, size_t size, loff_t *offset) { uint32_t odin_flag; /* First check is to get rid of integer overflow exploits */ if (size < sizeof(uint32_t)) { printk(KERN_ERR "Size must be at least %zu\n", sizeof(uint32_t)); return -EINVAL; } odin_flag = verity_scm_call(); printk(KERN_INFO "dmverity: odin flag: %x\n", odin_flag); if (copy_to_user(buf, &odin_flag, sizeof(uint32_t))) { printk(KERN_ERR "Copy to user failed\n"); return -1; } else return sizeof(uint32_t); } static const struct file_operations dmverity_proc_fops = { .read = dmverity_read, }; /** * dmverity_odin_flag_read_init - Initialization function for DMVERITY * * It creates and initializes the dmverity proc entry with its read handler */ static int __init dmverity_odin_flag_read_init(void) { /* extern int boot_mode_recovery; */ if (/* boot_mode_recovery == */ 1) { /* Only create this in recovery mode. Not sure why I am doing this */ if (proc_create("dmverity_odin_flag", 0644, NULL, &dmverity_proc_fops) == NULL) { printk(KERN_ERR "dmverity_odin_flag_read_init: Error creating proc entry\n"); goto error_return; } printk(KERN_INFO "dmverity_odin_flag_read_init:: Registering /proc/dmverity_odin_flag interface\n"); } else { printk(KERN_INFO "dmverity_odin_flag_read_init:: not enabling in non-recovery mode\n"); goto error_return; } return 0; error_return: return -1; } /** * dmverity_odin_flag_read_exit - Cleanup code for DMVERITY * * It removes the /proc/dmverity_odin_flag entry and does the required cleanup */ static void __exit dmverity_odin_flag_read_exit(void) { remove_proc_entry("dmverity_odin_flag", NULL); printk(KERN_INFO "Deregistering /proc/dmverity_odin_flag interface\n"); } module_init(dmverity_odin_flag_read_init); module_exit(dmverity_odin_flag_read_exit); MODULE_DESCRIPTION(DRIVER_DESC);
gpl-2.0
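/* dmverity_read() above follows the usual contract for a fixed-size proc
 * read: reject undersized buffers up front, copy exactly one uint32_t,
 * and report that many bytes. The same contract in a user-space sketch,
 * with memcpy standing in for copy_to_user:
 */
#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <sys/types.h>

static ssize_t read_flag(void *buf, size_t size, uint32_t flag)
{
	if (size < sizeof(uint32_t))
		return -EINVAL;		/* undersized read is a hard error */
	memcpy(buf, &flag, sizeof(uint32_t));
	return sizeof(uint32_t);	/* whole flag or nothing, no partials */
}

int main(void)
{
	uint32_t out = 0;

	if (read_flag(&out, 1, 0x1) != -EINVAL)	/* too small: rejected */
		return 1;
	return read_flag(&out, sizeof(out), 0x1) == sizeof(uint32_t) ? 0 : 1;
}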
rminnich/linux
arch/arm64/kernel/hibernate.c
54
14524
/* * Hibernate support specific for ARM64 * * Derived from work on ARM hibernation support by: * * Ubuntu project, hibernation support for mach-dove * Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu) * Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.) * https://lkml.org/lkml/2010/6/18/4 * https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html * https://patchwork.kernel.org/patch/96442/ * * Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl> * * License terms: GNU General Public License (GPL) version 2 */ #define pr_fmt(x) "hibernate: " x #include <linux/cpu.h> #include <linux/kvm_host.h> #include <linux/mm.h> #include <linux/pm.h> #include <linux/sched.h> #include <linux/suspend.h> #include <linux/utsname.h> #include <linux/version.h> #include <asm/barrier.h> #include <asm/cacheflush.h> #include <asm/cputype.h> #include <asm/irqflags.h> #include <asm/kexec.h> #include <asm/memory.h> #include <asm/mmu_context.h> #include <asm/pgalloc.h> #include <asm/pgtable.h> #include <asm/pgtable-hwdef.h> #include <asm/sections.h> #include <asm/smp.h> #include <asm/smp_plat.h> #include <asm/suspend.h> #include <asm/sysreg.h> #include <asm/virt.h> /* * Hibernate core relies on this value being 0 on resume, and marks it * __nosavedata assuming it will keep the resume kernel's '0' value. This * doesn't happen with either KASLR. * * defined as "__visible int in_suspend __nosavedata" in * kernel/power/hibernate.c */ extern int in_suspend; /* Do we need to reset el2? */ #define el2_reset_needed() (is_hyp_mode_available() && !is_kernel_in_hyp_mode()) /* temporary el2 vectors in the __hibernate_exit_text section. */ extern char hibernate_el2_vectors[]; /* hyp-stub vectors, used to restore el2 during resume from hibernate. */ extern char __hyp_stub_vectors[]; /* * The logical cpu number we should resume on, initialised to a non-cpu * number. */ static int sleep_cpu = -EINVAL; /* * Values that may not change over hibernate/resume. We put the build number * and date in here so that we guarantee not to resume with a different * kernel. */ struct arch_hibernate_hdr_invariants { char uts_version[__NEW_UTS_LEN + 1]; }; /* These values need to be known across a hibernate/restore. */ static struct arch_hibernate_hdr { struct arch_hibernate_hdr_invariants invariants; /* These are needed to find the relocated kernel if built with kaslr */ phys_addr_t ttbr1_el1; void (*reenter_kernel)(void); /* * We need to know where the __hyp_stub_vectors are after restore to * re-configure el2.
*/ phys_addr_t __hyp_stub_vectors; u64 sleep_cpu_mpidr; } resume_hdr; static inline void arch_hdr_invariants(struct arch_hibernate_hdr_invariants *i) { memset(i, 0, sizeof(*i)); memcpy(i->uts_version, init_utsname()->version, sizeof(i->uts_version)); } int pfn_is_nosave(unsigned long pfn) { unsigned long nosave_begin_pfn = sym_to_pfn(&__nosave_begin); unsigned long nosave_end_pfn = sym_to_pfn(&__nosave_end - 1); return ((pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn)) || crash_is_nosave(pfn); } void notrace save_processor_state(void) { WARN_ON(num_online_cpus() != 1); } void notrace restore_processor_state(void) { } int arch_hibernation_header_save(void *addr, unsigned int max_size) { struct arch_hibernate_hdr *hdr = addr; if (max_size < sizeof(*hdr)) return -EOVERFLOW; arch_hdr_invariants(&hdr->invariants); hdr->ttbr1_el1 = __pa_symbol(swapper_pg_dir); hdr->reenter_kernel = _cpu_resume; /* We can't use __hyp_get_vectors() because kvm may still be loaded */ if (el2_reset_needed()) hdr->__hyp_stub_vectors = __pa_symbol(__hyp_stub_vectors); else hdr->__hyp_stub_vectors = 0; /* Save the mpidr of the cpu we called cpu_suspend() on... */ if (sleep_cpu < 0) { pr_err("Failing to hibernate on an unknown CPU.\n"); return -ENODEV; } hdr->sleep_cpu_mpidr = cpu_logical_map(sleep_cpu); pr_info("Hibernating on CPU %d [mpidr:0x%llx]\n", sleep_cpu, hdr->sleep_cpu_mpidr); return 0; } EXPORT_SYMBOL(arch_hibernation_header_save); int arch_hibernation_header_restore(void *addr) { int ret; struct arch_hibernate_hdr_invariants invariants; struct arch_hibernate_hdr *hdr = addr; arch_hdr_invariants(&invariants); if (memcmp(&hdr->invariants, &invariants, sizeof(invariants))) { pr_crit("Hibernate image not generated by this kernel!\n"); return -EINVAL; } sleep_cpu = get_logical_index(hdr->sleep_cpu_mpidr); pr_info("Hibernated on CPU %d [mpidr:0x%llx]\n", sleep_cpu, hdr->sleep_cpu_mpidr); if (sleep_cpu < 0) { pr_crit("Hibernated on a CPU not known to this kernel!\n"); sleep_cpu = -EINVAL; return -EINVAL; } if (!cpu_online(sleep_cpu)) { pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n"); ret = cpu_up(sleep_cpu); if (ret) { pr_err("Failed to bring hibernate-CPU up!\n"); sleep_cpu = -EINVAL; return ret; } } resume_hdr = *hdr; return 0; } EXPORT_SYMBOL(arch_hibernation_header_restore); /* * Copies length bytes, starting at src_start, into a new page, * performs cache maintenance, then maps it at the specified low * address as executable. * * This is used by hibernate to copy the code it needs to execute when * overwriting the kernel text. This function generates a new set of page * tables, which it loads into ttbr0. * * Length is provided as we probably only want 4K of data, even on a 64K * page system.
*/ static int create_safe_exec_page(void *src_start, size_t length, unsigned long dst_addr, phys_addr_t *phys_dst_addr, void *(*allocator)(gfp_t mask), gfp_t mask) { int rc = 0; pgd_t *pgd; pud_t *pud; pmd_t *pmd; pte_t *pte; unsigned long dst = (unsigned long)allocator(mask); if (!dst) { rc = -ENOMEM; goto out; } memcpy((void *)dst, src_start, length); flush_icache_range(dst, dst + length); pgd = pgd_offset_raw(allocator(mask), dst_addr); if (pgd_none(*pgd)) { pud = allocator(mask); if (!pud) { rc = -ENOMEM; goto out; } pgd_populate(&init_mm, pgd, pud); } pud = pud_offset(pgd, dst_addr); if (pud_none(*pud)) { pmd = allocator(mask); if (!pmd) { rc = -ENOMEM; goto out; } pud_populate(&init_mm, pud, pmd); } pmd = pmd_offset(pud, dst_addr); if (pmd_none(*pmd)) { pte = allocator(mask); if (!pte) { rc = -ENOMEM; goto out; } pmd_populate_kernel(&init_mm, pmd, pte); } pte = pte_offset_kernel(pmd, dst_addr); set_pte(pte, __pte(virt_to_phys((void *)dst) | pgprot_val(PAGE_KERNEL_EXEC))); /* * Load our new page tables. A strict BBM approach requires that we * ensure that TLBs are free of any entries that may overlap with the * global mappings we are about to install. * * For a real hibernate/resume cycle TTBR0 currently points to a zero * page, but TLBs may contain stale ASID-tagged entries (e.g. for EFI * runtime services), while for a userspace-driven test_resume cycle it * points to userspace page tables (and we must point it at a zero page * ourselves). Elsewhere we only (un)install the idmap with preemption * disabled, so T0SZ should be as required regardless. */ cpu_set_reserved_ttbr0(); local_flush_tlb_all(); write_sysreg(virt_to_phys(pgd), ttbr0_el1); isb(); *phys_dst_addr = virt_to_phys((void *)dst); out: return rc; } #define dcache_clean_range(start, end) __flush_dcache_area(start, (end - start)) int swsusp_arch_suspend(void) { int ret = 0; unsigned long flags; struct sleep_stack_data state; if (cpus_are_stuck_in_kernel()) { pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n"); return -EBUSY; } local_dbg_save(flags); if (__cpu_suspend_enter(&state)) { /* make the crash dump kernel image visible/saveable */ crash_prepare_suspend(); sleep_cpu = smp_processor_id(); ret = swsusp_save(); } else { /* Clean kernel core startup/idle code to PoC */ dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end); dcache_clean_range(__idmap_text_start, __idmap_text_end); /* Clean kvm setup code to PoC? */ if (el2_reset_needed()) dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end); /* make the crash dump kernel image protected again */ crash_post_resume(); /* * Tell the hibernation core that we've just restored * the memory */ in_suspend = 0; sleep_cpu = -EINVAL; __cpu_suspend_exit(); } local_dbg_restore(flags); return ret; } static void _copy_pte(pte_t *dst_pte, pte_t *src_pte, unsigned long addr) { pte_t pte = *src_pte; if (pte_valid(pte)) { /* * Resume will overwrite areas that may be marked * read only (code, rodata). Clear the RDONLY bit from * the temporary mappings we use during restore. */ set_pte(dst_pte, pte_clear_rdonly(pte)); } else if (debug_pagealloc_enabled() && !pte_none(pte)) { /* * debug_pagealloc will remove the PTE_VALID bit if * the page isn't in use by the resume kernel. It may have * been in use by the original kernel, in which case we need * to put it back in our copy to do the restore. * * Before marking this entry valid, check that the pfn is * mapped.
*/ BUG_ON(!pfn_valid(pte_pfn(pte))); set_pte(dst_pte, pte_mkpresent(pte_clear_rdonly(pte))); } } static int copy_pte(pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long start, unsigned long end) { pte_t *src_pte; pte_t *dst_pte; unsigned long addr = start; dst_pte = (pte_t *)get_safe_page(GFP_ATOMIC); if (!dst_pte) return -ENOMEM; pmd_populate_kernel(&init_mm, dst_pmd, dst_pte); dst_pte = pte_offset_kernel(dst_pmd, start); src_pte = pte_offset_kernel(src_pmd, start); do { _copy_pte(dst_pte, src_pte, addr); } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end); return 0; } static int copy_pmd(pud_t *dst_pud, pud_t *src_pud, unsigned long start, unsigned long end) { pmd_t *src_pmd; pmd_t *dst_pmd; unsigned long next; unsigned long addr = start; if (pud_none(*dst_pud)) { dst_pmd = (pmd_t *)get_safe_page(GFP_ATOMIC); if (!dst_pmd) return -ENOMEM; pud_populate(&init_mm, dst_pud, dst_pmd); } dst_pmd = pmd_offset(dst_pud, start); src_pmd = pmd_offset(src_pud, start); do { next = pmd_addr_end(addr, end); if (pmd_none(*src_pmd)) continue; if (pmd_table(*src_pmd)) { if (copy_pte(dst_pmd, src_pmd, addr, next)) return -ENOMEM; } else { set_pmd(dst_pmd, __pmd(pmd_val(*src_pmd) & ~PMD_SECT_RDONLY)); } } while (dst_pmd++, src_pmd++, addr = next, addr != end); return 0; } static int copy_pud(pgd_t *dst_pgd, pgd_t *src_pgd, unsigned long start, unsigned long end) { pud_t *dst_pud; pud_t *src_pud; unsigned long next; unsigned long addr = start; if (pgd_none(*dst_pgd)) { dst_pud = (pud_t *)get_safe_page(GFP_ATOMIC); if (!dst_pud) return -ENOMEM; pgd_populate(&init_mm, dst_pgd, dst_pud); } dst_pud = pud_offset(dst_pgd, start); src_pud = pud_offset(src_pgd, start); do { next = pud_addr_end(addr, end); if (pud_none(*src_pud)) continue; if (pud_table(*(src_pud))) { if (copy_pmd(dst_pud, src_pud, addr, next)) return -ENOMEM; } else { set_pud(dst_pud, __pud(pud_val(*src_pud) & ~PMD_SECT_RDONLY)); } } while (dst_pud++, src_pud++, addr = next, addr != end); return 0; } static int copy_page_tables(pgd_t *dst_pgd, unsigned long start, unsigned long end) { unsigned long next; unsigned long addr = start; pgd_t *src_pgd = pgd_offset_k(start); dst_pgd = pgd_offset_raw(dst_pgd, start); do { next = pgd_addr_end(addr, end); if (pgd_none(*src_pgd)) continue; if (copy_pud(dst_pgd, src_pgd, addr, next)) return -ENOMEM; } while (dst_pgd++, src_pgd++, addr = next, addr != end); return 0; } /* * Setup then Resume from the hibernate image using swsusp_arch_suspend_exit(). * * Memory allocated by get_safe_page() will be dealt with by the hibernate code, * so we don't need to free it here. */ int swsusp_arch_resume(void) { int rc = 0; void *zero_page; size_t exit_size; pgd_t *tmp_pg_dir; phys_addr_t phys_hibernate_exit; void __noreturn (*hibernate_exit)(phys_addr_t, phys_addr_t, void *, void *, phys_addr_t, phys_addr_t); /* * Restoring the memory image will overwrite the ttbr1 page tables. * Create a second copy of just the linear map, and use this when * restoring. */ tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC); if (!tmp_pg_dir) { pr_err("Failed to allocate memory for temporary page tables.\n"); rc = -ENOMEM; goto out; } rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, 0); if (rc) goto out; /* * We need a zero page that is zero before & after resume in order to * break before make on the ttbr1 page tables.
*/ zero_page = (void *)get_safe_page(GFP_ATOMIC); if (!zero_page) { pr_err("Failed to allocate zero page.\n"); rc = -ENOMEM; goto out; } /* * Locate the exit code in the bottom-but-one page, so that *NULL * still has disastrous effects. */ hibernate_exit = (void *)PAGE_SIZE; exit_size = __hibernate_exit_text_end - __hibernate_exit_text_start; /* * Copy swsusp_arch_suspend_exit() to a safe page. This will generate * a new set of ttbr0 page tables and load them. */ rc = create_safe_exec_page(__hibernate_exit_text_start, exit_size, (unsigned long)hibernate_exit, &phys_hibernate_exit, (void *)get_safe_page, GFP_ATOMIC); if (rc) { pr_err("Failed to create safe executable page for hibernate_exit code.\n"); goto out; } /* * The hibernate exit text contains a set of el2 vectors that will * be executed at el2 with the mmu off in order to reload hyp-stub. */ __flush_dcache_area(hibernate_exit, exit_size); /* * KASLR will cause the el2 vectors to be in a different location in * the resumed kernel. Load hibernate's temporary copy into el2. * * We can skip this step if we booted at EL1, or are running with VHE. */ if (el2_reset_needed()) { phys_addr_t el2_vectors = phys_hibernate_exit; /* base */ el2_vectors += hibernate_el2_vectors - __hibernate_exit_text_start; /* offset */ __hyp_set_vectors(el2_vectors); } hibernate_exit(virt_to_phys(tmp_pg_dir), resume_hdr.ttbr1_el1, resume_hdr.reenter_kernel, restore_pblist, resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page)); out: return rc; } int hibernate_resume_nonboot_cpu_disable(void) { if (sleep_cpu < 0) { pr_err("Failing to resume from hibernate on an unknown CPU.\n"); return -ENODEV; } return freeze_secondary_cpus(sleep_cpu); }
gpl-2.0
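/* copy_pte()/copy_pmd()/copy_pud() above all share one shape: skip empty
 * source slots, allocate a destination child table on first use, then
 * recurse one level down. A two-level user-space sketch of that shape,
 * with array indexes in place of hardware translation and made-up sizes:
 */
#include <stdlib.h>

#define ENTRIES 8

struct table { void *slot[ENTRIES]; };

static int copy_level(struct table *dst, const struct table *src, int leaf)
{
	int i;

	for (i = 0; i < ENTRIES; i++) {
		if (!src->slot[i])
			continue;	/* the p??_none() check */
		if (leaf) {
			dst->slot[i] = src->slot[i];	/* copy the "pte" */
			continue;
		}
		if (!dst->slot[i]) {	/* allocate a missing child table */
			dst->slot[i] = calloc(1, sizeof(struct table));
			if (!dst->slot[i])
				return -1;	/* the -ENOMEM path */
		}
		if (copy_level(dst->slot[i], src->slot[i], 1))
			return -1;
	}
	return 0;
}

int main(void)
{
	struct table src = { { 0 } }, dst = { { 0 } }, leaf = { { 0 } };
	int page = 42;

	leaf.slot[3] = &page;
	src.slot[1] = &leaf;
	return copy_level(&dst, &src, 0);	/* dst now mirrors src */
}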
sivu/linux
drivers/gpu/drm/nouveau/nvkm/engine/disp/dport.c
310
10714
/* * Copyright 2013 Red Hat Inc. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS IN THE SOFTWARE. * * Authors: Ben Skeggs */ #include "dport.h" #include "outpdp.h" #include "nv50.h" #include <subdev/bios.h> #include <subdev/bios/init.h> #include <subdev/i2c.h> #include <nvif/class.h> /****************************************************************************** * link training *****************************************************************************/ struct dp_state { struct nvkm_output_dp *outp; int link_nr; u32 link_bw; u8 stat[6]; u8 conf[4]; bool pc2; u8 pc2stat; u8 pc2conf[2]; }; static int dp_set_link_config(struct dp_state *dp) { struct nvkm_output_dp *outp = dp->outp; struct nvkm_disp *disp = outp->base.disp; struct nvkm_subdev *subdev = &disp->engine.subdev; struct nvkm_bios *bios = subdev->device->bios; struct nvbios_init init = { .subdev = subdev, .bios = bios, .offset = 0x0000, .outp = &outp->base.info, .crtc = -1, .execute = 1, }; u32 lnkcmp; u8 sink[2]; int ret; OUTP_DBG(&outp->base, "%d lanes at %d KB/s", dp->link_nr, dp->link_bw); /* set desired link configuration on the source */ if ((lnkcmp = dp->outp->info.lnkcmp)) { if (outp->version < 0x30) { while ((dp->link_bw / 10) < nvbios_rd16(bios, lnkcmp)) lnkcmp += 4; init.offset = nvbios_rd16(bios, lnkcmp + 2); } else { while ((dp->link_bw / 27000) < nvbios_rd08(bios, lnkcmp)) lnkcmp += 3; init.offset = nvbios_rd16(bios, lnkcmp + 1); } nvbios_exec(&init); } ret = outp->func->lnk_ctl(outp, dp->link_nr, dp->link_bw / 27000, outp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP); if (ret) { if (ret < 0) OUTP_ERR(&outp->base, "lnk_ctl failed with %d", ret); return ret; } outp->func->lnk_pwr(outp, dp->link_nr); /* set desired link configuration on the sink */ sink[0] = dp->link_bw / 27000; sink[1] = dp->link_nr; if (outp->dpcd[DPCD_RC02] & DPCD_RC02_ENHANCED_FRAME_CAP) sink[1] |= DPCD_LC01_ENHANCED_FRAME_EN; return nvkm_wraux(outp->aux, DPCD_LC00_LINK_BW_SET, sink, 2); } static void dp_set_training_pattern(struct dp_state *dp, u8 pattern) { struct nvkm_output_dp *outp = dp->outp; u8 sink_tp; OUTP_DBG(&outp->base, "training pattern %d", pattern); outp->func->pattern(outp, pattern); nvkm_rdaux(outp->aux, DPCD_LC02, &sink_tp, 1); sink_tp &= ~DPCD_LC02_TRAINING_PATTERN_SET; sink_tp |= pattern; nvkm_wraux(outp->aux, DPCD_LC02, &sink_tp, 1); } static int dp_link_train_commit(struct dp_state *dp, bool pc) { struct nvkm_output_dp *outp = dp->outp; int ret, i; for (i = 0; i < dp->link_nr; i++) { u8 lane = (dp->stat[4 + (i >> 1)] >> ((i & 1) * 4)) & 0xf; u8 
lpc2 = (dp->pc2stat >> (i * 2)) & 0x3; u8 lpre = (lane & 0x0c) >> 2; u8 lvsw = (lane & 0x03) >> 0; u8 hivs = 3 - lpre; u8 hipe = 3; u8 hipc = 3; if (lpc2 >= hipc) lpc2 = hipc | DPCD_LC0F_LANE0_MAX_POST_CURSOR2_REACHED; if (lpre >= hipe) { lpre = hipe | DPCD_LC03_MAX_SWING_REACHED; /* yes. */ lvsw = hivs = 3 - (lpre & 3); } else if (lvsw >= hivs) { lvsw = hivs | DPCD_LC03_MAX_SWING_REACHED; } dp->conf[i] = (lpre << 3) | lvsw; dp->pc2conf[i >> 1] |= lpc2 << ((i & 1) * 4); OUTP_DBG(&outp->base, "config lane %d %02x %02x", i, dp->conf[i], lpc2); outp->func->drv_ctl(outp, i, lvsw & 3, lpre & 3, lpc2 & 3); } ret = nvkm_wraux(outp->aux, DPCD_LC03(0), dp->conf, 4); if (ret) return ret; if (pc) { ret = nvkm_wraux(outp->aux, DPCD_LC0F, dp->pc2conf, 2); if (ret) return ret; } return 0; } static int dp_link_train_update(struct dp_state *dp, bool pc, u32 delay) { struct nvkm_output_dp *outp = dp->outp; int ret; if (outp->dpcd[DPCD_RC0E_AUX_RD_INTERVAL]) mdelay(outp->dpcd[DPCD_RC0E_AUX_RD_INTERVAL] * 4); else udelay(delay); ret = nvkm_rdaux(outp->aux, DPCD_LS02, dp->stat, 6); if (ret) return ret; if (pc) { ret = nvkm_rdaux(outp->aux, DPCD_LS0C, &dp->pc2stat, 1); if (ret) dp->pc2stat = 0x00; OUTP_DBG(&outp->base, "status %6ph pc2 %02x", dp->stat, dp->pc2stat); } else { OUTP_DBG(&outp->base, "status %6ph", dp->stat); } return 0; } static int dp_link_train_cr(struct dp_state *dp) { bool cr_done = false, abort = false; int voltage = dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET; int tries = 0, i; dp_set_training_pattern(dp, 1); do { if (dp_link_train_commit(dp, false) || dp_link_train_update(dp, false, 100)) break; cr_done = true; for (i = 0; i < dp->link_nr; i++) { u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf; if (!(lane & DPCD_LS02_LANE0_CR_DONE)) { cr_done = false; if (dp->conf[i] & DPCD_LC03_MAX_SWING_REACHED) abort = true; break; } } if ((dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET) != voltage) { voltage = dp->conf[0] & DPCD_LC03_VOLTAGE_SWING_SET; tries = 0; } } while (!cr_done && !abort && ++tries < 5); return cr_done ? 0 : -1; } static int dp_link_train_eq(struct dp_state *dp) { struct nvkm_output_dp *outp = dp->outp; bool eq_done = false, cr_done = true; int tries = 0, i; if (outp->dpcd[2] & DPCD_RC02_TPS3_SUPPORTED) dp_set_training_pattern(dp, 3); else dp_set_training_pattern(dp, 2); do { if ((tries && dp_link_train_commit(dp, dp->pc2)) || dp_link_train_update(dp, dp->pc2, 400)) break; eq_done = !!(dp->stat[2] & DPCD_LS04_INTERLANE_ALIGN_DONE); for (i = 0; i < dp->link_nr && eq_done; i++) { u8 lane = (dp->stat[i >> 1] >> ((i & 1) * 4)) & 0xf; if (!(lane & DPCD_LS02_LANE0_CR_DONE)) cr_done = false; if (!(lane & DPCD_LS02_LANE0_CHANNEL_EQ_DONE) || !(lane & DPCD_LS02_LANE0_SYMBOL_LOCKED)) eq_done = false; } } while (!eq_done && cr_done && ++tries <= 5); return eq_done ? 
0 : -1; } static void dp_link_train_init(struct dp_state *dp, bool spread) { struct nvkm_output_dp *outp = dp->outp; struct nvkm_disp *disp = outp->base.disp; struct nvkm_subdev *subdev = &disp->engine.subdev; struct nvbios_init init = { .subdev = subdev, .bios = subdev->device->bios, .outp = &outp->base.info, .crtc = -1, .execute = 1, }; /* set desired spread */ if (spread) init.offset = outp->info.script[2]; else init.offset = outp->info.script[3]; nvbios_exec(&init); /* pre-train script */ init.offset = outp->info.script[0]; nvbios_exec(&init); } static void dp_link_train_fini(struct dp_state *dp) { struct nvkm_output_dp *outp = dp->outp; struct nvkm_disp *disp = outp->base.disp; struct nvkm_subdev *subdev = &disp->engine.subdev; struct nvbios_init init = { .subdev = subdev, .bios = subdev->device->bios, .outp = &outp->base.info, .crtc = -1, .execute = 1, }; /* post-train script */ init.offset = outp->info.script[1], nvbios_exec(&init); } static const struct dp_rates { u32 rate; u8 bw; u8 nr; } nvkm_dp_rates[] = { { 2160000, 0x14, 4 }, { 1080000, 0x0a, 4 }, { 1080000, 0x14, 2 }, { 648000, 0x06, 4 }, { 540000, 0x0a, 2 }, { 540000, 0x14, 1 }, { 324000, 0x06, 2 }, { 270000, 0x0a, 1 }, { 162000, 0x06, 1 }, {} }; void nvkm_dp_train(struct work_struct *w) { struct nvkm_output_dp *outp = container_of(w, typeof(*outp), lt.work); struct nv50_disp *disp = nv50_disp(outp->base.disp); const struct dp_rates *cfg = nvkm_dp_rates; struct dp_state _dp = { .outp = outp, }, *dp = &_dp; u32 datarate = 0; u8 pwr; int ret; if (!outp->base.info.location && disp->func->sor.magic) disp->func->sor.magic(&outp->base); /* bring capabilities within encoder limits */ if (disp->base.engine.subdev.device->chipset < 0xd0) outp->dpcd[2] &= ~DPCD_RC02_TPS3_SUPPORTED; if ((outp->dpcd[2] & 0x1f) > outp->base.info.dpconf.link_nr) { outp->dpcd[2] &= ~DPCD_RC02_MAX_LANE_COUNT; outp->dpcd[2] |= outp->base.info.dpconf.link_nr; } if (outp->dpcd[1] > outp->base.info.dpconf.link_bw) outp->dpcd[1] = outp->base.info.dpconf.link_bw; dp->pc2 = outp->dpcd[2] & DPCD_RC02_TPS3_SUPPORTED; /* restrict link config to the lowest required rate, if requested */ if (datarate) { datarate = (datarate / 8) * 10; /* 8B/10B coding overhead */ while (cfg[1].rate >= datarate) cfg++; } cfg--; /* disable link interrupt handling during link training */ nvkm_notify_put(&outp->irq); /* ensure sink is not in a low-power state */ if (!nvkm_rdaux(outp->aux, DPCD_SC00, &pwr, 1)) { if ((pwr & DPCD_SC00_SET_POWER) != DPCD_SC00_SET_POWER_D0) { pwr &= ~DPCD_SC00_SET_POWER; pwr |= DPCD_SC00_SET_POWER_D0; nvkm_wraux(outp->aux, DPCD_SC00, &pwr, 1); } } /* enable down-spreading and execute pre-train script from vbios */ dp_link_train_init(dp, outp->dpcd[3] & 0x01); while (ret = -EIO, (++cfg)->rate) { /* select next configuration supported by encoder and sink */ while (cfg->nr > (outp->dpcd[2] & DPCD_RC02_MAX_LANE_COUNT) || cfg->bw > (outp->dpcd[DPCD_RC01_MAX_LINK_RATE])) cfg++; dp->link_bw = cfg->bw * 27000; dp->link_nr = cfg->nr; /* program selected link configuration */ ret = dp_set_link_config(dp); if (ret == 0) { /* attempt to train the link at this configuration */ memset(dp->stat, 0x00, sizeof(dp->stat)); if (!dp_link_train_cr(dp) && !dp_link_train_eq(dp)) break; } else if (ret) { /* dp_set_link_config() handled training, or * we failed to communicate with the sink. 
*/ break; } } /* finish link training and execute post-train script from vbios */ dp_set_training_pattern(dp, 0); if (ret < 0) OUTP_ERR(&outp->base, "link training failed"); dp_link_train_fini(dp); /* signal completion and enable link interrupt handling */ OUTP_DBG(&outp->base, "training complete"); atomic_set(&outp->lt.done, 1); wake_up(&outp->lt.wait); nvkm_notify_get(&outp->irq); }
gpl-2.0
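/* The training loops above repeatedly unpack one lane's 4-bit status from
 * the DPCD status bytes with (stat[i >> 1] >> ((i & 1) * 4)) & 0xf: two
 * lanes per byte, even lane in the low nibble, odd lane in the high one.
 * A standalone check with hypothetical status values:
 */
#include <assert.h>
#include <stdint.h>

static uint8_t lane_status(const uint8_t *stat, int lane)
{
	return (stat[lane >> 1] >> ((lane & 1) * 4)) & 0xf;
}

int main(void)
{
	/* lanes 0..3 = 0x7, 0x1, 0xf, 0x0 */
	const uint8_t stat[2] = { 0x17, 0x0f };

	assert(lane_status(stat, 0) == 0x7);
	assert(lane_status(stat, 1) == 0x1);
	assert(lane_status(stat, 2) == 0xf);
	assert(lane_status(stat, 3) == 0x0);
	return 0;
}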
project-voodoo-vibrant/linux_sgh-t959v
arch/sh/kernel/cpu/sh4a/clock-sh7785.c
566
3792
/* * arch/sh/kernel/cpu/sh4a/clock-sh7785.c * * SH7785 support for the clock framework * * Copyright (C) 2007 - 2009 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/cpufreq.h> #include <asm/clock.h> #include <asm/freq.h> #include <cpu/sh7785.h> /* * Default rate for the root input clock, reset this with clk_set_rate() * from the platform code. */ static struct clk extal_clk = { .name = "extal", .id = -1, .rate = 33333333, }; static unsigned long pll_recalc(struct clk *clk) { int multiplier; multiplier = test_mode_pin(MODE_PIN4) ? 36 : 72; return clk->parent->rate * multiplier; } static struct clk_ops pll_clk_ops = { .recalc = pll_recalc, }; static struct clk pll_clk = { .name = "pll_clk", .id = -1, .ops = &pll_clk_ops, .parent = &extal_clk, .flags = CLK_ENABLE_ON_INIT, }; static struct clk *clks[] = { &extal_clk, &pll_clk, }; static unsigned int div2[] = { 1, 2, 4, 6, 8, 12, 16, 18, 24, 32, 36, 48 }; static struct clk_div_mult_table div4_table = { .divisors = div2, .nr_divisors = ARRAY_SIZE(div2), }; enum { DIV4_I, DIV4_U, DIV4_SH, DIV4_B, DIV4_DDR, DIV4_GA, DIV4_DU, DIV4_P, DIV4_NR }; #define DIV4(_str, _bit, _mask, _flags) \ SH_CLK_DIV4(_str, &pll_clk, FRQMR1, _bit, _mask, _flags) struct clk div4_clks[DIV4_NR] = { [DIV4_P] = DIV4("peripheral_clk", 0, 0x0f80, 0), [DIV4_DU] = DIV4("du_clk", 4, 0x0ff0, 0), [DIV4_GA] = DIV4("ga_clk", 8, 0x0030, 0), [DIV4_DDR] = DIV4("ddr_clk", 12, 0x000c, CLK_ENABLE_ON_INIT), [DIV4_B] = DIV4("bus_clk", 16, 0x0fe0, CLK_ENABLE_ON_INIT), [DIV4_SH] = DIV4("shyway_clk", 20, 0x000c, CLK_ENABLE_ON_INIT), [DIV4_U] = DIV4("umem_clk", 24, 0x000c, CLK_ENABLE_ON_INIT), [DIV4_I] = DIV4("cpu_clk", 28, 0x000e, CLK_ENABLE_ON_INIT), }; #define MSTPCR0 0xffc80030 #define MSTPCR1 0xffc80034 static struct clk mstp_clks[] = { /* MSTPCR0 */ SH_CLK_MSTP32("scif_fck", 5, &div4_clks[DIV4_P], MSTPCR0, 29, 0), SH_CLK_MSTP32("scif_fck", 4, &div4_clks[DIV4_P], MSTPCR0, 28, 0), SH_CLK_MSTP32("scif_fck", 3, &div4_clks[DIV4_P], MSTPCR0, 27, 0), SH_CLK_MSTP32("scif_fck", 2, &div4_clks[DIV4_P], MSTPCR0, 26, 0), SH_CLK_MSTP32("scif_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 25, 0), SH_CLK_MSTP32("scif_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 24, 0), SH_CLK_MSTP32("ssi_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 21, 0), SH_CLK_MSTP32("ssi_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 20, 0), SH_CLK_MSTP32("hac_fck", 1, &div4_clks[DIV4_P], MSTPCR0, 17, 0), SH_CLK_MSTP32("hac_fck", 0, &div4_clks[DIV4_P], MSTPCR0, 16, 0), SH_CLK_MSTP32("mmcif_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 13, 0), SH_CLK_MSTP32("flctl_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 12, 0), SH_CLK_MSTP32("tmu345_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 9, 0), SH_CLK_MSTP32("tmu012_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 8, 0), SH_CLK_MSTP32("siof_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 3, 0), SH_CLK_MSTP32("hspi_fck", -1, &div4_clks[DIV4_P], MSTPCR0, 2, 0), /* MSTPCR1 */ SH_CLK_MSTP32("hudi_fck", -1, NULL, MSTPCR1, 19, 0), SH_CLK_MSTP32("ubc_fck", -1, NULL, MSTPCR1, 17, 0), SH_CLK_MSTP32("dmac_11_6_fck", -1, NULL, MSTPCR1, 5, 0), SH_CLK_MSTP32("dmac_5_0_fck", -1, NULL, MSTPCR1, 4, 0), SH_CLK_MSTP32("gdta_fck", -1, NULL, MSTPCR1, 0, 0), }; int __init arch_clk_init(void) { int i, ret = 0; for (i = 0; i < ARRAY_SIZE(clks); i++) ret |= clk_register(clks[i]); if (!ret) ret = sh_clk_div4_register(div4_clks, 
ARRAY_SIZE(div4_clks), &div4_table); if (!ret) ret = sh_clk_mstp32_register(mstp_clks, ARRAY_SIZE(mstp_clks)); return ret; }
gpl-2.0
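/* The clock tree above hangs everything off extal: pll_recalc() multiplies
 * it by 36 or 72 depending on MODE_PIN4, and each DIV4 clock divides the
 * PLL output by an entry of div2[]. A sketch of that rate arithmetic with
 * the default 33.33 MHz extal and a hypothetical mode-pin setting:
 */
#include <stdio.h>

static const unsigned int div2[] = {
	1, 2, 4, 6, 8, 12, 16, 18, 24, 32, 36, 48
};

int main(void)
{
	unsigned long extal = 33333333;
	int mode_pin4 = 0;	/* assumed strap; selects the x72 multiplier */
	unsigned long pll = extal * (mode_pin4 ? 36 : 72);

	printf("pll = %lu Hz\n", pll);
	printf("cpu (div 2) = %lu Hz\n", pll / div2[1]);
	printf("bus (div 8) = %lu Hz\n", pll / div2[4]);
	return 0;
}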
MasterSS/linux
drivers/infiniband/hw/qib/qib_mad.c
566
72647
/* * Copyright (c) 2012 Intel Corporation. All rights reserved. * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved. * Copyright (c) 2005, 2006 PathScale, Inc. All rights reserved. * * This software is available to you under a choice of one of two * licenses. You may choose to be licensed under the terms of the GNU * General Public License (GPL) Version 2, available from the file * COPYING in the main directory of this source tree, or the * OpenIB.org BSD license below: * * Redistribution and use in source and binary forms, with or * without modification, are permitted provided that the following * conditions are met: * * - Redistributions of source code must retain the above * copyright notice, this list of conditions and the following * disclaimer. * * - Redistributions in binary form must reproduce the above * copyright notice, this list of conditions and the following * disclaimer in the documentation and/or other materials * provided with the distribution. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ #include <rdma/ib_smi.h> #include "qib.h" #include "qib_mad.h" static int reply(struct ib_smp *smp) { /* * The verbs framework will handle the directed/LID route * packet changes. */ smp->method = IB_MGMT_METHOD_GET_RESP; if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) smp->status |= IB_SMP_DIRECTION; return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_REPLY; } static int reply_failure(struct ib_smp *smp) { /* * The verbs framework will handle the directed/LID route * packet changes. */ smp->method = IB_MGMT_METHOD_GET_RESP; if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) smp->status |= IB_SMP_DIRECTION; return IB_MAD_RESULT_FAILURE | IB_MAD_RESULT_REPLY; } static void qib_send_trap(struct qib_ibport *ibp, void *data, unsigned len) { struct ib_mad_send_buf *send_buf; struct ib_mad_agent *agent; struct ib_smp *smp; int ret; unsigned long flags; unsigned long timeout; agent = ibp->send_agent; if (!agent) return; /* o14-3.2.1 */ if (!(ppd_from_ibp(ibp)->lflags & QIBL_LINKACTIVE)) return; /* o14-2 */ if (ibp->trap_timeout && time_before(jiffies, ibp->trap_timeout)) return; send_buf = ib_create_send_mad(agent, 0, 0, 0, IB_MGMT_MAD_HDR, IB_MGMT_MAD_DATA, GFP_ATOMIC, IB_MGMT_BASE_VERSION); if (IS_ERR(send_buf)) return; smp = send_buf->mad; smp->base_version = IB_MGMT_BASE_VERSION; smp->mgmt_class = IB_MGMT_CLASS_SUBN_LID_ROUTED; smp->class_version = 1; smp->method = IB_MGMT_METHOD_TRAP; ibp->tid++; smp->tid = cpu_to_be64(ibp->tid); smp->attr_id = IB_SMP_ATTR_NOTICE; /* o14-1: smp->mkey = 0; */ memcpy(smp->data, data, len); spin_lock_irqsave(&ibp->lock, flags); if (!ibp->sm_ah) { if (ibp->sm_lid != be16_to_cpu(IB_LID_PERMISSIVE)) { struct ib_ah *ah; ah = qib_create_qp0_ah(ibp, ibp->sm_lid); if (IS_ERR(ah)) ret = PTR_ERR(ah); else { send_buf->ah = ah; ibp->sm_ah = to_iah(ah); ret = 0; } } else ret = -EINVAL; } else { send_buf->ah = &ibp->sm_ah->ibah; ret = 0; } spin_unlock_irqrestore(&ibp->lock, flags); if (!ret) ret = ib_post_send_mad(send_buf, NULL); if (!ret) { /* 4.096 usec. 
*/ timeout = (4096 * (1UL << ibp->subnet_timeout)) / 1000; ibp->trap_timeout = jiffies + usecs_to_jiffies(timeout); } else { ib_free_send_mad(send_buf); ibp->trap_timeout = 0; } } /* * Send a bad [PQ]_Key trap (ch. 14.3.8). */ void qib_bad_pqkey(struct qib_ibport *ibp, __be16 trap_num, u32 key, u32 sl, u32 qp1, u32 qp2, __be16 lid1, __be16 lid2) { struct ib_mad_notice_attr data; if (trap_num == IB_NOTICE_TRAP_BAD_PKEY) ibp->pkey_violations++; else ibp->qkey_violations++; ibp->n_pkt_drops++; /* Send violation trap */ data.generic_type = IB_NOTICE_TYPE_SECURITY; data.prod_type_msb = 0; data.prod_type_lsb = IB_NOTICE_PROD_CA; data.trap_num = trap_num; data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); data.toggle_count = 0; memset(&data.details, 0, sizeof(data.details)); data.details.ntc_257_258.lid1 = lid1; data.details.ntc_257_258.lid2 = lid2; data.details.ntc_257_258.key = cpu_to_be32(key); data.details.ntc_257_258.sl_qp1 = cpu_to_be32((sl << 28) | qp1); data.details.ntc_257_258.qp2 = cpu_to_be32(qp2); qib_send_trap(ibp, &data, sizeof(data)); } /* * Send a bad M_Key trap (ch. 14.3.9). */ static void qib_bad_mkey(struct qib_ibport *ibp, struct ib_smp *smp) { struct ib_mad_notice_attr data; /* Send violation trap */ data.generic_type = IB_NOTICE_TYPE_SECURITY; data.prod_type_msb = 0; data.prod_type_lsb = IB_NOTICE_PROD_CA; data.trap_num = IB_NOTICE_TRAP_BAD_MKEY; data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); data.toggle_count = 0; memset(&data.details, 0, sizeof(data.details)); data.details.ntc_256.lid = data.issuer_lid; data.details.ntc_256.method = smp->method; data.details.ntc_256.attr_id = smp->attr_id; data.details.ntc_256.attr_mod = smp->attr_mod; data.details.ntc_256.mkey = smp->mkey; if (smp->mgmt_class == IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE) { u8 hop_cnt; data.details.ntc_256.dr_slid = smp->dr_slid; data.details.ntc_256.dr_trunc_hop = IB_NOTICE_TRAP_DR_NOTICE; hop_cnt = smp->hop_cnt; if (hop_cnt > ARRAY_SIZE(data.details.ntc_256.dr_rtn_path)) { data.details.ntc_256.dr_trunc_hop |= IB_NOTICE_TRAP_DR_TRUNC; hop_cnt = ARRAY_SIZE(data.details.ntc_256.dr_rtn_path); } data.details.ntc_256.dr_trunc_hop |= hop_cnt; memcpy(data.details.ntc_256.dr_rtn_path, smp->return_path, hop_cnt); } qib_send_trap(ibp, &data, sizeof(data)); } /* * Send a Port Capability Mask Changed trap (ch. 14.3.11). */ void qib_cap_mask_chg(struct qib_ibport *ibp) { struct ib_mad_notice_attr data; data.generic_type = IB_NOTICE_TYPE_INFO; data.prod_type_msb = 0; data.prod_type_lsb = IB_NOTICE_PROD_CA; data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); data.toggle_count = 0; memset(&data.details, 0, sizeof(data.details)); data.details.ntc_144.lid = data.issuer_lid; data.details.ntc_144.new_cap_mask = cpu_to_be32(ibp->port_cap_flags); qib_send_trap(ibp, &data, sizeof(data)); } /* * Send a System Image GUID Changed trap (ch. 14.3.12). */ void qib_sys_guid_chg(struct qib_ibport *ibp) { struct ib_mad_notice_attr data; data.generic_type = IB_NOTICE_TYPE_INFO; data.prod_type_msb = 0; data.prod_type_lsb = IB_NOTICE_PROD_CA; data.trap_num = IB_NOTICE_TRAP_SYS_GUID_CHG; data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); data.toggle_count = 0; memset(&data.details, 0, sizeof(data.details)); data.details.ntc_145.lid = data.issuer_lid; data.details.ntc_145.new_sys_guid = ib_qib_sys_image_guid; qib_send_trap(ibp, &data, sizeof(data)); } /* * Send a Node Description Changed trap (ch. 14.3.13). 
*/ void qib_node_desc_chg(struct qib_ibport *ibp) { struct ib_mad_notice_attr data; data.generic_type = IB_NOTICE_TYPE_INFO; data.prod_type_msb = 0; data.prod_type_lsb = IB_NOTICE_PROD_CA; data.trap_num = IB_NOTICE_TRAP_CAP_MASK_CHG; data.issuer_lid = cpu_to_be16(ppd_from_ibp(ibp)->lid); data.toggle_count = 0; memset(&data.details, 0, sizeof(data.details)); data.details.ntc_144.lid = data.issuer_lid; data.details.ntc_144.local_changes = 1; data.details.ntc_144.change_flags = IB_NOTICE_TRAP_NODE_DESC_CHG; qib_send_trap(ibp, &data, sizeof(data)); } static int subn_get_nodedescription(struct ib_smp *smp, struct ib_device *ibdev) { if (smp->attr_mod) smp->status |= IB_SMP_INVALID_FIELD; memcpy(smp->data, ibdev->node_desc, sizeof(smp->data)); return reply(smp); } static int subn_get_nodeinfo(struct ib_smp *smp, struct ib_device *ibdev, u8 port) { struct ib_node_info *nip = (struct ib_node_info *)&smp->data; struct qib_devdata *dd = dd_from_ibdev(ibdev); u32 vendor, majrev, minrev; unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */ /* GUID 0 is illegal */ if (smp->attr_mod || pidx >= dd->num_pports || dd->pport[pidx].guid == 0) smp->status |= IB_SMP_INVALID_FIELD; else nip->port_guid = dd->pport[pidx].guid; nip->base_version = 1; nip->class_version = 1; nip->node_type = 1; /* channel adapter */ nip->num_ports = ibdev->phys_port_cnt; /* This is already in network order */ nip->sys_guid = ib_qib_sys_image_guid; nip->node_guid = dd->pport->guid; /* Use first-port GUID as node */ nip->partition_cap = cpu_to_be16(qib_get_npkeys(dd)); nip->device_id = cpu_to_be16(dd->deviceid); majrev = dd->majrev; minrev = dd->minrev; nip->revision = cpu_to_be32((majrev << 16) | minrev); nip->local_port_num = port; vendor = dd->vendorid; nip->vendor_id[0] = QIB_SRC_OUI_1; nip->vendor_id[1] = QIB_SRC_OUI_2; nip->vendor_id[2] = QIB_SRC_OUI_3; return reply(smp); } static int subn_get_guidinfo(struct ib_smp *smp, struct ib_device *ibdev, u8 port) { struct qib_devdata *dd = dd_from_ibdev(ibdev); u32 startgx = 8 * be32_to_cpu(smp->attr_mod); __be64 *p = (__be64 *) smp->data; unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */ /* 32 blocks of 8 64-bit GUIDs per block */ memset(smp->data, 0, sizeof(smp->data)); if (startgx == 0 && pidx < dd->num_pports) { struct qib_pportdata *ppd = dd->pport + pidx; struct qib_ibport *ibp = &ppd->ibport_data; __be64 g = ppd->guid; unsigned i; /* GUID 0 is illegal */ if (g == 0) smp->status |= IB_SMP_INVALID_FIELD; else { /* The first is a copy of the read-only HW GUID. */ p[0] = g; for (i = 1; i < QIB_GUIDS_PER_PORT; i++) p[i] = ibp->guids[i - 1]; } } else smp->status |= IB_SMP_INVALID_FIELD; return reply(smp); } static void set_link_width_enabled(struct qib_pportdata *ppd, u32 w) { (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LWID_ENB, w); } static void set_link_speed_enabled(struct qib_pportdata *ppd, u32 s) { (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_SPD_ENB, s); } static int get_overrunthreshold(struct qib_pportdata *ppd) { return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH); } /** * set_overrunthreshold - set the overrun threshold * @ppd: the physical port data * @n: the new threshold * * Note that this will only take effect when the link state changes. 
*/ static int set_overrunthreshold(struct qib_pportdata *ppd, unsigned n) { (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OVERRUN_THRESH, (u32)n); return 0; } static int get_phyerrthreshold(struct qib_pportdata *ppd) { return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH); } /** * set_phyerrthreshold - set the physical error threshold * @ppd: the physical port data * @n: the new threshold * * Note that this will only take effect when the link state changes. */ static int set_phyerrthreshold(struct qib_pportdata *ppd, unsigned n) { (void) ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PHYERR_THRESH, (u32)n); return 0; } /** * get_linkdowndefaultstate - get the default linkdown state * @ppd: the physical port data * * Returns zero if the default is POLL, 1 if the default is SLEEP. */ static int get_linkdowndefaultstate(struct qib_pportdata *ppd) { return ppd->dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT) == IB_LINKINITCMD_SLEEP; } static int check_mkey(struct qib_ibport *ibp, struct ib_smp *smp, int mad_flags) { int valid_mkey = 0; int ret = 0; /* Is the mkey in the process of expiring? */ if (ibp->mkey_lease_timeout && time_after_eq(jiffies, ibp->mkey_lease_timeout)) { /* Clear timeout and mkey protection field. */ ibp->mkey_lease_timeout = 0; ibp->mkeyprot = 0; } if ((mad_flags & IB_MAD_IGNORE_MKEY) || ibp->mkey == 0 || ibp->mkey == smp->mkey) valid_mkey = 1; /* Unset lease timeout on any valid Get/Set/TrapRepress */ if (valid_mkey && ibp->mkey_lease_timeout && (smp->method == IB_MGMT_METHOD_GET || smp->method == IB_MGMT_METHOD_SET || smp->method == IB_MGMT_METHOD_TRAP_REPRESS)) ibp->mkey_lease_timeout = 0; if (!valid_mkey) { switch (smp->method) { case IB_MGMT_METHOD_GET: /* Bad mkey not a violation below level 2 */ if (ibp->mkeyprot < 2) break; case IB_MGMT_METHOD_SET: case IB_MGMT_METHOD_TRAP_REPRESS: if (ibp->mkey_violations != 0xFFFF) ++ibp->mkey_violations; if (!ibp->mkey_lease_timeout && ibp->mkey_lease_period) ibp->mkey_lease_timeout = jiffies + ibp->mkey_lease_period * HZ; /* Generate a trap notice. */ qib_bad_mkey(ibp, smp); ret = 1; } } return ret; } static int subn_get_portinfo(struct ib_smp *smp, struct ib_device *ibdev, u8 port) { struct qib_devdata *dd; struct qib_pportdata *ppd; struct qib_ibport *ibp; struct ib_port_info *pip = (struct ib_port_info *)smp->data; u8 mtu; int ret; u32 state; u32 port_num = be32_to_cpu(smp->attr_mod); if (port_num == 0) port_num = port; else { if (port_num > ibdev->phys_port_cnt) { smp->status |= IB_SMP_INVALID_FIELD; ret = reply(smp); goto bail; } if (port_num != port) { ibp = to_iport(ibdev, port_num); ret = check_mkey(ibp, smp, 0); if (ret) { ret = IB_MAD_RESULT_FAILURE; goto bail; } } } dd = dd_from_ibdev(ibdev); /* IB numbers ports from 1, hdw from 0 */ ppd = dd->pport + (port_num - 1); ibp = &ppd->ibport_data; /* Clear all fields. Only set the non-zero fields. */ memset(smp->data, 0, sizeof(smp->data)); /* Only return the mkey if the protection field allows it. 
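 * With mkeyprot == 1, a Get whose M_Key does not match leaves the field
 * at 0 (already cleared by the memset above); higher protection levels
 * rely on check_mkey() having rejected a mismatched M_Key earlier.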
*/ if (!(smp->method == IB_MGMT_METHOD_GET && ibp->mkey != smp->mkey && ibp->mkeyprot == 1)) pip->mkey = ibp->mkey; pip->gid_prefix = ibp->gid_prefix; pip->lid = cpu_to_be16(ppd->lid); pip->sm_lid = cpu_to_be16(ibp->sm_lid); pip->cap_mask = cpu_to_be32(ibp->port_cap_flags); /* pip->diag_code; */ pip->mkey_lease_period = cpu_to_be16(ibp->mkey_lease_period); pip->local_port_num = port; pip->link_width_enabled = ppd->link_width_enabled; pip->link_width_supported = ppd->link_width_supported; pip->link_width_active = ppd->link_width_active; state = dd->f_iblink_state(ppd->lastibcstat); pip->linkspeed_portstate = ppd->link_speed_supported << 4 | state; pip->portphysstate_linkdown = (dd->f_ibphys_portstate(ppd->lastibcstat) << 4) | (get_linkdowndefaultstate(ppd) ? 1 : 2); pip->mkeyprot_resv_lmc = (ibp->mkeyprot << 6) | ppd->lmc; pip->linkspeedactive_enabled = (ppd->link_speed_active << 4) | ppd->link_speed_enabled; switch (ppd->ibmtu) { default: /* something is wrong; fall through */ case 4096: mtu = IB_MTU_4096; break; case 2048: mtu = IB_MTU_2048; break; case 1024: mtu = IB_MTU_1024; break; case 512: mtu = IB_MTU_512; break; case 256: mtu = IB_MTU_256; break; } pip->neighbormtu_mastersmsl = (mtu << 4) | ibp->sm_sl; pip->vlcap_inittype = ppd->vls_supported << 4; /* InitType = 0 */ pip->vl_high_limit = ibp->vl_high_limit; pip->vl_arb_high_cap = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_CAP); pip->vl_arb_low_cap = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_VL_LOW_CAP); /* InitTypeReply = 0 */ pip->inittypereply_mtucap = qib_ibmtu ? qib_ibmtu : IB_MTU_4096; /* HCAs ignore VLStallCount and HOQLife */ /* pip->vlstallcnt_hoqlife; */ pip->operationalvl_pei_peo_fpi_fpo = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_OP_VLS) << 4; pip->mkey_violations = cpu_to_be16(ibp->mkey_violations); /* P_KeyViolations are counted by hardware. */ pip->pkey_violations = cpu_to_be16(ibp->pkey_violations); pip->qkey_violations = cpu_to_be16(ibp->qkey_violations); /* Only the hardware GUID is supported for now */ pip->guid_cap = QIB_GUIDS_PER_PORT; pip->clientrereg_resv_subnetto = ibp->subnet_timeout; /* 32.768 usec. response time (guessing) */ pip->resv_resptimevalue = 3; pip->localphyerrors_overrunerrors = (get_phyerrthreshold(ppd) << 4) | get_overrunthreshold(ppd); /* pip->max_credit_hint; */ if (ibp->port_cap_flags & IB_PORT_LINK_LATENCY_SUP) { u32 v; v = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_LINKLATENCY); pip->link_roundtrip_latency[0] = v >> 16; pip->link_roundtrip_latency[1] = v >> 8; pip->link_roundtrip_latency[2] = v; } ret = reply(smp); bail: return ret; } /** * get_pkeys - return the PKEY table * @dd: the qlogic_ib device * @port: the IB port number * @pkeys: the pkey table is placed here */ static int get_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys) { struct qib_pportdata *ppd = dd->pport + port - 1; /* * always a kernel context, no locking needed. * If we get here with ppd setup, no need to check * that pd is valid. 
*/ struct qib_ctxtdata *rcd = dd->rcd[ppd->hw_pidx]; memcpy(pkeys, rcd->pkeys, sizeof(rcd->pkeys)); return 0; } static int subn_get_pkeytable(struct ib_smp *smp, struct ib_device *ibdev, u8 port) { u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff); u16 *p = (u16 *) smp->data; __be16 *q = (__be16 *) smp->data; /* 64 blocks of 32 16-bit P_Key entries */ memset(smp->data, 0, sizeof(smp->data)); if (startpx == 0) { struct qib_devdata *dd = dd_from_ibdev(ibdev); unsigned i, n = qib_get_npkeys(dd); get_pkeys(dd, port, p); for (i = 0; i < n; i++) q[i] = cpu_to_be16(p[i]); } else smp->status |= IB_SMP_INVALID_FIELD; return reply(smp); } static int subn_set_guidinfo(struct ib_smp *smp, struct ib_device *ibdev, u8 port) { struct qib_devdata *dd = dd_from_ibdev(ibdev); u32 startgx = 8 * be32_to_cpu(smp->attr_mod); __be64 *p = (__be64 *) smp->data; unsigned pidx = port - 1; /* IB number port from 1, hdw from 0 */ /* 32 blocks of 8 64-bit GUIDs per block */ if (startgx == 0 && pidx < dd->num_pports) { struct qib_pportdata *ppd = dd->pport + pidx; struct qib_ibport *ibp = &ppd->ibport_data; unsigned i; /* The first entry is read-only. */ for (i = 1; i < QIB_GUIDS_PER_PORT; i++) ibp->guids[i - 1] = p[i]; } else smp->status |= IB_SMP_INVALID_FIELD; /* The only GUID we support is the first read-only entry. */ return subn_get_guidinfo(smp, ibdev, port); } /** * subn_set_portinfo - set port information * @smp: the incoming SM packet * @ibdev: the infiniband device * @port: the port on the device * * Set Portinfo (see ch. 14.2.5.6). */ static int subn_set_portinfo(struct ib_smp *smp, struct ib_device *ibdev, u8 port) { struct ib_port_info *pip = (struct ib_port_info *)smp->data; struct ib_event event; struct qib_devdata *dd; struct qib_pportdata *ppd; struct qib_ibport *ibp; u8 clientrereg = (pip->clientrereg_resv_subnetto & 0x80); unsigned long flags; u16 lid, smlid; u8 lwe; u8 lse; u8 state; u8 vls; u8 msl; u16 lstate; int ret, ore, mtu; u32 port_num = be32_to_cpu(smp->attr_mod); if (port_num == 0) port_num = port; else { if (port_num > ibdev->phys_port_cnt) goto err; /* Port attributes can only be set on the receiving port */ if (port_num != port) goto get_only; } dd = dd_from_ibdev(ibdev); /* IB numbers ports from 1, hdw from 0 */ ppd = dd->pport + (port_num - 1); ibp = &ppd->ibport_data; event.device = ibdev; event.element.port_num = port; ibp->mkey = pip->mkey; ibp->gid_prefix = pip->gid_prefix; ibp->mkey_lease_period = be16_to_cpu(pip->mkey_lease_period); lid = be16_to_cpu(pip->lid); /* Must be a valid unicast LID address. */ if (lid == 0 || lid >= QIB_MULTICAST_LID_BASE) smp->status |= IB_SMP_INVALID_FIELD; else if (ppd->lid != lid || ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) { if (ppd->lid != lid) qib_set_uevent_bits(ppd, _QIB_EVENT_LID_CHANGE_BIT); if (ppd->lmc != (pip->mkeyprot_resv_lmc & 7)) qib_set_uevent_bits(ppd, _QIB_EVENT_LMC_CHANGE_BIT); qib_set_lid(ppd, lid, pip->mkeyprot_resv_lmc & 7); event.event = IB_EVENT_LID_CHANGE; ib_dispatch_event(&event); } smlid = be16_to_cpu(pip->sm_lid); msl = pip->neighbormtu_mastersmsl & 0xF; /* Must be a valid unicast LID address. 
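 * Unicast LIDs run from 1 to QIB_MULTICAST_LID_BASE - 1; zero is
 * reserved and anything at or above the multicast base is rejected.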
*/ if (smlid == 0 || smlid >= QIB_MULTICAST_LID_BASE) smp->status |= IB_SMP_INVALID_FIELD; else if (smlid != ibp->sm_lid || msl != ibp->sm_sl) { spin_lock_irqsave(&ibp->lock, flags); if (ibp->sm_ah) { if (smlid != ibp->sm_lid) ibp->sm_ah->attr.dlid = smlid; if (msl != ibp->sm_sl) ibp->sm_ah->attr.sl = msl; } spin_unlock_irqrestore(&ibp->lock, flags); if (smlid != ibp->sm_lid) ibp->sm_lid = smlid; if (msl != ibp->sm_sl) ibp->sm_sl = msl; event.event = IB_EVENT_SM_CHANGE; ib_dispatch_event(&event); } /* Allow 1x or 4x to be set (see 14.2.6.6). */ lwe = pip->link_width_enabled; if (lwe) { if (lwe == 0xFF) set_link_width_enabled(ppd, ppd->link_width_supported); else if (lwe >= 16 || (lwe & ~ppd->link_width_supported)) smp->status |= IB_SMP_INVALID_FIELD; else if (lwe != ppd->link_width_enabled) set_link_width_enabled(ppd, lwe); } lse = pip->linkspeedactive_enabled & 0xF; if (lse) { /* * The IB 1.2 spec. only allows link speed values * 1, 3, 5, 7, 15. 1.2.1 extended to allow specific * speeds. */ if (lse == 15) set_link_speed_enabled(ppd, ppd->link_speed_supported); else if (lse >= 8 || (lse & ~ppd->link_speed_supported)) smp->status |= IB_SMP_INVALID_FIELD; else if (lse != ppd->link_speed_enabled) set_link_speed_enabled(ppd, lse); } /* Set link down default state. */ switch (pip->portphysstate_linkdown & 0xF) { case 0: /* NOP */ break; case 1: /* SLEEP */ (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT, IB_LINKINITCMD_SLEEP); break; case 2: /* POLL */ (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_LINKDEFAULT, IB_LINKINITCMD_POLL); break; default: smp->status |= IB_SMP_INVALID_FIELD; } ibp->mkeyprot = pip->mkeyprot_resv_lmc >> 6; ibp->vl_high_limit = pip->vl_high_limit; (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_VL_HIGH_LIMIT, ibp->vl_high_limit); mtu = ib_mtu_enum_to_int((pip->neighbormtu_mastersmsl >> 4) & 0xF); if (mtu == -1) smp->status |= IB_SMP_INVALID_FIELD; else qib_set_mtu(ppd, mtu); /* Set operational VLs */ vls = (pip->operationalvl_pei_peo_fpi_fpo >> 4) & 0xF; if (vls) { if (vls > ppd->vls_supported) smp->status |= IB_SMP_INVALID_FIELD; else (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_OP_VLS, vls); } if (pip->mkey_violations == 0) ibp->mkey_violations = 0; if (pip->pkey_violations == 0) ibp->pkey_violations = 0; if (pip->qkey_violations == 0) ibp->qkey_violations = 0; ore = pip->localphyerrors_overrunerrors; if (set_phyerrthreshold(ppd, (ore >> 4) & 0xF)) smp->status |= IB_SMP_INVALID_FIELD; if (set_overrunthreshold(ppd, (ore & 0xF))) smp->status |= IB_SMP_INVALID_FIELD; ibp->subnet_timeout = pip->clientrereg_resv_subnetto & 0x1F; /* * Do the port state change now that the other link parameters * have been set. * Changing the port physical state only makes sense if the link * is down or is being set to down. */ state = pip->linkspeed_portstate & 0xF; lstate = (pip->portphysstate_linkdown >> 4) & 0xF; if (lstate && !(state == IB_PORT_DOWN || state == IB_PORT_NOP)) smp->status |= IB_SMP_INVALID_FIELD; /* * Only state changes of DOWN, ARM, and ACTIVE are valid * and must be in the correct state to take effect (see 7.2.6). 
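 * PortState is the 4-bit field NOP(0)/DOWN(1)/INIT(2)/ARMED(3)/ACTIVE(4);
 * a request for INIT falls through to the default case below and is
 * flagged as an invalid field.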
*/ switch (state) { case IB_PORT_NOP: if (lstate == 0) break; /* FALLTHROUGH */ case IB_PORT_DOWN: if (lstate == 0) lstate = QIB_IB_LINKDOWN_ONLY; else if (lstate == 1) lstate = QIB_IB_LINKDOWN_SLEEP; else if (lstate == 2) lstate = QIB_IB_LINKDOWN; else if (lstate == 3) lstate = QIB_IB_LINKDOWN_DISABLE; else { smp->status |= IB_SMP_INVALID_FIELD; break; } spin_lock_irqsave(&ppd->lflags_lock, flags); ppd->lflags &= ~QIBL_LINKV; spin_unlock_irqrestore(&ppd->lflags_lock, flags); qib_set_linkstate(ppd, lstate); /* * Don't send a reply if the response would be sent * through the disabled port. */ if (lstate == QIB_IB_LINKDOWN_DISABLE && smp->hop_cnt) { ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; goto done; } qib_wait_linkstate(ppd, QIBL_LINKV, 10); break; case IB_PORT_ARMED: qib_set_linkstate(ppd, QIB_IB_LINKARM); break; case IB_PORT_ACTIVE: qib_set_linkstate(ppd, QIB_IB_LINKACTIVE); break; default: smp->status |= IB_SMP_INVALID_FIELD; } if (clientrereg) { event.event = IB_EVENT_CLIENT_REREGISTER; ib_dispatch_event(&event); } ret = subn_get_portinfo(smp, ibdev, port); /* restore re-reg bit per o14-12.2.1 */ pip->clientrereg_resv_subnetto |= clientrereg; goto get_only; err: smp->status |= IB_SMP_INVALID_FIELD; get_only: ret = subn_get_portinfo(smp, ibdev, port); done: return ret; } /** * rm_pkey - decrement the reference count for the given PKEY * @ppd: the physical port data * @key: the PKEY index * * Return true if this was the last reference and the hardware table entry * needs to be changed. */ static int rm_pkey(struct qib_pportdata *ppd, u16 key) { int i; int ret; for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) { if (ppd->pkeys[i] != key) continue; if (atomic_dec_and_test(&ppd->pkeyrefs[i])) { ppd->pkeys[i] = 0; ret = 1; goto bail; } break; } ret = 0; bail: return ret; } /** * add_pkey - add the given PKEY to the hardware table * @ppd: the physical port data * @key: the PKEY * * Return an error code if unable to add the entry, zero if no change, * or 1 if the hardware PKEY register needs to be updated. */ static int add_pkey(struct qib_pportdata *ppd, u16 key) { int i; u16 lkey = key & 0x7FFF; int any = 0; int ret; if (lkey == 0x7FFF) { ret = 0; goto bail; } /* Look for an empty slot or a matching PKEY. */ for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) { if (!ppd->pkeys[i]) { any++; continue; } /* If it matches exactly, try to increment the ref count */ if (ppd->pkeys[i] == key) { if (atomic_inc_return(&ppd->pkeyrefs[i]) > 1) { ret = 0; goto bail; } /* Lost the race. Look for an empty slot below. */ atomic_dec(&ppd->pkeyrefs[i]); any++; } /* * It makes no sense to have both the limited and unlimited * PKEY set at the same time since the unlimited one will * disable the limited one. */ if ((ppd->pkeys[i] & 0x7FFF) == lkey) { ret = -EEXIST; goto bail; } } if (!any) { ret = -EBUSY; goto bail; } for (i = 0; i < ARRAY_SIZE(ppd->pkeys); i++) { if (!ppd->pkeys[i] && atomic_inc_return(&ppd->pkeyrefs[i]) == 1) { /* for qibstats, etc. */ ppd->pkeys[i] = key; ret = 1; goto bail; } } ret = -EBUSY; bail: return ret; } /** * set_pkeys - set the PKEY table for ctxt 0 * @dd: the qlogic_ib device * @port: the IB port number * @pkeys: the PKEY table */ static int set_pkeys(struct qib_devdata *dd, u8 port, u16 *pkeys) { struct qib_pportdata *ppd; struct qib_ctxtdata *rcd; int i; int changed = 0; /* * IB port one/two always map to context zero/one, * always a kernel context, no locking needed. * If we get here with ppd setup, no need to check * that rcd is valid.
*/ ppd = dd->pport + (port - 1); rcd = dd->rcd[ppd->hw_pidx]; for (i = 0; i < ARRAY_SIZE(rcd->pkeys); i++) { u16 key = pkeys[i]; u16 okey = rcd->pkeys[i]; if (key == okey) continue; /* * The value of this PKEY table entry is changing. * Remove the old entry in the hardware's array of PKEYs. */ if (okey & 0x7FFF) changed |= rm_pkey(ppd, okey); if (key & 0x7FFF) { int ret = add_pkey(ppd, key); if (ret < 0) key = 0; else changed |= ret; } rcd->pkeys[i] = key; } if (changed) { struct ib_event event; (void) dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PKEYS, 0); event.event = IB_EVENT_PKEY_CHANGE; event.device = &dd->verbs_dev.ibdev; event.element.port_num = port; ib_dispatch_event(&event); } return 0; } static int subn_set_pkeytable(struct ib_smp *smp, struct ib_device *ibdev, u8 port) { u32 startpx = 32 * (be32_to_cpu(smp->attr_mod) & 0xffff); __be16 *p = (__be16 *) smp->data; u16 *q = (u16 *) smp->data; struct qib_devdata *dd = dd_from_ibdev(ibdev); unsigned i, n = qib_get_npkeys(dd); for (i = 0; i < n; i++) q[i] = be16_to_cpu(p[i]); if (startpx != 0 || set_pkeys(dd, port, q) != 0) smp->status |= IB_SMP_INVALID_FIELD; return subn_get_pkeytable(smp, ibdev, port); } static int subn_get_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev, u8 port) { struct qib_ibport *ibp = to_iport(ibdev, port); u8 *p = (u8 *) smp->data; unsigned i; memset(smp->data, 0, sizeof(smp->data)); if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) smp->status |= IB_SMP_UNSUP_METHOD; else for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2) *p++ = (ibp->sl_to_vl[i] << 4) | ibp->sl_to_vl[i + 1]; return reply(smp); } static int subn_set_sl_to_vl(struct ib_smp *smp, struct ib_device *ibdev, u8 port) { struct qib_ibport *ibp = to_iport(ibdev, port); u8 *p = (u8 *) smp->data; unsigned i; if (!(ibp->port_cap_flags & IB_PORT_SL_MAP_SUP)) { smp->status |= IB_SMP_UNSUP_METHOD; return reply(smp); } for (i = 0; i < ARRAY_SIZE(ibp->sl_to_vl); i += 2, p++) { ibp->sl_to_vl[i] = *p >> 4; ibp->sl_to_vl[i + 1] = *p & 0xF; } qib_set_uevent_bits(ppd_from_ibp(to_iport(ibdev, port)), _QIB_EVENT_SL2VL_CHANGE_BIT); return subn_get_sl_to_vl(smp, ibdev, port); } static int subn_get_vl_arb(struct ib_smp *smp, struct ib_device *ibdev, u8 port) { unsigned which = be32_to_cpu(smp->attr_mod) >> 16; struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port)); memset(smp->data, 0, sizeof(smp->data)); if (ppd->vls_supported == IB_VL_VL0) smp->status |= IB_SMP_UNSUP_METHOD; else if (which == IB_VLARB_LOWPRI_0_31) (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB, smp->data); else if (which == IB_VLARB_HIGHPRI_0_31) (void) ppd->dd->f_get_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB, smp->data); else smp->status |= IB_SMP_INVALID_FIELD; return reply(smp); } static int subn_set_vl_arb(struct ib_smp *smp, struct ib_device *ibdev, u8 port) { unsigned which = be32_to_cpu(smp->attr_mod) >> 16; struct qib_pportdata *ppd = ppd_from_ibp(to_iport(ibdev, port)); if (ppd->vls_supported == IB_VL_VL0) smp->status |= IB_SMP_UNSUP_METHOD; else if (which == IB_VLARB_LOWPRI_0_31) (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_LOW_ARB, smp->data); else if (which == IB_VLARB_HIGHPRI_0_31) (void) ppd->dd->f_set_ib_table(ppd, QIB_IB_TBL_VL_HIGH_ARB, smp->data); else smp->status |= IB_SMP_INVALID_FIELD; return subn_get_vl_arb(smp, ibdev, port); } static int subn_trap_repress(struct ib_smp *smp, struct ib_device *ibdev, u8 port) { /* * For now, we only send the trap once so no need to process this. 
* o13-6, o13-7, * o14-3.a4 The SMA shall not send any message in response to a valid * SubnTrapRepress() message. */ return IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; } static int pma_get_classportinfo(struct ib_pma_mad *pmp, struct ib_device *ibdev) { struct ib_class_port_info *p = (struct ib_class_port_info *)pmp->data; struct qib_devdata *dd = dd_from_ibdev(ibdev); memset(pmp->data, 0, sizeof(pmp->data)); if (pmp->mad_hdr.attr_mod != 0) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; /* Note that AllPortSelect is not valid */ p->base_version = 1; p->class_version = 1; p->capability_mask = IB_PMA_CLASS_CAP_EXT_WIDTH; /* * Set the most significant bit of CM2 to indicate support for * congestion statistics */ p->reserved[0] = dd->psxmitwait_supported << 7; /* * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec. */ p->resp_time_value = 18; return reply((struct ib_smp *) pmp); } static int pma_get_portsamplescontrol(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portsamplescontrol *p = (struct ib_pma_portsamplescontrol *)pmp->data; struct qib_ibdev *dev = to_idev(ibdev); struct qib_devdata *dd = dd_from_dev(dev); struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); unsigned long flags; u8 port_select = p->port_select; memset(pmp->data, 0, sizeof(pmp->data)); p->port_select = port_select; if (pmp->mad_hdr.attr_mod != 0 || port_select != port) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; goto bail; } spin_lock_irqsave(&ibp->lock, flags); p->tick = dd->f_get_ib_cfg(ppd, QIB_IB_CFG_PMA_TICKS); p->sample_status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); p->counter_width = 4; /* 32 bit counters */ p->counter_mask0_9 = COUNTER_MASK0_9; p->sample_start = cpu_to_be32(ibp->pma_sample_start); p->sample_interval = cpu_to_be32(ibp->pma_sample_interval); p->tag = cpu_to_be16(ibp->pma_tag); p->counter_select[0] = ibp->pma_counter_select[0]; p->counter_select[1] = ibp->pma_counter_select[1]; p->counter_select[2] = ibp->pma_counter_select[2]; p->counter_select[3] = ibp->pma_counter_select[3]; p->counter_select[4] = ibp->pma_counter_select[4]; spin_unlock_irqrestore(&ibp->lock, flags); bail: return reply((struct ib_smp *) pmp); } static int pma_set_portsamplescontrol(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portsamplescontrol *p = (struct ib_pma_portsamplescontrol *)pmp->data; struct qib_ibdev *dev = to_idev(ibdev); struct qib_devdata *dd = dd_from_dev(dev); struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); unsigned long flags; u8 status, xmit_flags; int ret; if (pmp->mad_hdr.attr_mod != 0 || p->port_select != port) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; ret = reply((struct ib_smp *) pmp); goto bail; } spin_lock_irqsave(&ibp->lock, flags); /* Port Sampling code owns the PS* HW counters */ xmit_flags = ppd->cong_stats.flags; ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_SAMPLE; status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); if (status == IB_PMA_SAMPLE_STATUS_DONE || (status == IB_PMA_SAMPLE_STATUS_RUNNING && xmit_flags == IB_PMA_CONG_HW_CONTROL_TIMER)) { ibp->pma_sample_start = be32_to_cpu(p->sample_start); ibp->pma_sample_interval = be32_to_cpu(p->sample_interval); ibp->pma_tag = be16_to_cpu(p->tag); ibp->pma_counter_select[0] = p->counter_select[0]; ibp->pma_counter_select[1] = p->counter_select[1]; ibp->pma_counter_select[2] = p->counter_select[2]; ibp->pma_counter_select[3] = p->counter_select[3]; ibp->pma_counter_select[4] = 
p->counter_select[4]; dd->f_set_cntr_sample(ppd, ibp->pma_sample_interval, ibp->pma_sample_start); } spin_unlock_irqrestore(&ibp->lock, flags); ret = pma_get_portsamplescontrol(pmp, ibdev, port); bail: return ret; } static u64 get_counter(struct qib_ibport *ibp, struct qib_pportdata *ppd, __be16 sel) { u64 ret; switch (sel) { case IB_PMA_PORT_XMIT_DATA: ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITDATA); break; case IB_PMA_PORT_RCV_DATA: ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVDATA); break; case IB_PMA_PORT_XMIT_PKTS: ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITPKTS); break; case IB_PMA_PORT_RCV_PKTS: ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSRCVPKTS); break; case IB_PMA_PORT_XMIT_WAIT: ret = ppd->dd->f_portcntr(ppd, QIBPORTCNTR_PSXMITWAIT); break; default: ret = 0; } return ret; } /* This function assumes that the xmit_wait lock is already held */ static u64 xmit_wait_get_value_delta(struct qib_pportdata *ppd) { u32 delta; delta = get_counter(&ppd->ibport_data, ppd, IB_PMA_PORT_XMIT_WAIT); return ppd->cong_stats.counter + delta; } static void cache_hw_sample_counters(struct qib_pportdata *ppd) { struct qib_ibport *ibp = &ppd->ibport_data; ppd->cong_stats.counter_cache.psxmitdata = get_counter(ibp, ppd, IB_PMA_PORT_XMIT_DATA); ppd->cong_stats.counter_cache.psrcvdata = get_counter(ibp, ppd, IB_PMA_PORT_RCV_DATA); ppd->cong_stats.counter_cache.psxmitpkts = get_counter(ibp, ppd, IB_PMA_PORT_XMIT_PKTS); ppd->cong_stats.counter_cache.psrcvpkts = get_counter(ibp, ppd, IB_PMA_PORT_RCV_PKTS); ppd->cong_stats.counter_cache.psxmitwait = get_counter(ibp, ppd, IB_PMA_PORT_XMIT_WAIT); } static u64 get_cache_hw_sample_counters(struct qib_pportdata *ppd, __be16 sel) { u64 ret; switch (sel) { case IB_PMA_PORT_XMIT_DATA: ret = ppd->cong_stats.counter_cache.psxmitdata; break; case IB_PMA_PORT_RCV_DATA: ret = ppd->cong_stats.counter_cache.psrcvdata; break; case IB_PMA_PORT_XMIT_PKTS: ret = ppd->cong_stats.counter_cache.psxmitpkts; break; case IB_PMA_PORT_RCV_PKTS: ret = ppd->cong_stats.counter_cache.psrcvpkts; break; case IB_PMA_PORT_XMIT_WAIT: ret = ppd->cong_stats.counter_cache.psxmitwait; break; default: ret = 0; } return ret; } static int pma_get_portsamplesresult(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portsamplesresult *p = (struct ib_pma_portsamplesresult *)pmp->data; struct qib_ibdev *dev = to_idev(ibdev); struct qib_devdata *dd = dd_from_dev(dev); struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); unsigned long flags; u8 status; int i; memset(pmp->data, 0, sizeof(pmp->data)); spin_lock_irqsave(&ibp->lock, flags); p->tag = cpu_to_be16(ibp->pma_tag); if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER) p->sample_status = IB_PMA_SAMPLE_STATUS_DONE; else { status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); p->sample_status = cpu_to_be16(status); if (status == IB_PMA_SAMPLE_STATUS_DONE) { cache_hw_sample_counters(ppd); ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd); dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0); ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER; } } for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++) p->counter[i] = cpu_to_be32( get_cache_hw_sample_counters( ppd, ibp->pma_counter_select[i])); spin_unlock_irqrestore(&ibp->lock, flags); return reply((struct ib_smp *) pmp); } static int pma_get_portsamplesresult_ext(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portsamplesresult_ext *p = (struct ib_pma_portsamplesresult_ext 
*)pmp->data; struct qib_ibdev *dev = to_idev(ibdev); struct qib_devdata *dd = dd_from_dev(dev); struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); unsigned long flags; u8 status; int i; /* Port Sampling code owns the PS* HW counters */ memset(pmp->data, 0, sizeof(pmp->data)); spin_lock_irqsave(&ibp->lock, flags); p->tag = cpu_to_be16(ibp->pma_tag); if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_TIMER) p->sample_status = IB_PMA_SAMPLE_STATUS_DONE; else { status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); p->sample_status = cpu_to_be16(status); /* 64 bits */ p->extended_width = cpu_to_be32(0x80000000); if (status == IB_PMA_SAMPLE_STATUS_DONE) { cache_hw_sample_counters(ppd); ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd); dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0); ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER; } } for (i = 0; i < ARRAY_SIZE(ibp->pma_counter_select); i++) p->counter[i] = cpu_to_be64( get_cache_hw_sample_counters( ppd, ibp->pma_counter_select[i])); spin_unlock_irqrestore(&ibp->lock, flags); return reply((struct ib_smp *) pmp); } static int pma_get_portcounters(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) pmp->data; struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); struct qib_verbs_counters cntrs; u8 port_select = p->port_select; qib_get_counters(ppd, &cntrs); /* Adjust counters for any resets done. */ cntrs.symbol_error_counter -= ibp->z_symbol_error_counter; cntrs.link_error_recovery_counter -= ibp->z_link_error_recovery_counter; cntrs.link_downed_counter -= ibp->z_link_downed_counter; cntrs.port_rcv_errors -= ibp->z_port_rcv_errors; cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors; cntrs.port_xmit_discards -= ibp->z_port_xmit_discards; cntrs.port_xmit_data -= ibp->z_port_xmit_data; cntrs.port_rcv_data -= ibp->z_port_rcv_data; cntrs.port_xmit_packets -= ibp->z_port_xmit_packets; cntrs.port_rcv_packets -= ibp->z_port_rcv_packets; cntrs.local_link_integrity_errors -= ibp->z_local_link_integrity_errors; cntrs.excessive_buffer_overrun_errors -= ibp->z_excessive_buffer_overrun_errors; cntrs.vl15_dropped -= ibp->z_vl15_dropped; cntrs.vl15_dropped += ibp->n_vl15_dropped; memset(pmp->data, 0, sizeof(pmp->data)); p->port_select = port_select; if (pmp->mad_hdr.attr_mod != 0 || port_select != port) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; if (cntrs.symbol_error_counter > 0xFFFFUL) p->symbol_error_counter = cpu_to_be16(0xFFFF); else p->symbol_error_counter = cpu_to_be16((u16)cntrs.symbol_error_counter); if (cntrs.link_error_recovery_counter > 0xFFUL) p->link_error_recovery_counter = 0xFF; else p->link_error_recovery_counter = (u8)cntrs.link_error_recovery_counter; if (cntrs.link_downed_counter > 0xFFUL) p->link_downed_counter = 0xFF; else p->link_downed_counter = (u8)cntrs.link_downed_counter; if (cntrs.port_rcv_errors > 0xFFFFUL) p->port_rcv_errors = cpu_to_be16(0xFFFF); else p->port_rcv_errors = cpu_to_be16((u16) cntrs.port_rcv_errors); if (cntrs.port_rcv_remphys_errors > 0xFFFFUL) p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF); else p->port_rcv_remphys_errors = cpu_to_be16((u16)cntrs.port_rcv_remphys_errors); if (cntrs.port_xmit_discards > 0xFFFFUL) p->port_xmit_discards = cpu_to_be16(0xFFFF); else p->port_xmit_discards = cpu_to_be16((u16)cntrs.port_xmit_discards); if (cntrs.local_link_integrity_errors > 0xFUL) cntrs.local_link_integrity_errors = 0xFUL; if 
(cntrs.excessive_buffer_overrun_errors > 0xFUL) cntrs.excessive_buffer_overrun_errors = 0xFUL; p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) | cntrs.excessive_buffer_overrun_errors; if (cntrs.vl15_dropped > 0xFFFFUL) p->vl15_dropped = cpu_to_be16(0xFFFF); else p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped); if (cntrs.port_xmit_data > 0xFFFFFFFFUL) p->port_xmit_data = cpu_to_be32(0xFFFFFFFF); else p->port_xmit_data = cpu_to_be32((u32)cntrs.port_xmit_data); if (cntrs.port_rcv_data > 0xFFFFFFFFUL) p->port_rcv_data = cpu_to_be32(0xFFFFFFFF); else p->port_rcv_data = cpu_to_be32((u32)cntrs.port_rcv_data); if (cntrs.port_xmit_packets > 0xFFFFFFFFUL) p->port_xmit_packets = cpu_to_be32(0xFFFFFFFF); else p->port_xmit_packets = cpu_to_be32((u32)cntrs.port_xmit_packets); if (cntrs.port_rcv_packets > 0xFFFFFFFFUL) p->port_rcv_packets = cpu_to_be32(0xFFFFFFFF); else p->port_rcv_packets = cpu_to_be32((u32) cntrs.port_rcv_packets); return reply((struct ib_smp *) pmp); } static int pma_get_portcounters_cong(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { /* Congestion PMA packets start at offset 24 not 64 */ struct ib_pma_portcounters_cong *p = (struct ib_pma_portcounters_cong *)pmp->reserved; struct qib_verbs_counters cntrs; struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); struct qib_devdata *dd = dd_from_ppd(ppd); u32 port_select = be32_to_cpu(pmp->mad_hdr.attr_mod) & 0xFF; u64 xmit_wait_counter; unsigned long flags; /* * This check is performed only in the GET method because the * SET method ends up calling this anyway. */ if (!dd->psxmitwait_supported) pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR; if (port_select != port) pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; qib_get_counters(ppd, &cntrs); spin_lock_irqsave(&ppd->ibport_data.lock, flags); xmit_wait_counter = xmit_wait_get_value_delta(ppd); spin_unlock_irqrestore(&ppd->ibport_data.lock, flags); /* Adjust counters for any resets done. */ cntrs.symbol_error_counter -= ibp->z_symbol_error_counter; cntrs.link_error_recovery_counter -= ibp->z_link_error_recovery_counter; cntrs.link_downed_counter -= ibp->z_link_downed_counter; cntrs.port_rcv_errors -= ibp->z_port_rcv_errors; cntrs.port_rcv_remphys_errors -= ibp->z_port_rcv_remphys_errors; cntrs.port_xmit_discards -= ibp->z_port_xmit_discards; cntrs.local_link_integrity_errors -= ibp->z_local_link_integrity_errors; cntrs.excessive_buffer_overrun_errors -= ibp->z_excessive_buffer_overrun_errors; cntrs.vl15_dropped -= ibp->z_vl15_dropped; cntrs.vl15_dropped += ibp->n_vl15_dropped; cntrs.port_xmit_data -= ibp->z_port_xmit_data; cntrs.port_rcv_data -= ibp->z_port_rcv_data; cntrs.port_xmit_packets -= ibp->z_port_xmit_packets; cntrs.port_rcv_packets -= ibp->z_port_rcv_packets; memset(pmp->reserved, 0, sizeof(pmp->reserved) + sizeof(pmp->data)); /* * Set top 3 bits to indicate interval in picoseconds in * remaining bits. 
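 * (layout sketch: bits 15:13 carry the time-unit code, QIB_XMIT_RATE_PICO
 *  here, and bits 12:0 carry the tick count, which is why the stored rate
 *  is masked with ~(QIB_XMIT_RATE_PICO << 13) before being OR'd in)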
*/ p->port_check_rate = cpu_to_be16((QIB_XMIT_RATE_PICO << 13) | (dd->psxmitwait_check_rate & ~(QIB_XMIT_RATE_PICO << 13))); p->port_adr_events = cpu_to_be64(0); p->port_xmit_wait = cpu_to_be64(xmit_wait_counter); p->port_xmit_data = cpu_to_be64(cntrs.port_xmit_data); p->port_rcv_data = cpu_to_be64(cntrs.port_rcv_data); p->port_xmit_packets = cpu_to_be64(cntrs.port_xmit_packets); p->port_rcv_packets = cpu_to_be64(cntrs.port_rcv_packets); if (cntrs.symbol_error_counter > 0xFFFFUL) p->symbol_error_counter = cpu_to_be16(0xFFFF); else p->symbol_error_counter = cpu_to_be16( (u16)cntrs.symbol_error_counter); if (cntrs.link_error_recovery_counter > 0xFFUL) p->link_error_recovery_counter = 0xFF; else p->link_error_recovery_counter = (u8)cntrs.link_error_recovery_counter; if (cntrs.link_downed_counter > 0xFFUL) p->link_downed_counter = 0xFF; else p->link_downed_counter = (u8)cntrs.link_downed_counter; if (cntrs.port_rcv_errors > 0xFFFFUL) p->port_rcv_errors = cpu_to_be16(0xFFFF); else p->port_rcv_errors = cpu_to_be16((u16) cntrs.port_rcv_errors); if (cntrs.port_rcv_remphys_errors > 0xFFFFUL) p->port_rcv_remphys_errors = cpu_to_be16(0xFFFF); else p->port_rcv_remphys_errors = cpu_to_be16( (u16)cntrs.port_rcv_remphys_errors); if (cntrs.port_xmit_discards > 0xFFFFUL) p->port_xmit_discards = cpu_to_be16(0xFFFF); else p->port_xmit_discards = cpu_to_be16((u16)cntrs.port_xmit_discards); if (cntrs.local_link_integrity_errors > 0xFUL) cntrs.local_link_integrity_errors = 0xFUL; if (cntrs.excessive_buffer_overrun_errors > 0xFUL) cntrs.excessive_buffer_overrun_errors = 0xFUL; p->link_overrun_errors = (cntrs.local_link_integrity_errors << 4) | cntrs.excessive_buffer_overrun_errors; if (cntrs.vl15_dropped > 0xFFFFUL) p->vl15_dropped = cpu_to_be16(0xFFFF); else p->vl15_dropped = cpu_to_be16((u16)cntrs.vl15_dropped); return reply((struct ib_smp *)pmp); } static void qib_snapshot_pmacounters( struct qib_ibport *ibp, struct qib_pma_counters *pmacounters) { struct qib_pma_counters *p; int cpu; memset(pmacounters, 0, sizeof(*pmacounters)); for_each_possible_cpu(cpu) { p = per_cpu_ptr(ibp->pmastats, cpu); pmacounters->n_unicast_xmit += p->n_unicast_xmit; pmacounters->n_unicast_rcv += p->n_unicast_rcv; pmacounters->n_multicast_xmit += p->n_multicast_xmit; pmacounters->n_multicast_rcv += p->n_multicast_rcv; } } static int pma_get_portcounters_ext(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portcounters_ext *p = (struct ib_pma_portcounters_ext *)pmp->data; struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); u64 swords, rwords, spkts, rpkts, xwait; struct qib_pma_counters pma; u8 port_select = p->port_select; memset(pmp->data, 0, sizeof(pmp->data)); p->port_select = port_select; if (pmp->mad_hdr.attr_mod != 0 || port_select != port) { pmp->mad_hdr.status |= IB_SMP_INVALID_FIELD; goto bail; } qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait); /* Adjust counters for any resets done. 
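 * (the hardware never clears these counters; a PMA "clear" just
 *  snapshots the running totals into the z_* baselines, so the
 *  subtractions below yield the counts since the last clear)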
*/ swords -= ibp->z_port_xmit_data; rwords -= ibp->z_port_rcv_data; spkts -= ibp->z_port_xmit_packets; rpkts -= ibp->z_port_rcv_packets; p->port_xmit_data = cpu_to_be64(swords); p->port_rcv_data = cpu_to_be64(rwords); p->port_xmit_packets = cpu_to_be64(spkts); p->port_rcv_packets = cpu_to_be64(rpkts); qib_snapshot_pmacounters(ibp, &pma); p->port_unicast_xmit_packets = cpu_to_be64(pma.n_unicast_xmit - ibp->z_unicast_xmit); p->port_unicast_rcv_packets = cpu_to_be64(pma.n_unicast_rcv - ibp->z_unicast_rcv); p->port_multicast_xmit_packets = cpu_to_be64(pma.n_multicast_xmit - ibp->z_multicast_xmit); p->port_multicast_rcv_packets = cpu_to_be64(pma.n_multicast_rcv - ibp->z_multicast_rcv); bail: return reply((struct ib_smp *) pmp); } static int pma_set_portcounters(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) pmp->data; struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); struct qib_verbs_counters cntrs; /* * Since the HW doesn't support clearing counters, we save the * current count and subtract it from future responses. */ qib_get_counters(ppd, &cntrs); if (p->counter_select & IB_PMA_SEL_SYMBOL_ERROR) ibp->z_symbol_error_counter = cntrs.symbol_error_counter; if (p->counter_select & IB_PMA_SEL_LINK_ERROR_RECOVERY) ibp->z_link_error_recovery_counter = cntrs.link_error_recovery_counter; if (p->counter_select & IB_PMA_SEL_LINK_DOWNED) ibp->z_link_downed_counter = cntrs.link_downed_counter; if (p->counter_select & IB_PMA_SEL_PORT_RCV_ERRORS) ibp->z_port_rcv_errors = cntrs.port_rcv_errors; if (p->counter_select & IB_PMA_SEL_PORT_RCV_REMPHYS_ERRORS) ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors; if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DISCARDS) ibp->z_port_xmit_discards = cntrs.port_xmit_discards; if (p->counter_select & IB_PMA_SEL_LOCAL_LINK_INTEGRITY_ERRORS) ibp->z_local_link_integrity_errors = cntrs.local_link_integrity_errors; if (p->counter_select & IB_PMA_SEL_EXCESSIVE_BUFFER_OVERRUNS) ibp->z_excessive_buffer_overrun_errors = cntrs.excessive_buffer_overrun_errors; if (p->counter_select & IB_PMA_SEL_PORT_VL15_DROPPED) { ibp->n_vl15_dropped = 0; ibp->z_vl15_dropped = cntrs.vl15_dropped; } if (p->counter_select & IB_PMA_SEL_PORT_XMIT_DATA) ibp->z_port_xmit_data = cntrs.port_xmit_data; if (p->counter_select & IB_PMA_SEL_PORT_RCV_DATA) ibp->z_port_rcv_data = cntrs.port_rcv_data; if (p->counter_select & IB_PMA_SEL_PORT_XMIT_PACKETS) ibp->z_port_xmit_packets = cntrs.port_xmit_packets; if (p->counter_select & IB_PMA_SEL_PORT_RCV_PACKETS) ibp->z_port_rcv_packets = cntrs.port_rcv_packets; return pma_get_portcounters(pmp, ibdev, port); } static int pma_set_portcounters_cong(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); struct qib_devdata *dd = dd_from_ppd(ppd); struct qib_verbs_counters cntrs; u32 counter_select = (be32_to_cpu(pmp->mad_hdr.attr_mod) >> 24) & 0xFF; int ret = 0; unsigned long flags; qib_get_counters(ppd, &cntrs); /* Get counter values before we save them */ ret = pma_get_portcounters_cong(pmp, ibdev, port); if (counter_select & IB_PMA_SEL_CONG_XMIT) { spin_lock_irqsave(&ppd->ibport_data.lock, flags); ppd->cong_stats.counter = 0; dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0); spin_unlock_irqrestore(&ppd->ibport_data.lock, flags); } if (counter_select & IB_PMA_SEL_CONG_PORT_DATA) { ibp->z_port_xmit_data = cntrs.port_xmit_data; 
ibp->z_port_rcv_data = cntrs.port_rcv_data; ibp->z_port_xmit_packets = cntrs.port_xmit_packets; ibp->z_port_rcv_packets = cntrs.port_rcv_packets; } if (counter_select & IB_PMA_SEL_CONG_ALL) { ibp->z_symbol_error_counter = cntrs.symbol_error_counter; ibp->z_link_error_recovery_counter = cntrs.link_error_recovery_counter; ibp->z_link_downed_counter = cntrs.link_downed_counter; ibp->z_port_rcv_errors = cntrs.port_rcv_errors; ibp->z_port_rcv_remphys_errors = cntrs.port_rcv_remphys_errors; ibp->z_port_xmit_discards = cntrs.port_xmit_discards; ibp->z_local_link_integrity_errors = cntrs.local_link_integrity_errors; ibp->z_excessive_buffer_overrun_errors = cntrs.excessive_buffer_overrun_errors; ibp->n_vl15_dropped = 0; ibp->z_vl15_dropped = cntrs.vl15_dropped; } return ret; } static int pma_set_portcounters_ext(struct ib_pma_mad *pmp, struct ib_device *ibdev, u8 port) { struct ib_pma_portcounters *p = (struct ib_pma_portcounters *) pmp->data; struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); u64 swords, rwords, spkts, rpkts, xwait; struct qib_pma_counters pma; qib_snapshot_counters(ppd, &swords, &rwords, &spkts, &rpkts, &xwait); if (p->counter_select & IB_PMA_SELX_PORT_XMIT_DATA) ibp->z_port_xmit_data = swords; if (p->counter_select & IB_PMA_SELX_PORT_RCV_DATA) ibp->z_port_rcv_data = rwords; if (p->counter_select & IB_PMA_SELX_PORT_XMIT_PACKETS) ibp->z_port_xmit_packets = spkts; if (p->counter_select & IB_PMA_SELX_PORT_RCV_PACKETS) ibp->z_port_rcv_packets = rpkts; qib_snapshot_pmacounters(ibp, &pma); if (p->counter_select & IB_PMA_SELX_PORT_UNI_XMIT_PACKETS) ibp->z_unicast_xmit = pma.n_unicast_xmit; if (p->counter_select & IB_PMA_SELX_PORT_UNI_RCV_PACKETS) ibp->z_unicast_rcv = pma.n_unicast_rcv; if (p->counter_select & IB_PMA_SELX_PORT_MULTI_XMIT_PACKETS) ibp->z_multicast_xmit = pma.n_multicast_xmit; if (p->counter_select & IB_PMA_SELX_PORT_MULTI_RCV_PACKETS) ibp->z_multicast_rcv = pma.n_multicast_rcv; return pma_get_portcounters_ext(pmp, ibdev, port); } static int process_subn(struct ib_device *ibdev, int mad_flags, u8 port, const struct ib_mad *in_mad, struct ib_mad *out_mad) { struct ib_smp *smp = (struct ib_smp *)out_mad; struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); int ret; *out_mad = *in_mad; if (smp->class_version != 1) { smp->status |= IB_SMP_UNSUP_VERSION; ret = reply(smp); goto bail; } ret = check_mkey(ibp, smp, mad_flags); if (ret) { u32 port_num = be32_to_cpu(smp->attr_mod); /* * If this is a get/set portinfo, we already check the * M_Key if the MAD is for another port and the M_Key * is OK on the receiving port. This check is needed * to increment the error counters when the M_Key * fails to match on *both* ports. 
*/ if (in_mad->mad_hdr.attr_id == IB_SMP_ATTR_PORT_INFO && (smp->method == IB_MGMT_METHOD_GET || smp->method == IB_MGMT_METHOD_SET) && port_num && port_num <= ibdev->phys_port_cnt && port != port_num) (void) check_mkey(to_iport(ibdev, port_num), smp, 0); ret = IB_MAD_RESULT_FAILURE; goto bail; } switch (smp->method) { case IB_MGMT_METHOD_GET: switch (smp->attr_id) { case IB_SMP_ATTR_NODE_DESC: ret = subn_get_nodedescription(smp, ibdev); goto bail; case IB_SMP_ATTR_NODE_INFO: ret = subn_get_nodeinfo(smp, ibdev, port); goto bail; case IB_SMP_ATTR_GUID_INFO: ret = subn_get_guidinfo(smp, ibdev, port); goto bail; case IB_SMP_ATTR_PORT_INFO: ret = subn_get_portinfo(smp, ibdev, port); goto bail; case IB_SMP_ATTR_PKEY_TABLE: ret = subn_get_pkeytable(smp, ibdev, port); goto bail; case IB_SMP_ATTR_SL_TO_VL_TABLE: ret = subn_get_sl_to_vl(smp, ibdev, port); goto bail; case IB_SMP_ATTR_VL_ARB_TABLE: ret = subn_get_vl_arb(smp, ibdev, port); goto bail; case IB_SMP_ATTR_SM_INFO: if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) { ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; goto bail; } if (ibp->port_cap_flags & IB_PORT_SM) { ret = IB_MAD_RESULT_SUCCESS; goto bail; } /* FALLTHROUGH */ default: smp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply(smp); goto bail; } case IB_MGMT_METHOD_SET: switch (smp->attr_id) { case IB_SMP_ATTR_GUID_INFO: ret = subn_set_guidinfo(smp, ibdev, port); goto bail; case IB_SMP_ATTR_PORT_INFO: ret = subn_set_portinfo(smp, ibdev, port); goto bail; case IB_SMP_ATTR_PKEY_TABLE: ret = subn_set_pkeytable(smp, ibdev, port); goto bail; case IB_SMP_ATTR_SL_TO_VL_TABLE: ret = subn_set_sl_to_vl(smp, ibdev, port); goto bail; case IB_SMP_ATTR_VL_ARB_TABLE: ret = subn_set_vl_arb(smp, ibdev, port); goto bail; case IB_SMP_ATTR_SM_INFO: if (ibp->port_cap_flags & IB_PORT_SM_DISABLED) { ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; goto bail; } if (ibp->port_cap_flags & IB_PORT_SM) { ret = IB_MAD_RESULT_SUCCESS; goto bail; } /* FALLTHROUGH */ default: smp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply(smp); goto bail; } case IB_MGMT_METHOD_TRAP_REPRESS: if (smp->attr_id == IB_SMP_ATTR_NOTICE) ret = subn_trap_repress(smp, ibdev, port); else { smp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply(smp); } goto bail; case IB_MGMT_METHOD_TRAP: case IB_MGMT_METHOD_REPORT: case IB_MGMT_METHOD_REPORT_RESP: case IB_MGMT_METHOD_GET_RESP: /* * The ib_mad module will call us to process responses * before checking for other consumers. * Just tell the caller to process it normally. 
*/ ret = IB_MAD_RESULT_SUCCESS; goto bail; case IB_MGMT_METHOD_SEND: if (ib_get_smp_direction(smp) && smp->attr_id == QIB_VENDOR_IPG) { ppd->dd->f_set_ib_cfg(ppd, QIB_IB_CFG_PORT, smp->data[0]); ret = IB_MAD_RESULT_SUCCESS | IB_MAD_RESULT_CONSUMED; } else ret = IB_MAD_RESULT_SUCCESS; goto bail; default: smp->status |= IB_SMP_UNSUP_METHOD; ret = reply(smp); } bail: return ret; } static int process_perf(struct ib_device *ibdev, u8 port, const struct ib_mad *in_mad, struct ib_mad *out_mad) { struct ib_pma_mad *pmp = (struct ib_pma_mad *)out_mad; int ret; *out_mad = *in_mad; if (pmp->mad_hdr.class_version != 1) { pmp->mad_hdr.status |= IB_SMP_UNSUP_VERSION; ret = reply((struct ib_smp *) pmp); goto bail; } switch (pmp->mad_hdr.method) { case IB_MGMT_METHOD_GET: switch (pmp->mad_hdr.attr_id) { case IB_PMA_CLASS_PORT_INFO: ret = pma_get_classportinfo(pmp, ibdev); goto bail; case IB_PMA_PORT_SAMPLES_CONTROL: ret = pma_get_portsamplescontrol(pmp, ibdev, port); goto bail; case IB_PMA_PORT_SAMPLES_RESULT: ret = pma_get_portsamplesresult(pmp, ibdev, port); goto bail; case IB_PMA_PORT_SAMPLES_RESULT_EXT: ret = pma_get_portsamplesresult_ext(pmp, ibdev, port); goto bail; case IB_PMA_PORT_COUNTERS: ret = pma_get_portcounters(pmp, ibdev, port); goto bail; case IB_PMA_PORT_COUNTERS_EXT: ret = pma_get_portcounters_ext(pmp, ibdev, port); goto bail; case IB_PMA_PORT_COUNTERS_CONG: ret = pma_get_portcounters_cong(pmp, ibdev, port); goto bail; default: pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_smp *) pmp); goto bail; } case IB_MGMT_METHOD_SET: switch (pmp->mad_hdr.attr_id) { case IB_PMA_PORT_SAMPLES_CONTROL: ret = pma_set_portsamplescontrol(pmp, ibdev, port); goto bail; case IB_PMA_PORT_COUNTERS: ret = pma_set_portcounters(pmp, ibdev, port); goto bail; case IB_PMA_PORT_COUNTERS_EXT: ret = pma_set_portcounters_ext(pmp, ibdev, port); goto bail; case IB_PMA_PORT_COUNTERS_CONG: ret = pma_set_portcounters_cong(pmp, ibdev, port); goto bail; default: pmp->mad_hdr.status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_smp *) pmp); goto bail; } case IB_MGMT_METHOD_TRAP: case IB_MGMT_METHOD_GET_RESP: /* * The ib_mad module will call us to process responses * before checking for other consumers. * Just tell the caller to process it normally. */ ret = IB_MAD_RESULT_SUCCESS; goto bail; default: pmp->mad_hdr.status |= IB_SMP_UNSUP_METHOD; ret = reply((struct ib_smp *) pmp); } bail: return ret; } static int cc_get_classportinfo(struct ib_cc_mad *ccp, struct ib_device *ibdev) { struct ib_cc_classportinfo_attr *p = (struct ib_cc_classportinfo_attr *)ccp->mgmt_data; memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data)); p->base_version = 1; p->class_version = 1; p->cap_mask = 0; /* * Expected response time is 4.096 usec. * 2^18 == 1.073741824 sec. 
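 * (worked out: RespTimeValue N encodes 4.096 usec * 2^N, so N == 18
 *  gives 4.096e-6 * 262144 ~= 1.074 sec)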
*/ p->resp_time_value = 18; return reply((struct ib_smp *) ccp); } static int cc_get_congestion_info(struct ib_cc_mad *ccp, struct ib_device *ibdev, u8 port) { struct ib_cc_info_attr *p = (struct ib_cc_info_attr *)ccp->mgmt_data; struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data)); p->congestion_info = 0; p->control_table_cap = ppd->cc_max_table_entries; return reply((struct ib_smp *) ccp); } static int cc_get_congestion_setting(struct ib_cc_mad *ccp, struct ib_device *ibdev, u8 port) { int i; struct ib_cc_congestion_setting_attr *p = (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data; struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); struct ib_cc_congestion_entry_shadow *entries; memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data)); spin_lock(&ppd->cc_shadow_lock); entries = ppd->congestion_entries_shadow->entries; p->port_control = cpu_to_be16( ppd->congestion_entries_shadow->port_control); p->control_map = cpu_to_be16( ppd->congestion_entries_shadow->control_map); for (i = 0; i < IB_CC_CCS_ENTRIES; i++) { p->entries[i].ccti_increase = entries[i].ccti_increase; p->entries[i].ccti_timer = cpu_to_be16(entries[i].ccti_timer); p->entries[i].trigger_threshold = entries[i].trigger_threshold; p->entries[i].ccti_min = entries[i].ccti_min; } spin_unlock(&ppd->cc_shadow_lock); return reply((struct ib_smp *) ccp); } static int cc_get_congestion_control_table(struct ib_cc_mad *ccp, struct ib_device *ibdev, u8 port) { struct ib_cc_table_attr *p = (struct ib_cc_table_attr *)ccp->mgmt_data; struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); u32 cct_block_index = be32_to_cpu(ccp->attr_mod); u32 max_cct_block; u32 cct_entry; struct ib_cc_table_entry_shadow *entries; int i; /* Is the table index more than what is supported? */ if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1) goto bail; memset(ccp->mgmt_data, 0, sizeof(ccp->mgmt_data)); spin_lock(&ppd->cc_shadow_lock); max_cct_block = (ppd->ccti_entries_shadow->ccti_last_entry + 1)/IB_CCT_ENTRIES; max_cct_block = max_cct_block ? 
max_cct_block - 1 : 0; if (cct_block_index > max_cct_block) { spin_unlock(&ppd->cc_shadow_lock); goto bail; } ccp->attr_mod = cpu_to_be32(cct_block_index); cct_entry = IB_CCT_ENTRIES * (cct_block_index + 1); cct_entry--; p->ccti_limit = cpu_to_be16(cct_entry); entries = &ppd->ccti_entries_shadow-> entries[IB_CCT_ENTRIES * cct_block_index]; cct_entry %= IB_CCT_ENTRIES; for (i = 0; i <= cct_entry; i++) p->ccti_entries[i].entry = cpu_to_be16(entries[i].entry); spin_unlock(&ppd->cc_shadow_lock); return reply((struct ib_smp *) ccp); bail: return reply_failure((struct ib_smp *) ccp); } static int cc_set_congestion_setting(struct ib_cc_mad *ccp, struct ib_device *ibdev, u8 port) { struct ib_cc_congestion_setting_attr *p = (struct ib_cc_congestion_setting_attr *)ccp->mgmt_data; struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); int i; ppd->cc_sl_control_map = be16_to_cpu(p->control_map); for (i = 0; i < IB_CC_CCS_ENTRIES; i++) { ppd->congestion_entries[i].ccti_increase = p->entries[i].ccti_increase; ppd->congestion_entries[i].ccti_timer = be16_to_cpu(p->entries[i].ccti_timer); ppd->congestion_entries[i].trigger_threshold = p->entries[i].trigger_threshold; ppd->congestion_entries[i].ccti_min = p->entries[i].ccti_min; } return reply((struct ib_smp *) ccp); } static int cc_set_congestion_control_table(struct ib_cc_mad *ccp, struct ib_device *ibdev, u8 port) { struct ib_cc_table_attr *p = (struct ib_cc_table_attr *)ccp->mgmt_data; struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); u32 cct_block_index = be32_to_cpu(ccp->attr_mod); u32 cct_entry; struct ib_cc_table_entry_shadow *entries; int i; /* Is the table index more than what is supported? */ if (cct_block_index > IB_CC_TABLE_CAP_DEFAULT - 1) goto bail; /* If this packet is the first in the sequence then * zero the total table entry count. 
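 * (ccti_limit is the absolute index of the last valid entry and each
 *  MAD carries one block of IB_CCT_ENTRIES (64) entries, so a limit
 *  below 64 can only describe the first block of a table)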
*/ if (be16_to_cpu(p->ccti_limit) < IB_CCT_ENTRIES) ppd->total_cct_entry = 0; cct_entry = (be16_to_cpu(p->ccti_limit))%IB_CCT_ENTRIES; /* ccti_limit is 0 to 63 */ ppd->total_cct_entry += (cct_entry + 1); if (ppd->total_cct_entry > ppd->cc_supported_table_entries) goto bail; ppd->ccti_limit = be16_to_cpu(p->ccti_limit); entries = ppd->ccti_entries + (IB_CCT_ENTRIES * cct_block_index); for (i = 0; i <= cct_entry; i++) entries[i].entry = be16_to_cpu(p->ccti_entries[i].entry); spin_lock(&ppd->cc_shadow_lock); ppd->ccti_entries_shadow->ccti_last_entry = ppd->total_cct_entry - 1; memcpy(ppd->ccti_entries_shadow->entries, ppd->ccti_entries, (ppd->total_cct_entry * sizeof(struct ib_cc_table_entry))); ppd->congestion_entries_shadow->port_control = IB_CC_CCS_PC_SL_BASED; ppd->congestion_entries_shadow->control_map = ppd->cc_sl_control_map; memcpy(ppd->congestion_entries_shadow->entries, ppd->congestion_entries, IB_CC_CCS_ENTRIES * sizeof(struct ib_cc_congestion_entry)); spin_unlock(&ppd->cc_shadow_lock); return reply((struct ib_smp *) ccp); bail: return reply_failure((struct ib_smp *) ccp); } static int check_cc_key(struct qib_ibport *ibp, struct ib_cc_mad *ccp, int mad_flags) { return 0; } static int process_cc(struct ib_device *ibdev, int mad_flags, u8 port, const struct ib_mad *in_mad, struct ib_mad *out_mad) { struct ib_cc_mad *ccp = (struct ib_cc_mad *)out_mad; struct qib_ibport *ibp = to_iport(ibdev, port); int ret; *out_mad = *in_mad; if (ccp->class_version != 2) { ccp->status |= IB_SMP_UNSUP_VERSION; ret = reply((struct ib_smp *)ccp); goto bail; } ret = check_cc_key(ibp, ccp, mad_flags); if (ret) goto bail; switch (ccp->method) { case IB_MGMT_METHOD_GET: switch (ccp->attr_id) { case IB_CC_ATTR_CLASSPORTINFO: ret = cc_get_classportinfo(ccp, ibdev); goto bail; case IB_CC_ATTR_CONGESTION_INFO: ret = cc_get_congestion_info(ccp, ibdev, port); goto bail; case IB_CC_ATTR_CA_CONGESTION_SETTING: ret = cc_get_congestion_setting(ccp, ibdev, port); goto bail; case IB_CC_ATTR_CONGESTION_CONTROL_TABLE: ret = cc_get_congestion_control_table(ccp, ibdev, port); goto bail; /* FALLTHROUGH */ default: ccp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_smp *) ccp); goto bail; } case IB_MGMT_METHOD_SET: switch (ccp->attr_id) { case IB_CC_ATTR_CA_CONGESTION_SETTING: ret = cc_set_congestion_setting(ccp, ibdev, port); goto bail; case IB_CC_ATTR_CONGESTION_CONTROL_TABLE: ret = cc_set_congestion_control_table(ccp, ibdev, port); goto bail; /* FALLTHROUGH */ default: ccp->status |= IB_SMP_UNSUP_METH_ATTR; ret = reply((struct ib_smp *) ccp); goto bail; } case IB_MGMT_METHOD_GET_RESP: /* * The ib_mad module will call us to process responses * before checking for other consumers. * Just tell the caller to process it normally. */ ret = IB_MAD_RESULT_SUCCESS; goto bail; case IB_MGMT_METHOD_TRAP: default: ccp->status |= IB_SMP_UNSUP_METHOD; ret = reply((struct ib_smp *) ccp); } bail: return ret; } /** * qib_process_mad - process an incoming MAD packet * @ibdev: the infiniband device this packet came in on * @mad_flags: MAD flags * @port: the port number this packet came in on * @in_wc: the work completion entry for this packet * @in_grh: the global route header for this packet * @in_mad: the incoming MAD * @out_mad: any outgoing MAD reply * * Returns IB_MAD_RESULT_SUCCESS if this is a MAD that we are not * interested in processing. * * Note that the verbs framework has already done the MAD sanity checks, * and hop count/pointer updating for IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE * MADs. 
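 * Congestion-control MADs are dispatched only when the shadow CC tables
 * exist and qib_cc_table_size is nonzero; otherwise they are reported
 * as successes without further processing.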
* * This is called by the ib_mad module. */ int qib_process_mad(struct ib_device *ibdev, int mad_flags, u8 port, const struct ib_wc *in_wc, const struct ib_grh *in_grh, const struct ib_mad_hdr *in, size_t in_mad_size, struct ib_mad_hdr *out, size_t *out_mad_size, u16 *out_mad_pkey_index) { int ret; struct qib_ibport *ibp = to_iport(ibdev, port); struct qib_pportdata *ppd = ppd_from_ibp(ibp); const struct ib_mad *in_mad = (const struct ib_mad *)in; struct ib_mad *out_mad = (struct ib_mad *)out; if (WARN_ON_ONCE(in_mad_size != sizeof(*in_mad) || *out_mad_size != sizeof(*out_mad))) return IB_MAD_RESULT_FAILURE; switch (in_mad->mad_hdr.mgmt_class) { case IB_MGMT_CLASS_SUBN_DIRECTED_ROUTE: case IB_MGMT_CLASS_SUBN_LID_ROUTED: ret = process_subn(ibdev, mad_flags, port, in_mad, out_mad); goto bail; case IB_MGMT_CLASS_PERF_MGMT: ret = process_perf(ibdev, port, in_mad, out_mad); goto bail; case IB_MGMT_CLASS_CONG_MGMT: if (!ppd->congestion_entries_shadow || !qib_cc_table_size) { ret = IB_MAD_RESULT_SUCCESS; goto bail; } ret = process_cc(ibdev, mad_flags, port, in_mad, out_mad); goto bail; default: ret = IB_MAD_RESULT_SUCCESS; } bail: return ret; } static void send_handler(struct ib_mad_agent *agent, struct ib_mad_send_wc *mad_send_wc) { ib_free_send_mad(mad_send_wc->send_buf); } static void xmit_wait_timer_func(unsigned long opaque) { struct qib_pportdata *ppd = (struct qib_pportdata *)opaque; struct qib_devdata *dd = dd_from_ppd(ppd); unsigned long flags; u8 status; spin_lock_irqsave(&ppd->ibport_data.lock, flags); if (ppd->cong_stats.flags == IB_PMA_CONG_HW_CONTROL_SAMPLE) { status = dd->f_portcntr(ppd, QIBPORTCNTR_PSSTAT); if (status == IB_PMA_SAMPLE_STATUS_DONE) { /* save counter cache */ cache_hw_sample_counters(ppd); ppd->cong_stats.flags = IB_PMA_CONG_HW_CONTROL_TIMER; } else goto done; } ppd->cong_stats.counter = xmit_wait_get_value_delta(ppd); dd->f_set_cntr_sample(ppd, QIB_CONG_TIMER_PSINTERVAL, 0x0); done: spin_unlock_irqrestore(&ppd->ibport_data.lock, flags); mod_timer(&ppd->cong_stats.timer, jiffies + HZ); } int qib_create_agents(struct qib_ibdev *dev) { struct qib_devdata *dd = dd_from_dev(dev); struct ib_mad_agent *agent; struct qib_ibport *ibp; int p; int ret; for (p = 0; p < dd->num_pports; p++) { ibp = &dd->pport[p].ibport_data; agent = ib_register_mad_agent(&dev->ibdev, p + 1, IB_QPT_SMI, NULL, 0, send_handler, NULL, NULL, 0); if (IS_ERR(agent)) { ret = PTR_ERR(agent); goto err; } /* Initialize xmit_wait structure */ dd->pport[p].cong_stats.counter = 0; init_timer(&dd->pport[p].cong_stats.timer); dd->pport[p].cong_stats.timer.function = xmit_wait_timer_func; dd->pport[p].cong_stats.timer.data = (unsigned long)(&dd->pport[p]); dd->pport[p].cong_stats.timer.expires = 0; add_timer(&dd->pport[p].cong_stats.timer); ibp->send_agent = agent; } return 0; err: for (p = 0; p < dd->num_pports; p++) { ibp = &dd->pport[p].ibport_data; if (ibp->send_agent) { agent = ibp->send_agent; ibp->send_agent = NULL; ib_unregister_mad_agent(agent); } } return ret; } void qib_free_agents(struct qib_ibdev *dev) { struct qib_devdata *dd = dd_from_dev(dev); struct ib_mad_agent *agent; struct qib_ibport *ibp; int p; for (p = 0; p < dd->num_pports; p++) { ibp = &dd->pport[p].ibport_data; if (ibp->send_agent) { agent = ibp->send_agent; ibp->send_agent = NULL; ib_unregister_mad_agent(agent); } if (ibp->sm_ah) { ib_destroy_ah(&ibp->sm_ah->ibah); ibp->sm_ah = NULL; } if (dd->pport[p].cong_stats.timer.data) del_timer_sync(&dd->pport[p].cong_stats.timer); } }
gpl-2.0
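The congestion-control table handling in the qib record above receives the CCT in 64-entry blocks: ccti_limit is the big-endian index of the last valid entry in the whole table, so a block's entry count is (ccti_limit % IB_CCT_ENTRIES) + 1. A minimal user-space sketch of that bookkeeping follows, assuming IB_CCT_ENTRIES == 64 as in the driver; the names cc_table_block and cct_block_entry_count are hypothetical helpers, not part of the kernel API.

#include <stdint.h>
#include <stdio.h>
#include <arpa/inet.h>

#define IB_CCT_ENTRIES 64

struct cc_table_block {            /* hypothetical stand-in for the MAD attribute */
    uint16_t ccti_limit_be;        /* last valid CCT index, big-endian on the wire */
    uint16_t entries_be[IB_CCT_ENTRIES];
};

/* Number of valid entries carried by one 64-entry block. */
static int cct_block_entry_count(const struct cc_table_block *blk)
{
    uint16_t limit = ntohs(blk->ccti_limit_be);

    /* ccti_limit indexes the whole table; within a block it is 0..63 */
    return (int)(limit % IB_CCT_ENTRIES) + 1;
}

int main(void)
{
    struct cc_table_block blk = { .ccti_limit_be = htons(130) };

    /* 130 % 64 = 2, so this block holds entries 0..2 -> 3 entries */
    printf("entries in block: %d\n", cct_block_entry_count(&blk));
    return 0;
}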
nuxeh/linux
drivers/hwmon/acpi_power_meter.c
2102
24384
/* * A hwmon driver for ACPI 4.0 power meters * Copyright (C) 2009 IBM * * Author: Darrick J. Wong <darrick.wong@oracle.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/jiffies.h> #include <linux/mutex.h> #include <linux/dmi.h> #include <linux/slab.h> #include <linux/kdev_t.h> #include <linux/sched.h> #include <linux/time.h> #include <linux/err.h> #include <linux/acpi.h> #define ACPI_POWER_METER_NAME "power_meter" ACPI_MODULE_NAME(ACPI_POWER_METER_NAME); #define ACPI_POWER_METER_DEVICE_NAME "Power Meter" #define ACPI_POWER_METER_CLASS "pwr_meter_resource" #define NUM_SENSORS 17 #define POWER_METER_CAN_MEASURE (1 << 0) #define POWER_METER_CAN_TRIP (1 << 1) #define POWER_METER_CAN_CAP (1 << 2) #define POWER_METER_CAN_NOTIFY (1 << 3) #define POWER_METER_IS_BATTERY (1 << 8) #define UNKNOWN_HYSTERESIS 0xFFFFFFFF #define METER_NOTIFY_CONFIG 0x80 #define METER_NOTIFY_TRIP 0x81 #define METER_NOTIFY_CAP 0x82 #define METER_NOTIFY_CAPPING 0x83 #define METER_NOTIFY_INTERVAL 0x84 #define POWER_AVERAGE_NAME "power1_average" #define POWER_CAP_NAME "power1_cap" #define POWER_AVG_INTERVAL_NAME "power1_average_interval" #define POWER_ALARM_NAME "power1_alarm" static int cap_in_hardware; static bool force_cap_on; static int can_cap_in_hardware(void) { return force_cap_on || cap_in_hardware; } static const struct acpi_device_id power_meter_ids[] = { {"ACPI000D", 0}, {"", 0}, }; MODULE_DEVICE_TABLE(acpi, power_meter_ids); struct acpi_power_meter_capabilities { u64 flags; u64 units; u64 type; u64 accuracy; u64 sampling_time; u64 min_avg_interval; u64 max_avg_interval; u64 hysteresis; u64 configurable_cap; u64 min_cap; u64 max_cap; }; struct acpi_power_meter_resource { struct acpi_device *acpi_dev; acpi_bus_id name; struct mutex lock; struct device *hwmon_dev; struct acpi_power_meter_capabilities caps; acpi_string model_number; acpi_string serial_number; acpi_string oem_info; u64 power; u64 cap; u64 avg_interval; int sensors_valid; unsigned long sensors_last_updated; struct sensor_device_attribute sensors[NUM_SENSORS]; int num_sensors; s64 trip[2]; int num_domain_devices; struct acpi_device **domain_devices; struct kobject *holders_dir; }; struct sensor_template { char *label; ssize_t (*show)(struct device *dev, struct device_attribute *devattr, char *buf); ssize_t (*set)(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count); int index; }; /* Averaging interval */ static int update_avg_interval(struct acpi_power_meter_resource *resource) { unsigned long long data; acpi_status status; status = acpi_evaluate_integer(resource->acpi_dev->handle, "_GAI", NULL, &data); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Evaluating _GAI")); return -ENODEV; } resource->avg_interval = data; return 0; } static ssize_t 
show_avg_interval(struct device *dev, struct device_attribute *devattr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); struct acpi_power_meter_resource *resource = acpi_dev->driver_data; mutex_lock(&resource->lock); update_avg_interval(resource); mutex_unlock(&resource->lock); return sprintf(buf, "%llu\n", resource->avg_interval); } static ssize_t set_avg_interval(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct acpi_device *acpi_dev = to_acpi_device(dev); struct acpi_power_meter_resource *resource = acpi_dev->driver_data; union acpi_object arg0 = { ACPI_TYPE_INTEGER }; struct acpi_object_list args = { 1, &arg0 }; int res; unsigned long temp; unsigned long long data; acpi_status status; res = kstrtoul(buf, 10, &temp); if (res) return res; if (temp > resource->caps.max_avg_interval || temp < resource->caps.min_avg_interval) return -EINVAL; arg0.integer.value = temp; mutex_lock(&resource->lock); status = acpi_evaluate_integer(resource->acpi_dev->handle, "_PAI", &args, &data); if (!ACPI_FAILURE(status)) resource->avg_interval = temp; mutex_unlock(&resource->lock); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PAI")); return -EINVAL; } /* _PAI returns 0 on success, nonzero otherwise */ if (data) return -EINVAL; return count; } /* Cap functions */ static int update_cap(struct acpi_power_meter_resource *resource) { unsigned long long data; acpi_status status; status = acpi_evaluate_integer(resource->acpi_dev->handle, "_GHL", NULL, &data); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Evaluating _GHL")); return -ENODEV; } resource->cap = data; return 0; } static ssize_t show_cap(struct device *dev, struct device_attribute *devattr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); struct acpi_power_meter_resource *resource = acpi_dev->driver_data; mutex_lock(&resource->lock); update_cap(resource); mutex_unlock(&resource->lock); return sprintf(buf, "%llu\n", resource->cap * 1000); } static ssize_t set_cap(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct acpi_device *acpi_dev = to_acpi_device(dev); struct acpi_power_meter_resource *resource = acpi_dev->driver_data; union acpi_object arg0 = { ACPI_TYPE_INTEGER }; struct acpi_object_list args = { 1, &arg0 }; int res; unsigned long temp; unsigned long long data; acpi_status status; res = kstrtoul(buf, 10, &temp); if (res) return res; temp = DIV_ROUND_CLOSEST(temp, 1000); if (temp > resource->caps.max_cap || temp < resource->caps.min_cap) return -EINVAL; arg0.integer.value = temp; mutex_lock(&resource->lock); status = acpi_evaluate_integer(resource->acpi_dev->handle, "_SHL", &args, &data); if (!ACPI_FAILURE(status)) resource->cap = temp; mutex_unlock(&resource->lock); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Evaluating _SHL")); return -EINVAL; } /* _SHL returns 0 on success, nonzero otherwise */ if (data) return -EINVAL; return count; } /* Power meter trip points */ static int set_acpi_trip(struct acpi_power_meter_resource *resource) { union acpi_object arg_objs[] = { {ACPI_TYPE_INTEGER}, {ACPI_TYPE_INTEGER} }; struct acpi_object_list args = { 2, arg_objs }; unsigned long long data; acpi_status status; /* Both trip levels must be set */ if (resource->trip[0] < 0 || resource->trip[1] < 0) return 0; /* This driver stores min, max; ACPI wants max, min. 
*/ arg_objs[0].integer.value = resource->trip[1]; arg_objs[1].integer.value = resource->trip[0]; status = acpi_evaluate_integer(resource->acpi_dev->handle, "_PTP", &args, &data); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PTP")); return -EINVAL; } /* _PTP returns 0 on success, nonzero otherwise */ if (data) return -EINVAL; return 0; } static ssize_t set_trip(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct acpi_device *acpi_dev = to_acpi_device(dev); struct acpi_power_meter_resource *resource = acpi_dev->driver_data; int res; unsigned long temp; res = kstrtoul(buf, 10, &temp); if (res) return res; temp = DIV_ROUND_CLOSEST(temp, 1000); mutex_lock(&resource->lock); resource->trip[attr->index - 7] = temp; res = set_acpi_trip(resource); mutex_unlock(&resource->lock); if (res) return res; return count; } /* Power meter */ static int update_meter(struct acpi_power_meter_resource *resource) { unsigned long long data; acpi_status status; unsigned long local_jiffies = jiffies; if (time_before(local_jiffies, resource->sensors_last_updated + msecs_to_jiffies(resource->caps.sampling_time)) && resource->sensors_valid) return 0; status = acpi_evaluate_integer(resource->acpi_dev->handle, "_PMM", NULL, &data); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PMM")); return -ENODEV; } resource->power = data; resource->sensors_valid = 1; resource->sensors_last_updated = jiffies; return 0; } static ssize_t show_power(struct device *dev, struct device_attribute *devattr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); struct acpi_power_meter_resource *resource = acpi_dev->driver_data; mutex_lock(&resource->lock); update_meter(resource); mutex_unlock(&resource->lock); return sprintf(buf, "%llu\n", resource->power * 1000); } /* Miscellaneous */ static ssize_t show_str(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct acpi_device *acpi_dev = to_acpi_device(dev); struct acpi_power_meter_resource *resource = acpi_dev->driver_data; acpi_string val; switch (attr->index) { case 0: val = resource->model_number; break; case 1: val = resource->serial_number; break; case 2: val = resource->oem_info; break; default: WARN(1, "Implementation error: unexpected attribute index %d\n", attr->index); val = ""; break; } return sprintf(buf, "%s\n", val); } static ssize_t show_val(struct device *dev, struct device_attribute *devattr, char *buf) { struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); struct acpi_device *acpi_dev = to_acpi_device(dev); struct acpi_power_meter_resource *resource = acpi_dev->driver_data; u64 val = 0; switch (attr->index) { case 0: val = resource->caps.min_avg_interval; break; case 1: val = resource->caps.max_avg_interval; break; case 2: val = resource->caps.min_cap * 1000; break; case 3: val = resource->caps.max_cap * 1000; break; case 4: if (resource->caps.hysteresis == UNKNOWN_HYSTERESIS) return sprintf(buf, "unknown\n"); val = resource->caps.hysteresis * 1000; break; case 5: if (resource->caps.flags & POWER_METER_IS_BATTERY) val = 1; else val = 0; break; case 6: if (resource->power > resource->cap) val = 1; else val = 0; break; case 7: case 8: if (resource->trip[attr->index - 7] < 0) return sprintf(buf, "unknown\n"); val = resource->trip[attr->index - 7] * 1000; break; default: WARN(1, "Implementation error: 
unexpected attribute index %d\n", attr->index); break; } return sprintf(buf, "%llu\n", val); } static ssize_t show_accuracy(struct device *dev, struct device_attribute *devattr, char *buf) { struct acpi_device *acpi_dev = to_acpi_device(dev); struct acpi_power_meter_resource *resource = acpi_dev->driver_data; unsigned int acc = resource->caps.accuracy; return sprintf(buf, "%u.%u%%\n", acc / 1000, acc % 1000); } static ssize_t show_name(struct device *dev, struct device_attribute *devattr, char *buf) { return sprintf(buf, "%s\n", ACPI_POWER_METER_NAME); } #define RO_SENSOR_TEMPLATE(_label, _show, _index) \ { \ .label = _label, \ .show = _show, \ .index = _index, \ } #define RW_SENSOR_TEMPLATE(_label, _show, _set, _index) \ { \ .label = _label, \ .show = _show, \ .set = _set, \ .index = _index, \ } /* Sensor descriptions. If you add a sensor, update NUM_SENSORS above! */ static struct sensor_template meter_attrs[] = { RO_SENSOR_TEMPLATE(POWER_AVERAGE_NAME, show_power, 0), RO_SENSOR_TEMPLATE("power1_accuracy", show_accuracy, 0), RO_SENSOR_TEMPLATE("power1_average_interval_min", show_val, 0), RO_SENSOR_TEMPLATE("power1_average_interval_max", show_val, 1), RO_SENSOR_TEMPLATE("power1_is_battery", show_val, 5), RW_SENSOR_TEMPLATE(POWER_AVG_INTERVAL_NAME, show_avg_interval, set_avg_interval, 0), {}, }; static struct sensor_template misc_cap_attrs[] = { RO_SENSOR_TEMPLATE("power1_cap_min", show_val, 2), RO_SENSOR_TEMPLATE("power1_cap_max", show_val, 3), RO_SENSOR_TEMPLATE("power1_cap_hyst", show_val, 4), RO_SENSOR_TEMPLATE(POWER_ALARM_NAME, show_val, 6), {}, }; static struct sensor_template ro_cap_attrs[] = { RO_SENSOR_TEMPLATE(POWER_CAP_NAME, show_cap, 0), {}, }; static struct sensor_template rw_cap_attrs[] = { RW_SENSOR_TEMPLATE(POWER_CAP_NAME, show_cap, set_cap, 0), {}, }; static struct sensor_template trip_attrs[] = { RW_SENSOR_TEMPLATE("power1_average_min", show_val, set_trip, 7), RW_SENSOR_TEMPLATE("power1_average_max", show_val, set_trip, 8), {}, }; static struct sensor_template misc_attrs[] = { RO_SENSOR_TEMPLATE("name", show_name, 0), RO_SENSOR_TEMPLATE("power1_model_number", show_str, 0), RO_SENSOR_TEMPLATE("power1_oem_info", show_str, 2), RO_SENSOR_TEMPLATE("power1_serial_number", show_str, 1), {}, }; #undef RO_SENSOR_TEMPLATE #undef RW_SENSOR_TEMPLATE /* Read power domain data */ static void remove_domain_devices(struct acpi_power_meter_resource *resource) { int i; if (!resource->num_domain_devices) return; for (i = 0; i < resource->num_domain_devices; i++) { struct acpi_device *obj = resource->domain_devices[i]; if (!obj) continue; sysfs_remove_link(resource->holders_dir, kobject_name(&obj->dev.kobj)); put_device(&obj->dev); } kfree(resource->domain_devices); kobject_put(resource->holders_dir); resource->num_domain_devices = 0; } static int read_domain_devices(struct acpi_power_meter_resource *resource) { int res = 0; int i; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *pss; acpi_status status; status = acpi_evaluate_object(resource->acpi_dev->handle, "_PMD", NULL, &buffer); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PMD")); return -ENODEV; } pss = buffer.pointer; if (!pss || pss->type != ACPI_TYPE_PACKAGE) { dev_err(&resource->acpi_dev->dev, ACPI_POWER_METER_NAME "Invalid _PMD data\n"); res = -EFAULT; goto end; } if (!pss->package.count) goto end; resource->domain_devices = kzalloc(sizeof(struct acpi_device *) * pss->package.count, GFP_KERNEL); if (!resource->domain_devices) { res = -ENOMEM; goto end; } 
resource->holders_dir = kobject_create_and_add("measures", &resource->acpi_dev->dev.kobj); if (!resource->holders_dir) { res = -ENOMEM; goto exit_free; } resource->num_domain_devices = pss->package.count; for (i = 0; i < pss->package.count; i++) { struct acpi_device *obj; union acpi_object *element = &(pss->package.elements[i]); /* Refuse non-references */ if (element->type != ACPI_TYPE_LOCAL_REFERENCE) continue; /* Create a symlink to domain objects */ resource->domain_devices[i] = NULL; if (acpi_bus_get_device(element->reference.handle, &resource->domain_devices[i])) continue; obj = resource->domain_devices[i]; get_device(&obj->dev); res = sysfs_create_link(resource->holders_dir, &obj->dev.kobj, kobject_name(&obj->dev.kobj)); if (res) { put_device(&obj->dev); resource->domain_devices[i] = NULL; } } res = 0; goto end; exit_free: kfree(resource->domain_devices); end: kfree(buffer.pointer); return res; } /* Registration and deregistration */ static int register_attrs(struct acpi_power_meter_resource *resource, struct sensor_template *attrs) { struct device *dev = &resource->acpi_dev->dev; struct sensor_device_attribute *sensors = &resource->sensors[resource->num_sensors]; int res = 0; while (attrs->label) { sensors->dev_attr.attr.name = attrs->label; sensors->dev_attr.attr.mode = S_IRUGO; sensors->dev_attr.show = attrs->show; sensors->index = attrs->index; if (attrs->set) { sensors->dev_attr.attr.mode |= S_IWUSR; sensors->dev_attr.store = attrs->set; } sysfs_attr_init(&sensors->dev_attr.attr); res = device_create_file(dev, &sensors->dev_attr); if (res) { sensors->dev_attr.attr.name = NULL; goto error; } sensors++; resource->num_sensors++; attrs++; } error: return res; } static void remove_attrs(struct acpi_power_meter_resource *resource) { int i; for (i = 0; i < resource->num_sensors; i++) { if (!resource->sensors[i].dev_attr.attr.name) continue; device_remove_file(&resource->acpi_dev->dev, &resource->sensors[i].dev_attr); } remove_domain_devices(resource); resource->num_sensors = 0; } static int setup_attrs(struct acpi_power_meter_resource *resource) { int res = 0; res = read_domain_devices(resource); if (res) return res; if (resource->caps.flags & POWER_METER_CAN_MEASURE) { res = register_attrs(resource, meter_attrs); if (res) goto error; } if (resource->caps.flags & POWER_METER_CAN_CAP) { if (!can_cap_in_hardware()) { dev_err(&resource->acpi_dev->dev, "Ignoring unsafe software power cap!\n"); goto skip_unsafe_cap; } if (resource->caps.configurable_cap) res = register_attrs(resource, rw_cap_attrs); else res = register_attrs(resource, ro_cap_attrs); if (res) goto error; res = register_attrs(resource, misc_cap_attrs); if (res) goto error; } skip_unsafe_cap: if (resource->caps.flags & POWER_METER_CAN_TRIP) { res = register_attrs(resource, trip_attrs); if (res) goto error; } res = register_attrs(resource, misc_attrs); if (res) goto error; return res; error: remove_attrs(resource); return res; } static void free_capabilities(struct acpi_power_meter_resource *resource) { acpi_string *str; int i; str = &resource->model_number; for (i = 0; i < 3; i++, str++) kfree(*str); } static int read_capabilities(struct acpi_power_meter_resource *resource) { int res = 0; int i; struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL }; struct acpi_buffer state = { 0, NULL }; struct acpi_buffer format = { sizeof("NNNNNNNNNNN"), "NNNNNNNNNNN" }; union acpi_object *pss; acpi_string *str; acpi_status status; status = acpi_evaluate_object(resource->acpi_dev->handle, "_PMC", NULL, &buffer); if 
(ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Evaluating _PMC")); return -ENODEV; } pss = buffer.pointer; if (!pss || pss->type != ACPI_TYPE_PACKAGE || pss->package.count != 14) { dev_err(&resource->acpi_dev->dev, ACPI_POWER_METER_NAME "Invalid _PMC data\n"); res = -EFAULT; goto end; } /* Grab all the integer data at once */ state.length = sizeof(struct acpi_power_meter_capabilities); state.pointer = &resource->caps; status = acpi_extract_package(pss, &format, &state); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Invalid data")); res = -EFAULT; goto end; } if (resource->caps.units) { dev_err(&resource->acpi_dev->dev, ACPI_POWER_METER_NAME "Unknown units %llu.\n", resource->caps.units); res = -EINVAL; goto end; } /* Grab the string data */ str = &resource->model_number; for (i = 11; i < 14; i++) { union acpi_object *element = &(pss->package.elements[i]); if (element->type != ACPI_TYPE_STRING) { res = -EINVAL; goto error; } *str = kzalloc(sizeof(u8) * (element->string.length + 1), GFP_KERNEL); if (!*str) { res = -ENOMEM; goto error; } strncpy(*str, element->string.pointer, element->string.length); str++; } dev_info(&resource->acpi_dev->dev, "Found ACPI power meter.\n"); goto end; error: str = &resource->model_number; for (i = 0; i < 3; i++, str++) kfree(*str); end: kfree(buffer.pointer); return res; } /* Handle ACPI event notifications */ static void acpi_power_meter_notify(struct acpi_device *device, u32 event) { struct acpi_power_meter_resource *resource; int res; if (!device || !acpi_driver_data(device)) return; resource = acpi_driver_data(device); mutex_lock(&resource->lock); switch (event) { case METER_NOTIFY_CONFIG: free_capabilities(resource); res = read_capabilities(resource); if (res) break; remove_attrs(resource); setup_attrs(resource); break; case METER_NOTIFY_TRIP: sysfs_notify(&device->dev.kobj, NULL, POWER_AVERAGE_NAME); update_meter(resource); break; case METER_NOTIFY_CAP: sysfs_notify(&device->dev.kobj, NULL, POWER_CAP_NAME); update_cap(resource); break; case METER_NOTIFY_INTERVAL: sysfs_notify(&device->dev.kobj, NULL, POWER_AVG_INTERVAL_NAME); update_avg_interval(resource); break; case METER_NOTIFY_CAPPING: sysfs_notify(&device->dev.kobj, NULL, POWER_ALARM_NAME); dev_info(&device->dev, "Capping in progress.\n"); break; default: WARN(1, "Unexpected event %d\n", event); break; } mutex_unlock(&resource->lock); acpi_bus_generate_netlink_event(ACPI_POWER_METER_CLASS, dev_name(&device->dev), event, 0); } static int acpi_power_meter_add(struct acpi_device *device) { int res; struct acpi_power_meter_resource *resource; if (!device) return -EINVAL; resource = kzalloc(sizeof(struct acpi_power_meter_resource), GFP_KERNEL); if (!resource) return -ENOMEM; resource->sensors_valid = 0; resource->acpi_dev = device; mutex_init(&resource->lock); strcpy(acpi_device_name(device), ACPI_POWER_METER_DEVICE_NAME); strcpy(acpi_device_class(device), ACPI_POWER_METER_CLASS); device->driver_data = resource; free_capabilities(resource); res = read_capabilities(resource); if (res) goto exit_free; resource->trip[0] = resource->trip[1] = -1; res = setup_attrs(resource); if (res) goto exit_free; resource->hwmon_dev = hwmon_device_register(&device->dev); if (IS_ERR(resource->hwmon_dev)) { res = PTR_ERR(resource->hwmon_dev); goto exit_remove; } res = 0; goto exit; exit_remove: remove_attrs(resource); exit_free: kfree(resource); exit: return res; } static int acpi_power_meter_remove(struct acpi_device *device) { struct acpi_power_meter_resource *resource; if (!device || 
!acpi_driver_data(device)) return -EINVAL; resource = acpi_driver_data(device); hwmon_device_unregister(resource->hwmon_dev); free_capabilities(resource); remove_attrs(resource); kfree(resource); return 0; } #ifdef CONFIG_PM_SLEEP static int acpi_power_meter_resume(struct device *dev) { struct acpi_power_meter_resource *resource; if (!dev) return -EINVAL; resource = acpi_driver_data(to_acpi_device(dev)); if (!resource) return -EINVAL; free_capabilities(resource); read_capabilities(resource); return 0; } #endif /* CONFIG_PM_SLEEP */ static SIMPLE_DEV_PM_OPS(acpi_power_meter_pm, NULL, acpi_power_meter_resume); static struct acpi_driver acpi_power_meter_driver = { .name = "power_meter", .class = ACPI_POWER_METER_CLASS, .ids = power_meter_ids, .ops = { .add = acpi_power_meter_add, .remove = acpi_power_meter_remove, .notify = acpi_power_meter_notify, }, .drv.pm = &acpi_power_meter_pm, }; /* Module init/exit routines */ static int __init enable_cap_knobs(const struct dmi_system_id *d) { cap_in_hardware = 1; return 0; } static struct dmi_system_id __initdata pm_dmi_table[] = { { enable_cap_knobs, "IBM Active Energy Manager", { DMI_MATCH(DMI_SYS_VENDOR, "IBM") }, }, {} }; static int __init acpi_power_meter_init(void) { int result; if (acpi_disabled) return -ENODEV; dmi_check_system(pm_dmi_table); result = acpi_bus_register_driver(&acpi_power_meter_driver); if (result < 0) return result; return 0; } static void __exit acpi_power_meter_exit(void) { acpi_bus_unregister_driver(&acpi_power_meter_driver); } MODULE_AUTHOR("Darrick J. Wong <darrick.wong@oracle.com>"); MODULE_DESCRIPTION("ACPI 4.0 power meter driver"); MODULE_LICENSE("GPL"); module_param(force_cap_on, bool, 0644); MODULE_PARM_DESC(force_cap_on, "Enable power cap even if it is unsafe to do so."); module_init(acpi_power_meter_init); module_exit(acpi_power_meter_exit);
gpl-2.0
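The acpi_power_meter record above traffics in two units: sysfs reads and writes milliwatts, while the ACPI _SHL/_GHL methods take and return watts, so set_cap() rounds with DIV_ROUND_CLOSEST and show_cap() multiplies back by 1000. A small sketch of that round-trip follows; parse_cap_milliwatts() is a hypothetical helper, not driver code, and DIV_ROUND_CLOSEST is re-derived here the way the kernel defines it for positive operands.

#include <stdio.h>

#define DIV_ROUND_CLOSEST(x, d) (((x) + ((d) / 2)) / (d))

/* Convert a user-supplied milliwatt value to watts and range-check it
 * against the _PMC-reported min/max cap, as set_cap() does. */
static long parse_cap_milliwatts(unsigned long mw, unsigned long min_cap_w,
                                 unsigned long max_cap_w)
{
    unsigned long w = DIV_ROUND_CLOSEST(mw, 1000);

    if (w < min_cap_w || w > max_cap_w)
        return -1;    /* the driver returns -EINVAL here */
    return (long)w;
}

int main(void)
{
    /* 249600 mW rounds to 250 W; shown back through sysfs as 250000 mW */
    long w = parse_cap_milliwatts(249600, 100, 400);

    if (w >= 0)
        printf("cap: %ld W -> sysfs reads %ld\n", w, w * 1000);
    return 0;
}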
wurikiji/ttFS
hunbag/linux-3.10.61/arch/arm/mach-mmp/mmp2.c
2102
4483
/* * linux/arch/arm/mach-mmp/mmp2.c * * code name MMP2 * * Copyright (C) 2009 Marvell International Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/io.h> #include <linux/platform_device.h> #include <asm/hardware/cache-tauros2.h> #include <asm/mach/time.h> #include <mach/addr-map.h> #include <mach/regs-apbc.h> #include <mach/cputype.h> #include <mach/irqs.h> #include <mach/dma.h> #include <mach/mfp.h> #include <mach/devices.h> #include <mach/mmp2.h> #include "common.h" #define MFPR_VIRT_BASE (APB_VIRT_BASE + 0x1e000) static struct mfp_addr_map mmp2_addr_map[] __initdata = { MFP_ADDR_X(GPIO0, GPIO58, 0x54), MFP_ADDR_X(GPIO59, GPIO73, 0x280), MFP_ADDR_X(GPIO74, GPIO101, 0x170), MFP_ADDR(GPIO102, 0x0), MFP_ADDR(GPIO103, 0x4), MFP_ADDR(GPIO104, 0x1fc), MFP_ADDR(GPIO105, 0x1f8), MFP_ADDR(GPIO106, 0x1f4), MFP_ADDR(GPIO107, 0x1f0), MFP_ADDR(GPIO108, 0x21c), MFP_ADDR(GPIO109, 0x218), MFP_ADDR(GPIO110, 0x214), MFP_ADDR(GPIO111, 0x200), MFP_ADDR(GPIO112, 0x244), MFP_ADDR(GPIO113, 0x25c), MFP_ADDR(GPIO114, 0x164), MFP_ADDR_X(GPIO115, GPIO122, 0x260), MFP_ADDR(GPIO123, 0x148), MFP_ADDR_X(GPIO124, GPIO141, 0xc), MFP_ADDR(GPIO142, 0x8), MFP_ADDR_X(GPIO143, GPIO151, 0x220), MFP_ADDR_X(GPIO152, GPIO153, 0x248), MFP_ADDR_X(GPIO154, GPIO155, 0x254), MFP_ADDR_X(GPIO156, GPIO159, 0x14c), MFP_ADDR(GPIO160, 0x250), MFP_ADDR(GPIO161, 0x210), MFP_ADDR(GPIO162, 0x20c), MFP_ADDR(GPIO163, 0x208), MFP_ADDR(GPIO164, 0x204), MFP_ADDR(GPIO165, 0x1ec), MFP_ADDR(GPIO166, 0x1e8), MFP_ADDR(GPIO167, 0x1e4), MFP_ADDR(GPIO168, 0x1e0), MFP_ADDR_X(TWSI1_SCL, TWSI1_SDA, 0x140), MFP_ADDR_X(TWSI4_SCL, TWSI4_SDA, 0x2bc), MFP_ADDR(PMIC_INT, 0x2c4), MFP_ADDR(CLK_REQ, 0x160), MFP_ADDR_END, }; void mmp2_clear_pmic_int(void) { void __iomem *mfpr_pmic; unsigned long data; mfpr_pmic = APB_VIRT_BASE + 0x1e000 + 0x2c4; data = __raw_readl(mfpr_pmic); __raw_writel(data | (1 << 6), mfpr_pmic); __raw_writel(data, mfpr_pmic); } void __init mmp2_init_irq(void) { mmp2_init_icu(); } static int __init mmp2_init(void) { if (cpu_is_mmp2()) { #ifdef CONFIG_CACHE_TAUROS2 tauros2_init(0); #endif mfp_init_base(MFPR_VIRT_BASE); mfp_init_addr(mmp2_addr_map); pxa_init_dma(IRQ_MMP2_DMA_RIQ, 16); mmp2_clk_init(); } return 0; } postcore_initcall(mmp2_init); #define APBC_TIMERS APBC_REG(0x024) void __init mmp2_timer_init(void) { unsigned long clk_rst; __raw_writel(APBC_APBCLK | APBC_RST, APBC_TIMERS); /* * enable bus/functional clock, enable 6.5MHz (divider 4), * release reset */ clk_rst = APBC_APBCLK | APBC_FNCLK | APBC_FNCLKSEL(1); __raw_writel(clk_rst, APBC_TIMERS); timer_init(IRQ_MMP2_TIMER1); } /* on-chip devices */ MMP2_DEVICE(uart1, "pxa2xx-uart", 0, UART1, 0xd4030000, 0x30, 4, 5); MMP2_DEVICE(uart2, "pxa2xx-uart", 1, UART2, 0xd4017000, 0x30, 20, 21); MMP2_DEVICE(uart3, "pxa2xx-uart", 2, UART3, 0xd4018000, 0x30, 22, 23); MMP2_DEVICE(uart4, "pxa2xx-uart", 3, UART4, 0xd4016000, 0x30, 18, 19); MMP2_DEVICE(twsi1, "pxa2xx-i2c", 0, TWSI1, 0xd4011000, 0x70); MMP2_DEVICE(twsi2, "pxa2xx-i2c", 1, TWSI2, 0xd4031000, 0x70); MMP2_DEVICE(twsi3, "pxa2xx-i2c", 2, TWSI3, 0xd4032000, 0x70); MMP2_DEVICE(twsi4, "pxa2xx-i2c", 3, TWSI4, 0xd4033000, 0x70); MMP2_DEVICE(twsi5, "pxa2xx-i2c", 4, TWSI5, 0xd4033800, 0x70); MMP2_DEVICE(twsi6, "pxa2xx-i2c", 5, TWSI6, 0xd4034000, 0x70); MMP2_DEVICE(nand, "pxa3xx-nand", -1, NAND, 0xd4283000, 0x100, 28, 
29); MMP2_DEVICE(sdh0, "sdhci-pxav3", 0, MMC, 0xd4280000, 0x120); MMP2_DEVICE(sdh1, "sdhci-pxav3", 1, MMC2, 0xd4280800, 0x120); MMP2_DEVICE(sdh2, "sdhci-pxav3", 2, MMC3, 0xd4281000, 0x120); MMP2_DEVICE(sdh3, "sdhci-pxav3", 3, MMC4, 0xd4281800, 0x120); MMP2_DEVICE(asram, "asram", -1, NONE, 0xe0000000, 0x4000); /* 0xd1000000 ~ 0xd101ffff is reserved for secure processor */ MMP2_DEVICE(isram, "isram", -1, NONE, 0xd1020000, 0x18000); struct resource mmp2_resource_gpio[] = { { .start = 0xd4019000, .end = 0xd4019fff, .flags = IORESOURCE_MEM, }, { .start = IRQ_MMP2_GPIO, .end = IRQ_MMP2_GPIO, .name = "gpio_mux", .flags = IORESOURCE_IRQ, }, }; struct platform_device mmp2_device_gpio = { .name = "mmp2-gpio", .id = -1, .num_resources = ARRAY_SIZE(mmp2_resource_gpio), .resource = mmp2_resource_gpio, };
gpl-2.0
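mmp2_clear_pmic_int() in the record above clears a latched PMIC interrupt by pulsing the edge-clear bit (bit 6 of the PMIC_INT MFP register): read the register, write it back with the bit set, then restore the original value. The sketch below shows that read-modify-write-then-restore pattern under the assumption that a plain volatile pointer stands in for __raw_readl()/__raw_writel(); EDGE_CLEAR_BIT and pulse_edge_clear() are illustrative names only.

#include <stdint.h>

#define EDGE_CLEAR_BIT (1u << 6)

static inline void pulse_edge_clear(volatile uint32_t *reg)
{
    uint32_t val = *reg;          /* __raw_readl() */

    *reg = val | EDGE_CLEAR_BIT;  /* assert the edge-clear bit ... */
    *reg = val;                   /* ... then immediately restore */
}

int main(void)
{
    uint32_t fake_reg = 0x00c4;   /* stand-in for the mapped MFPR word */

    pulse_edge_clear(&fake_reg);
    return 0;
}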
crackerizer/android_kernel_samsung_smdk4412
fs/ubifs/io.c
2358
32321
/* * This file is part of UBIFS. * * Copyright (C) 2006-2008 Nokia Corporation. * Copyright (C) 2006, 2007 University of Szeged, Hungary * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 51 * Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * * Authors: Artem Bityutskiy (Битюцкий Артём) * Adrian Hunter * Zoltan Sogor */ /* * This file implements the UBIFS I/O subsystem which provides various * I/O-related helper functions (reading/writing/checking/validating nodes) and * implements write-buffering support. Write buffers help to save space which * otherwise would have been wasted for padding to the nearest minimal I/O unit * boundary. Instead, data first goes to the write-buffer and is flushed when * the buffer is full or when it is not used for some time (by timer). This is * similar to the mechanism used by JFFS2. * * UBIFS distinguishes between minimum write size (@c->min_io_size) and maximum * write size (@c->max_write_size). The latter is the maximum number of bytes * the underlying flash is able to program at a time, and writing in * @c->max_write_size units should presumably be faster. Obviously, * @c->min_io_size <= @c->max_write_size. Write-buffers are of * @c->max_write_size bytes in size for maximum performance. However, when a * write-buffer is flushed, only the portion of it (aligned to @c->min_io_size * boundary) which contains data is written, not the whole write-buffer, * because this is more space-efficient. * * This optimization adds a few complications to the code. Indeed, on the one * hand, we want to write in optimal @c->max_write_size bytes chunks, which * also means aligning writes at the @c->max_write_size bytes offsets. On the * other hand, we do not want to waste space when synchronizing the write * buffer, so during synchronization we write in smaller chunks. And this makes * the next write offset not aligned to @c->max_write_size bytes. So we * have to make sure that the write-buffer offset (@wbuf->offs) becomes aligned * to @c->max_write_size bytes again. We do this by temporarily shrinking * write-buffer size (@wbuf->size). * * Write-buffers are defined by 'struct ubifs_wbuf' objects and protected by * mutexes defined inside these objects. Since sometimes upper-level code * has to lock the write-buffer (e.g. journal space reservation code), many * functions related to write-buffers have "nolock" suffix which means that the * caller has to lock the write-buffer before calling this function. * * UBIFS stores nodes at 64 bit-aligned addresses. If the node length is not * aligned, UBIFS starts the next node from the aligned address, and the padded * bytes may contain any rubbish. In other words, UBIFS does not put padding * bytes in those small gaps. Common headers of nodes store real node lengths, * not aligned lengths. Indexing nodes also store real lengths in branches. * * UBIFS uses padding when it pads to the next min. I/O unit. In this case it * uses padding nodes or padding bytes, if the padding node does not fit.
* * All UBIFS nodes are protected by CRC checksums and UBIFS checks CRC when * they are read from the flash media. */ #include <linux/crc32.h> #include <linux/slab.h> #include "ubifs.h" /** * ubifs_ro_mode - switch UBIFS to read-only mode. * @c: UBIFS file-system description object * @err: error code which is the reason for switching to R/O mode */ void ubifs_ro_mode(struct ubifs_info *c, int err) { if (!c->ro_error) { c->ro_error = 1; c->no_chk_data_crc = 0; c->vfs_sb->s_flags |= MS_RDONLY; ubifs_warn("switched to read-only mode, error %d", err); dbg_dump_stack(); } } /** * ubifs_check_node - check node. * @c: UBIFS file-system description object * @buf: node to check * @lnum: logical eraseblock number * @offs: offset within the logical eraseblock * @quiet: print no messages * @must_chk_crc: indicates whether to always check the CRC * * This function checks node magic number and CRC checksum. This function also * validates node length to prevent UBIFS from becoming crazy when an attacker * feeds it a file-system image with incorrect nodes. For example, a too large * node length in the common header could cause UBIFS to read memory outside of * the allocated buffer when checking the CRC checksum. * * This function may skip data node CRC checking if @c->no_chk_data_crc is * true, which is controlled by the corresponding UBIFS mount option. However, if * @must_chk_crc is true, then @c->no_chk_data_crc is ignored and CRC is * checked. Similarly, if @c->mounting or @c->remounting_rw is true (we are * mounting or re-mounting to R/W mode), @c->no_chk_data_crc is ignored and CRC * is checked. This is because during mounting or re-mounting from R/O mode to * R/W mode we may read journal nodes (when replaying the journal or doing the * recovery) and the journal nodes may potentially be corrupted, so checking is * required. * * This function returns zero in case of success and %-EUCLEAN in case of bad * CRC or magic. */ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum, int offs, int quiet, int must_chk_crc) { int err = -EINVAL, type, node_len; uint32_t crc, node_crc, magic; const struct ubifs_ch *ch = buf; ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0); ubifs_assert(!(offs & 7) && offs < c->leb_size); magic = le32_to_cpu(ch->magic); if (magic != UBIFS_NODE_MAGIC) { if (!quiet) ubifs_err("bad magic %#08x, expected %#08x", magic, UBIFS_NODE_MAGIC); err = -EUCLEAN; goto out; } type = ch->node_type; if (type < 0 || type >= UBIFS_NODE_TYPES_CNT) { if (!quiet) ubifs_err("bad node type %d", type); goto out; } node_len = le32_to_cpu(ch->len); if (node_len + offs > c->leb_size) goto out_len; if (c->ranges[type].max_len == 0) { if (node_len != c->ranges[type].len) goto out_len; } else if (node_len < c->ranges[type].min_len || node_len > c->ranges[type].max_len) goto out_len; if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->mounting && !c->remounting_rw && c->no_chk_data_crc) return 0; crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8); node_crc = le32_to_cpu(ch->crc); if (crc != node_crc) { if (!quiet) ubifs_err("bad CRC: calculated %#08x, read %#08x", crc, node_crc); err = -EUCLEAN; goto out; } return 0; out_len: if (!quiet) ubifs_err("bad node length %d", node_len); out: if (!quiet) { ubifs_err("bad node at LEB %d:%d", lnum, offs); dbg_dump_node(c, buf); dbg_dump_stack(); } return err; } /** * ubifs_pad - pad flash space.
* @c: UBIFS file-system description object * @buf: buffer to put padding to * @pad: how many bytes to pad * * The flash media obliges us to write only in chunks of @c->min_io_size and * when we have to write less data we add a padding node to the write-buffer and * pad it to the next minimal I/O unit's boundary. Padding nodes help when the * media is being scanned. If the amount of wasted space is not enough to fit a * padding node which takes %UBIFS_PAD_NODE_SZ bytes, we write the padding byte * pattern (%UBIFS_PADDING_BYTE). * * Padding nodes are also used to fill gaps when the "commit-in-gaps" method is * used. */ void ubifs_pad(const struct ubifs_info *c, void *buf, int pad) { uint32_t crc; ubifs_assert(pad >= 0 && !(pad & 7)); if (pad >= UBIFS_PAD_NODE_SZ) { struct ubifs_ch *ch = buf; struct ubifs_pad_node *pad_node = buf; ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC); ch->node_type = UBIFS_PAD_NODE; ch->group_type = UBIFS_NO_NODE_GROUP; ch->padding[0] = ch->padding[1] = 0; ch->sqnum = 0; ch->len = cpu_to_le32(UBIFS_PAD_NODE_SZ); pad -= UBIFS_PAD_NODE_SZ; pad_node->pad_len = cpu_to_le32(pad); crc = crc32(UBIFS_CRC32_INIT, buf + 8, UBIFS_PAD_NODE_SZ - 8); ch->crc = cpu_to_le32(crc); memset(buf + UBIFS_PAD_NODE_SZ, 0, pad); } else if (pad > 0) /* Too little space, padding node won't fit */ memset(buf, UBIFS_PADDING_BYTE, pad); } /** * next_sqnum - get next sequence number. * @c: UBIFS file-system description object */ static unsigned long long next_sqnum(struct ubifs_info *c) { unsigned long long sqnum; spin_lock(&c->cnt_lock); sqnum = ++c->max_sqnum; spin_unlock(&c->cnt_lock); if (unlikely(sqnum >= SQNUM_WARN_WATERMARK)) { if (sqnum >= SQNUM_WATERMARK) { ubifs_err("sequence number overflow %llu, end of life", sqnum); ubifs_ro_mode(c, -EINVAL); } ubifs_warn("running out of sequence numbers, end of life soon"); } return sqnum; } /** * ubifs_prepare_node - prepare node to be written to flash. * @c: UBIFS file-system description object * @node: the node to pad * @len: node length * @pad: if the buffer has to be padded * * This function prepares the node at @node to be written to the media - it * calculates node CRC, fills the common header, and adds proper padding up to * the next minimum I/O unit if @pad is not zero. */ void ubifs_prepare_node(struct ubifs_info *c, void *node, int len, int pad) { uint32_t crc; struct ubifs_ch *ch = node; unsigned long long sqnum = next_sqnum(c); ubifs_assert(len >= UBIFS_CH_SZ); ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC); ch->len = cpu_to_le32(len); ch->group_type = UBIFS_NO_NODE_GROUP; ch->sqnum = cpu_to_le64(sqnum); ch->padding[0] = ch->padding[1] = 0; crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8); ch->crc = cpu_to_le32(crc); if (pad) { len = ALIGN(len, 8); pad = ALIGN(len, c->min_io_size) - len; ubifs_pad(c, node + len, pad); } } /** * ubifs_prep_grp_node - prepare node of a group to be written to flash. * @c: UBIFS file-system description object * @node: the node to pad * @len: node length * @last: indicates the last node of the group * * This function prepares the node at @node to be written to the media - it * calculates node CRC and fills the common header.
*/ void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last) { uint32_t crc; struct ubifs_ch *ch = node; unsigned long long sqnum = next_sqnum(c); ubifs_assert(len >= UBIFS_CH_SZ); ch->magic = cpu_to_le32(UBIFS_NODE_MAGIC); ch->len = cpu_to_le32(len); if (last) ch->group_type = UBIFS_LAST_OF_NODE_GROUP; else ch->group_type = UBIFS_IN_NODE_GROUP; ch->sqnum = cpu_to_le64(sqnum); ch->padding[0] = ch->padding[1] = 0; crc = crc32(UBIFS_CRC32_INIT, node + 8, len - 8); ch->crc = cpu_to_le32(crc); } /** * wbuf_timer_callback - write-buffer timer callback function. * @data: timer data (write-buffer descriptor) * * This function is called when the write-buffer timer expires. */ static enum hrtimer_restart wbuf_timer_callback_nolock(struct hrtimer *timer) { struct ubifs_wbuf *wbuf = container_of(timer, struct ubifs_wbuf, timer); dbg_io("jhead %s", dbg_jhead(wbuf->jhead)); wbuf->need_sync = 1; wbuf->c->need_wbuf_sync = 1; ubifs_wake_up_bgt(wbuf->c); return HRTIMER_NORESTART; } /** * new_wbuf_timer - start new write-buffer timer. * @wbuf: write-buffer descriptor */ static void new_wbuf_timer_nolock(struct ubifs_wbuf *wbuf) { ubifs_assert(!hrtimer_active(&wbuf->timer)); if (wbuf->no_timer) return; dbg_io("set timer for jhead %s, %llu-%llu millisecs", dbg_jhead(wbuf->jhead), div_u64(ktime_to_ns(wbuf->softlimit), USEC_PER_SEC), div_u64(ktime_to_ns(wbuf->softlimit) + wbuf->delta, USEC_PER_SEC)); hrtimer_start_range_ns(&wbuf->timer, wbuf->softlimit, wbuf->delta, HRTIMER_MODE_REL); } /** * cancel_wbuf_timer - cancel write-buffer timer. * @wbuf: write-buffer descriptor */ static void cancel_wbuf_timer_nolock(struct ubifs_wbuf *wbuf) { if (wbuf->no_timer) return; wbuf->need_sync = 0; hrtimer_cancel(&wbuf->timer); } /** * ubifs_wbuf_sync_nolock - synchronize write-buffer. * @wbuf: write-buffer to synchronize * * This function synchronizes write-buffer @wbuf and returns zero in case of * success or a negative error code in case of failure. * * Note, although write-buffers are of @c->max_write_size, this function does * not necessarily write all @c->max_write_size bytes to the flash. Instead, * if the write-buffer is only partially filled with data, only the used part * of the write-buffer (aligned on @c->min_io_size boundary) is synchronized. * This way we waste less space. */ int ubifs_wbuf_sync_nolock(struct ubifs_wbuf *wbuf) { struct ubifs_info *c = wbuf->c; int err, dirt, sync_len; cancel_wbuf_timer_nolock(wbuf); if (!wbuf->used || wbuf->lnum == -1) /* Write-buffer is empty or not seeked */ return 0; dbg_io("LEB %d:%d, %d bytes, jhead %s", wbuf->lnum, wbuf->offs, wbuf->used, dbg_jhead(wbuf->jhead)); ubifs_assert(!(wbuf->avail & 7)); ubifs_assert(wbuf->offs + wbuf->size <= c->leb_size); ubifs_assert(wbuf->size >= c->min_io_size); ubifs_assert(wbuf->size <= c->max_write_size); ubifs_assert(wbuf->size % c->min_io_size == 0); ubifs_assert(!c->ro_media && !c->ro_mount); if (c->leb_size - wbuf->offs >= c->max_write_size) ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size)); if (c->ro_error) return -EROFS; /* * Do not write the whole write buffer but write only the minimum necessary * amount of min. I/O units.
*/ sync_len = ALIGN(wbuf->used, c->min_io_size); dirt = sync_len - wbuf->used; if (dirt) ubifs_pad(c, wbuf->buf + wbuf->used, dirt); err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs, sync_len, wbuf->dtype); if (err) { ubifs_err("cannot write %d bytes to LEB %d:%d", sync_len, wbuf->lnum, wbuf->offs); dbg_dump_stack(); return err; } spin_lock(&wbuf->lock); wbuf->offs += sync_len; /* * Now @wbuf->offs is not necessarily aligned to @c->max_write_size. * But our goal is to optimize writes and make sure we write in * @c->max_write_size chunks and to @c->max_write_size-aligned offset. * Thus, if @wbuf->offs is not aligned to @c->max_write_size now, make * sure that @wbuf->offs + @wbuf->size is aligned to * @c->max_write_size. This way we make sure that after next * write-buffer flush we are again at the optimal offset (aligned to * @c->max_write_size). */ if (c->leb_size - wbuf->offs < c->max_write_size) wbuf->size = c->leb_size - wbuf->offs; else if (wbuf->offs & (c->max_write_size - 1)) wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs; else wbuf->size = c->max_write_size; wbuf->avail = wbuf->size; wbuf->used = 0; wbuf->next_ino = 0; spin_unlock(&wbuf->lock); if (wbuf->sync_callback) err = wbuf->sync_callback(c, wbuf->lnum, c->leb_size - wbuf->offs, dirt); return err; } /** * ubifs_wbuf_seek_nolock - seek write-buffer. * @wbuf: write-buffer * @lnum: logical eraseblock number to seek to * @offs: logical eraseblock offset to seek to * @dtype: data type * * This function targets the write-buffer to logical eraseblock @lnum:@offs. * The write-buffer has to be empty. Returns zero in case of success and a * negative error code in case of failure. */ int ubifs_wbuf_seek_nolock(struct ubifs_wbuf *wbuf, int lnum, int offs, int dtype) { const struct ubifs_info *c = wbuf->c; dbg_io("LEB %d:%d, jhead %s", lnum, offs, dbg_jhead(wbuf->jhead)); ubifs_assert(lnum >= 0 && lnum < c->leb_cnt); ubifs_assert(offs >= 0 && offs <= c->leb_size); ubifs_assert(offs % c->min_io_size == 0 && !(offs & 7)); ubifs_assert(lnum != wbuf->lnum); ubifs_assert(wbuf->used == 0); spin_lock(&wbuf->lock); wbuf->lnum = lnum; wbuf->offs = offs; if (c->leb_size - wbuf->offs < c->max_write_size) wbuf->size = c->leb_size - wbuf->offs; else if (wbuf->offs & (c->max_write_size - 1)) wbuf->size = ALIGN(wbuf->offs, c->max_write_size) - wbuf->offs; else wbuf->size = c->max_write_size; wbuf->avail = wbuf->size; wbuf->used = 0; spin_unlock(&wbuf->lock); wbuf->dtype = dtype; return 0; } /** * ubifs_bg_wbufs_sync - synchronize write-buffers. * @c: UBIFS file-system description object * * This function is called by background thread to synchronize write-buffers. * Returns zero in case of success and a negative error code in case of * failure. */ int ubifs_bg_wbufs_sync(struct ubifs_info *c) { int err, i; ubifs_assert(!c->ro_media && !c->ro_mount); if (!c->need_wbuf_sync) return 0; c->need_wbuf_sync = 0; if (c->ro_error) { err = -EROFS; goto out_timers; } dbg_io("synchronize"); for (i = 0; i < c->jhead_cnt; i++) { struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf; cond_resched(); /* * If the mutex is locked then wbuf is being changed, so * synchronization is not necessary. 
*/ if (mutex_is_locked(&wbuf->io_mutex)) continue; mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead); if (!wbuf->need_sync) { mutex_unlock(&wbuf->io_mutex); continue; } err = ubifs_wbuf_sync_nolock(wbuf); mutex_unlock(&wbuf->io_mutex); if (err) { ubifs_err("cannot sync write-buffer, error %d", err); ubifs_ro_mode(c, err); goto out_timers; } } return 0; out_timers: /* Cancel all timers to prevent repeated errors */ for (i = 0; i < c->jhead_cnt; i++) { struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf; mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead); cancel_wbuf_timer_nolock(wbuf); mutex_unlock(&wbuf->io_mutex); } return err; } /** * ubifs_wbuf_write_nolock - write data to flash via write-buffer. * @wbuf: write-buffer * @buf: node to write * @len: node length * * This function writes data to flash via write-buffer @wbuf. This means that * the last piece of the node won't reach the flash media immediately if it * does not take whole max. write unit (@c->max_write_size). Instead, the node * will sit in RAM until the write-buffer is synchronized (e.g., by timer, or * because more data are appended to the write-buffer). * * This function returns zero in case of success and a negative error code in * case of failure. If the node cannot be written because there is no more * space in this logical eraseblock, %-ENOSPC is returned. */ int ubifs_wbuf_write_nolock(struct ubifs_wbuf *wbuf, void *buf, int len) { struct ubifs_info *c = wbuf->c; int err, written, n, aligned_len = ALIGN(len, 8); dbg_io("%d bytes (%s) to jhead %s wbuf at LEB %d:%d", len, dbg_ntype(((struct ubifs_ch *)buf)->node_type), dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs + wbuf->used); ubifs_assert(len > 0 && wbuf->lnum >= 0 && wbuf->lnum < c->leb_cnt); ubifs_assert(wbuf->offs >= 0 && wbuf->offs % c->min_io_size == 0); ubifs_assert(!(wbuf->offs & 7) && wbuf->offs <= c->leb_size); ubifs_assert(wbuf->avail > 0 && wbuf->avail <= wbuf->size); ubifs_assert(wbuf->size >= c->min_io_size); ubifs_assert(wbuf->size <= c->max_write_size); ubifs_assert(wbuf->size % c->min_io_size == 0); ubifs_assert(mutex_is_locked(&wbuf->io_mutex)); ubifs_assert(!c->ro_media && !c->ro_mount); ubifs_assert(!c->space_fixup); if (c->leb_size - wbuf->offs >= c->max_write_size) ubifs_assert(!((wbuf->offs + wbuf->size) % c->max_write_size)); if (c->leb_size - wbuf->offs - wbuf->used < aligned_len) { err = -ENOSPC; goto out; } cancel_wbuf_timer_nolock(wbuf); if (c->ro_error) return -EROFS; if (aligned_len <= wbuf->avail) { /* * The node is not very large and fits entirely within * write-buffer. */ memcpy(wbuf->buf + wbuf->used, buf, len); if (aligned_len == wbuf->avail) { dbg_io("flush jhead %s wbuf to LEB %d:%d", dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs); err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs, wbuf->size, wbuf->dtype); if (err) goto out; spin_lock(&wbuf->lock); wbuf->offs += wbuf->size; if (c->leb_size - wbuf->offs >= c->max_write_size) wbuf->size = c->max_write_size; else wbuf->size = c->leb_size - wbuf->offs; wbuf->avail = wbuf->size; wbuf->used = 0; wbuf->next_ino = 0; spin_unlock(&wbuf->lock); } else { spin_lock(&wbuf->lock); wbuf->avail -= aligned_len; wbuf->used += aligned_len; spin_unlock(&wbuf->lock); } goto exit; } written = 0; if (wbuf->used) { /* * The node is large enough and does not fit entirely within * current available space. We have to fill and flush * write-buffer and switch to the next max. write unit. 
*/ dbg_io("flush jhead %s wbuf to LEB %d:%d", dbg_jhead(wbuf->jhead), wbuf->lnum, wbuf->offs); memcpy(wbuf->buf + wbuf->used, buf, wbuf->avail); err = ubi_leb_write(c->ubi, wbuf->lnum, wbuf->buf, wbuf->offs, wbuf->size, wbuf->dtype); if (err) goto out; wbuf->offs += wbuf->size; len -= wbuf->avail; aligned_len -= wbuf->avail; written += wbuf->avail; } else if (wbuf->offs & (c->max_write_size - 1)) { /* * The write-buffer offset is not aligned to * @c->max_write_size and @wbuf->size is less than * @c->max_write_size. Write @wbuf->size bytes to make sure the * following writes are done in optimal @c->max_write_size * chunks. */ dbg_io("write %d bytes to LEB %d:%d", wbuf->size, wbuf->lnum, wbuf->offs); err = ubi_leb_write(c->ubi, wbuf->lnum, buf, wbuf->offs, wbuf->size, wbuf->dtype); if (err) goto out; wbuf->offs += wbuf->size; len -= wbuf->size; aligned_len -= wbuf->size; written += wbuf->size; } /* * The remaining data may take more whole max. write units, so write the * remains multiple to max. write unit size directly to the flash media. * We align node length to 8-byte boundary because we anyway flash wbuf * if the remaining space is less than 8 bytes. */ n = aligned_len >> c->max_write_shift; if (n) { n <<= c->max_write_shift; dbg_io("write %d bytes to LEB %d:%d", n, wbuf->lnum, wbuf->offs); err = ubi_leb_write(c->ubi, wbuf->lnum, buf + written, wbuf->offs, n, wbuf->dtype); if (err) goto out; wbuf->offs += n; aligned_len -= n; len -= n; written += n; } spin_lock(&wbuf->lock); if (aligned_len) /* * And now we have what's left and what does not take whole * max. write unit, so write it to the write-buffer and we are * done. */ memcpy(wbuf->buf, buf + written, len); if (c->leb_size - wbuf->offs >= c->max_write_size) wbuf->size = c->max_write_size; else wbuf->size = c->leb_size - wbuf->offs; wbuf->avail = wbuf->size - aligned_len; wbuf->used = aligned_len; wbuf->next_ino = 0; spin_unlock(&wbuf->lock); exit: if (wbuf->sync_callback) { int free = c->leb_size - wbuf->offs - wbuf->used; err = wbuf->sync_callback(c, wbuf->lnum, free, 0); if (err) goto out; } if (wbuf->used) new_wbuf_timer_nolock(wbuf); return 0; out: ubifs_err("cannot write %d bytes to LEB %d:%d, error %d", len, wbuf->lnum, wbuf->offs, err); dbg_dump_node(c, buf); dbg_dump_stack(); dbg_dump_leb(c, wbuf->lnum); return err; } /** * ubifs_write_node - write node to the media. * @c: UBIFS file-system description object * @buf: the node to write * @len: node length * @lnum: logical eraseblock number * @offs: offset within the logical eraseblock * @dtype: node life-time hint (%UBI_LONGTERM, %UBI_SHORTTERM, %UBI_UNKNOWN) * * This function automatically fills node magic number, assigns sequence * number, and calculates node CRC checksum. The length of the @buf buffer has * to be aligned to the minimal I/O unit size. This function automatically * appends padding node and padding bytes if needed. Returns zero in case of * success and a negative error code in case of failure. 
*/ int ubifs_write_node(struct ubifs_info *c, void *buf, int len, int lnum, int offs, int dtype) { int err, buf_len = ALIGN(len, c->min_io_size); dbg_io("LEB %d:%d, %s, length %d (aligned %d)", lnum, offs, dbg_ntype(((struct ubifs_ch *)buf)->node_type), len, buf_len); ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0); ubifs_assert(offs % c->min_io_size == 0 && offs < c->leb_size); ubifs_assert(!c->ro_media && !c->ro_mount); ubifs_assert(!c->space_fixup); if (c->ro_error) return -EROFS; ubifs_prepare_node(c, buf, len, 1); err = ubi_leb_write(c->ubi, lnum, buf, offs, buf_len, dtype); if (err) { ubifs_err("cannot write %d bytes to LEB %d:%d, error %d", buf_len, lnum, offs, err); dbg_dump_node(c, buf); dbg_dump_stack(); } return err; } /** * ubifs_read_node_wbuf - read node from the media or write-buffer. * @wbuf: wbuf to check for un-written data * @buf: buffer to read to * @type: node type * @len: node length * @lnum: logical eraseblock number * @offs: offset within the logical eraseblock * * This function reads a node of known type and length, checks it and stores * it in @buf. If the node partially or fully sits in the write-buffer, this * function takes data from the buffer, otherwise it reads the flash media. * Returns zero in case of success, %-EUCLEAN if CRC mismatched and a negative * error code in case of failure. */ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len, int lnum, int offs) { const struct ubifs_info *c = wbuf->c; int err, rlen, overlap; struct ubifs_ch *ch = buf; dbg_io("LEB %d:%d, %s, length %d, jhead %s", lnum, offs, dbg_ntype(type), len, dbg_jhead(wbuf->jhead)); ubifs_assert(wbuf && lnum >= 0 && lnum < c->leb_cnt && offs >= 0); ubifs_assert(!(offs & 7) && offs < c->leb_size); ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT); spin_lock(&wbuf->lock); overlap = (lnum == wbuf->lnum && offs + len > wbuf->offs); if (!overlap) { /* We may safely unlock the write-buffer and read the data */ spin_unlock(&wbuf->lock); return ubifs_read_node(c, buf, type, len, lnum, offs); } /* Don't read under wbuf */ rlen = wbuf->offs - offs; if (rlen < 0) rlen = 0; /* Copy the rest from the write-buffer */ memcpy(buf + rlen, wbuf->buf + offs + rlen - wbuf->offs, len - rlen); spin_unlock(&wbuf->lock); if (rlen > 0) { /* Read everything that goes before write-buffer */ err = ubi_read(c->ubi, lnum, buf, offs, rlen); if (err && err != -EBADMSG) { ubifs_err("failed to read node %d from LEB %d:%d, " "error %d", type, lnum, offs, err); dbg_dump_stack(); return err; } } if (type != ch->node_type) { ubifs_err("bad node type (%d but expected %d)", ch->node_type, type); goto out; } err = ubifs_check_node(c, buf, lnum, offs, 0, 0); if (err) { ubifs_err("expected node type %d", type); return err; } rlen = le32_to_cpu(ch->len); if (rlen != len) { ubifs_err("bad node length %d, expected %d", rlen, len); goto out; } return 0; out: ubifs_err("bad node at LEB %d:%d", lnum, offs); dbg_dump_node(c, buf); dbg_dump_stack(); return -EINVAL; } /** * ubifs_read_node - read node. * @c: UBIFS file-system description object * @buf: buffer to read to * @type: node type * @len: node length (not aligned) * @lnum: logical eraseblock number * @offs: offset within the logical eraseblock * * This function reads a node of known type and length, checks it and * stores it in @buf. Returns zero in case of success, %-EUCLEAN if CRC mismatched * and a negative error code in case of failure.
*/ int ubifs_read_node(const struct ubifs_info *c, void *buf, int type, int len, int lnum, int offs) { int err, l; struct ubifs_ch *ch = buf; dbg_io("LEB %d:%d, %s, length %d", lnum, offs, dbg_ntype(type), len); ubifs_assert(lnum >= 0 && lnum < c->leb_cnt && offs >= 0); ubifs_assert(len >= UBIFS_CH_SZ && offs + len <= c->leb_size); ubifs_assert(!(offs & 7) && offs < c->leb_size); ubifs_assert(type >= 0 && type < UBIFS_NODE_TYPES_CNT); err = ubi_read(c->ubi, lnum, buf, offs, len); if (err && err != -EBADMSG) { ubifs_err("cannot read node %d from LEB %d:%d, error %d", type, lnum, offs, err); return err; } if (type != ch->node_type) { ubifs_err("bad node type (%d but expected %d)", ch->node_type, type); goto out; } err = ubifs_check_node(c, buf, lnum, offs, 0, 0); if (err) { ubifs_err("expected node type %d", type); return err; } l = le32_to_cpu(ch->len); if (l != len) { ubifs_err("bad node length %d, expected %d", l, len); goto out; } return 0; out: ubifs_err("bad node at LEB %d:%d, LEB mapping status %d", lnum, offs, ubi_is_mapped(c->ubi, lnum)); dbg_dump_node(c, buf); dbg_dump_stack(); return -EINVAL; } /** * ubifs_wbuf_init - initialize write-buffer. * @c: UBIFS file-system description object * @wbuf: write-buffer to initialize * * This function initializes the write-buffer. Returns zero in case of success * and %-ENOMEM in case of failure. */ int ubifs_wbuf_init(struct ubifs_info *c, struct ubifs_wbuf *wbuf) { size_t size; wbuf->buf = kmalloc(c->max_write_size, GFP_KERNEL); if (!wbuf->buf) return -ENOMEM; size = (c->max_write_size / UBIFS_CH_SZ + 1) * sizeof(ino_t); wbuf->inodes = kmalloc(size, GFP_KERNEL); if (!wbuf->inodes) { kfree(wbuf->buf); wbuf->buf = NULL; return -ENOMEM; } wbuf->used = 0; wbuf->lnum = wbuf->offs = -1; /* * If the LEB starts at the max. write size aligned address, then * write-buffer size has to be set to @c->max_write_size. Otherwise, * set it to something smaller so that it ends at the closest max. * write size boundary. */ size = c->max_write_size - (c->leb_start % c->max_write_size); wbuf->avail = wbuf->size = size; wbuf->dtype = UBI_UNKNOWN; wbuf->sync_callback = NULL; mutex_init(&wbuf->io_mutex); spin_lock_init(&wbuf->lock); wbuf->c = c; wbuf->next_ino = 0; hrtimer_init(&wbuf->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); wbuf->timer.function = wbuf_timer_callback_nolock; wbuf->softlimit = ktime_set(WBUF_TIMEOUT_SOFTLIMIT, 0); wbuf->delta = WBUF_TIMEOUT_HARDLIMIT - WBUF_TIMEOUT_SOFTLIMIT; wbuf->delta *= 1000000000ULL; ubifs_assert(wbuf->delta <= ULONG_MAX); return 0; } /** * ubifs_wbuf_add_ino_nolock - add an inode number into the wbuf inode array. * @wbuf: the write-buffer where to add * @inum: the inode number * * This function adds an inode number to the inode array of the write-buffer. */ void ubifs_wbuf_add_ino_nolock(struct ubifs_wbuf *wbuf, ino_t inum) { if (!wbuf->buf) /* NOR flash or something similar */ return; spin_lock(&wbuf->lock); if (wbuf->used) wbuf->inodes[wbuf->next_ino++] = inum; spin_unlock(&wbuf->lock); } /** * wbuf_has_ino - returns whether the wbuf contains data from the inode. * @wbuf: the write-buffer * @inum: the inode number * * This function returns %1 if the write-buffer contains some data from the * given inode, otherwise it returns %0.
*/ static int wbuf_has_ino(struct ubifs_wbuf *wbuf, ino_t inum) { int i, ret = 0; spin_lock(&wbuf->lock); for (i = 0; i < wbuf->next_ino; i++) if (inum == wbuf->inodes[i]) { ret = 1; break; } spin_unlock(&wbuf->lock); return ret; } /** * ubifs_sync_wbufs_by_inode - synchronize write-buffers for an inode. * @c: UBIFS file-system description object * @inode: inode to synchronize * * This function synchronizes write-buffers which contain nodes belonging to * @inode. Returns zero in case of success and a negative error code in case of * failure. */ int ubifs_sync_wbufs_by_inode(struct ubifs_info *c, struct inode *inode) { int i, err = 0; for (i = 0; i < c->jhead_cnt; i++) { struct ubifs_wbuf *wbuf = &c->jheads[i].wbuf; if (i == GCHD) /* * GC head is special, do not look at it. Even if the * head contains something related to this inode, it is * a _copy_ of corresponding on-flash node which sits * somewhere else. */ continue; if (!wbuf_has_ino(wbuf, inode->i_ino)) continue; mutex_lock_nested(&wbuf->io_mutex, wbuf->jhead); if (wbuf_has_ino(wbuf, inode->i_ino)) err = ubifs_wbuf_sync_nolock(wbuf); mutex_unlock(&wbuf->io_mutex); if (err) { ubifs_ro_mode(c, err); return err; } } return 0; }
gpl-2.0
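The subtle part of the UBIFS excerpt above is how ubifs_read_node_wbuf() splits one node read between the flash medium and the write-buffer: everything below wbuf->offs must come from flash (rlen in the driver), the rest is memcpy'd out of the buffer. A userspace sketch of just that arithmetic; the names and demo values are hypothetical, not UBIFS API.

#include <assert.h>
#include <stddef.h>

struct split { size_t from_flash; size_t from_wbuf; };

/*
 * A node lives at @offs with @len bytes; the write-buffer holds data
 * from @wbuf_offs onward.  Bytes below the buffer come from flash,
 * the remainder is copied from the buffer.
 */
static struct split split_read(size_t offs, size_t len, size_t wbuf_offs)
{
	struct split s;

	s.from_flash = wbuf_offs > offs ? wbuf_offs - offs : 0;
	if (s.from_flash > len)
		s.from_flash = len;
	s.from_wbuf = len - s.from_flash;
	return s;
}

int main(void)
{
	/* node at offset 96, 64 bytes long; buffer starts at offset 128 */
	struct split s = split_read(96, 64, 128);

	assert(s.from_flash == 32 && s.from_wbuf == 32);
	return 0;
}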
talnoah/m7-gpe
tools/perf/util/map.c
3638
15967
#include "symbol.h" #include <errno.h> #include <inttypes.h> #include <limits.h> #include <stdlib.h> #include <string.h> #include <stdio.h> #include <unistd.h> #include "map.h" const char *map_type__name[MAP__NR_TYPES] = { [MAP__FUNCTION] = "Functions", [MAP__VARIABLE] = "Variables", }; static inline int is_anon_memory(const char *filename) { return strcmp(filename, "//anon") == 0; } static inline int is_no_dso_memory(const char *filename) { return !strcmp(filename, "[stack]") || !strcmp(filename, "[vdso]") || !strcmp(filename, "[heap]"); } void map__init(struct map *self, enum map_type type, u64 start, u64 end, u64 pgoff, struct dso *dso) { self->type = type; self->start = start; self->end = end; self->pgoff = pgoff; self->dso = dso; self->map_ip = map__map_ip; self->unmap_ip = map__unmap_ip; RB_CLEAR_NODE(&self->rb_node); self->groups = NULL; self->referenced = false; self->erange_warned = false; } struct map *map__new(struct list_head *dsos__list, u64 start, u64 len, u64 pgoff, u32 pid, char *filename, enum map_type type) { struct map *self = malloc(sizeof(*self)); if (self != NULL) { char newfilename[PATH_MAX]; struct dso *dso; int anon, no_dso; anon = is_anon_memory(filename); no_dso = is_no_dso_memory(filename); if (anon) { snprintf(newfilename, sizeof(newfilename), "/tmp/perf-%d.map", pid); filename = newfilename; } dso = __dsos__findnew(dsos__list, filename); if (dso == NULL) goto out_delete; map__init(self, type, start, start + len, pgoff, dso); if (anon || no_dso) { self->map_ip = self->unmap_ip = identity__map_ip; /* * Set memory without DSO as loaded. All map__find_* * functions still return NULL, and we avoid the * unnecessary map__load warning. */ if (no_dso) dso__set_loaded(dso, self->type); } } return self; out_delete: free(self); return NULL; } void map__delete(struct map *self) { free(self); } void map__fixup_start(struct map *self) { struct rb_root *symbols = &self->dso->symbols[self->type]; struct rb_node *nd = rb_first(symbols); if (nd != NULL) { struct symbol *sym = rb_entry(nd, struct symbol, rb_node); self->start = sym->start; } } void map__fixup_end(struct map *self) { struct rb_root *symbols = &self->dso->symbols[self->type]; struct rb_node *nd = rb_last(symbols); if (nd != NULL) { struct symbol *sym = rb_entry(nd, struct symbol, rb_node); self->end = sym->end; } } #define DSO__DELETED "(deleted)" int map__load(struct map *self, symbol_filter_t filter) { const char *name = self->dso->long_name; int nr; if (dso__loaded(self->dso, self->type)) return 0; nr = dso__load(self->dso, self, filter); if (nr < 0) { if (self->dso->has_build_id) { char sbuild_id[BUILD_ID_SIZE * 2 + 1]; build_id__sprintf(self->dso->build_id, sizeof(self->dso->build_id), sbuild_id); pr_warning("%s with build id %s not found", name, sbuild_id); } else pr_warning("Failed to open %s", name); pr_warning(", continuing without symbols\n"); return -1; } else if (nr == 0) { const size_t len = strlen(name); const size_t real_len = len - sizeof(DSO__DELETED); if (len > sizeof(DSO__DELETED) && strcmp(name + real_len + 1, DSO__DELETED) == 0) { pr_warning("%.*s was updated (is prelink enabled?). " "Restart the long running apps that use it!\n", (int)real_len, name); } else { pr_warning("no symbols found in %s, maybe install " "a debug package?\n", name); } return -1; } /* * Only applies to the kernel, as its symtabs aren't relative like the * module ones. 
*/ if (self->dso->kernel) map__reloc_vmlinux(self); return 0; } struct symbol *map__find_symbol(struct map *self, u64 addr, symbol_filter_t filter) { if (map__load(self, filter) < 0) return NULL; return dso__find_symbol(self->dso, self->type, addr); } struct symbol *map__find_symbol_by_name(struct map *self, const char *name, symbol_filter_t filter) { if (map__load(self, filter) < 0) return NULL; if (!dso__sorted_by_name(self->dso, self->type)) dso__sort_by_name(self->dso, self->type); return dso__find_symbol_by_name(self->dso, self->type, name); } struct map *map__clone(struct map *self) { struct map *map = malloc(sizeof(*self)); if (!map) return NULL; memcpy(map, self, sizeof(*self)); return map; } int map__overlap(struct map *l, struct map *r) { if (l->start > r->start) { struct map *t = l; l = r; r = t; } if (l->end > r->start) return 1; return 0; } size_t map__fprintf(struct map *self, FILE *fp) { return fprintf(fp, " %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s\n", self->start, self->end, self->pgoff, self->dso->name); } size_t map__fprintf_dsoname(struct map *map, FILE *fp) { const char *dsoname = "[unknown]"; /* fallback when no usable dso name */ if (map && map->dso && (map->dso->name || map->dso->long_name)) { if (symbol_conf.show_kernel_path && map->dso->long_name) dsoname = map->dso->long_name; else if (map->dso->name) dsoname = map->dso->name; } return fprintf(fp, "%s", dsoname); } /* * objdump wants/reports absolute IPs for ET_EXEC, and RIPs for ET_DYN. * map->dso->adjust_symbols==1 for ET_EXEC-like cases. */ u64 map__rip_2objdump(struct map *map, u64 rip) { u64 addr = map->dso->adjust_symbols ? map->unmap_ip(map, rip) : /* RIP -> IP */ rip; return addr; } u64 map__objdump_2ip(struct map *map, u64 addr) { u64 ip = map->dso->adjust_symbols ? addr : map->unmap_ip(map, addr); /* RIP -> IP */ return ip; } void map_groups__init(struct map_groups *mg) { int i; for (i = 0; i < MAP__NR_TYPES; ++i) { mg->maps[i] = RB_ROOT; INIT_LIST_HEAD(&mg->removed_maps[i]); } mg->machine = NULL; } static void maps__delete(struct rb_root *maps) { struct rb_node *next = rb_first(maps); while (next) { struct map *pos = rb_entry(next, struct map, rb_node); next = rb_next(&pos->rb_node); rb_erase(&pos->rb_node, maps); map__delete(pos); } } static void maps__delete_removed(struct list_head *maps) { struct map *pos, *n; list_for_each_entry_safe(pos, n, maps, node) { list_del(&pos->node); map__delete(pos); } } void map_groups__exit(struct map_groups *mg) { int i; for (i = 0; i < MAP__NR_TYPES; ++i) { maps__delete(&mg->maps[i]); maps__delete_removed(&mg->removed_maps[i]); } } void map_groups__flush(struct map_groups *mg) { int type; for (type = 0; type < MAP__NR_TYPES; type++) { struct rb_root *root = &mg->maps[type]; struct rb_node *next = rb_first(root); while (next) { struct map *pos = rb_entry(next, struct map, rb_node); next = rb_next(&pos->rb_node); rb_erase(&pos->rb_node, root); /* * We may have references to this map, for * instance in some hist_entry instances, so * just move them to a separate list.
*/ list_add_tail(&pos->node, &mg->removed_maps[pos->type]); } } } struct symbol *map_groups__find_symbol(struct map_groups *mg, enum map_type type, u64 addr, struct map **mapp, symbol_filter_t filter) { struct map *map = map_groups__find(mg, type, addr); if (map != NULL) { if (mapp != NULL) *mapp = map; return map__find_symbol(map, map->map_ip(map, addr), filter); } return NULL; } struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg, enum map_type type, const char *name, struct map **mapp, symbol_filter_t filter) { struct rb_node *nd; for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) { struct map *pos = rb_entry(nd, struct map, rb_node); struct symbol *sym = map__find_symbol_by_name(pos, name, filter); if (sym == NULL) continue; if (mapp != NULL) *mapp = pos; return sym; } return NULL; } size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type, int verbose, FILE *fp) { size_t printed = fprintf(fp, "%s:\n", map_type__name[type]); struct rb_node *nd; for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) { struct map *pos = rb_entry(nd, struct map, rb_node); printed += fprintf(fp, "Map:"); printed += map__fprintf(pos, fp); if (verbose > 2) { printed += dso__fprintf(pos->dso, type, fp); printed += fprintf(fp, "--\n"); } } return printed; } size_t map_groups__fprintf_maps(struct map_groups *mg, int verbose, FILE *fp) { size_t printed = 0, i; for (i = 0; i < MAP__NR_TYPES; ++i) printed += __map_groups__fprintf_maps(mg, i, verbose, fp); return printed; } static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg, enum map_type type, int verbose, FILE *fp) { struct map *pos; size_t printed = 0; list_for_each_entry(pos, &mg->removed_maps[type], node) { printed += fprintf(fp, "Map:"); printed += map__fprintf(pos, fp); if (verbose > 1) { printed += dso__fprintf(pos->dso, type, fp); printed += fprintf(fp, "--\n"); } } return printed; } static size_t map_groups__fprintf_removed_maps(struct map_groups *mg, int verbose, FILE *fp) { size_t printed = 0, i; for (i = 0; i < MAP__NR_TYPES; ++i) printed += __map_groups__fprintf_removed_maps(mg, i, verbose, fp); return printed; } size_t map_groups__fprintf(struct map_groups *mg, int verbose, FILE *fp) { size_t printed = map_groups__fprintf_maps(mg, verbose, fp); printed += fprintf(fp, "Removed maps:\n"); return printed + map_groups__fprintf_removed_maps(mg, verbose, fp); } int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map, int verbose, FILE *fp) { struct rb_root *root = &mg->maps[map->type]; struct rb_node *next = rb_first(root); int err = 0; while (next) { struct map *pos = rb_entry(next, struct map, rb_node); next = rb_next(&pos->rb_node); if (!map__overlap(pos, map)) continue; if (verbose >= 2) { fputs("overlapping maps:\n", fp); map__fprintf(map, fp); map__fprintf(pos, fp); } rb_erase(&pos->rb_node, root); /* * Now check if we need to create new maps for areas not * overlapped by the new map: */ if (map->start > pos->start) { struct map *before = map__clone(pos); if (before == NULL) { err = -ENOMEM; goto move_map; } before->end = map->start - 1; map_groups__insert(mg, before); if (verbose >= 2) map__fprintf(before, fp); } if (map->end < pos->end) { struct map *after = map__clone(pos); if (after == NULL) { err = -ENOMEM; goto move_map; } after->start = map->end + 1; map_groups__insert(mg, after); if (verbose >= 2) map__fprintf(after, fp); } move_map: /* * If we have references, just move them to a separate list. 
*/ if (pos->referenced) list_add_tail(&pos->node, &mg->removed_maps[map->type]); else map__delete(pos); if (err) return err; } return 0; } /* * XXX This should not really _copy_ the maps, but refcount them. */ int map_groups__clone(struct map_groups *mg, struct map_groups *parent, enum map_type type) { struct rb_node *nd; for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) { struct map *map = rb_entry(nd, struct map, rb_node); struct map *new = map__clone(map); if (new == NULL) return -ENOMEM; map_groups__insert(mg, new); } return 0; } static u64 map__reloc_map_ip(struct map *map, u64 ip) { return ip + (s64)map->pgoff; } static u64 map__reloc_unmap_ip(struct map *map, u64 ip) { return ip - (s64)map->pgoff; } void map__reloc_vmlinux(struct map *self) { struct kmap *kmap = map__kmap(self); s64 reloc; if (!kmap->ref_reloc_sym || !kmap->ref_reloc_sym->unrelocated_addr) return; reloc = (kmap->ref_reloc_sym->unrelocated_addr - kmap->ref_reloc_sym->addr); if (!reloc) return; self->map_ip = map__reloc_map_ip; self->unmap_ip = map__reloc_unmap_ip; self->pgoff = reloc; } void maps__insert(struct rb_root *maps, struct map *map) { struct rb_node **p = &maps->rb_node; struct rb_node *parent = NULL; const u64 ip = map->start; struct map *m; while (*p != NULL) { parent = *p; m = rb_entry(parent, struct map, rb_node); if (ip < m->start) p = &(*p)->rb_left; else p = &(*p)->rb_right; } rb_link_node(&map->rb_node, parent, p); rb_insert_color(&map->rb_node, maps); } void maps__remove(struct rb_root *self, struct map *map) { rb_erase(&map->rb_node, self); } struct map *maps__find(struct rb_root *maps, u64 ip) { struct rb_node **p = &maps->rb_node; struct rb_node *parent = NULL; struct map *m; while (*p != NULL) { parent = *p; m = rb_entry(parent, struct map, rb_node); if (ip < m->start) p = &(*p)->rb_left; else if (ip > m->end) p = &(*p)->rb_right; else return m; } return NULL; } int machine__init(struct machine *self, const char *root_dir, pid_t pid) { map_groups__init(&self->kmaps); RB_CLEAR_NODE(&self->rb_node); INIT_LIST_HEAD(&self->user_dsos); INIT_LIST_HEAD(&self->kernel_dsos); self->threads = RB_ROOT; INIT_LIST_HEAD(&self->dead_threads); self->last_match = NULL; self->kmaps.machine = self; self->pid = pid; self->root_dir = strdup(root_dir); return self->root_dir == NULL ?
-ENOMEM : 0; } static void dsos__delete(struct list_head *self) { struct dso *pos, *n; list_for_each_entry_safe(pos, n, self, node) { list_del(&pos->node); dso__delete(pos); } } void machine__exit(struct machine *self) { map_groups__exit(&self->kmaps); dsos__delete(&self->user_dsos); dsos__delete(&self->kernel_dsos); free(self->root_dir); self->root_dir = NULL; } void machine__delete(struct machine *self) { machine__exit(self); free(self); } struct machine *machines__add(struct rb_root *self, pid_t pid, const char *root_dir) { struct rb_node **p = &self->rb_node; struct rb_node *parent = NULL; struct machine *pos, *machine = malloc(sizeof(*machine)); if (!machine) return NULL; if (machine__init(machine, root_dir, pid) != 0) { free(machine); return NULL; } while (*p != NULL) { parent = *p; pos = rb_entry(parent, struct machine, rb_node); if (pid < pos->pid) p = &(*p)->rb_left; else p = &(*p)->rb_right; } rb_link_node(&machine->rb_node, parent, p); rb_insert_color(&machine->rb_node, self); return machine; } struct machine *machines__find(struct rb_root *self, pid_t pid) { struct rb_node **p = &self->rb_node; struct rb_node *parent = NULL; struct machine *machine; struct machine *default_machine = NULL; while (*p != NULL) { parent = *p; machine = rb_entry(parent, struct machine, rb_node); if (pid < machine->pid) p = &(*p)->rb_left; else if (pid > machine->pid) p = &(*p)->rb_right; else return machine; if (!machine->pid) default_machine = machine; } return default_machine; } struct machine *machines__findnew(struct rb_root *self, pid_t pid) { char path[PATH_MAX]; const char *root_dir; struct machine *machine = machines__find(self, pid); if (!machine || machine->pid != pid) { if (pid == HOST_KERNEL_ID || pid == DEFAULT_GUEST_KERNEL_ID) root_dir = ""; else { if (!symbol_conf.guestmount) goto out; sprintf(path, "%s/%d", symbol_conf.guestmount, pid); if (access(path, R_OK)) { pr_err("Can't access file %s\n", path); goto out; } root_dir = path; } machine = machines__add(self, pid, root_dir); } out: return machine; } void machines__process(struct rb_root *self, machine__process_t process, void *data) { struct rb_node *nd; for (nd = rb_first(self); nd; nd = rb_next(nd)) { struct machine *pos = rb_entry(nd, struct machine, rb_node); process(pos, data); } } char *machine__mmap_name(struct machine *self, char *bf, size_t size) { if (machine__is_host(self)) snprintf(bf, size, "[%s]", "kernel.kallsyms"); else if (machine__is_default_guest(self)) snprintf(bf, size, "[%s]", "guest.kernel.kallsyms"); else snprintf(bf, size, "[%s.%d]", "guest.kernel.kallsyms", self->pid); return bf; }
gpl-2.0
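map_groups__fixup_overlappings() above clips an existing map against a newly inserted one, cloning at most a "before" and an "after" remainder. The interval arithmetic in isolation (hypothetical struct, not the perf API; ranges are inclusive, like struct map's start/end):

#include <assert.h>
#include <stdint.h>

struct range { uint64_t start, end; };	/* inclusive bounds */

/*
 * Clip old mapping @pos against new mapping @map.  Returns a bitmask:
 * bit 0 set if a "before" piece survives, bit 1 if an "after" piece
 * does, mirroring the two map__clone() calls above.
 */
static int fixup(struct range pos, struct range map,
		 struct range *before, struct range *after)
{
	int pieces = 0;

	if (map.start > pos.start) {
		before->start = pos.start;
		before->end = map.start - 1;
		pieces |= 1;
	}
	if (map.end < pos.end) {
		after->start = map.end + 1;
		after->end = pos.end;
		pieces |= 2;
	}
	return pieces;
}

int main(void)
{
	struct range before, after;
	struct range pos = { 0x1000, 0x4fff };
	struct range map = { 0x2000, 0x2fff };

	assert(fixup(pos, map, &before, &after) == 3);
	assert(before.end == 0x1fff && after.start == 0x3000);
	return 0;
}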
hash07/Apollo_X
arch/cris/kernel/profile.c
4150
1912
#include <linux/init.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/proc_fs.h> #include <linux/slab.h> #include <linux/types.h> #include <asm/ptrace.h> #include <asm/uaccess.h> #define SAMPLE_BUFFER_SIZE 8192 static char *sample_buffer; static char *sample_buffer_pos; static int prof_running = 0; void cris_profile_sample(struct pt_regs *regs) { if (!prof_running) return; if (user_mode(regs)) *(unsigned int*)sample_buffer_pos = current->pid; else *(unsigned int*)sample_buffer_pos = 0; *(unsigned int *)(sample_buffer_pos + 4) = instruction_pointer(regs); sample_buffer_pos += 8; if (sample_buffer_pos == sample_buffer + SAMPLE_BUFFER_SIZE) sample_buffer_pos = sample_buffer; } static ssize_t read_cris_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos) { unsigned long p = *ppos; ssize_t ret; ret = simple_read_from_buffer(buf, count, ppos, sample_buffer, SAMPLE_BUFFER_SIZE); if (ret < 0) return ret; memset(sample_buffer + p, 0, ret); return ret; } static ssize_t write_cris_profile(struct file *file, const char __user *buf, size_t count, loff_t *ppos) { sample_buffer_pos = sample_buffer; memset(sample_buffer, 0, SAMPLE_BUFFER_SIZE); return count < SAMPLE_BUFFER_SIZE ? count : SAMPLE_BUFFER_SIZE; } static const struct file_operations cris_proc_profile_operations = { .read = read_cris_profile, .write = write_cris_profile, .llseek = default_llseek, }; static int __init init_cris_profile(void) { struct proc_dir_entry *entry; sample_buffer = kmalloc(SAMPLE_BUFFER_SIZE, GFP_KERNEL); if (!sample_buffer) { return -ENOMEM; } sample_buffer_pos = sample_buffer; entry = proc_create("system_profile", S_IWUSR | S_IRUGO, NULL, &cris_proc_profile_operations); if (entry) { proc_set_size(entry, SAMPLE_BUFFER_SIZE); } prof_running = 1; return 0; } __initcall(init_cris_profile);
gpl-2.0
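cris_profile_sample() above writes fixed 8-byte records (pid, then program counter) into a flat buffer and wraps to the start when it reaches the end. The same ring discipline as a self-contained userspace sketch; the buffer size and names are made up for the demo.

#include <assert.h>
#include <stdint.h>
#include <string.h>

#define BUF_SIZE 64			/* must be a multiple of 8 */
static char buf[BUF_SIZE];
static char *pos = buf;

/* Store one 8-byte sample: pid in the first word, pc in the second. */
static void sample(uint32_t pid, uint32_t pc)
{
	memcpy(pos, &pid, 4);
	memcpy(pos + 4, &pc, 4);
	pos += 8;
	if (pos == buf + BUF_SIZE)	/* wrap, like the driver */
		pos = buf;
}

int main(void)
{
	int i;

	for (i = 0; i < 9; i++)		/* the 9th sample overwrites the 1st */
		sample(100 + i, 0x8000 + i);
	assert(pos == buf + 8);
	return 0;
}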
slukk/mako_msm
arch/m68k/atari/ataints.c
4406
6330
/* * arch/m68k/atari/ataints.c -- Atari Linux interrupt handling code * * 5/2/94 Roman Hodek: * Added support for TT interrupts; setup for TT SCU (may someone has * twiddled there and we won't get the right interrupts :-() * * Major change: The device-independent code in m68k/ints.c didn't know * about non-autovec ints yet. It hardcoded the number of possible ints to * 7 (IRQ1...IRQ7). But the Atari has lots of non-autovec ints! I made the * number of possible ints a constant defined in interrupt.h, which is * 47 for the Atari. So we can call request_irq() for all Atari interrupts * just the normal way. Additionally, all vectors >= 48 are initialized to * call trap() instead of inthandler(). This must be changed here, too. * * 1995-07-16 Lars Brinkhoff <f93labr@dd.chalmers.se>: * Corrected a bug in atari_add_isr() which rejected all SCC * interrupt sources if there were no TT MFP! * * 12/13/95: New interface functions atari_level_triggered_int() and * atari_register_vme_int() as support for level triggered VME interrupts. * * 02/12/96: (Roman) * Total rewrite of Atari interrupt handling, for new scheme see comments * below. * * 1996-09-03 lars brinkhoff <f93labr@dd.chalmers.se>: * Added new function atari_unregister_vme_int(), and * modified atari_register_vme_int() as well as IS_VALID_INTNO() * to work with it. * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. * */ #include <linux/types.h> #include <linux/kernel.h> #include <linux/kernel_stat.h> #include <linux/init.h> #include <linux/seq_file.h> #include <linux/module.h> #include <asm/traps.h> #include <asm/atarihw.h> #include <asm/atariints.h> #include <asm/atari_stdma.h> #include <asm/irq.h> #include <asm/entry.h> /* * Atari interrupt handling scheme: * -------------------------------- * * All interrupt source have an internal number (defined in * <asm/atariints.h>): Autovector interrupts are 1..7, then follow ST-MFP, * TT-MFP, SCC, and finally VME interrupts. Vector numbers for the latter can * be allocated by atari_register_vme_int(). */ /* * Bitmap for free interrupt vector numbers * (new vectors starting from 0x70 can be allocated by * atari_register_vme_int()) */ static int free_vme_vec_bitmap; /* GK: * HBL IRQ handler for Falcon. Nobody needs it :-) * ++andreas: raise ipl to disable further HBLANK interrupts. 
*/ asmlinkage void falcon_hblhandler(void); asm(".text\n" __ALIGN_STR "\n\t" "falcon_hblhandler:\n\t" "orw #0x200,%sp@\n\t" /* set saved ipl to 2 */ "rte"); extern void atari_microwire_cmd(int cmd); static unsigned int atari_irq_startup(struct irq_data *data) { unsigned int irq = data->irq; m68k_irq_startup(data); atari_turnon_irq(irq); atari_enable_irq(irq); return 0; } static void atari_irq_shutdown(struct irq_data *data) { unsigned int irq = data->irq; atari_disable_irq(irq); atari_turnoff_irq(irq); m68k_irq_shutdown(data); if (irq == IRQ_AUTO_4) vectors[VEC_INT4] = falcon_hblhandler; } static void atari_irq_enable(struct irq_data *data) { atari_enable_irq(data->irq); } static void atari_irq_disable(struct irq_data *data) { atari_disable_irq(data->irq); } static struct irq_chip atari_irq_chip = { .name = "atari", .irq_startup = atari_irq_startup, .irq_shutdown = atari_irq_shutdown, .irq_enable = atari_irq_enable, .irq_disable = atari_irq_disable, }; /* * void atari_init_IRQ (void) * * Parameters: None * * Returns: Nothing * * This function should be called during kernel startup to initialize * the atari IRQ handling routines. */ void __init atari_init_IRQ(void) { m68k_setup_user_interrupt(VEC_USER, NUM_ATARI_SOURCES - IRQ_USER); m68k_setup_irq_controller(&atari_irq_chip, handle_simple_irq, 1, NUM_ATARI_SOURCES - 1); /* Initialize the MFP(s) */ #ifdef ATARI_USE_SOFTWARE_EOI st_mfp.vec_adr = 0x48; /* Software EOI-Mode */ #else st_mfp.vec_adr = 0x40; /* Automatic EOI-Mode */ #endif st_mfp.int_en_a = 0x00; /* turn off MFP-Ints */ st_mfp.int_en_b = 0x00; st_mfp.int_mk_a = 0xff; /* no Masking */ st_mfp.int_mk_b = 0xff; if (ATARIHW_PRESENT(TT_MFP)) { #ifdef ATARI_USE_SOFTWARE_EOI tt_mfp.vec_adr = 0x58; /* Software EOI-Mode */ #else tt_mfp.vec_adr = 0x50; /* Automatic EOI-Mode */ #endif tt_mfp.int_en_a = 0x00; /* turn off MFP-Ints */ tt_mfp.int_en_b = 0x00; tt_mfp.int_mk_a = 0xff; /* no Masking */ tt_mfp.int_mk_b = 0xff; } if (ATARIHW_PRESENT(SCC) && !atari_SCC_reset_done) { atari_scc.cha_a_ctrl = 9; MFPDELAY(); atari_scc.cha_a_ctrl = (char) 0xc0; /* hardware reset */ } if (ATARIHW_PRESENT(SCU)) { /* init the SCU if present */ tt_scu.sys_mask = 0x10; /* enable VBL (for the cursor) and * disable HSYNC interrupts (who * needs them?) MFP and SCC are * enabled in VME mask */ tt_scu.vme_mask = 0x60; /* enable MFP and SCC ints */ } else { /* If no SCU and no Hades, the HSYNC interrupt needs to be * disabled this way. (Else _inthandler in kernel/sys_call.S * gets overruns) */ vectors[VEC_INT2] = falcon_hblhandler; vectors[VEC_INT4] = falcon_hblhandler; } if (ATARIHW_PRESENT(PCM_8BIT) && ATARIHW_PRESENT(MICROWIRE)) { /* Initialize the LM1992 Sound Controller to enable the PSG sound. This is misplaced here, it should be in an atasound_init(), that doesn't exist yet. */ atari_microwire_cmd(MW_LM1992_PSG_HIGH); } stdma_init(); /* Initialize the PSG: all sounds off, both ports output */ sound_ym.rd_data_reg_sel = 7; sound_ym.wd_data = 0xff; } /* * atari_register_vme_int() returns the number of a free interrupt vector for * hardware with a programmable int vector (probably a VME board). 
*/ unsigned long atari_register_vme_int(void) { int i; for (i = 0; i < 32; i++) if ((free_vme_vec_bitmap & (1 << i)) == 0) break; if (i == 32) /* bitmap exhausted */ return 0; free_vme_vec_bitmap |= 1 << i; return VME_SOURCE_BASE + i; } EXPORT_SYMBOL(atari_register_vme_int); void atari_unregister_vme_int(unsigned long irq) { if (irq >= VME_SOURCE_BASE && irq < VME_SOURCE_BASE + VME_MAX_SOURCES) { irq -= VME_SOURCE_BASE; free_vme_vec_bitmap &= ~(1 << irq); } } EXPORT_SYMBOL(atari_unregister_vme_int);
gpl-2.0
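atari_register_vme_int() above is a first-fit allocator over a one-word bitmap; note that the exhaustion test must use the same bound as the scan loop. A compact sketch of the pattern together with the matching free path (hypothetical names, not the Atari API):

#include <assert.h>
#include <stdint.h>

static uint32_t used;			/* bit i set => slot i taken */

static int alloc_slot(void)
{
	int i;

	for (i = 0; i < 32; i++)
		if (!(used & (1u << i)))
			break;
	if (i == 32)			/* exhausted: test matches the bound */
		return -1;
	used |= 1u << i;
	return i;
}

static void free_slot(int i)
{
	if (i >= 0 && i < 32)
		used &= ~(1u << i);
}

int main(void)
{
	assert(alloc_slot() == 0);
	assert(alloc_slot() == 1);
	free_slot(0);
	assert(alloc_slot() == 0);	/* first-fit reuses the freed slot */
	return 0;
}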
skelitonlord/android_kernel_samsung_matissewifi
arch/arm/mach-exynos/setup-usb-phy.c
4662
3354
/* * Copyright (C) 2011 Samsung Electronics Co.Ltd * Author: Joonyoung Shim <jy0922.shim@samsung.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * */ #include <linux/clk.h> #include <linux/delay.h> #include <linux/err.h> #include <linux/io.h> #include <linux/platform_device.h> #include <mach/regs-pmu.h> #include <mach/regs-usb-phy.h> #include <plat/cpu.h> #include <plat/usb-phy.h> static atomic_t host_usage; static int exynos4_usb_host_phy_is_on(void) { return (readl(EXYNOS4_PHYPWR) & PHY1_STD_ANALOG_POWERDOWN) ? 0 : 1; } static int exynos4_usb_phy1_init(struct platform_device *pdev) { struct clk *otg_clk; struct clk *xusbxti_clk; u32 phyclk; u32 rstcon; int err; atomic_inc(&host_usage); otg_clk = clk_get(&pdev->dev, "otg"); if (IS_ERR(otg_clk)) { dev_err(&pdev->dev, "Failed to get otg clock\n"); return PTR_ERR(otg_clk); } err = clk_enable(otg_clk); if (err) { clk_put(otg_clk); return err; } if (exynos4_usb_host_phy_is_on()) return 0; writel(readl(S5P_USBHOST_PHY_CONTROL) | S5P_USBHOST_PHY_ENABLE, S5P_USBHOST_PHY_CONTROL); /* set clock frequency for PLL */ phyclk = readl(EXYNOS4_PHYCLK) & ~CLKSEL_MASK; xusbxti_clk = clk_get(&pdev->dev, "xusbxti"); if (xusbxti_clk && !IS_ERR(xusbxti_clk)) { switch (clk_get_rate(xusbxti_clk)) { case 12 * MHZ: phyclk |= CLKSEL_12M; break; case 24 * MHZ: phyclk |= CLKSEL_24M; break; default: case 48 * MHZ: /* default reference clock */ break; } clk_put(xusbxti_clk); } writel(phyclk, EXYNOS4_PHYCLK); /* floating prevention logic: disable */ writel((readl(EXYNOS4_PHY1CON) | FPENABLEN), EXYNOS4_PHY1CON); /* set to normal HSIC 0 and 1 of PHY1 */ writel((readl(EXYNOS4_PHYPWR) & ~PHY1_HSIC_NORMAL_MASK), EXYNOS4_PHYPWR); /* set to normal standard USB of PHY1 */ writel((readl(EXYNOS4_PHYPWR) & ~PHY1_STD_NORMAL_MASK), EXYNOS4_PHYPWR); /* reset all ports of both PHY and Link */ rstcon = readl(EXYNOS4_RSTCON) | HOST_LINK_PORT_SWRST_MASK | PHY1_SWRST_MASK; writel(rstcon, EXYNOS4_RSTCON); udelay(10); rstcon &= ~(HOST_LINK_PORT_SWRST_MASK | PHY1_SWRST_MASK); writel(rstcon, EXYNOS4_RSTCON); udelay(80); clk_disable(otg_clk); clk_put(otg_clk); return 0; } static int exynos4_usb_phy1_exit(struct platform_device *pdev) { struct clk *otg_clk; int err; if (atomic_dec_return(&host_usage) > 0) return 0; otg_clk = clk_get(&pdev->dev, "otg"); if (IS_ERR(otg_clk)) { dev_err(&pdev->dev, "Failed to get otg clock\n"); return PTR_ERR(otg_clk); } err = clk_enable(otg_clk); if (err) { clk_put(otg_clk); return err; } writel((readl(EXYNOS4_PHYPWR) | PHY1_STD_ANALOG_POWERDOWN), EXYNOS4_PHYPWR); writel(readl(S5P_USBHOST_PHY_CONTROL) & ~S5P_USBHOST_PHY_ENABLE, S5P_USBHOST_PHY_CONTROL); clk_disable(otg_clk); clk_put(otg_clk); return 0; } int s5p_usb_phy_init(struct platform_device *pdev, int type) { if (type == S5P_USB_PHY_HOST) return exynos4_usb_phy1_init(pdev); return -EINVAL; } int s5p_usb_phy_exit(struct platform_device *pdev, int type) { if (type == S5P_USB_PHY_HOST) return exynos4_usb_phy1_exit(pdev); return -EINVAL; }
gpl-2.0
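exynos4_usb_phy1_init() and exynos4_usb_phy1_exit() above guard a shared block with a use count: the atomic_inc() in init and the atomic_dec_return() test in exit ensure the PHY is only powered down when the last user goes away. A minimal C11 sketch of that pattern (single-threaded demo; the names are hypothetical and `powered` stands in for the PHYPWR register state):

#include <assert.h>
#include <stdatomic.h>
#include <stdbool.h>

static atomic_int usage;
static bool powered;

static void phy_get(void)
{
	if (atomic_fetch_add(&usage, 1) == 0)
		powered = true;		/* first user powers the block up */
}

static void phy_put(void)
{
	if (atomic_fetch_sub(&usage, 1) == 1)
		powered = false;	/* last user powers it down */
}

int main(void)
{
	phy_get();
	phy_get();
	phy_put();
	assert(powered);		/* one user still holds it */
	phy_put();
	assert(!powered);
	return 0;
}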
InsomniaAOSP/platform_kernel_htc_m7
sound/core/seq/seq.c
8246
4140
/* * ALSA sequencer main module * Copyright (c) 1998-1999 by Frank van de Pol <fvdpol@coil.demon.nl> * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/seq_kernel.h> #include "seq_clientmgr.h" #include "seq_memory.h" #include "seq_queue.h" #include "seq_lock.h" #include "seq_timer.h" #include "seq_system.h" #include "seq_info.h" #include <sound/minors.h> #include <sound/seq_device.h> #if defined(CONFIG_SND_SEQ_DUMMY_MODULE) int seq_client_load[15] = {[0] = SNDRV_SEQ_CLIENT_DUMMY, [1 ... 14] = -1}; #else int seq_client_load[15] = {[0 ... 14] = -1}; #endif int seq_default_timer_class = SNDRV_TIMER_CLASS_GLOBAL; int seq_default_timer_sclass = SNDRV_TIMER_SCLASS_NONE; int seq_default_timer_card = -1; int seq_default_timer_device = #ifdef CONFIG_SND_SEQ_HRTIMER_DEFAULT SNDRV_TIMER_GLOBAL_HRTIMER #elif defined(CONFIG_SND_SEQ_RTCTIMER_DEFAULT) SNDRV_TIMER_GLOBAL_RTC #else SNDRV_TIMER_GLOBAL_SYSTEM #endif ; int seq_default_timer_subdevice = 0; int seq_default_timer_resolution = 0; /* Hz */ MODULE_AUTHOR("Frank van de Pol <fvdpol@coil.demon.nl>, Jaroslav Kysela <perex@perex.cz>"); MODULE_DESCRIPTION("Advanced Linux Sound Architecture sequencer."); MODULE_LICENSE("GPL"); module_param_array(seq_client_load, int, NULL, 0444); MODULE_PARM_DESC(seq_client_load, "The numbers of global (system) clients to load through kmod."); module_param(seq_default_timer_class, int, 0644); MODULE_PARM_DESC(seq_default_timer_class, "The default timer class."); module_param(seq_default_timer_sclass, int, 0644); MODULE_PARM_DESC(seq_default_timer_sclass, "The default timer slave class."); module_param(seq_default_timer_card, int, 0644); MODULE_PARM_DESC(seq_default_timer_card, "The default timer card number."); module_param(seq_default_timer_device, int, 0644); MODULE_PARM_DESC(seq_default_timer_device, "The default timer device number."); module_param(seq_default_timer_subdevice, int, 0644); MODULE_PARM_DESC(seq_default_timer_subdevice, "The default timer subdevice number."); module_param(seq_default_timer_resolution, int, 0644); MODULE_PARM_DESC(seq_default_timer_resolution, "The default timer resolution in Hz."); MODULE_ALIAS_CHARDEV(CONFIG_SND_MAJOR, SNDRV_MINOR_SEQUENCER); MODULE_ALIAS("devname:snd/seq"); /* * INIT PART */ static int __init alsa_seq_init(void) { int err; snd_seq_autoload_lock(); if ((err = client_init_data()) < 0) goto error; /* init memory, room for selected events */ if ((err = snd_sequencer_memory_init()) < 0) goto error; /* init event queues */ if ((err = snd_seq_queues_init()) < 0) goto error; /* register sequencer device */ if ((err = snd_sequencer_device_init()) < 0) goto error; /* register proc interface */ if ((err = snd_seq_info_init()) < 0) goto error; /* register our internal client */ if ((err = 
snd_seq_system_client_init()) < 0) goto error; error: snd_seq_autoload_unlock(); return err; } static void __exit alsa_seq_exit(void) { /* unregister our internal client */ snd_seq_system_client_done(); /* unregister proc interface */ snd_seq_info_done(); /* delete timing queues */ snd_seq_queues_delete(); /* unregister sequencer device */ snd_sequencer_device_done(); /* release event memory */ snd_sequencer_memory_done(); } module_init(alsa_seq_init) module_exit(alsa_seq_exit)
gpl-2.0
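alsa_seq_init() above brings the sequencer up in stages and alsa_seq_exit() tears the stages down in the reverse order; note that this particular init does not unwind already-completed stages when a later one fails, it only drops the autoload lock. The sketch below shows the fuller goto-unwind variant of the same pattern; the stage functions are hypothetical.

#include <stdio.h>

static int init_a(void) { puts("init a"); return 0; }
static int init_b(void) { puts("init b"); return 0; }
static void done_b(void) { puts("done b"); }
static void done_a(void) { puts("done a"); }

static int subsys_init(void)
{
	int err;

	if ((err = init_a()) < 0)
		goto error;
	if ((err = init_b()) < 0)
		goto error_a;	/* unwind only the stages that succeeded */
	return 0;

 error_a:
	done_a();
 error:
	return err;
}

static void subsys_exit(void)
{
	/* tear down in reverse of the init order, like alsa_seq_exit() */
	done_b();
	done_a();
}

int main(void)
{
	if (subsys_init() == 0)
		subsys_exit();
	return 0;
}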
avisconti/prova
arch/arm/kernel/dma.c
9014
5794
/* * linux/arch/arm/kernel/dma.c * * Copyright (C) 1995-2000 Russell King * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * Front-end to the DMA handling. This handles the allocation/freeing * of DMA channels, and provides a unified interface to the machines * DMA facilities. */ #include <linux/module.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/errno.h> #include <linux/scatterlist.h> #include <linux/seq_file.h> #include <linux/proc_fs.h> #include <asm/dma.h> #include <asm/mach/dma.h> DEFINE_RAW_SPINLOCK(dma_spin_lock); EXPORT_SYMBOL(dma_spin_lock); static dma_t *dma_chan[MAX_DMA_CHANNELS]; static inline dma_t *dma_channel(unsigned int chan) { if (chan >= MAX_DMA_CHANNELS) return NULL; return dma_chan[chan]; } int __init isa_dma_add(unsigned int chan, dma_t *dma) { if (!dma->d_ops) return -EINVAL; sg_init_table(&dma->buf, 1); if (dma_chan[chan]) return -EBUSY; dma_chan[chan] = dma; return 0; } /* * Request DMA channel * * On certain platforms, we have to allocate an interrupt as well... */ int request_dma(unsigned int chan, const char *device_id) { dma_t *dma = dma_channel(chan); int ret; if (!dma) goto bad_dma; if (xchg(&dma->lock, 1) != 0) goto busy; dma->device_id = device_id; dma->active = 0; dma->invalid = 1; ret = 0; if (dma->d_ops->request) ret = dma->d_ops->request(chan, dma); if (ret) xchg(&dma->lock, 0); return ret; bad_dma: printk(KERN_ERR "dma: trying to allocate DMA%d\n", chan); return -EINVAL; busy: return -EBUSY; } EXPORT_SYMBOL(request_dma); /* * Free DMA channel * * On certain platforms, we have to free interrupt as well... */ void free_dma(unsigned int chan) { dma_t *dma = dma_channel(chan); if (!dma) goto bad_dma; if (dma->active) { printk(KERN_ERR "dma%d: freeing active DMA\n", chan); dma->d_ops->disable(chan, dma); dma->active = 0; } if (xchg(&dma->lock, 0) != 0) { if (dma->d_ops->free) dma->d_ops->free(chan, dma); return; } printk(KERN_ERR "dma%d: trying to free free DMA\n", chan); return; bad_dma: printk(KERN_ERR "dma: trying to free DMA%d\n", chan); } EXPORT_SYMBOL(free_dma); /* Set DMA Scatter-Gather list */ void set_dma_sg (unsigned int chan, struct scatterlist *sg, int nr_sg) { dma_t *dma = dma_channel(chan); if (dma->active) printk(KERN_ERR "dma%d: altering DMA SG while " "DMA active\n", chan); dma->sg = sg; dma->sgcount = nr_sg; dma->invalid = 1; } EXPORT_SYMBOL(set_dma_sg); /* Set DMA address * * Copy address to the structure, and set the invalid bit */ void __set_dma_addr (unsigned int chan, void *addr) { dma_t *dma = dma_channel(chan); if (dma->active) printk(KERN_ERR "dma%d: altering DMA address while " "DMA active\n", chan); dma->sg = NULL; dma->addr = addr; dma->invalid = 1; } EXPORT_SYMBOL(__set_dma_addr); /* Set DMA byte count * * Copy address to the structure, and set the invalid bit */ void set_dma_count (unsigned int chan, unsigned long count) { dma_t *dma = dma_channel(chan); if (dma->active) printk(KERN_ERR "dma%d: altering DMA count while " "DMA active\n", chan); dma->sg = NULL; dma->count = count; dma->invalid = 1; } EXPORT_SYMBOL(set_dma_count); /* Set DMA direction mode */ void set_dma_mode (unsigned int chan, unsigned int mode) { dma_t *dma = dma_channel(chan); if (dma->active) printk(KERN_ERR "dma%d: altering DMA mode while " "DMA active\n", chan); dma->dma_mode = mode; dma->invalid = 1; } EXPORT_SYMBOL(set_dma_mode); /* Enable DMA channel */ void enable_dma (unsigned int chan) { 
dma_t *dma = dma_channel(chan); if (!dma->lock) goto free_dma; if (dma->active == 0) { dma->active = 1; dma->d_ops->enable(chan, dma); } return; free_dma: printk(KERN_ERR "dma%d: trying to enable free DMA\n", chan); BUG(); } EXPORT_SYMBOL(enable_dma); /* Disable DMA channel */ void disable_dma (unsigned int chan) { dma_t *dma = dma_channel(chan); if (!dma->lock) goto free_dma; if (dma->active == 1) { dma->active = 0; dma->d_ops->disable(chan, dma); } return; free_dma: printk(KERN_ERR "dma%d: trying to disable free DMA\n", chan); BUG(); } EXPORT_SYMBOL(disable_dma); /* * Is the specified DMA channel active? */ int dma_channel_active(unsigned int chan) { dma_t *dma = dma_channel(chan); return dma->active; } EXPORT_SYMBOL(dma_channel_active); void set_dma_page(unsigned int chan, char pagenr) { printk(KERN_ERR "dma%d: trying to set_dma_page\n", chan); } EXPORT_SYMBOL(set_dma_page); void set_dma_speed(unsigned int chan, int cycle_ns) { dma_t *dma = dma_channel(chan); int ret = 0; if (dma->d_ops->setspeed) ret = dma->d_ops->setspeed(chan, dma, cycle_ns); dma->speed = ret; } EXPORT_SYMBOL(set_dma_speed); int get_dma_residue(unsigned int chan) { dma_t *dma = dma_channel(chan); int ret = 0; if (dma->d_ops->residue) ret = dma->d_ops->residue(chan, dma); return ret; } EXPORT_SYMBOL(get_dma_residue); #ifdef CONFIG_PROC_FS static int proc_dma_show(struct seq_file *m, void *v) { int i; for (i = 0 ; i < MAX_DMA_CHANNELS ; i++) { dma_t *dma = dma_channel(i); if (dma && dma->lock) seq_printf(m, "%2d: %s\n", i, dma->device_id); } return 0; } static int proc_dma_open(struct inode *inode, struct file *file) { return single_open(file, proc_dma_show, NULL); } static const struct file_operations proc_dma_operations = { .open = proc_dma_open, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; static int __init proc_dma_init(void) { proc_create("dma", 0, NULL, &proc_dma_operations); return 0; } __initcall(proc_dma_init); #endif
gpl-2.0
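request_dma() above claims a channel with xchg(&dma->lock, 1): an atomic swap that returns the previous value, so exactly one caller observes the 0 -> 1 transition. The same claim/release idea with C11 atomics (hypothetical userspace stand-in; -1 plays the role of -EBUSY):

#include <assert.h>
#include <stdatomic.h>

static atomic_int chan_lock;		/* 0 = free, 1 = claimed */

/* Exactly one caller can win the 0 -> 1 transition. */
static int claim(void)
{
	return atomic_exchange(&chan_lock, 1) == 0 ? 0 : -1;
}

static void release(void)
{
	atomic_exchange(&chan_lock, 0);
}

int main(void)
{
	assert(claim() == 0);
	assert(claim() == -1);		/* second claimant sees it busy */
	release();
	assert(claim() == 0);
	return 0;
}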
SaberMod/Linux-stable
drivers/mfd/ucb1x00-ts.c
9782
11092
/* * Touchscreen driver for UCB1x00-based touchscreens * * Copyright (C) 2001 Russell King, All Rights Reserved. * Copyright (C) 2005 Pavel Machek * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * 21-Jan-2002 <jco@ict.es> : * * Added support for synchronous A/D mode. This mode is useful to * avoid noise induced in the touchpanel by the LCD, provided that * the UCB1x00 has a valid LCD sync signal routed to its ADCSYNC pin. * It is important to note that the signal connected to the ADCSYNC * pin should provide pulses even when the LCD is blanked, otherwise * a pen touch needed to unblank the LCD will never be read. */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/sched.h> #include <linux/spinlock.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/input.h> #include <linux/device.h> #include <linux/freezer.h> #include <linux/slab.h> #include <linux/kthread.h> #include <linux/mfd/ucb1x00.h> #include <mach/collie.h> #include <asm/mach-types.h> struct ucb1x00_ts { struct input_dev *idev; struct ucb1x00 *ucb; spinlock_t irq_lock; unsigned irq_disabled; wait_queue_head_t irq_wait; struct task_struct *rtask; u16 x_res; u16 y_res; unsigned int adcsync:1; }; static int adcsync; static inline void ucb1x00_ts_evt_add(struct ucb1x00_ts *ts, u16 pressure, u16 x, u16 y) { struct input_dev *idev = ts->idev; input_report_abs(idev, ABS_X, x); input_report_abs(idev, ABS_Y, y); input_report_abs(idev, ABS_PRESSURE, pressure); input_report_key(idev, BTN_TOUCH, 1); input_sync(idev); } static inline void ucb1x00_ts_event_release(struct ucb1x00_ts *ts) { struct input_dev *idev = ts->idev; input_report_abs(idev, ABS_PRESSURE, 0); input_report_key(idev, BTN_TOUCH, 0); input_sync(idev); } /* * Switch to interrupt mode. */ static inline void ucb1x00_ts_mode_int(struct ucb1x00_ts *ts) { ucb1x00_reg_write(ts->ucb, UCB_TS_CR, UCB_TS_CR_TSMX_POW | UCB_TS_CR_TSPX_POW | UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_GND | UCB_TS_CR_MODE_INT); } /* * Switch to pressure mode, and read pressure. We don't need to wait * here, since both plates are being driven. */ static inline unsigned int ucb1x00_ts_read_pressure(struct ucb1x00_ts *ts) { if (machine_is_collie()) { ucb1x00_io_write(ts->ucb, COLLIE_TC35143_GPIO_TBL_CHK, 0); ucb1x00_reg_write(ts->ucb, UCB_TS_CR, UCB_TS_CR_TSPX_POW | UCB_TS_CR_TSMX_POW | UCB_TS_CR_MODE_POS | UCB_TS_CR_BIAS_ENA); udelay(55); return ucb1x00_adc_read(ts->ucb, UCB_ADC_INP_AD2, ts->adcsync); } else { ucb1x00_reg_write(ts->ucb, UCB_TS_CR, UCB_TS_CR_TSMX_POW | UCB_TS_CR_TSPX_POW | UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_GND | UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); return ucb1x00_adc_read(ts->ucb, UCB_ADC_INP_TSPY, ts->adcsync); } } /* * Switch to X position mode and measure Y plate. We switch the plate * configuration in pressure mode, then switch to position mode. This * gives a faster response time. Even so, we need to wait about 55us * for things to stabilise. 
*/ static inline unsigned int ucb1x00_ts_read_xpos(struct ucb1x00_ts *ts) { if (machine_is_collie()) ucb1x00_io_write(ts->ucb, 0, COLLIE_TC35143_GPIO_TBL_CHK); else { ucb1x00_reg_write(ts->ucb, UCB_TS_CR, UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW | UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); ucb1x00_reg_write(ts->ucb, UCB_TS_CR, UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW | UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); } ucb1x00_reg_write(ts->ucb, UCB_TS_CR, UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW | UCB_TS_CR_MODE_POS | UCB_TS_CR_BIAS_ENA); udelay(55); return ucb1x00_adc_read(ts->ucb, UCB_ADC_INP_TSPY, ts->adcsync); } /* * Switch to Y position mode and measure X plate. We switch the plate * configuration in pressure mode, then switch to position mode. This * gives a faster response time. Even so, we need to wait about 55us * for things to stabilise. */ static inline unsigned int ucb1x00_ts_read_ypos(struct ucb1x00_ts *ts) { if (machine_is_collie()) ucb1x00_io_write(ts->ucb, 0, COLLIE_TC35143_GPIO_TBL_CHK); else { ucb1x00_reg_write(ts->ucb, UCB_TS_CR, UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW | UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); ucb1x00_reg_write(ts->ucb, UCB_TS_CR, UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW | UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); } ucb1x00_reg_write(ts->ucb, UCB_TS_CR, UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW | UCB_TS_CR_MODE_POS | UCB_TS_CR_BIAS_ENA); udelay(55); return ucb1x00_adc_read(ts->ucb, UCB_ADC_INP_TSPX, ts->adcsync); } /* * Switch to X plate resistance mode. Set MX to ground, PX to * supply. Measure current. */ static inline unsigned int ucb1x00_ts_read_xres(struct ucb1x00_ts *ts) { ucb1x00_reg_write(ts->ucb, UCB_TS_CR, UCB_TS_CR_TSMX_GND | UCB_TS_CR_TSPX_POW | UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); return ucb1x00_adc_read(ts->ucb, 0, ts->adcsync); } /* * Switch to Y plate resistance mode. Set MY to ground, PY to * supply. Measure current. */ static inline unsigned int ucb1x00_ts_read_yres(struct ucb1x00_ts *ts) { ucb1x00_reg_write(ts->ucb, UCB_TS_CR, UCB_TS_CR_TSMY_GND | UCB_TS_CR_TSPY_POW | UCB_TS_CR_MODE_PRES | UCB_TS_CR_BIAS_ENA); return ucb1x00_adc_read(ts->ucb, 0, ts->adcsync); } static inline int ucb1x00_ts_pen_down(struct ucb1x00_ts *ts) { unsigned int val = ucb1x00_reg_read(ts->ucb, UCB_TS_CR); if (machine_is_collie()) return (!(val & (UCB_TS_CR_TSPX_LOW))); else return (val & (UCB_TS_CR_TSPX_LOW | UCB_TS_CR_TSMX_LOW)); } /* * This is a RT kernel thread that handles the ADC accesses * (mainly so we can use semaphores in the UCB1200 core code * to serialise accesses to the ADC). */ static int ucb1x00_thread(void *_ts) { struct ucb1x00_ts *ts = _ts; DECLARE_WAITQUEUE(wait, current); bool frozen, ignore = false; int valid = 0; set_freezable(); add_wait_queue(&ts->irq_wait, &wait); while (!kthread_freezable_should_stop(&frozen)) { unsigned int x, y, p; signed long timeout; if (frozen) ignore = true; ucb1x00_adc_enable(ts->ucb); x = ucb1x00_ts_read_xpos(ts); y = ucb1x00_ts_read_ypos(ts); p = ucb1x00_ts_read_pressure(ts); /* * Switch back to interrupt mode. */ ucb1x00_ts_mode_int(ts); ucb1x00_adc_disable(ts->ucb); msleep(10); ucb1x00_enable(ts->ucb); if (ucb1x00_ts_pen_down(ts)) { set_current_state(TASK_INTERRUPTIBLE); spin_lock_irq(&ts->irq_lock); if (ts->irq_disabled) { ts->irq_disabled = 0; enable_irq(ts->ucb->irq_base + UCB_IRQ_TSPX); } spin_unlock_irq(&ts->irq_lock); ucb1x00_disable(ts->ucb); /* * If we spat out a valid sample set last time, * spit out a "pen off" sample here. 
*/ if (valid) { ucb1x00_ts_event_release(ts); valid = 0; } timeout = MAX_SCHEDULE_TIMEOUT; } else { ucb1x00_disable(ts->ucb); /* * Filtering is policy. Policy belongs in user * space. We therefore leave it to user space * to do any filtering they please. */ if (!ignore) { ucb1x00_ts_evt_add(ts, p, x, y); valid = 1; } set_current_state(TASK_INTERRUPTIBLE); timeout = HZ / 100; } schedule_timeout(timeout); } remove_wait_queue(&ts->irq_wait, &wait); ts->rtask = NULL; return 0; } /* * We only detect touch screen _touches_ with this interrupt * handler, and even then we just schedule our task. */ static irqreturn_t ucb1x00_ts_irq(int irq, void *id) { struct ucb1x00_ts *ts = id; spin_lock(&ts->irq_lock); ts->irq_disabled = 1; disable_irq_nosync(ts->ucb->irq_base + UCB_IRQ_TSPX); spin_unlock(&ts->irq_lock); wake_up(&ts->irq_wait); return IRQ_HANDLED; } static int ucb1x00_ts_open(struct input_dev *idev) { struct ucb1x00_ts *ts = input_get_drvdata(idev); unsigned long flags = 0; int ret = 0; BUG_ON(ts->rtask); if (machine_is_collie()) flags = IRQF_TRIGGER_RISING; else flags = IRQF_TRIGGER_FALLING; ts->irq_disabled = 0; init_waitqueue_head(&ts->irq_wait); ret = request_irq(ts->ucb->irq_base + UCB_IRQ_TSPX, ucb1x00_ts_irq, flags, "ucb1x00-ts", ts); if (ret < 0) goto out; /* * If we do this at all, we should allow the user to * measure and read the X and Y resistance at any time. */ ucb1x00_adc_enable(ts->ucb); ts->x_res = ucb1x00_ts_read_xres(ts); ts->y_res = ucb1x00_ts_read_yres(ts); ucb1x00_adc_disable(ts->ucb); ts->rtask = kthread_run(ucb1x00_thread, ts, "ktsd"); if (!IS_ERR(ts->rtask)) { ret = 0; } else { free_irq(ts->ucb->irq_base + UCB_IRQ_TSPX, ts); ts->rtask = NULL; ret = -EFAULT; } out: return ret; } /* * Release touchscreen resources. Disable IRQs. */ static void ucb1x00_ts_close(struct input_dev *idev) { struct ucb1x00_ts *ts = input_get_drvdata(idev); if (ts->rtask) kthread_stop(ts->rtask); ucb1x00_enable(ts->ucb); free_irq(ts->ucb->irq_base + UCB_IRQ_TSPX, ts); ucb1x00_reg_write(ts->ucb, UCB_TS_CR, 0); ucb1x00_disable(ts->ucb); } /* * Initialisation. */ static int ucb1x00_ts_add(struct ucb1x00_dev *dev) { struct ucb1x00_ts *ts; struct input_dev *idev; int err; ts = kzalloc(sizeof(struct ucb1x00_ts), GFP_KERNEL); idev = input_allocate_device(); if (!ts || !idev) { err = -ENOMEM; goto fail; } ts->ucb = dev->ucb; ts->idev = idev; ts->adcsync = adcsync ? 
UCB_SYNC : UCB_NOSYNC; spin_lock_init(&ts->irq_lock); idev->name = "Touchscreen panel"; idev->id.product = ts->ucb->id; idev->open = ucb1x00_ts_open; idev->close = ucb1x00_ts_close; idev->dev.parent = &ts->ucb->dev; idev->evbit[0] = BIT_MASK(EV_ABS) | BIT_MASK(EV_KEY); idev->keybit[BIT_WORD(BTN_TOUCH)] = BIT_MASK(BTN_TOUCH); input_set_drvdata(idev, ts); ucb1x00_adc_enable(ts->ucb); ts->x_res = ucb1x00_ts_read_xres(ts); ts->y_res = ucb1x00_ts_read_yres(ts); ucb1x00_adc_disable(ts->ucb); input_set_abs_params(idev, ABS_X, 0, ts->x_res, 0, 0); input_set_abs_params(idev, ABS_Y, 0, ts->y_res, 0, 0); input_set_abs_params(idev, ABS_PRESSURE, 0, 0, 0, 0); err = input_register_device(idev); if (err) goto fail; dev->priv = ts; return 0; fail: input_free_device(idev); kfree(ts); return err; } static void ucb1x00_ts_remove(struct ucb1x00_dev *dev) { struct ucb1x00_ts *ts = dev->priv; input_unregister_device(ts->idev); kfree(ts); } static struct ucb1x00_driver ucb1x00_ts_driver = { .add = ucb1x00_ts_add, .remove = ucb1x00_ts_remove, }; static int __init ucb1x00_ts_init(void) { return ucb1x00_register_driver(&ucb1x00_ts_driver); } static void __exit ucb1x00_ts_exit(void) { ucb1x00_unregister_driver(&ucb1x00_ts_driver); } module_param(adcsync, int, 0444); module_init(ucb1x00_ts_init); module_exit(ucb1x00_ts_exit); MODULE_AUTHOR("Russell King <rmk@arm.linux.org.uk>"); MODULE_DESCRIPTION("UCB1x00 touchscreen driver"); MODULE_LICENSE("GPL");
gpl-2.0
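ucb1x00_thread() above alternates between two sleep strategies: while the panel is being touched it reports a sample and polls again after HZ/100 jiffies (10 ms at HZ=100), and once the touch ends it reports a release, re-arms the TSPX interrupt, and blocks with MAX_SCHEDULE_TIMEOUT until the IRQ handler wakes it. A trivial sketch of that cadence decision; the `touching` flag and both constants are stand-ins, not kernel definitions.

#include <assert.h>
#include <limits.h>

#define HZ			100		/* demo value */
#define MAX_SCHEDULE_TIMEOUT	LONG_MAX	/* stand-in */

/* Poll at ~100 Hz while touching, otherwise block until the IRQ. */
static long next_timeout(int touching)
{
	return touching ? HZ / 100 : MAX_SCHEDULE_TIMEOUT;
}

int main(void)
{
	assert(next_timeout(1) == 1);	/* one jiffy = 10 ms at HZ=100 */
	assert(next_timeout(0) == MAX_SCHEDULE_TIMEOUT);
	return 0;
}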
suky/android_kernel_pantech_ef65l
arch/mips/mti-malta/malta-reset.c
11318
1638
/* * Carsten Langgaard, carstenl@mips.com * Copyright (C) 1999,2000 MIPS Technologies, Inc. All rights reserved. * * ######################################################################## * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * ######################################################################## * * Reset the MIPS boards. * */ #include <linux/init.h> #include <linux/pm.h> #include <asm/io.h> #include <asm/reboot.h> #include <asm/mips-boards/generic.h> static void mips_machine_restart(char *command) { unsigned int __iomem *softres_reg = ioremap(SOFTRES_REG, sizeof(unsigned int)); __raw_writel(GORESET, softres_reg); } static void mips_machine_halt(void) { unsigned int __iomem *softres_reg = ioremap(SOFTRES_REG, sizeof(unsigned int)); __raw_writel(GORESET, softres_reg); } static int __init mips_reboot_setup(void) { _machine_restart = mips_machine_restart; _machine_halt = mips_machine_halt; pm_power_off = mips_machine_halt; return 0; } arch_initcall(mips_reboot_setup);
gpl-2.0
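malta-reset.c above is a minimal example of the MIPS reboot-hook pattern: both restart and halt write GORESET into the ioremapped software-reset register, and because the board has no separate power-off control, pm_power_off simply reuses the halt routine. A userspace sketch of wiring such hooks; all names here are hypothetical.

#include <assert.h>

static void (*machine_restart_hook)(char *cmd);
static void (*machine_halt_hook)(void);
static void (*power_off_hook)(void);

static void board_restart(char *cmd)
{
	(void)cmd;	/* the real hook writes GORESET to the reset register */
}

static void board_halt(void)
{
	/* same GORESET write: "halting" this board resets it */
}

static int reboot_setup(void)
{
	machine_restart_hook = board_restart;
	machine_halt_hook = board_halt;
	power_off_hook = board_halt;	/* no power switch: off == halt */
	return 0;
}

int main(void)
{
	reboot_setup();
	assert(power_off_hook == machine_halt_hook);
	return 0;
}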
artefvck/X_Artefvck
mm/msync.c
11574
2479
/* * linux/mm/msync.c * * Copyright (C) 1994-1999 Linus Torvalds */ /* * The msync() system call. */ #include <linux/fs.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/file.h> #include <linux/syscalls.h> #include <linux/sched.h> /* * MS_SYNC syncs the entire file - including mappings. * * MS_ASYNC does not start I/O (it used to, up to 2.5.67). * Nor does it mark the relevant pages dirty (it used to, up to 2.6.17). * Now it doesn't do anything, since dirty pages are properly tracked. * * The application may now run fsync() to * write out the dirty pages and wait on the writeout and check the result. * Or the application may run fadvise(FADV_DONTNEED) against the fd to start * async writeout immediately. * So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to * applications. */ SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags) { unsigned long end; struct mm_struct *mm = current->mm; struct vm_area_struct *vma; int unmapped_error = 0; int error = -EINVAL; if (flags & ~(MS_ASYNC | MS_INVALIDATE | MS_SYNC)) goto out; if (start & ~PAGE_MASK) goto out; if ((flags & MS_ASYNC) && (flags & MS_SYNC)) goto out; error = -ENOMEM; len = (len + ~PAGE_MASK) & PAGE_MASK; end = start + len; if (end < start) goto out; error = 0; if (end == start) goto out; /* * If the interval [start,end) covers some unmapped address ranges, * just ignore them, but return -ENOMEM at the end. */ down_read(&mm->mmap_sem); vma = find_vma(mm, start); for (;;) { struct file *file; /* Still start < end. */ error = -ENOMEM; if (!vma) goto out_unlock; /* Here start < vma->vm_end. */ if (start < vma->vm_start) { start = vma->vm_start; if (start >= end) goto out_unlock; unmapped_error = -ENOMEM; } /* Here vma->vm_start <= start < vma->vm_end. */ if ((flags & MS_INVALIDATE) && (vma->vm_flags & VM_LOCKED)) { error = -EBUSY; goto out_unlock; } file = vma->vm_file; start = vma->vm_end; if ((flags & MS_SYNC) && file && (vma->vm_flags & VM_SHARED)) { get_file(file); up_read(&mm->mmap_sem); error = vfs_fsync(file, 0); fput(file); if (error || start >= end) goto out; down_read(&mm->mmap_sem); vma = find_vma(mm, start); } else { if (start >= end) { error = 0; goto out_unlock; } vma = vma->vm_next; } } out_unlock: up_read(&mm->mmap_sem); out: return error ? : unmapped_error; }
gpl-2.0
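The msync() loop above walks the VMA list in address order, clamps `start` into each mapping, records -ENOMEM in `unmapped_error` whenever it crosses a hole, and still keeps going. A pure-function sketch of that gap-detecting walk over sorted ranges (hypothetical types; -1 stands in for -ENOMEM):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

struct vma { uint64_t start, end; };	/* [start, end), sorted by start */

/*
 * Walk [start, end) across the sorted mappings; return 0 if fully
 * covered, -1 if any hole was crossed, mirroring msync()'s
 * unmapped_error handling.
 */
static int walk(const struct vma *v, size_t n, uint64_t start, uint64_t end)
{
	int unmapped = 0;
	size_t i;

	for (i = 0; i < n && start < end; i++) {
		if (v[i].end <= start)
			continue;		/* vma entirely behind us */
		if (start < v[i].start) {	/* hole before this vma */
			unmapped = -1;
			start = v[i].start;
			if (start >= end)
				break;
		}
		start = v[i].end;		/* consume the vma */
	}
	if (start < end)			/* hole at the tail */
		unmapped = -1;
	return unmapped;
}

int main(void)
{
	struct vma v[] = { { 0x1000, 0x2000 }, { 0x3000, 0x4000 } };

	assert(walk(v, 2, 0x1000, 0x2000) == 0);
	assert(walk(v, 2, 0x1800, 0x3800) == -1);	/* gap 0x2000-0x3000 */
	return 0;
}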
chevanlol360/Android_Kernel_LGE_Fx1
arch/sh/drivers/pci/ops-sh7786.c
12342
4765
/* * Generic SH7786 PCI-Express operations. * * Copyright (C) 2009 - 2010 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License v2. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/pci.h> #include <linux/io.h> #include <linux/spinlock.h> #include "pcie-sh7786.h" enum { PCI_ACCESS_READ, PCI_ACCESS_WRITE, }; static int sh7786_pcie_config_access(unsigned char access_type, struct pci_bus *bus, unsigned int devfn, int where, u32 *data) { struct pci_channel *chan = bus->sysdata; int dev, func, type, reg; dev = PCI_SLOT(devfn); func = PCI_FUNC(devfn); type = !!bus->parent; reg = where & ~3; if (bus->number > 255 || dev > 31 || func > 7) return PCIBIOS_FUNC_NOT_SUPPORTED; /* * While each channel has its own memory-mapped extended config * space, it's generally only accessible when in endpoint mode. * When in root complex mode, the controller is unable to target * itself with either type 0 or type 1 accesses, and indeed, any * controller initiated target transfer to its own config space * result in a completer abort. * * Each channel effectively only supports a single device, but as * the same channel <-> device access works for any PCI_SLOT() * value, we cheat a bit here and bind the controller's config * space to devfn 0 in order to enable self-enumeration. In this * case the regular PAR/PDR path is sidelined and the mangled * config access itself is initiated as a SuperHyway transaction. */ if (pci_is_root_bus(bus)) { if (dev == 0) { if (access_type == PCI_ACCESS_READ) *data = pci_read_reg(chan, PCI_REG(reg)); else pci_write_reg(chan, *data, PCI_REG(reg)); return PCIBIOS_SUCCESSFUL; } else if (dev > 1) return PCIBIOS_DEVICE_NOT_FOUND; } /* Clear errors */ pci_write_reg(chan, pci_read_reg(chan, SH4A_PCIEERRFR), SH4A_PCIEERRFR); /* Set the PIO address */ pci_write_reg(chan, (bus->number << 24) | (dev << 19) | (func << 16) | reg, SH4A_PCIEPAR); /* Enable the configuration access */ pci_write_reg(chan, (1 << 31) | (type << 8), SH4A_PCIEPCTLR); /* Check for errors */ if (pci_read_reg(chan, SH4A_PCIEERRFR) & 0x10) return PCIBIOS_DEVICE_NOT_FOUND; /* Check for master and target aborts */ if (pci_read_reg(chan, SH4A_PCIEPCICONF1) & ((1 << 29) | (1 << 28))) return PCIBIOS_DEVICE_NOT_FOUND; if (access_type == PCI_ACCESS_READ) *data = pci_read_reg(chan, SH4A_PCIEPDR); else pci_write_reg(chan, *data, SH4A_PCIEPDR); /* Disable the configuration access */ pci_write_reg(chan, 0, SH4A_PCIEPCTLR); return PCIBIOS_SUCCESSFUL; } static int sh7786_pcie_read(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val) { unsigned long flags; int ret; u32 data; if ((size == 2) && (where & 1)) return PCIBIOS_BAD_REGISTER_NUMBER; else if ((size == 4) && (where & 3)) return PCIBIOS_BAD_REGISTER_NUMBER; raw_spin_lock_irqsave(&pci_config_lock, flags); ret = sh7786_pcie_config_access(PCI_ACCESS_READ, bus, devfn, where, &data); if (ret != PCIBIOS_SUCCESSFUL) { *val = 0xffffffff; goto out; } if (size == 1) *val = (data >> ((where & 3) << 3)) & 0xff; else if (size == 2) *val = (data >> ((where & 2) << 3)) & 0xffff; else *val = data; dev_dbg(&bus->dev, "pcie-config-read: bus=%3d devfn=0x%04x " "where=0x%04x size=%d val=0x%08lx\n", bus->number, devfn, where, size, (unsigned long)*val); out: raw_spin_unlock_irqrestore(&pci_config_lock, flags); return ret; } static int sh7786_pcie_write(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val) { unsigned long 
flags; int shift, ret; u32 data; if ((size == 2) && (where & 1)) return PCIBIOS_BAD_REGISTER_NUMBER; else if ((size == 4) && (where & 3)) return PCIBIOS_BAD_REGISTER_NUMBER; raw_spin_lock_irqsave(&pci_config_lock, flags); ret = sh7786_pcie_config_access(PCI_ACCESS_READ, bus, devfn, where, &data); if (ret != PCIBIOS_SUCCESSFUL) goto out; dev_dbg(&bus->dev, "pcie-config-write: bus=%3d devfn=0x%04x " "where=0x%04x size=%d val=%08lx\n", bus->number, devfn, where, size, (unsigned long)val); if (size == 1) { shift = (where & 3) << 3; data &= ~(0xff << shift); data |= ((val & 0xff) << shift); } else if (size == 2) { shift = (where & 2) << 3; data &= ~(0xffff << shift); data |= ((val & 0xffff) << shift); } else data = val; ret = sh7786_pcie_config_access(PCI_ACCESS_WRITE, bus, devfn, where, &data); out: raw_spin_unlock_irqrestore(&pci_config_lock, flags); return ret; } struct pci_ops sh7786_pci_ops = { .read = sh7786_pcie_read, .write = sh7786_pcie_write, };
gpl-2.0
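sh7786_pcie_write() above cannot issue sub-word config writes directly, so it reads the covering 32-bit word, merges the 1- or 2-byte value into the right byte lane, and writes the word back. The merge step restated as a standalone function with the same shift/mask logic:

#include <assert.h>
#include <stdint.h>

/*
 * Merge a 1-, 2- or 4-byte value into the 32-bit word covering config
 * offset @where, as sh7786_pcie_write() does before the write-back.
 */
static uint32_t merge_field(uint32_t word, uint32_t val, int where, int size)
{
	int shift;

	if (size == 1) {
		shift = (where & 3) << 3;
		word &= ~(0xffu << shift);
		word |= (val & 0xff) << shift;
	} else if (size == 2) {
		shift = (where & 2) << 3;
		word &= ~(0xffffu << shift);
		word |= (val & 0xffff) << shift;
	} else {
		word = val;	/* full-word write replaces everything */
	}
	return word;
}

int main(void)
{
	/* Patch byte 1 of 0x11223344 with 0xab -> 0x1122ab44. */
	assert(merge_field(0x11223344, 0xab, 1, 1) == 0x1122ab44);
	/* Patch the upper halfword with 0xbeef -> 0xbeef3344. */
	assert(merge_field(0x11223344, 0xbeef, 2, 2) == 0xbeef3344);
	return 0;
}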
vitek999/Lenovo-a328
fs/ntfs/quota.c
14390
3724
/* * quota.c - NTFS kernel quota ($Quota) handling. Part of the Linux-NTFS * project. * * Copyright (c) 2004 Anton Altaparmakov * * This program/include file is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as published * by the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program/include file is distributed in the hope that it will be * useful, but WITHOUT ANY WARRANTY; without even the implied warranty * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program (in the main directory of the Linux-NTFS * distribution in the file COPYING); if not, write to the Free Software * Foundation,Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #ifdef NTFS_RW #include "index.h" #include "quota.h" #include "debug.h" #include "ntfs.h" /** * ntfs_mark_quotas_out_of_date - mark the quotas out of date on an ntfs volume * @vol: ntfs volume on which to mark the quotas out of date * * Mark the quotas out of date on the ntfs volume @vol and return 'true' on * success and 'false' on error. */ bool ntfs_mark_quotas_out_of_date(ntfs_volume *vol) { ntfs_index_context *ictx; QUOTA_CONTROL_ENTRY *qce; const le32 qid = QUOTA_DEFAULTS_ID; int err; ntfs_debug("Entering."); if (NVolQuotaOutOfDate(vol)) goto done; if (!vol->quota_ino || !vol->quota_q_ino) { ntfs_error(vol->sb, "Quota inodes are not open."); return false; } mutex_lock(&vol->quota_q_ino->i_mutex); ictx = ntfs_index_ctx_get(NTFS_I(vol->quota_q_ino)); if (!ictx) { ntfs_error(vol->sb, "Failed to get index context."); goto err_out; } err = ntfs_index_lookup(&qid, sizeof(qid), ictx); if (err) { if (err == -ENOENT) ntfs_error(vol->sb, "Quota defaults entry is not " "present."); else ntfs_error(vol->sb, "Lookup of quota defaults entry " "failed."); goto err_out; } if (ictx->data_len < offsetof(QUOTA_CONTROL_ENTRY, sid)) { ntfs_error(vol->sb, "Quota defaults entry size is invalid. " "Run chkdsk."); goto err_out; } qce = (QUOTA_CONTROL_ENTRY*)ictx->data; if (le32_to_cpu(qce->version) != QUOTA_VERSION) { ntfs_error(vol->sb, "Quota defaults entry version 0x%x is not " "supported.", le32_to_cpu(qce->version)); goto err_out; } ntfs_debug("Quota defaults flags = 0x%x.", le32_to_cpu(qce->flags)); /* If quotas are already marked out of date, no need to do anything. */ if (qce->flags & QUOTA_FLAG_OUT_OF_DATE) goto set_done; /* * If quota tracking is neither requested, nor enabled and there are no * pending deletes, no need to mark the quotas out of date. */ if (!(qce->flags & (QUOTA_FLAG_TRACKING_ENABLED | QUOTA_FLAG_TRACKING_REQUESTED | QUOTA_FLAG_PENDING_DELETES))) goto set_done; /* * Set the QUOTA_FLAG_OUT_OF_DATE bit thus marking quotas out of date. * This is verified on WinXP to be sufficient to cause windows to * rescan the volume on boot and update all quota entries. */ qce->flags |= QUOTA_FLAG_OUT_OF_DATE; /* Ensure the modified flags are written to disk. */ ntfs_index_entry_flush_dcache_page(ictx); ntfs_index_entry_mark_dirty(ictx); set_done: ntfs_index_ctx_put(ictx); mutex_unlock(&vol->quota_q_ino->i_mutex); /* * We set the flag so we do not try to mark the quotas out of date * again on remount. 
*/ NVolSetQuotaOutOfDate(vol); done: ntfs_debug("Done."); return true; err_out: if (ictx) ntfs_index_ctx_put(ictx); mutex_unlock(&vol->quota_q_ino->i_mutex); return false; } #endif /* NTFS_RW */
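The flag update above reduces to a small set-bit-if-needed decision. A minimal user-space sketch of that decision logic follows; the flag values are illustrative stand-ins (the real on-disk constants live in the NTFS layout headers), and mark_out_of_date() is an illustrative name, not an NTFS driver function.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative flag values only -- not the real on-disk constants. */
#define QUOTA_FLAG_TRACKING_ENABLED	0x00000010u
#define QUOTA_FLAG_TRACKING_REQUESTED	0x00000040u
#define QUOTA_FLAG_PENDING_DELETES	0x00000800u
#define QUOTA_FLAG_OUT_OF_DATE		0x00010000u

/* Same decision logic as ntfs_mark_quotas_out_of_date(): returns true
 * if the flags word changed and therefore needs to be written back. */
static bool mark_out_of_date(uint32_t *flags)
{
	if (*flags & QUOTA_FLAG_OUT_OF_DATE)
		return false;	/* already marked, nothing to do */
	if (!(*flags & (QUOTA_FLAG_TRACKING_ENABLED |
			QUOTA_FLAG_TRACKING_REQUESTED |
			QUOTA_FLAG_PENDING_DELETES)))
		return false;	/* quota tracking inactive, no update needed */
	*flags |= QUOTA_FLAG_OUT_OF_DATE;
	return true;
}

int main(void)
{
	uint32_t flags = QUOTA_FLAG_TRACKING_ENABLED;

	printf("dirty=%d flags=0x%x\n", mark_out_of_date(&flags),
	       (unsigned int)flags);
	return 0;
}

In the driver, the write-back half of that decision is the flush_dcache_page/mark_dirty pair, and the NVolSetQuotaOutOfDate() volume flag keeps remounts from repeating the index lookup.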
gpl-2.0
tijuca/bluetooth-next
drivers/tty/serial/imx.c
55
53826
/* * Driver for Motorola/Freescale IMX serial ports * * Based on drivers/char/serial.c, by Linus Torvalds, Theodore Ts'o. * * Author: Sascha Hauer <sascha@saschahauer.de> * Copyright (C) 2004 Pengutronix * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #if defined(CONFIG_SERIAL_IMX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/module.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/console.h> #include <linux/sysrq.h> #include <linux/platform_device.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial_core.h> #include <linux/serial.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/rational.h> #include <linux/slab.h> #include <linux/of.h> #include <linux/of_device.h> #include <linux/io.h> #include <linux/dma-mapping.h> #include <asm/irq.h> #include <linux/platform_data/serial-imx.h> #include <linux/platform_data/dma-imx.h> /* Register definitions */ #define URXD0 0x0 /* Receiver Register */ #define URTX0 0x40 /* Transmitter Register */ #define UCR1 0x80 /* Control Register 1 */ #define UCR2 0x84 /* Control Register 2 */ #define UCR3 0x88 /* Control Register 3 */ #define UCR4 0x8c /* Control Register 4 */ #define UFCR 0x90 /* FIFO Control Register */ #define USR1 0x94 /* Status Register 1 */ #define USR2 0x98 /* Status Register 2 */ #define UESC 0x9c /* Escape Character Register */ #define UTIM 0xa0 /* Escape Timer Register */ #define UBIR 0xa4 /* BRM Incremental Register */ #define UBMR 0xa8 /* BRM Modulator Register */ #define UBRC 0xac /* Baud Rate Count Register */ #define IMX21_ONEMS 0xb0 /* One Millisecond register */ #define IMX1_UTS 0xd0 /* UART Test Register on i.mx1 */ #define IMX21_UTS 0xb4 /* UART Test Register on all other i.mx */ /* UART Control Register Bit Fields. */ #define URXD_DUMMY_READ (1<<16) #define URXD_CHARRDY (1<<15) #define URXD_ERR (1<<14) #define URXD_OVRRUN (1<<13) #define URXD_FRMERR (1<<12) #define URXD_BRK (1<<11) #define URXD_PRERR (1<<10) #define URXD_RX_DATA (0xFF<<0) #define UCR1_ADEN (1<<15) /* Auto detect interrupt */ #define UCR1_ADBR (1<<14) /* Auto detect baud rate */ #define UCR1_TRDYEN (1<<13) /* Transmitter ready interrupt enable */ #define UCR1_IDEN (1<<12) /* Idle condition interrupt */ #define UCR1_ICD_REG(x) (((x) & 3) << 10) /* idle condition detect */ #define UCR1_RRDYEN (1<<9) /* Recv ready interrupt enable */ #define UCR1_RDMAEN (1<<8) /* Recv ready DMA enable */ #define UCR1_IREN (1<<7) /* Infrared interface enable */ #define UCR1_TXMPTYEN (1<<6) /* Transmitter empty interrupt enable */ #define UCR1_RTSDEN (1<<5) /* RTS delta interrupt enable */ #define UCR1_SNDBRK (1<<4) /* Send break */ #define UCR1_TDMAEN (1<<3) /* Transmitter ready DMA enable */ #define IMX1_UCR1_UARTCLKEN (1<<2) /* UART clock enabled, i.mx1 only */ #define UCR1_ATDMAEN (1<<2) /* Aging DMA Timer Enable */ #define UCR1_DOZE (1<<1) /* Doze */ #define UCR1_UARTEN (1<<0) /* UART enabled */ #define UCR2_ESCI (1<<15) /* Escape seq interrupt enable */ #define UCR2_IRTS (1<<14) /* Ignore RTS pin */ #define UCR2_CTSC (1<<13) /* CTS pin
control */ #define UCR2_CTS (1<<12) /* Clear to send */ #define UCR2_ESCEN (1<<11) /* Escape enable */ #define UCR2_PREN (1<<8) /* Parity enable */ #define UCR2_PROE (1<<7) /* Parity odd/even */ #define UCR2_STPB (1<<6) /* Stop */ #define UCR2_WS (1<<5) /* Word size */ #define UCR2_RTSEN (1<<4) /* Request to send interrupt enable */ #define UCR2_ATEN (1<<3) /* Aging Timer Enable */ #define UCR2_TXEN (1<<2) /* Transmitter enabled */ #define UCR2_RXEN (1<<1) /* Receiver enabled */ #define UCR2_SRST (1<<0) /* SW reset */ #define UCR3_DTREN (1<<13) /* DTR interrupt enable */ #define UCR3_PARERREN (1<<12) /* Parity enable */ #define UCR3_FRAERREN (1<<11) /* Frame error interrupt enable */ #define UCR3_DSR (1<<10) /* Data set ready */ #define UCR3_DCD (1<<9) /* Data carrier detect */ #define UCR3_RI (1<<8) /* Ring indicator */ #define UCR3_ADNIMP (1<<7) /* Autobaud Detection Not Improved */ #define UCR3_RXDSEN (1<<6) /* Receive status interrupt enable */ #define UCR3_AIRINTEN (1<<5) /* Async IR wake interrupt enable */ #define UCR3_AWAKEN (1<<4) /* Async wake interrupt enable */ #define IMX21_UCR3_RXDMUXSEL (1<<2) /* RXD Muxed Input Select */ #define UCR3_INVT (1<<1) /* Inverted Infrared transmission */ #define UCR3_BPEN (1<<0) /* Preset registers enable */ #define UCR4_CTSTL_SHF 10 /* CTS trigger level shift */ #define UCR4_CTSTL_MASK 0x3F /* CTS trigger is 6 bits wide */ #define UCR4_INVR (1<<9) /* Inverted infrared reception */ #define UCR4_ENIRI (1<<8) /* Serial infrared interrupt enable */ #define UCR4_WKEN (1<<7) /* Wake interrupt enable */ #define UCR4_REF16 (1<<6) /* Ref freq 16 MHz */ #define UCR4_IDDMAEN (1<<6) /* DMA IDLE Condition Detected */ #define UCR4_IRSC (1<<5) /* IR special case */ #define UCR4_TCEN (1<<3) /* Transmit complete interrupt enable */ #define UCR4_BKEN (1<<2) /* Break condition interrupt enable */ #define UCR4_OREN (1<<1) /* Receiver overrun interrupt enable */ #define UCR4_DREN (1<<0) /* Recv data ready interrupt enable */ #define UFCR_RXTL_SHF 0 /* Receiver trigger level shift */ #define UFCR_DCEDTE (1<<6) /* DCE/DTE mode select */ #define UFCR_RFDIV (7<<7) /* Reference freq divider mask */ #define UFCR_RFDIV_REG(x) (((x) < 7 ? 
6 - (x) : 6) << 7) #define UFCR_TXTL_SHF 10 /* Transmitter trigger level shift */ #define USR1_PARITYERR (1<<15) /* Parity error interrupt flag */ #define USR1_RTSS (1<<14) /* RTS pin status */ #define USR1_TRDY (1<<13) /* Transmitter ready interrupt/dma flag */ #define USR1_RTSD (1<<12) /* RTS delta */ #define USR1_ESCF (1<<11) /* Escape seq interrupt flag */ #define USR1_FRAMERR (1<<10) /* Frame error interrupt flag */ #define USR1_RRDY (1<<9) /* Receiver ready interrupt/dma flag */ #define USR1_TIMEOUT (1<<7) /* Receive timeout interrupt status */ #define USR1_RXDS (1<<6) /* Receiver idle interrupt flag */ #define USR1_AIRINT (1<<5) /* Async IR wake interrupt flag */ #define USR1_AWAKE (1<<4) /* Async wake interrupt flag */ #define USR2_ADET (1<<15) /* Auto baud rate detect complete */ #define USR2_TXFE (1<<14) /* Transmit buffer FIFO empty */ #define USR2_DTRF (1<<13) /* DTR edge interrupt flag */ #define USR2_IDLE (1<<12) /* Idle condition */ #define USR2_IRINT (1<<8) /* Serial infrared interrupt flag */ #define USR2_WAKE (1<<7) /* Wake */ #define USR2_RTSF (1<<4) /* RTS edge interrupt flag */ #define USR2_TXDC (1<<3) /* Transmitter complete */ #define USR2_BRCD (1<<2) /* Break condition */ #define USR2_ORE (1<<1) /* Overrun error */ #define USR2_RDR (1<<0) /* Recv data ready */ #define UTS_FRCPERR (1<<13) /* Force parity error */ #define UTS_LOOP (1<<12) /* Loop tx and rx */ #define UTS_TXEMPTY (1<<6) /* TxFIFO empty */ #define UTS_RXEMPTY (1<<5) /* RxFIFO empty */ #define UTS_TXFULL (1<<4) /* TxFIFO full */ #define UTS_RXFULL (1<<3) /* RxFIFO full */ #define UTS_SOFTRST (1<<0) /* Software reset */ /* We've been assigned a range on the "Low-density serial ports" major */ #define SERIAL_IMX_MAJOR 207 #define MINOR_START 16 #define DEV_NAME "ttymxc" /* * This determines how often we check the modem status signals * for any change. They generally aren't connected to an IRQ * so we have to poll them. We also check immediately before * filling the TX FIFO in case CTS has been dropped.
*/ #define MCTRL_TIMEOUT (250*HZ/1000) #define DRIVER_NAME "IMX-uart" #define UART_NR 8 /* i.MX21 type uart runs on all i.mx except i.MX1 and i.MX6q */ enum imx_uart_type { IMX1_UART, IMX21_UART, IMX6Q_UART, }; /* device type dependent stuff */ struct imx_uart_data { unsigned uts_reg; enum imx_uart_type devtype; }; struct imx_port { struct uart_port port; struct timer_list timer; unsigned int old_status; unsigned int have_rtscts:1; unsigned int dte_mode:1; unsigned int irda_inv_rx:1; unsigned int irda_inv_tx:1; unsigned short trcv_delay; /* transceiver delay */ struct clk *clk_ipg; struct clk *clk_per; const struct imx_uart_data *devdata; /* DMA fields */ unsigned int dma_is_inited:1; unsigned int dma_is_enabled:1; unsigned int dma_is_rxing:1; unsigned int dma_is_txing:1; struct dma_chan *dma_chan_rx, *dma_chan_tx; struct scatterlist rx_sgl, tx_sgl[2]; void *rx_buf; unsigned int tx_bytes; unsigned int dma_tx_nents; wait_queue_head_t dma_wait; }; struct imx_port_ucrs { unsigned int ucr1; unsigned int ucr2; unsigned int ucr3; }; static struct imx_uart_data imx_uart_devdata[] = { [IMX1_UART] = { .uts_reg = IMX1_UTS, .devtype = IMX1_UART, }, [IMX21_UART] = { .uts_reg = IMX21_UTS, .devtype = IMX21_UART, }, [IMX6Q_UART] = { .uts_reg = IMX21_UTS, .devtype = IMX6Q_UART, }, }; static struct platform_device_id imx_uart_devtype[] = { { .name = "imx1-uart", .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX1_UART], }, { .name = "imx21-uart", .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX21_UART], }, { .name = "imx6q-uart", .driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX6Q_UART], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(platform, imx_uart_devtype); static const struct of_device_id imx_uart_dt_ids[] = { { .compatible = "fsl,imx6q-uart", .data = &imx_uart_devdata[IMX6Q_UART], }, { .compatible = "fsl,imx1-uart", .data = &imx_uart_devdata[IMX1_UART], }, { .compatible = "fsl,imx21-uart", .data = &imx_uart_devdata[IMX21_UART], }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, imx_uart_dt_ids); static inline unsigned uts_reg(struct imx_port *sport) { return sport->devdata->uts_reg; } static inline int is_imx1_uart(struct imx_port *sport) { return sport->devdata->devtype == IMX1_UART; } static inline int is_imx21_uart(struct imx_port *sport) { return sport->devdata->devtype == IMX21_UART; } static inline int is_imx6q_uart(struct imx_port *sport) { return sport->devdata->devtype == IMX6Q_UART; } /* * Save and restore functions for UCR1, UCR2 and UCR3 registers */ #if defined(CONFIG_SERIAL_IMX_CONSOLE) static void imx_port_ucrs_save(struct uart_port *port, struct imx_port_ucrs *ucr) { /* save control registers */ ucr->ucr1 = readl(port->membase + UCR1); ucr->ucr2 = readl(port->membase + UCR2); ucr->ucr3 = readl(port->membase + UCR3); } static void imx_port_ucrs_restore(struct uart_port *port, struct imx_port_ucrs *ucr) { /* restore control registers */ writel(ucr->ucr1, port->membase + UCR1); writel(ucr->ucr2, port->membase + UCR2); writel(ucr->ucr3, port->membase + UCR3); } #endif /* * Handle any change of modem status signal since we were last called. 
*/ static void imx_mctrl_check(struct imx_port *sport) { unsigned int status, changed; status = sport->port.ops->get_mctrl(&sport->port); changed = status ^ sport->old_status; if (changed == 0) return; sport->old_status = status; if (changed & TIOCM_RI) sport->port.icount.rng++; if (changed & TIOCM_DSR) sport->port.icount.dsr++; if (changed & TIOCM_CAR) uart_handle_dcd_change(&sport->port, status & TIOCM_CAR); if (changed & TIOCM_CTS) uart_handle_cts_change(&sport->port, status & TIOCM_CTS); wake_up_interruptible(&sport->port.state->port.delta_msr_wait); } /* * This is our per-port timeout handler, for checking the * modem status signals. */ static void imx_timeout(unsigned long data) { struct imx_port *sport = (struct imx_port *)data; unsigned long flags; if (sport->port.state) { spin_lock_irqsave(&sport->port.lock, flags); imx_mctrl_check(sport); spin_unlock_irqrestore(&sport->port.lock, flags); mod_timer(&sport->timer, jiffies + MCTRL_TIMEOUT); } } /* * interrupts disabled on entry */ static void imx_stop_tx(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; unsigned long temp; /* * We may be in SMP context, so if the DMA TX thread is running * on another cpu, we have to wait for it to finish. */ if (sport->dma_is_enabled && sport->dma_is_txing) return; temp = readl(port->membase + UCR1); writel(temp & ~UCR1_TXMPTYEN, port->membase + UCR1); /* in rs485 mode disable transmitter if shifter is empty */ if (port->rs485.flags & SER_RS485_ENABLED && readl(port->membase + USR2) & USR2_TXDC) { temp = readl(port->membase + UCR2); if (port->rs485.flags & SER_RS485_RTS_AFTER_SEND) temp &= ~UCR2_CTS; else temp |= UCR2_CTS; writel(temp, port->membase + UCR2); temp = readl(port->membase + UCR4); temp &= ~UCR4_TCEN; writel(temp, port->membase + UCR4); } } /* * interrupts disabled on entry */ static void imx_stop_rx(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; unsigned long temp; if (sport->dma_is_enabled && sport->dma_is_rxing) { if (sport->port.suspended) { dmaengine_terminate_all(sport->dma_chan_rx); sport->dma_is_rxing = 0; } else { return; } } temp = readl(sport->port.membase + UCR2); writel(temp & ~UCR2_RXEN, sport->port.membase + UCR2); /* disable the `Receiver Ready Interrupt` */ temp = readl(sport->port.membase + UCR1); writel(temp & ~UCR1_RRDYEN, sport->port.membase + UCR1); } /* * Set the modem control timer to fire immediately. */ static void imx_enable_ms(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; mod_timer(&sport->timer, jiffies); } static void imx_dma_tx(struct imx_port *sport); static inline void imx_transmit_buffer(struct imx_port *sport) { struct circ_buf *xmit = &sport->port.state->xmit; unsigned long temp; if (sport->port.x_char) { /* Send next char */ writel(sport->port.x_char, sport->port.membase + URTX0); sport->port.icount.tx++; sport->port.x_char = 0; return; } if (uart_circ_empty(xmit) || uart_tx_stopped(&sport->port)) { imx_stop_tx(&sport->port); return; } if (sport->dma_is_enabled) { /* * We've just sent an X-char. Ensure the TX DMA is enabled * and the TX IRQ is disabled.
**/ temp = readl(sport->port.membase + UCR1); temp &= ~UCR1_TXMPTYEN; if (sport->dma_is_txing) { temp |= UCR1_TDMAEN; writel(temp, sport->port.membase + UCR1); } else { writel(temp, sport->port.membase + UCR1); imx_dma_tx(sport); } } while (!uart_circ_empty(xmit) && !(readl(sport->port.membase + uts_reg(sport)) & UTS_TXFULL)) { /* send xmit->buf[xmit->tail] * out the port here */ writel(xmit->buf[xmit->tail], sport->port.membase + URTX0); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); sport->port.icount.tx++; } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&sport->port); if (uart_circ_empty(xmit)) imx_stop_tx(&sport->port); } static void dma_tx_callback(void *data) { struct imx_port *sport = data; struct scatterlist *sgl = &sport->tx_sgl[0]; struct circ_buf *xmit = &sport->port.state->xmit; unsigned long flags; unsigned long temp; spin_lock_irqsave(&sport->port.lock, flags); dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE); temp = readl(sport->port.membase + UCR1); temp &= ~UCR1_TDMAEN; writel(temp, sport->port.membase + UCR1); /* update the stat */ xmit->tail = (xmit->tail + sport->tx_bytes) & (UART_XMIT_SIZE - 1); sport->port.icount.tx += sport->tx_bytes; dev_dbg(sport->port.dev, "we finish the TX DMA.\n"); sport->dma_is_txing = 0; spin_unlock_irqrestore(&sport->port.lock, flags); if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&sport->port); if (waitqueue_active(&sport->dma_wait)) { wake_up(&sport->dma_wait); dev_dbg(sport->port.dev, "exit in %s.\n", __func__); return; } spin_lock_irqsave(&sport->port.lock, flags); if (!uart_circ_empty(xmit) && !uart_tx_stopped(&sport->port)) imx_dma_tx(sport); spin_unlock_irqrestore(&sport->port.lock, flags); } static void imx_dma_tx(struct imx_port *sport) { struct circ_buf *xmit = &sport->port.state->xmit; struct scatterlist *sgl = sport->tx_sgl; struct dma_async_tx_descriptor *desc; struct dma_chan *chan = sport->dma_chan_tx; struct device *dev = sport->port.dev; unsigned long temp; int ret; if (sport->dma_is_txing) return; sport->tx_bytes = uart_circ_chars_pending(xmit); if (xmit->tail < xmit->head) { sport->dma_tx_nents = 1; sg_init_one(sgl, xmit->buf + xmit->tail, sport->tx_bytes); } else { sport->dma_tx_nents = 2; sg_init_table(sgl, 2); sg_set_buf(sgl, xmit->buf + xmit->tail, UART_XMIT_SIZE - xmit->tail); sg_set_buf(sgl + 1, xmit->buf, xmit->head); } ret = dma_map_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE); if (ret == 0) { dev_err(dev, "DMA mapping error for TX.\n"); return; } desc = dmaengine_prep_slave_sg(chan, sgl, sport->dma_tx_nents, DMA_MEM_TO_DEV, DMA_PREP_INTERRUPT); if (!desc) { dma_unmap_sg(dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE); dev_err(dev, "We cannot prepare for the TX slave dma!\n"); return; } desc->callback = dma_tx_callback; desc->callback_param = sport; dev_dbg(dev, "TX: prepare to send %lu bytes by DMA.\n", uart_circ_chars_pending(xmit)); temp = readl(sport->port.membase + UCR1); temp |= UCR1_TDMAEN; writel(temp, sport->port.membase + UCR1); /* fire it */ sport->dma_is_txing = 1; dmaengine_submit(desc); dma_async_issue_pending(chan); return; } /* * interrupts disabled on entry */ static void imx_start_tx(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; unsigned long temp; if (port->rs485.flags & SER_RS485_ENABLED) { /* enable transmitter and shifter empty irq */ temp = readl(port->membase + UCR2); if (port->rs485.flags & SER_RS485_RTS_ON_SEND) temp &= ~UCR2_CTS; else temp |= UCR2_CTS; writel(temp, port->membase + 
UCR2); temp = readl(port->membase + UCR4); temp |= UCR4_TCEN; writel(temp, port->membase + UCR4); } if (!sport->dma_is_enabled) { temp = readl(sport->port.membase + UCR1); writel(temp | UCR1_TXMPTYEN, sport->port.membase + UCR1); } if (sport->dma_is_enabled) { if (sport->port.x_char) { /* We have an X-char to send, so enable the TX IRQ and * disable TX DMA to let the TX interrupt send the X-char */ temp = readl(sport->port.membase + UCR1); temp &= ~UCR1_TDMAEN; temp |= UCR1_TXMPTYEN; writel(temp, sport->port.membase + UCR1); return; } if (!uart_circ_empty(&port->state->xmit) && !uart_tx_stopped(port)) imx_dma_tx(sport); return; } } static irqreturn_t imx_rtsint(int irq, void *dev_id) { struct imx_port *sport = dev_id; unsigned int val; unsigned long flags; spin_lock_irqsave(&sport->port.lock, flags); writel(USR1_RTSD, sport->port.membase + USR1); val = readl(sport->port.membase + USR1) & USR1_RTSS; uart_handle_cts_change(&sport->port, !!val); wake_up_interruptible(&sport->port.state->port.delta_msr_wait); spin_unlock_irqrestore(&sport->port.lock, flags); return IRQ_HANDLED; } static irqreturn_t imx_txint(int irq, void *dev_id) { struct imx_port *sport = dev_id; unsigned long flags; spin_lock_irqsave(&sport->port.lock, flags); imx_transmit_buffer(sport); spin_unlock_irqrestore(&sport->port.lock, flags); return IRQ_HANDLED; } static irqreturn_t imx_rxint(int irq, void *dev_id) { struct imx_port *sport = dev_id; unsigned int rx, flg, ignored = 0; struct tty_port *port = &sport->port.state->port; unsigned long flags, temp; spin_lock_irqsave(&sport->port.lock, flags); while (readl(sport->port.membase + USR2) & USR2_RDR) { flg = TTY_NORMAL; sport->port.icount.rx++; rx = readl(sport->port.membase + URXD0); temp = readl(sport->port.membase + USR2); if (temp & USR2_BRCD) { writel(USR2_BRCD, sport->port.membase + USR2); if (uart_handle_break(&sport->port)) continue; } if (uart_handle_sysrq_char(&sport->port, (unsigned char)rx)) continue; if (unlikely(rx & URXD_ERR)) { if (rx & URXD_BRK) sport->port.icount.brk++; else if (rx & URXD_PRERR) sport->port.icount.parity++; else if (rx & URXD_FRMERR) sport->port.icount.frame++; if (rx & URXD_OVRRUN) sport->port.icount.overrun++; if (rx & sport->port.ignore_status_mask) { if (++ignored > 100) goto out; continue; } rx &= (sport->port.read_status_mask | 0xFF); if (rx & URXD_BRK) flg = TTY_BREAK; else if (rx & URXD_PRERR) flg = TTY_PARITY; else if (rx & URXD_FRMERR) flg = TTY_FRAME; if (rx & URXD_OVRRUN) flg = TTY_OVERRUN; #ifdef SUPPORT_SYSRQ sport->port.sysrq = 0; #endif } if (sport->port.ignore_status_mask & URXD_DUMMY_READ) goto out; tty_insert_flip_char(port, rx, flg); } out: spin_unlock_irqrestore(&sport->port.lock, flags); tty_flip_buffer_push(port); return IRQ_HANDLED; } static int start_rx_dma(struct imx_port *sport); /* * If the RXFIFO is filled with some data, start a DMA * operation to receive it. */ static void imx_dma_rxint(struct imx_port *sport) { unsigned long temp; unsigned long flags; spin_lock_irqsave(&sport->port.lock, flags); temp = readl(sport->port.membase + USR2); if ((temp & USR2_RDR) && !sport->dma_is_rxing) { sport->dma_is_rxing = 1; /* disable the `Receiver Ready Interrupt` */ temp = readl(sport->port.membase + UCR1); temp &= ~(UCR1_RRDYEN); writel(temp, sport->port.membase + UCR1); /* tell the DMA to receive the data.
*/ start_rx_dma(sport); } spin_unlock_irqrestore(&sport->port.lock, flags); } static irqreturn_t imx_int(int irq, void *dev_id) { struct imx_port *sport = dev_id; unsigned int sts; unsigned int sts2; sts = readl(sport->port.membase + USR1); sts2 = readl(sport->port.membase + USR2); if (sts & USR1_RRDY) { if (sport->dma_is_enabled) imx_dma_rxint(sport); else imx_rxint(irq, dev_id); } if ((sts & USR1_TRDY && readl(sport->port.membase + UCR1) & UCR1_TXMPTYEN) || (sts2 & USR2_TXDC && readl(sport->port.membase + UCR4) & UCR4_TCEN)) imx_txint(irq, dev_id); if (sts & USR1_RTSD) imx_rtsint(irq, dev_id); if (sts & USR1_AWAKE) writel(USR1_AWAKE, sport->port.membase + USR1); if (sts2 & USR2_ORE) { dev_err(sport->port.dev, "Rx FIFO overrun\n"); sport->port.icount.overrun++; writel(USR2_ORE, sport->port.membase + USR2); } return IRQ_HANDLED; } /* * Return TIOCSER_TEMT when transmitter is not busy. */ static unsigned int imx_tx_empty(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; unsigned int ret; ret = (readl(sport->port.membase + USR2) & USR2_TXDC) ? TIOCSER_TEMT : 0; /* If the TX DMA is working, return 0. */ if (sport->dma_is_enabled && sport->dma_is_txing) ret = 0; return ret; } /* * We have a modem side uart, so the meanings of RTS and CTS are inverted. */ static unsigned int imx_get_mctrl(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; unsigned int tmp = TIOCM_DSR | TIOCM_CAR; if (readl(sport->port.membase + USR1) & USR1_RTSS) tmp |= TIOCM_CTS; if (readl(sport->port.membase + UCR2) & UCR2_CTS) tmp |= TIOCM_RTS; if (readl(sport->port.membase + uts_reg(sport)) & UTS_LOOP) tmp |= TIOCM_LOOP; return tmp; } static void imx_set_mctrl(struct uart_port *port, unsigned int mctrl) { struct imx_port *sport = (struct imx_port *)port; unsigned long temp; if (!(port->rs485.flags & SER_RS485_ENABLED)) { temp = readl(sport->port.membase + UCR2); temp &= ~(UCR2_CTS | UCR2_CTSC); if (mctrl & TIOCM_RTS) temp |= UCR2_CTS | UCR2_CTSC; writel(temp, sport->port.membase + UCR2); } temp = readl(sport->port.membase + uts_reg(sport)) & ~UTS_LOOP; if (mctrl & TIOCM_LOOP) temp |= UTS_LOOP; writel(temp, sport->port.membase + uts_reg(sport)); } /* * Interrupts always disabled. */ static void imx_break_ctl(struct uart_port *port, int break_state) { struct imx_port *sport = (struct imx_port *)port; unsigned long flags, temp; spin_lock_irqsave(&sport->port.lock, flags); temp = readl(sport->port.membase + UCR1) & ~UCR1_SNDBRK; if (break_state != 0) temp |= UCR1_SNDBRK; writel(temp, sport->port.membase + UCR1); spin_unlock_irqrestore(&sport->port.lock, flags); } #define TXTL 2 /* reset default */ #define RXTL 1 /* reset default */ static int imx_setup_ufcr(struct imx_port *sport, unsigned int mode) { unsigned int val; /* set receiver / transmitter trigger level */ val = readl(sport->port.membase + UFCR) & (UFCR_RFDIV | UFCR_DCEDTE); val |= TXTL << UFCR_TXTL_SHF | RXTL; writel(val, sport->port.membase + UFCR); return 0; } #define RX_BUF_SIZE (PAGE_SIZE) static void imx_rx_dma_done(struct imx_port *sport) { unsigned long temp; unsigned long flags; spin_lock_irqsave(&sport->port.lock, flags); /* Enable this interrupt when the RXFIFO is empty. */ temp = readl(sport->port.membase + UCR1); temp |= UCR1_RRDYEN; writel(temp, sport->port.membase + UCR1); sport->dma_is_rxing = 0; /* Is the shutdown waiting for us? 
*/ if (waitqueue_active(&sport->dma_wait)) wake_up(&sport->dma_wait); spin_unlock_irqrestore(&sport->port.lock, flags); } /* * There are three kinds of RX DMA interrupts (such as on the MX6Q): * [1] the RX DMA buffer is full. * [2] the Aging timer expires (waits for 8 bytes) * [3] the Idle Condition Detect (enabled by UCR4_IDDMAEN). * * [2] is triggered when a character has been sitting in the FIFO, * while [3] can wait for 32 bytes when the RX line is * in the IDLE state and the RxFIFO is empty. */ static void dma_rx_callback(void *data) { struct imx_port *sport = data; struct dma_chan *chan = sport->dma_chan_rx; struct scatterlist *sgl = &sport->rx_sgl; struct tty_port *port = &sport->port.state->port; struct dma_tx_state state; enum dma_status status; unsigned int count; /* unmap it first */ dma_unmap_sg(sport->port.dev, sgl, 1, DMA_FROM_DEVICE); status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state); count = RX_BUF_SIZE - state.residue; dev_dbg(sport->port.dev, "We get %d bytes.\n", count); if (count) { if (!(sport->port.ignore_status_mask & URXD_DUMMY_READ)) tty_insert_flip_string(port, sport->rx_buf, count); tty_flip_buffer_push(port); start_rx_dma(sport); } else if (readl(sport->port.membase + USR2) & USR2_RDR) { /* * start rx_dma directly once there is data in the RXFIFO; more * efficient than before: * 1. call imx_rx_dma_done to stop dma if no data received * 2. wait for the next RDR interrupt to start the dma transfer. */ start_rx_dma(sport); } else { /* * stop dma to prevent too many IDLE events being triggered if * there is no data in the RXFIFO */ imx_rx_dma_done(sport); } } static int start_rx_dma(struct imx_port *sport) { struct scatterlist *sgl = &sport->rx_sgl; struct dma_chan *chan = sport->dma_chan_rx; struct device *dev = sport->port.dev; struct dma_async_tx_descriptor *desc; int ret; sg_init_one(sgl, sport->rx_buf, RX_BUF_SIZE); ret = dma_map_sg(dev, sgl, 1, DMA_FROM_DEVICE); if (ret == 0) { dev_err(dev, "DMA mapping error for RX.\n"); return -EINVAL; } desc = dmaengine_prep_slave_sg(chan, sgl, 1, DMA_DEV_TO_MEM, DMA_PREP_INTERRUPT); if (!desc) { dma_unmap_sg(dev, sgl, 1, DMA_FROM_DEVICE); dev_err(dev, "We cannot prepare for the RX slave dma!\n"); return -EINVAL; } desc->callback = dma_rx_callback; desc->callback_param = sport; dev_dbg(dev, "RX: prepare for the DMA.\n"); dmaengine_submit(desc); dma_async_issue_pending(chan); return 0; } static void imx_uart_dma_exit(struct imx_port *sport) { if (sport->dma_chan_rx) { dma_release_channel(sport->dma_chan_rx); sport->dma_chan_rx = NULL; kfree(sport->rx_buf); sport->rx_buf = NULL; } if (sport->dma_chan_tx) { dma_release_channel(sport->dma_chan_tx); sport->dma_chan_tx = NULL; } sport->dma_is_inited = 0; } static int imx_uart_dma_init(struct imx_port *sport) { struct dma_slave_config slave_config = {}; struct device *dev = sport->port.dev; int ret; /* Prepare for RX : */ sport->dma_chan_rx = dma_request_slave_channel(dev, "rx"); if (!sport->dma_chan_rx) { dev_dbg(dev, "cannot get the DMA channel.\n"); ret = -EINVAL; goto err; } slave_config.direction = DMA_DEV_TO_MEM; slave_config.src_addr = sport->port.mapbase + URXD0; slave_config.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; slave_config.src_maxburst = RXTL; ret = dmaengine_slave_config(sport->dma_chan_rx, &slave_config); if (ret) { dev_err(dev, "error in RX dma configuration.\n"); goto err; } sport->rx_buf = kzalloc(PAGE_SIZE, GFP_KERNEL); if (!sport->rx_buf) { ret = -ENOMEM; goto err; } /* Prepare for TX : */ sport->dma_chan_tx = dma_request_slave_channel(dev, "tx"); if (!sport->dma_chan_tx) { dev_err(dev,
"cannot get the TX DMA channel!\n"); ret = -EINVAL; goto err; } slave_config.direction = DMA_MEM_TO_DEV; slave_config.dst_addr = sport->port.mapbase + URTX0; slave_config.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; slave_config.dst_maxburst = TXTL; ret = dmaengine_slave_config(sport->dma_chan_tx, &slave_config); if (ret) { dev_err(dev, "error in TX dma configuration."); goto err; } sport->dma_is_inited = 1; return 0; err: imx_uart_dma_exit(sport); return ret; } static void imx_enable_dma(struct imx_port *sport) { unsigned long temp; init_waitqueue_head(&sport->dma_wait); /* set UCR1 */ temp = readl(sport->port.membase + UCR1); temp |= UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN | /* wait for 32 idle frames for IDDMA interrupt */ UCR1_ICD_REG(3); writel(temp, sport->port.membase + UCR1); /* set UCR4 */ temp = readl(sport->port.membase + UCR4); temp |= UCR4_IDDMAEN; writel(temp, sport->port.membase + UCR4); sport->dma_is_enabled = 1; } static void imx_disable_dma(struct imx_port *sport) { unsigned long temp; /* clear UCR1 */ temp = readl(sport->port.membase + UCR1); temp &= ~(UCR1_RDMAEN | UCR1_TDMAEN | UCR1_ATDMAEN); writel(temp, sport->port.membase + UCR1); /* clear UCR2 */ temp = readl(sport->port.membase + UCR2); temp &= ~(UCR2_CTSC | UCR2_CTS); writel(temp, sport->port.membase + UCR2); /* clear UCR4 */ temp = readl(sport->port.membase + UCR4); temp &= ~UCR4_IDDMAEN; writel(temp, sport->port.membase + UCR4); sport->dma_is_enabled = 0; } /* half the RX buffer size */ #define CTSTL 16 static int imx_startup(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; int retval, i; unsigned long flags, temp; retval = clk_prepare_enable(sport->clk_per); if (retval) return retval; retval = clk_prepare_enable(sport->clk_ipg); if (retval) { clk_disable_unprepare(sport->clk_per); return retval; } imx_setup_ufcr(sport, 0); /* disable the DREN bit (Data Ready interrupt enable) before * requesting IRQs */ temp = readl(sport->port.membase + UCR4); /* set the trigger level for CTS */ temp &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF); temp |= CTSTL << UCR4_CTSTL_SHF; writel(temp & ~UCR4_DREN, sport->port.membase + UCR4); /* Reset fifo's and state machines */ i = 100; temp = readl(sport->port.membase + UCR2); temp &= ~UCR2_SRST; writel(temp, sport->port.membase + UCR2); while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) && (--i > 0)) udelay(1); /* Can we enable the DMA support? 
*/ if (is_imx6q_uart(sport) && !uart_console(port) && !sport->dma_is_inited) imx_uart_dma_init(sport); spin_lock_irqsave(&sport->port.lock, flags); /* * Finally, clear and enable interrupts */ writel(USR1_RTSD, sport->port.membase + USR1); writel(USR2_ORE, sport->port.membase + USR2); if (sport->dma_is_inited && !sport->dma_is_enabled) imx_enable_dma(sport); temp = readl(sport->port.membase + UCR1); temp |= UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN; writel(temp, sport->port.membase + UCR1); temp = readl(sport->port.membase + UCR4); temp |= UCR4_OREN; writel(temp, sport->port.membase + UCR4); temp = readl(sport->port.membase + UCR2); temp |= (UCR2_RXEN | UCR2_TXEN); if (!sport->have_rtscts) temp |= UCR2_IRTS; writel(temp, sport->port.membase + UCR2); if (!is_imx1_uart(sport)) { temp = readl(sport->port.membase + UCR3); temp |= IMX21_UCR3_RXDMUXSEL | UCR3_ADNIMP; writel(temp, sport->port.membase + UCR3); } /* * Enable modem status interrupts */ imx_enable_ms(&sport->port); spin_unlock_irqrestore(&sport->port.lock, flags); return 0; } static void imx_shutdown(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; unsigned long temp; unsigned long flags; if (sport->dma_is_enabled) { int ret; /* We have to wait for the DMA to finish. */ ret = wait_event_interruptible(sport->dma_wait, !sport->dma_is_rxing && !sport->dma_is_txing); if (ret != 0) { sport->dma_is_rxing = 0; sport->dma_is_txing = 0; dmaengine_terminate_all(sport->dma_chan_tx); dmaengine_terminate_all(sport->dma_chan_rx); } spin_lock_irqsave(&sport->port.lock, flags); imx_stop_tx(port); imx_stop_rx(port); imx_disable_dma(sport); spin_unlock_irqrestore(&sport->port.lock, flags); imx_uart_dma_exit(sport); } spin_lock_irqsave(&sport->port.lock, flags); temp = readl(sport->port.membase + UCR2); temp &= ~(UCR2_TXEN); writel(temp, sport->port.membase + UCR2); spin_unlock_irqrestore(&sport->port.lock, flags); /* * Stop our timer. */ del_timer_sync(&sport->timer); /* * Disable all interrupts, port and break condition. */ spin_lock_irqsave(&sport->port.lock, flags); temp = readl(sport->port.membase + UCR1); temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN); writel(temp, sport->port.membase + UCR1); spin_unlock_irqrestore(&sport->port.lock, flags); clk_disable_unprepare(sport->clk_per); clk_disable_unprepare(sport->clk_ipg); } static void imx_flush_buffer(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; struct scatterlist *sgl = &sport->tx_sgl[0]; unsigned long temp; int i = 100, ubir, ubmr, uts; if (!sport->dma_chan_tx) return; sport->tx_bytes = 0; dmaengine_terminate_all(sport->dma_chan_tx); if (sport->dma_is_txing) { dma_unmap_sg(sport->port.dev, sgl, sport->dma_tx_nents, DMA_TO_DEVICE); temp = readl(sport->port.membase + UCR1); temp &= ~UCR1_TDMAEN; writel(temp, sport->port.membase + UCR1); sport->dma_is_txing = false; } /* * According to the Reference Manual description of the UART SRST bit: * "Reset the transmit and receive state machines, * all FIFOs and register USR1, USR2, UBIR, UBMR, UBRC, URXD, UTXD * and UTS[6-3]". 
As we don't need to restore the old values from * USR1, USR2, URXD, UTXD, only save/restore the other four registers */ ubir = readl(sport->port.membase + UBIR); ubmr = readl(sport->port.membase + UBMR); uts = readl(sport->port.membase + IMX21_UTS); temp = readl(sport->port.membase + UCR2); temp &= ~UCR2_SRST; writel(temp, sport->port.membase + UCR2); while (!(readl(sport->port.membase + UCR2) & UCR2_SRST) && (--i > 0)) udelay(1); /* Restore the registers */ writel(ubir, sport->port.membase + UBIR); writel(ubmr, sport->port.membase + UBMR); writel(uts, sport->port.membase + IMX21_UTS); } static void imx_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { struct imx_port *sport = (struct imx_port *)port; unsigned long flags; unsigned int ucr2, old_ucr1, old_txrxen, baud, quot; unsigned int old_csize = old ? old->c_cflag & CSIZE : CS8; unsigned int div, ufcr; unsigned long num, denom; uint64_t tdiv64; /* * We only support CS7 and CS8. */ while ((termios->c_cflag & CSIZE) != CS7 && (termios->c_cflag & CSIZE) != CS8) { termios->c_cflag &= ~CSIZE; termios->c_cflag |= old_csize; old_csize = CS8; } if ((termios->c_cflag & CSIZE) == CS8) ucr2 = UCR2_WS | UCR2_SRST | UCR2_IRTS; else ucr2 = UCR2_SRST | UCR2_IRTS; if (termios->c_cflag & CRTSCTS) { if (sport->have_rtscts) { ucr2 &= ~UCR2_IRTS; if (port->rs485.flags & SER_RS485_ENABLED) { /* * RTS is mandatory for rs485 operation, so keep * it under manual control and keep transmitter * disabled. */ if (!(port->rs485.flags & SER_RS485_RTS_AFTER_SEND)) ucr2 |= UCR2_CTS; } else { ucr2 |= UCR2_CTSC; } } else { termios->c_cflag &= ~CRTSCTS; } } else if (port->rs485.flags & SER_RS485_ENABLED) /* disable transmitter */ if (!(port->rs485.flags & SER_RS485_RTS_AFTER_SEND)) ucr2 |= UCR2_CTS; if (termios->c_cflag & CSTOPB) ucr2 |= UCR2_STPB; if (termios->c_cflag & PARENB) { ucr2 |= UCR2_PREN; if (termios->c_cflag & PARODD) ucr2 |= UCR2_PROE; } del_timer_sync(&sport->timer); /* * Ask the core to calculate the divisor for us. */ baud = uart_get_baud_rate(port, termios, old, 50, port->uartclk / 16); quot = uart_get_divisor(port, baud); spin_lock_irqsave(&sport->port.lock, flags); sport->port.read_status_mask = 0; if (termios->c_iflag & INPCK) sport->port.read_status_mask |= (URXD_FRMERR | URXD_PRERR); if (termios->c_iflag & (BRKINT | PARMRK)) sport->port.read_status_mask |= URXD_BRK; /* * Characters to ignore */ sport->port.ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) sport->port.ignore_status_mask |= URXD_PRERR | URXD_FRMERR; if (termios->c_iflag & IGNBRK) { sport->port.ignore_status_mask |= URXD_BRK; /* * If we're ignoring parity and break indicators, * ignore overruns too (for real raw support). */ if (termios->c_iflag & IGNPAR) sport->port.ignore_status_mask |= URXD_OVRRUN; } if ((termios->c_cflag & CREAD) == 0) sport->port.ignore_status_mask |= URXD_DUMMY_READ; /* * Update the per-port timeout. 
*/ uart_update_timeout(port, termios->c_cflag, baud); /* * disable interrupts and drain transmitter */ old_ucr1 = readl(sport->port.membase + UCR1); writel(old_ucr1 & ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN), sport->port.membase + UCR1); while (!(readl(sport->port.membase + USR2) & USR2_TXDC)) barrier(); /* then, disable everything */ old_txrxen = readl(sport->port.membase + UCR2); writel(old_txrxen & ~(UCR2_TXEN | UCR2_RXEN), sport->port.membase + UCR2); old_txrxen &= (UCR2_TXEN | UCR2_RXEN); /* custom-baudrate handling */ div = sport->port.uartclk / (baud * 16); if (baud == 38400 && quot != div) baud = sport->port.uartclk / (quot * 16); div = sport->port.uartclk / (baud * 16); if (div > 7) div = 7; if (!div) div = 1; rational_best_approximation(16 * div * baud, sport->port.uartclk, 1 << 16, 1 << 16, &num, &denom); tdiv64 = sport->port.uartclk; tdiv64 *= num; do_div(tdiv64, denom * 16 * div); tty_termios_encode_baud_rate(termios, (speed_t)tdiv64, (speed_t)tdiv64); num -= 1; denom -= 1; ufcr = readl(sport->port.membase + UFCR); ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div); if (sport->dte_mode) ufcr |= UFCR_DCEDTE; writel(ufcr, sport->port.membase + UFCR); writel(num, sport->port.membase + UBIR); writel(denom, sport->port.membase + UBMR); if (!is_imx1_uart(sport)) writel(sport->port.uartclk / div / 1000, sport->port.membase + IMX21_ONEMS); writel(old_ucr1, sport->port.membase + UCR1); /* set the parity, stop bits and data size */ writel(ucr2 | old_txrxen, sport->port.membase + UCR2); if (UART_ENABLE_MS(&sport->port, termios->c_cflag)) imx_enable_ms(&sport->port); spin_unlock_irqrestore(&sport->port.lock, flags); } static const char *imx_type(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; return sport->port.type == PORT_IMX ? "IMX" : NULL; } /* * Configure/autoconfigure the port. */ static void imx_config_port(struct uart_port *port, int flags) { struct imx_port *sport = (struct imx_port *)port; if (flags & UART_CONFIG_TYPE) sport->port.type = PORT_IMX; } /* * Verify the new serial_struct (for TIOCSSERIAL). 
* The only changes we allow are to the flags and type, and * even then only between PORT_IMX and PORT_UNKNOWN. */ static int imx_verify_port(struct uart_port *port, struct serial_struct *ser) { struct imx_port *sport = (struct imx_port *)port; int ret = 0; if (ser->type != PORT_UNKNOWN && ser->type != PORT_IMX) ret = -EINVAL; if (sport->port.irq != ser->irq) ret = -EINVAL; if (ser->io_type != UPIO_MEM) ret = -EINVAL; if (sport->port.uartclk / 16 != ser->baud_base) ret = -EINVAL; if (sport->port.mapbase != (unsigned long)ser->iomem_base) ret = -EINVAL; if (sport->port.iobase != ser->port) ret = -EINVAL; if (ser->hub6 != 0) ret = -EINVAL; return ret; } #if defined(CONFIG_CONSOLE_POLL) static int imx_poll_init(struct uart_port *port) { struct imx_port *sport = (struct imx_port *)port; unsigned long flags; unsigned long temp; int retval; retval = clk_prepare_enable(sport->clk_ipg); if (retval) return retval; retval = clk_prepare_enable(sport->clk_per); if (retval) clk_disable_unprepare(sport->clk_ipg); imx_setup_ufcr(sport, 0); spin_lock_irqsave(&sport->port.lock, flags); temp = readl(sport->port.membase + UCR1); if (is_imx1_uart(sport)) temp |= IMX1_UCR1_UARTCLKEN; temp |= UCR1_UARTEN | UCR1_RRDYEN; temp &= ~(UCR1_TXMPTYEN | UCR1_RTSDEN); writel(temp, sport->port.membase + UCR1); temp = readl(sport->port.membase + UCR2); temp |= UCR2_RXEN; writel(temp, sport->port.membase + UCR2); spin_unlock_irqrestore(&sport->port.lock, flags); return 0; } static int imx_poll_get_char(struct uart_port *port) { if (!(readl_relaxed(port->membase + USR2) & USR2_RDR)) return NO_POLL_CHAR; return readl_relaxed(port->membase + URXD0) & URXD_RX_DATA; } static void imx_poll_put_char(struct uart_port *port, unsigned char c) { unsigned int status; /* drain */ do { status = readl_relaxed(port->membase + USR1); } while (~status & USR1_TRDY); /* write */ writel_relaxed(c, port->membase + URTX0); /* flush */ do { status = readl_relaxed(port->membase + USR2); } while (~status & USR2_TXDC); } #endif static int imx_rs485_config(struct uart_port *port, struct serial_rs485 *rs485conf) { struct imx_port *sport = (struct imx_port *)port; /* unimplemented */ rs485conf->delay_rts_before_send = 0; rs485conf->delay_rts_after_send = 0; rs485conf->flags |= SER_RS485_RX_DURING_TX; /* RTS is required to control the transmitter */ if (!sport->have_rtscts) rs485conf->flags &= ~SER_RS485_ENABLED; if (rs485conf->flags & SER_RS485_ENABLED) { unsigned long temp; /* disable transmitter */ temp = readl(sport->port.membase + UCR2); temp &= ~UCR2_CTSC; if (rs485conf->flags & SER_RS485_RTS_AFTER_SEND) temp &= ~UCR2_CTS; else temp |= UCR2_CTS; writel(temp, sport->port.membase + UCR2); } port->rs485 = *rs485conf; return 0; } static struct uart_ops imx_pops = { .tx_empty = imx_tx_empty, .set_mctrl = imx_set_mctrl, .get_mctrl = imx_get_mctrl, .stop_tx = imx_stop_tx, .start_tx = imx_start_tx, .stop_rx = imx_stop_rx, .enable_ms = imx_enable_ms, .break_ctl = imx_break_ctl, .startup = imx_startup, .shutdown = imx_shutdown, .flush_buffer = imx_flush_buffer, .set_termios = imx_set_termios, .type = imx_type, .config_port = imx_config_port, .verify_port = imx_verify_port, #if defined(CONFIG_CONSOLE_POLL) .poll_init = imx_poll_init, .poll_get_char = imx_poll_get_char, .poll_put_char = imx_poll_put_char, #endif }; static struct imx_port *imx_ports[UART_NR]; #ifdef CONFIG_SERIAL_IMX_CONSOLE static void imx_console_putchar(struct uart_port *port, int ch) { struct imx_port *sport = (struct imx_port *)port; while (readl(sport->port.membase + uts_reg(sport)) &
UTS_TXFULL) barrier(); writel(ch, sport->port.membase + URTX0); } /* * Interrupts are disabled on entering */ static void imx_console_write(struct console *co, const char *s, unsigned int count) { struct imx_port *sport = imx_ports[co->index]; struct imx_port_ucrs old_ucr; unsigned int ucr1; unsigned long flags = 0; int locked = 1; int retval; retval = clk_enable(sport->clk_per); if (retval) return; retval = clk_enable(sport->clk_ipg); if (retval) { clk_disable(sport->clk_per); return; } if (sport->port.sysrq) locked = 0; else if (oops_in_progress) locked = spin_trylock_irqsave(&sport->port.lock, flags); else spin_lock_irqsave(&sport->port.lock, flags); /* * First, save UCR1/2/3 and then disable interrupts */ imx_port_ucrs_save(&sport->port, &old_ucr); ucr1 = old_ucr.ucr1; if (is_imx1_uart(sport)) ucr1 |= IMX1_UCR1_UARTCLKEN; ucr1 |= UCR1_UARTEN; ucr1 &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN); writel(ucr1, sport->port.membase + UCR1); writel(old_ucr.ucr2 | UCR2_TXEN, sport->port.membase + UCR2); uart_console_write(&sport->port, s, count, imx_console_putchar); /* * Finally, wait for transmitter to become empty * and restore UCR1/2/3 */ while (!(readl(sport->port.membase + USR2) & USR2_TXDC)); imx_port_ucrs_restore(&sport->port, &old_ucr); if (locked) spin_unlock_irqrestore(&sport->port.lock, flags); clk_disable(sport->clk_ipg); clk_disable(sport->clk_per); } /* * If the port was already initialised (eg, by a boot loader), * try to determine the current setup. */ static void __init imx_console_get_options(struct imx_port *sport, int *baud, int *parity, int *bits) { if (readl(sport->port.membase + UCR1) & UCR1_UARTEN) { /* ok, the port was enabled */ unsigned int ucr2, ubir, ubmr, uartclk; unsigned int baud_raw; unsigned int ucfr_rfdiv; ucr2 = readl(sport->port.membase + UCR2); *parity = 'n'; if (ucr2 & UCR2_PREN) { if (ucr2 & UCR2_PROE) *parity = 'o'; else *parity = 'e'; } if (ucr2 & UCR2_WS) *bits = 8; else *bits = 7; ubir = readl(sport->port.membase + UBIR) & 0xffff; ubmr = readl(sport->port.membase + UBMR) & 0xffff; ucfr_rfdiv = (readl(sport->port.membase + UFCR) & UFCR_RFDIV) >> 7; if (ucfr_rfdiv == 6) ucfr_rfdiv = 7; else ucfr_rfdiv = 6 - ucfr_rfdiv; uartclk = clk_get_rate(sport->clk_per); uartclk /= ucfr_rfdiv; { /* * The next code provides exact computation of * baud_raw = round(((uartclk/16) * (ubir + 1)) / (ubmr + 1)) * without need of float support or long long division, * which would be required to prevent 32bit arithmetic overflow */ unsigned int mul = ubir + 1; unsigned int div = 16 * (ubmr + 1); unsigned int rem = uartclk % div; baud_raw = (uartclk / div) * mul; baud_raw += (rem * mul + div / 2) / div; *baud = (baud_raw + 50) / 100 * 100; } if (*baud != baud_raw) pr_info("Console IMX rounded baud rate from %d to %d\n", baud_raw, *baud); } } static int __init imx_console_setup(struct console *co, char *options) { struct imx_port *sport; int baud = 9600; int bits = 8; int parity = 'n'; int flow = 'n'; int retval; /* * Check whether an invalid uart number has been specified, and * if so, search for the first available port that does have * console support. */ if (co->index == -1 || co->index >= ARRAY_SIZE(imx_ports)) co->index = 0; sport = imx_ports[co->index]; if (sport == NULL) return -ENODEV; /* For setting the registers, we only need to enable the ipg clock. 
*/ retval = clk_prepare_enable(sport->clk_ipg); if (retval) goto error_console; if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); else imx_console_get_options(sport, &baud, &parity, &bits); imx_setup_ufcr(sport, 0); retval = uart_set_options(&sport->port, co, baud, parity, bits, flow); clk_disable(sport->clk_ipg); if (retval) { clk_unprepare(sport->clk_ipg); goto error_console; } retval = clk_prepare(sport->clk_per); if (retval) clk_disable_unprepare(sport->clk_ipg); error_console: return retval; } static struct uart_driver imx_reg; static struct console imx_console = { .name = DEV_NAME, .write = imx_console_write, .device = uart_console_device, .setup = imx_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &imx_reg, }; #define IMX_CONSOLE &imx_console #else #define IMX_CONSOLE NULL #endif static struct uart_driver imx_reg = { .owner = THIS_MODULE, .driver_name = DRIVER_NAME, .dev_name = DEV_NAME, .major = SERIAL_IMX_MAJOR, .minor = MINOR_START, .nr = ARRAY_SIZE(imx_ports), .cons = IMX_CONSOLE, }; static int serial_imx_suspend(struct platform_device *dev, pm_message_t state) { struct imx_port *sport = platform_get_drvdata(dev); unsigned int val; /* enable wakeup from i.MX UART */ val = readl(sport->port.membase + UCR3); val |= UCR3_AWAKEN; writel(val, sport->port.membase + UCR3); uart_suspend_port(&imx_reg, &sport->port); return 0; } static int serial_imx_resume(struct platform_device *dev) { struct imx_port *sport = platform_get_drvdata(dev); unsigned int val; /* disable wakeup from i.MX UART */ val = readl(sport->port.membase + UCR3); val &= ~UCR3_AWAKEN; writel(val, sport->port.membase + UCR3); uart_resume_port(&imx_reg, &sport->port); return 0; } #ifdef CONFIG_OF /* * This function returns 1 if pdev isn't a device instantiated by dt, 0 if it * could successfully get all information from dt, or a negative errno.
*/ static int serial_imx_probe_dt(struct imx_port *sport, struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; const struct of_device_id *of_id = of_match_device(imx_uart_dt_ids, &pdev->dev); int ret; if (!np) /* no device tree device */ return 1; ret = of_alias_get_id(np, "serial"); if (ret < 0) { dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret); return ret; } sport->port.line = ret; if (of_get_property(np, "fsl,uart-has-rtscts", NULL)) sport->have_rtscts = 1; if (of_get_property(np, "fsl,dte-mode", NULL)) sport->dte_mode = 1; sport->devdata = of_id->data; return 0; } #else static inline int serial_imx_probe_dt(struct imx_port *sport, struct platform_device *pdev) { return 1; } #endif static void serial_imx_probe_pdata(struct imx_port *sport, struct platform_device *pdev) { struct imxuart_platform_data *pdata = dev_get_platdata(&pdev->dev); sport->port.line = pdev->id; sport->devdata = (struct imx_uart_data *) pdev->id_entry->driver_data; if (!pdata) return; if (pdata->flags & IMXUART_HAVE_RTSCTS) sport->have_rtscts = 1; } static int serial_imx_probe(struct platform_device *pdev) { struct imx_port *sport; void __iomem *base; int ret = 0; struct resource *res; int txirq, rxirq, rtsirq; sport = devm_kzalloc(&pdev->dev, sizeof(*sport), GFP_KERNEL); if (!sport) return -ENOMEM; ret = serial_imx_probe_dt(sport, pdev); if (ret > 0) serial_imx_probe_pdata(sport, pdev); else if (ret < 0) return ret; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) return PTR_ERR(base); rxirq = platform_get_irq(pdev, 0); txirq = platform_get_irq(pdev, 1); rtsirq = platform_get_irq(pdev, 2); sport->port.dev = &pdev->dev; sport->port.mapbase = res->start; sport->port.membase = base; sport->port.type = PORT_IMX; sport->port.iotype = UPIO_MEM; sport->port.irq = rxirq; sport->port.fifosize = 32; sport->port.ops = &imx_pops; sport->port.rs485_config = imx_rs485_config; sport->port.rs485.flags = SER_RS485_RTS_ON_SEND | SER_RS485_RX_DURING_TX; sport->port.flags = UPF_BOOT_AUTOCONF; init_timer(&sport->timer); sport->timer.function = imx_timeout; sport->timer.data = (unsigned long)sport; sport->clk_ipg = devm_clk_get(&pdev->dev, "ipg"); if (IS_ERR(sport->clk_ipg)) { ret = PTR_ERR(sport->clk_ipg); dev_err(&pdev->dev, "failed to get ipg clk: %d\n", ret); return ret; } sport->clk_per = devm_clk_get(&pdev->dev, "per"); if (IS_ERR(sport->clk_per)) { ret = PTR_ERR(sport->clk_per); dev_err(&pdev->dev, "failed to get per clk: %d\n", ret); return ret; } sport->port.uartclk = clk_get_rate(sport->clk_per); /* * Allocate the IRQ(s). i.MX1 has three interrupts, whereas later * chips only have one interrupt.
*/ if (txirq > 0) { ret = devm_request_irq(&pdev->dev, rxirq, imx_rxint, 0, dev_name(&pdev->dev), sport); if (ret) return ret; ret = devm_request_irq(&pdev->dev, txirq, imx_txint, 0, dev_name(&pdev->dev), sport); if (ret) return ret; } else { ret = devm_request_irq(&pdev->dev, rxirq, imx_int, 0, dev_name(&pdev->dev), sport); if (ret) return ret; } imx_ports[sport->port.line] = sport; platform_set_drvdata(pdev, sport); return uart_add_one_port(&imx_reg, &sport->port); } static int serial_imx_remove(struct platform_device *pdev) { struct imx_port *sport = platform_get_drvdata(pdev); return uart_remove_one_port(&imx_reg, &sport->port); } static struct platform_driver serial_imx_driver = { .probe = serial_imx_probe, .remove = serial_imx_remove, .suspend = serial_imx_suspend, .resume = serial_imx_resume, .id_table = imx_uart_devtype, .driver = { .name = "imx-uart", .of_match_table = imx_uart_dt_ids, }, }; static int __init imx_serial_init(void) { int ret = uart_register_driver(&imx_reg); if (ret) return ret; ret = platform_driver_register(&serial_imx_driver); if (ret != 0) uart_unregister_driver(&imx_reg); return ret; } static void __exit imx_serial_exit(void) { platform_driver_unregister(&serial_imx_driver); uart_unregister_driver(&imx_reg); } module_init(imx_serial_init); module_exit(imx_serial_exit); MODULE_AUTHOR("Sascha Hauer"); MODULE_DESCRIPTION("IMX generic serial port driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:imx-uart");
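The baud-rate setup in imx_set_termios() boils down to picking a reference divider div in 1..7 and reducing the ratio (16 * div * baud) : uartclk to a fraction whose numerator and denominator both fit in 16 bits; those become UBIR + 1 and UBMR + 1. A user-space sketch of that computation follows; best_ratio() is a simplified continued-fraction stand-in for the kernel's rational_best_approximation() (it stops at the last whole convergent that fits and omits the kernel's semiconvergent refinement), and the 80 MHz clock is just an example value.

#include <stdint.h>
#include <stdio.h>

/* Walk the continued-fraction convergents of num/den and stop before
 * the numerator or denominator would exceed the given bounds. */
static void best_ratio(unsigned long num, unsigned long den,
		       unsigned long max_num, unsigned long max_den,
		       unsigned long *best_num, unsigned long *best_den)
{
	unsigned long n = num, d = den;
	unsigned long n0 = 0, n1 = 1, d0 = 1, d1 = 0;

	for (;;) {
		unsigned long a, t;

		if (n1 > max_num || d1 > max_den) {
			n1 = n0;	/* last convergent that still fit */
			d1 = d0;
			break;
		}
		if (d == 0)		/* ratio represented exactly */
			break;
		a = n / d;
		t = n % d;
		n = d;
		d = t;
		t = n0 + a * n1;
		n0 = n1;
		n1 = t;
		t = d0 + a * d1;
		d0 = d1;
		d1 = t;
	}
	*best_num = n1;
	*best_den = d1;
}

int main(void)
{
	unsigned long uartclk = 80000000, baud = 115200;	/* example values */
	unsigned long div = uartclk / (baud * 16);
	unsigned long num, denom;
	uint64_t actual;

	if (div > 7)
		div = 7;
	if (!div)
		div = 1;
	best_ratio(16 * div * baud, uartclk, 1 << 16, 1 << 16, &num, &denom);
	actual = (uint64_t)uartclk * num / ((uint64_t)denom * 16 * div);
	printf("div=%lu UBIR=%lu UBMR=%lu actual=%llu\n",
	       div, num - 1, denom - 1, (unsigned long long)actual);
	return 0;
}

For these inputs div clamps to 7 and the ratio reduces exactly to 504/3125, so UBIR = 503, UBMR = 3124 and the achieved rate is exactly 115200 baud.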
gpl-2.0
dutchanddutch/ti81xx-linux
fs/udf/namei.c
55
35785
/* * namei.c * * PURPOSE * Inode name handling routines for the OSTA-UDF(tm) filesystem. * * COPYRIGHT * This file is distributed under the terms of the GNU General Public * License (GPL). Copies of the GPL can be obtained from: * ftp://prep.ai.mit.edu/pub/gnu/GPL * Each contributing author retains all rights to their own work. * * (C) 1998-2004 Ben Fennema * (C) 1999-2000 Stelias Computing Inc * * HISTORY * * 12/12/98 blf Created. Split out the lookup code from dir.c * 04/19/99 blf link, mknod, symlink support */ #include "udfdecl.h" #include "udf_i.h" #include "udf_sb.h" #include <linux/string.h> #include <linux/errno.h> #include <linux/mm.h> #include <linux/slab.h> #include <linux/smp_lock.h> #include <linux/buffer_head.h> #include <linux/sched.h> #include <linux/crc-itu-t.h> #include <linux/exportfs.h> static inline int udf_match(int len1, const unsigned char *name1, int len2, const unsigned char *name2) { if (len1 != len2) return 0; return !memcmp(name1, name2, len1); } int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi, struct fileIdentDesc *sfi, struct udf_fileident_bh *fibh, uint8_t *impuse, uint8_t *fileident) { uint16_t crclen = fibh->eoffset - fibh->soffset - sizeof(struct tag); uint16_t crc; int offset; uint16_t liu = le16_to_cpu(cfi->lengthOfImpUse); uint8_t lfi = cfi->lengthFileIdent; int padlen = fibh->eoffset - fibh->soffset - liu - lfi - sizeof(struct fileIdentDesc); int adinicb = 0; if (UDF_I(inode)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) adinicb = 1; offset = fibh->soffset + sizeof(struct fileIdentDesc); if (impuse) { if (adinicb || (offset + liu < 0)) { memcpy((uint8_t *)sfi->impUse, impuse, liu); } else if (offset >= 0) { memcpy(fibh->ebh->b_data + offset, impuse, liu); } else { memcpy((uint8_t *)sfi->impUse, impuse, -offset); memcpy(fibh->ebh->b_data, impuse - offset, liu + offset); } } offset += liu; if (fileident) { if (adinicb || (offset + lfi < 0)) { memcpy((uint8_t *)sfi->fileIdent + liu, fileident, lfi); } else if (offset >= 0) { memcpy(fibh->ebh->b_data + offset, fileident, lfi); } else { memcpy((uint8_t *)sfi->fileIdent + liu, fileident, -offset); memcpy(fibh->ebh->b_data, fileident - offset, lfi + offset); } } offset += lfi; if (adinicb || (offset + padlen < 0)) { memset((uint8_t *)sfi->padding + liu + lfi, 0x00, padlen); } else if (offset >= 0) { memset(fibh->ebh->b_data + offset, 0x00, padlen); } else { memset((uint8_t *)sfi->padding + liu + lfi, 0x00, -offset); memset(fibh->ebh->b_data, 0x00, padlen + offset); } crc = crc_itu_t(0, (uint8_t *)cfi + sizeof(struct tag), sizeof(struct fileIdentDesc) - sizeof(struct tag)); if (fibh->sbh == fibh->ebh) { crc = crc_itu_t(crc, (uint8_t *)sfi->impUse, crclen + sizeof(struct tag) - sizeof(struct fileIdentDesc)); } else if (sizeof(struct fileIdentDesc) >= -fibh->soffset) { crc = crc_itu_t(crc, fibh->ebh->b_data + sizeof(struct fileIdentDesc) + fibh->soffset, crclen + sizeof(struct tag) - sizeof(struct fileIdentDesc)); } else { crc = crc_itu_t(crc, (uint8_t *)sfi->impUse, -fibh->soffset - sizeof(struct fileIdentDesc)); crc = crc_itu_t(crc, fibh->ebh->b_data, fibh->eoffset); } cfi->descTag.descCRC = cpu_to_le16(crc); cfi->descTag.descCRCLength = cpu_to_le16(crclen); cfi->descTag.tagChecksum = udf_tag_checksum(&cfi->descTag); if (adinicb || (sizeof(struct fileIdentDesc) <= -fibh->soffset)) { memcpy((uint8_t *)sfi, (uint8_t *)cfi, sizeof(struct fileIdentDesc)); } else { memcpy((uint8_t *)sfi, (uint8_t *)cfi, -fibh->soffset); memcpy(fibh->ebh->b_data, (uint8_t *)cfi - fibh->soffset, sizeof(struct 
fileIdentDesc) + fibh->soffset); } if (adinicb) { mark_inode_dirty(inode); } else { if (fibh->sbh != fibh->ebh) mark_buffer_dirty_inode(fibh->ebh, inode); mark_buffer_dirty_inode(fibh->sbh, inode); } return 0; } static struct fileIdentDesc *udf_find_entry(struct inode *dir, const struct qstr *child, struct udf_fileident_bh *fibh, struct fileIdentDesc *cfi) { struct fileIdentDesc *fi = NULL; loff_t f_pos; int block, flen; unsigned char *fname = NULL; unsigned char *nameptr; uint8_t lfi; uint16_t liu; loff_t size; struct kernel_lb_addr eloc; uint32_t elen; sector_t offset; struct extent_position epos = {}; struct udf_inode_info *dinfo = UDF_I(dir); int isdotdot = child->len == 2 && child->name[0] == '.' && child->name[1] == '.'; size = udf_ext0_offset(dir) + dir->i_size; f_pos = udf_ext0_offset(dir); fibh->sbh = fibh->ebh = NULL; fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1); if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos, &eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30)) goto out_err; block = udf_get_lb_pblock(dir->i_sb, &eloc, offset); if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) epos.offset -= sizeof(struct short_ad); else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) epos.offset -= sizeof(struct long_ad); } else offset = 0; fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block); if (!fibh->sbh) goto out_err; } fname = kmalloc(UDF_NAME_LEN, GFP_NOFS); if (!fname) goto out_err; while (f_pos < size) { fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc, &elen, &offset); if (!fi) goto out_err; liu = le16_to_cpu(cfi->lengthOfImpUse); lfi = cfi->lengthFileIdent; if (fibh->sbh == fibh->ebh) { nameptr = fi->fileIdent + liu; } else { int poffset; /* Unpadded ending offset */ poffset = fibh->soffset + sizeof(struct fileIdentDesc) + liu + lfi; if (poffset >= lfi) nameptr = (uint8_t *)(fibh->ebh->b_data + poffset - lfi); else { nameptr = fname; memcpy(nameptr, fi->fileIdent + liu, lfi - poffset); memcpy(nameptr + lfi - poffset, fibh->ebh->b_data, poffset); } } if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) { if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNDELETE)) continue; } if ((cfi->fileCharacteristics & FID_FILE_CHAR_HIDDEN) != 0) { if (!UDF_QUERY_FLAG(dir->i_sb, UDF_FLAG_UNHIDE)) continue; } if ((cfi->fileCharacteristics & FID_FILE_CHAR_PARENT) && isdotdot) { brelse(epos.bh); return fi; } if (!lfi) continue; flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi); if (flen && udf_match(flen, fname, child->len, child->name)) goto out_ok; } out_err: fi = NULL; if (fibh->sbh != fibh->ebh) brelse(fibh->ebh); brelse(fibh->sbh); out_ok: brelse(epos.bh); kfree(fname); return fi; } static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { struct inode *inode = NULL; struct fileIdentDesc cfi; struct udf_fileident_bh fibh; if (dentry->d_name.len > UDF_NAME_LEN - 2) return ERR_PTR(-ENAMETOOLONG); lock_kernel(); #ifdef UDF_RECOVERY /* temporary shorthand for specifying files by inode number */ if (!strncmp(dentry->d_name.name, ".B=", 3)) { struct kernel_lb_addr lb = { .logicalBlockNum = 0, .partitionReferenceNum = simple_strtoul(dentry->d_name.name + 3, NULL, 0), }; inode = udf_iget(dir->i_sb, lb); if (!inode) { unlock_kernel(); return ERR_PTR(-EACCES); } } else #endif /* UDF_RECOVERY */ if (udf_find_entry(dir, &dentry->d_name, &fibh, &cfi)) { struct kernel_lb_addr loc; if (fibh.sbh !=
fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); loc = lelb_to_cpu(cfi.icb.extLocation); inode = udf_iget(dir->i_sb, &loc); if (!inode) { unlock_kernel(); return ERR_PTR(-EACCES); } } unlock_kernel(); return d_splice_alias(inode, dentry); } static struct fileIdentDesc *udf_add_entry(struct inode *dir, struct dentry *dentry, struct udf_fileident_bh *fibh, struct fileIdentDesc *cfi, int *err) { struct super_block *sb = dir->i_sb; struct fileIdentDesc *fi = NULL; unsigned char *name = NULL; int namelen; loff_t f_pos; loff_t size = udf_ext0_offset(dir) + dir->i_size; int nfidlen; uint8_t lfi; uint16_t liu; int block; struct kernel_lb_addr eloc; uint32_t elen = 0; sector_t offset; struct extent_position epos = {}; struct udf_inode_info *dinfo; fibh->sbh = fibh->ebh = NULL; name = kmalloc(UDF_NAME_LEN, GFP_NOFS); if (!name) { *err = -ENOMEM; goto out_err; } if (dentry) { if (!dentry->d_name.len) { *err = -EINVAL; goto out_err; } namelen = udf_put_filename(sb, dentry->d_name.name, name, dentry->d_name.len); if (!namelen) { *err = -ENAMETOOLONG; goto out_err; } } else { namelen = 0; } nfidlen = (sizeof(struct fileIdentDesc) + namelen + 3) & ~3; f_pos = udf_ext0_offset(dir); fibh->soffset = fibh->eoffset = f_pos & (dir->i_sb->s_blocksize - 1); dinfo = UDF_I(dir); if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos, &eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30)) { block = udf_get_lb_pblock(dir->i_sb, &dinfo->i_location, 0); fibh->soffset = fibh->eoffset = sb->s_blocksize; goto add; } block = udf_get_lb_pblock(dir->i_sb, &eloc, offset); if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) epos.offset -= sizeof(struct short_ad); else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) epos.offset -= sizeof(struct long_ad); } else offset = 0; fibh->sbh = fibh->ebh = udf_tread(dir->i_sb, block); if (!fibh->sbh) { *err = -EIO; goto out_err; } block = dinfo->i_location.logicalBlockNum; } while (f_pos < size) { fi = udf_fileident_read(dir, &f_pos, fibh, cfi, &epos, &eloc, &elen, &offset); if (!fi) { *err = -EIO; goto out_err; } liu = le16_to_cpu(cfi->lengthOfImpUse); lfi = cfi->lengthFileIdent; if ((cfi->fileCharacteristics & FID_FILE_CHAR_DELETED) != 0) { if (((sizeof(struct fileIdentDesc) + liu + lfi + 3) & ~3) == nfidlen) { cfi->descTag.tagSerialNum = cpu_to_le16(1); cfi->fileVersionNum = cpu_to_le16(1); cfi->fileCharacteristics = 0; cfi->lengthFileIdent = namelen; cfi->lengthOfImpUse = cpu_to_le16(0); if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name)) goto out_ok; else { *err = -EIO; goto out_err; } } } } add: f_pos += nfidlen; if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB && sb->s_blocksize - fibh->eoffset < nfidlen) { brelse(epos.bh); epos.bh = NULL; fibh->soffset -= udf_ext0_offset(dir); fibh->eoffset -= udf_ext0_offset(dir); f_pos -= udf_ext0_offset(dir); if (fibh->sbh != fibh->ebh) brelse(fibh->ebh); brelse(fibh->sbh); fibh->sbh = fibh->ebh = udf_expand_dir_adinicb(dir, &block, err); if (!fibh->sbh) goto out_err; epos.block = dinfo->i_location; epos.offset = udf_file_entry_alloc_offset(dir); /* Load extent udf_expand_dir_adinicb() has created */ udf_current_aext(dir, &epos, &eloc, &elen, 1); } /* Entry fits into current block? 
*/ if (sb->s_blocksize - fibh->eoffset >= nfidlen) { fibh->soffset = fibh->eoffset; fibh->eoffset += nfidlen; if (fibh->sbh != fibh->ebh) { brelse(fibh->sbh); fibh->sbh = fibh->ebh; } if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { block = dinfo->i_location.logicalBlockNum; fi = (struct fileIdentDesc *) (dinfo->i_ext.i_data + fibh->soffset - udf_ext0_offset(dir) + dinfo->i_lenEAttr); } else { block = eloc.logicalBlockNum + ((elen - 1) >> dir->i_sb->s_blocksize_bits); fi = (struct fileIdentDesc *) (fibh->sbh->b_data + fibh->soffset); } } else { /* Round up last extent in the file */ elen = (elen + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1); if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) epos.offset -= sizeof(struct short_ad); else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) epos.offset -= sizeof(struct long_ad); udf_write_aext(dir, &epos, &eloc, elen, 1); dinfo->i_lenExtents = (dinfo->i_lenExtents + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1); fibh->soffset = fibh->eoffset - sb->s_blocksize; fibh->eoffset += nfidlen - sb->s_blocksize; if (fibh->sbh != fibh->ebh) { brelse(fibh->sbh); fibh->sbh = fibh->ebh; } block = eloc.logicalBlockNum + ((elen - 1) >> dir->i_sb->s_blocksize_bits); fibh->ebh = udf_bread(dir, f_pos >> dir->i_sb->s_blocksize_bits, 1, err); if (!fibh->ebh) goto out_err; if (!fibh->soffset) { if (udf_next_aext(dir, &epos, &eloc, &elen, 1) == (EXT_RECORDED_ALLOCATED >> 30)) { block = eloc.logicalBlockNum + ((elen - 1) >> dir->i_sb->s_blocksize_bits); } else block++; brelse(fibh->sbh); fibh->sbh = fibh->ebh; fi = (struct fileIdentDesc *)(fibh->sbh->b_data); } else { fi = (struct fileIdentDesc *) (fibh->sbh->b_data + sb->s_blocksize + fibh->soffset); } } memset(cfi, 0, sizeof(struct fileIdentDesc)); if (UDF_SB(sb)->s_udfrev >= 0x0200) udf_new_tag((char *)cfi, TAG_IDENT_FID, 3, 1, block, sizeof(struct tag)); else udf_new_tag((char *)cfi, TAG_IDENT_FID, 2, 1, block, sizeof(struct tag)); cfi->fileVersionNum = cpu_to_le16(1); cfi->lengthFileIdent = namelen; cfi->lengthOfImpUse = cpu_to_le16(0); if (!udf_write_fi(dir, cfi, fi, fibh, NULL, name)) { dir->i_size += nfidlen; if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) dinfo->i_lenAlloc += nfidlen; else { /* Find the last extent and truncate it to proper size */ while (udf_next_aext(dir, &epos, &eloc, &elen, 1) == (EXT_RECORDED_ALLOCATED >> 30)) ; elen -= dinfo->i_lenExtents - dir->i_size; if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) epos.offset -= sizeof(struct short_ad); else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) epos.offset -= sizeof(struct long_ad); udf_write_aext(dir, &epos, &eloc, elen, 1); dinfo->i_lenExtents = dir->i_size; } mark_inode_dirty(dir); goto out_ok; } else { *err = -EIO; goto out_err; } out_err: fi = NULL; if (fibh->sbh != fibh->ebh) brelse(fibh->ebh); brelse(fibh->sbh); out_ok: brelse(epos.bh); kfree(name); return fi; } static int udf_delete_entry(struct inode *inode, struct fileIdentDesc *fi, struct udf_fileident_bh *fibh, struct fileIdentDesc *cfi) { cfi->fileCharacteristics |= FID_FILE_CHAR_DELETED; if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT)) memset(&(cfi->icb), 0x00, sizeof(struct long_ad)); return udf_write_fi(inode, cfi, fi, fibh, NULL, NULL); } static int udf_create(struct inode *dir, struct dentry *dentry, int mode, struct nameidata *nd) { struct udf_fileident_bh fibh; struct inode *inode; struct fileIdentDesc cfi, *fi; int err; struct udf_inode_info *iinfo; lock_kernel(); inode = udf_new_inode(dir, mode, &err); if (!inode) { unlock_kernel(); return err; } iinfo = 
UDF_I(inode); if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) inode->i_data.a_ops = &udf_adinicb_aops; else inode->i_data.a_ops = &udf_aops; inode->i_op = &udf_file_inode_operations; inode->i_fop = &udf_file_operations; mark_inode_dirty(inode); fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); if (!fi) { inode->i_nlink--; mark_inode_dirty(inode); iput(inode); unlock_kernel(); return err; } cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location); *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL); udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) mark_inode_dirty(dir); if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); unlock_kernel(); d_instantiate(dentry, inode); return 0; } static int udf_mknod(struct inode *dir, struct dentry *dentry, int mode, dev_t rdev) { struct inode *inode; struct udf_fileident_bh fibh; struct fileIdentDesc cfi, *fi; int err; struct udf_inode_info *iinfo; if (!old_valid_dev(rdev)) return -EINVAL; lock_kernel(); err = -EIO; inode = udf_new_inode(dir, mode, &err); if (!inode) goto out; iinfo = UDF_I(inode); init_special_inode(inode, mode, rdev); fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); if (!fi) { inode->i_nlink--; mark_inode_dirty(inode); iput(inode); unlock_kernel(); return err; } cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location); *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL); udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) mark_inode_dirty(dir); mark_inode_dirty(inode); if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); d_instantiate(dentry, inode); err = 0; out: unlock_kernel(); return err; } static int udf_mkdir(struct inode *dir, struct dentry *dentry, int mode) { struct inode *inode; struct udf_fileident_bh fibh; struct fileIdentDesc cfi, *fi; int err; struct udf_inode_info *dinfo = UDF_I(dir); struct udf_inode_info *iinfo; lock_kernel(); err = -EMLINK; if (dir->i_nlink >= (256 << sizeof(dir->i_nlink)) - 1) goto out; err = -EIO; inode = udf_new_inode(dir, S_IFDIR | mode, &err); if (!inode) goto out; iinfo = UDF_I(inode); inode->i_op = &udf_dir_inode_operations; inode->i_fop = &udf_dir_operations; fi = udf_add_entry(inode, NULL, &fibh, &cfi, &err); if (!fi) { inode->i_nlink--; mark_inode_dirty(inode); iput(inode); goto out; } inode->i_nlink = 2; cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); cfi.icb.extLocation = cpu_to_lelb(dinfo->i_location); *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = cpu_to_le32(dinfo->i_unique & 0x00000000FFFFFFFFUL); cfi.fileCharacteristics = FID_FILE_CHAR_DIRECTORY | FID_FILE_CHAR_PARENT; udf_write_fi(inode, &cfi, fi, &fibh, NULL, NULL); brelse(fibh.sbh); mark_inode_dirty(inode); fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); if (!fi) { inode->i_nlink = 0; mark_inode_dirty(inode); iput(inode); goto out; } cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location); *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = cpu_to_le32(iinfo->i_unique & 0x00000000FFFFFFFFUL); cfi.fileCharacteristics |= FID_FILE_CHAR_DIRECTORY; udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); inc_nlink(dir); mark_inode_dirty(dir); d_instantiate(dentry, inode); if (fibh.sbh != 
fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); err = 0; out: unlock_kernel(); return err; } static int empty_dir(struct inode *dir) { struct fileIdentDesc *fi, cfi; struct udf_fileident_bh fibh; loff_t f_pos; loff_t size = udf_ext0_offset(dir) + dir->i_size; int block; struct kernel_lb_addr eloc; uint32_t elen; sector_t offset; struct extent_position epos = {}; struct udf_inode_info *dinfo = UDF_I(dir); f_pos = udf_ext0_offset(dir); fibh.soffset = fibh.eoffset = f_pos & (dir->i_sb->s_blocksize - 1); if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) fibh.sbh = fibh.ebh = NULL; else if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos, &eloc, &elen, &offset) == (EXT_RECORDED_ALLOCATED >> 30)) { block = udf_get_lb_pblock(dir->i_sb, &eloc, offset); if ((++offset << dir->i_sb->s_blocksize_bits) < elen) { if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT) epos.offset -= sizeof(struct short_ad); else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG) epos.offset -= sizeof(struct long_ad); } else offset = 0; fibh.sbh = fibh.ebh = udf_tread(dir->i_sb, block); if (!fibh.sbh) { brelse(epos.bh); return 0; } } else { brelse(epos.bh); return 0; } while (f_pos < size) { fi = udf_fileident_read(dir, &f_pos, &fibh, &cfi, &epos, &eloc, &elen, &offset); if (!fi) { if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); brelse(epos.bh); return 0; } if (cfi.lengthFileIdent && (cfi.fileCharacteristics & FID_FILE_CHAR_DELETED) == 0) { if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); brelse(epos.bh); return 0; } } if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); brelse(epos.bh); return 1; } static int udf_rmdir(struct inode *dir, struct dentry *dentry) { int retval; struct inode *inode = dentry->d_inode; struct udf_fileident_bh fibh; struct fileIdentDesc *fi, cfi; struct kernel_lb_addr tloc; retval = -ENOENT; lock_kernel(); fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi); if (!fi) goto out; retval = -EIO; tloc = lelb_to_cpu(cfi.icb.extLocation); if (udf_get_lb_pblock(dir->i_sb, &tloc, 0) != inode->i_ino) goto end_rmdir; retval = -ENOTEMPTY; if (!empty_dir(inode)) goto end_rmdir; retval = udf_delete_entry(dir, fi, &fibh, &cfi); if (retval) goto end_rmdir; if (inode->i_nlink != 2) udf_warning(inode->i_sb, "udf_rmdir", "empty directory has nlink != 2 (%d)", inode->i_nlink); clear_nlink(inode); inode->i_size = 0; inode_dec_link_count(dir); inode->i_ctime = dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb); mark_inode_dirty(dir); end_rmdir: if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); out: unlock_kernel(); return retval; } static int udf_unlink(struct inode *dir, struct dentry *dentry) { int retval; struct inode *inode = dentry->d_inode; struct udf_fileident_bh fibh; struct fileIdentDesc *fi; struct fileIdentDesc cfi; struct kernel_lb_addr tloc; retval = -ENOENT; lock_kernel(); fi = udf_find_entry(dir, &dentry->d_name, &fibh, &cfi); if (!fi) goto out; retval = -EIO; tloc = lelb_to_cpu(cfi.icb.extLocation); if (udf_get_lb_pblock(dir->i_sb, &tloc, 0) != inode->i_ino) goto end_unlink; if (!inode->i_nlink) { udf_debug("Deleting nonexistent file (%lu), %d\n", inode->i_ino, inode->i_nlink); inode->i_nlink = 1; } retval = udf_delete_entry(dir, fi, &fibh, &cfi); if (retval) goto end_unlink; dir->i_ctime = dir->i_mtime = current_fs_time(dir->i_sb); mark_inode_dirty(dir); inode_dec_link_count(inode); inode->i_ctime = dir->i_ctime; retval = 0; end_unlink: if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); out: unlock_kernel(); return retval; } static 
int udf_symlink(struct inode *dir, struct dentry *dentry, const char *symname) { struct inode *inode; struct pathComponent *pc; const char *compstart; struct udf_fileident_bh fibh; struct extent_position epos = {}; int eoffset, elen = 0; struct fileIdentDesc *fi; struct fileIdentDesc cfi; uint8_t *ea; int err; int block; unsigned char *name = NULL; int namelen; struct buffer_head *bh; struct udf_inode_info *iinfo; lock_kernel(); inode = udf_new_inode(dir, S_IFLNK | S_IRWXUGO, &err); if (!inode) goto out; name = kmalloc(UDF_NAME_LEN, GFP_NOFS); if (!name) { err = -ENOMEM; goto out_no_entry; } iinfo = UDF_I(inode); inode->i_data.a_ops = &udf_symlink_aops; inode->i_op = &udf_symlink_inode_operations; if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) { struct kernel_lb_addr eloc; uint32_t bsize; block = udf_new_block(inode->i_sb, inode, iinfo->i_location.partitionReferenceNum, iinfo->i_location.logicalBlockNum, &err); if (!block) goto out_no_entry; epos.block = iinfo->i_location; epos.offset = udf_file_entry_alloc_offset(inode); epos.bh = NULL; eloc.logicalBlockNum = block; eloc.partitionReferenceNum = iinfo->i_location.partitionReferenceNum; bsize = inode->i_sb->s_blocksize; iinfo->i_lenExtents = bsize; udf_add_aext(inode, &epos, &eloc, bsize, 0); brelse(epos.bh); block = udf_get_pblock(inode->i_sb, block, iinfo->i_location.partitionReferenceNum, 0); epos.bh = udf_tgetblk(inode->i_sb, block); lock_buffer(epos.bh); memset(epos.bh->b_data, 0x00, inode->i_sb->s_blocksize); set_buffer_uptodate(epos.bh); unlock_buffer(epos.bh); mark_buffer_dirty_inode(epos.bh, inode); ea = epos.bh->b_data + udf_ext0_offset(inode); } else ea = iinfo->i_ext.i_data + iinfo->i_lenEAttr; eoffset = inode->i_sb->s_blocksize - udf_ext0_offset(inode); pc = (struct pathComponent *)ea; if (*symname == '/') { do { symname++; } while (*symname == '/'); pc->componentType = 1; pc->lengthComponentIdent = 0; pc->componentFileVersionNum = 0; elen += sizeof(struct pathComponent); } err = -ENAMETOOLONG; while (*symname) { if (elen + sizeof(struct pathComponent) > eoffset) goto out_no_entry; pc = (struct pathComponent *)(ea + elen); compstart = symname; do { symname++; } while (*symname && *symname != '/'); pc->componentType = 5; pc->lengthComponentIdent = 0; pc->componentFileVersionNum = 0; if (compstart[0] == '.') { if ((symname - compstart) == 1) pc->componentType = 4; else if ((symname - compstart) == 2 && compstart[1] == '.') pc->componentType = 3; } if (pc->componentType == 5) { namelen = udf_put_filename(inode->i_sb, compstart, name, symname - compstart); if (!namelen) goto out_no_entry; if (elen + sizeof(struct pathComponent) + namelen > eoffset) goto out_no_entry; else pc->lengthComponentIdent = namelen; memcpy(pc->componentIdent, name, namelen); } elen += sizeof(struct pathComponent) + pc->lengthComponentIdent; if (*symname) { do { symname++; } while (*symname == '/'); } } brelse(epos.bh); inode->i_size = elen; if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) iinfo->i_lenAlloc = inode->i_size; else udf_truncate_tail_extent(inode); mark_inode_dirty(inode); fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); if (!fi) goto out_no_entry; cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); cfi.icb.extLocation = cpu_to_lelb(iinfo->i_location); bh = UDF_SB(inode->i_sb)->s_lvid_bh; if (bh) { struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)bh->b_data; struct logicalVolHeaderDesc *lvhd; uint64_t uniqueID; lvhd = (struct logicalVolHeaderDesc *) lvid->logicalVolContentsUse; uniqueID = 
le64_to_cpu(lvhd->uniqueID); *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL); if (!(++uniqueID & 0x00000000FFFFFFFFUL)) uniqueID += 16; lvhd->uniqueID = cpu_to_le64(uniqueID); mark_buffer_dirty(bh); } udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) mark_inode_dirty(dir); if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); d_instantiate(dentry, inode); err = 0; out: kfree(name); unlock_kernel(); return err; out_no_entry: inode_dec_link_count(inode); iput(inode); goto out; } static int udf_link(struct dentry *old_dentry, struct inode *dir, struct dentry *dentry) { struct inode *inode = old_dentry->d_inode; struct udf_fileident_bh fibh; struct fileIdentDesc cfi, *fi; int err; struct buffer_head *bh; lock_kernel(); if (inode->i_nlink >= (256 << sizeof(inode->i_nlink)) - 1) { unlock_kernel(); return -EMLINK; } fi = udf_add_entry(dir, dentry, &fibh, &cfi, &err); if (!fi) { unlock_kernel(); return err; } cfi.icb.extLength = cpu_to_le32(inode->i_sb->s_blocksize); cfi.icb.extLocation = cpu_to_lelb(UDF_I(inode)->i_location); bh = UDF_SB(inode->i_sb)->s_lvid_bh; if (bh) { struct logicalVolIntegrityDesc *lvid = (struct logicalVolIntegrityDesc *)bh->b_data; struct logicalVolHeaderDesc *lvhd; uint64_t uniqueID; lvhd = (struct logicalVolHeaderDesc *) (lvid->logicalVolContentsUse); uniqueID = le64_to_cpu(lvhd->uniqueID); *(__le32 *)((struct allocDescImpUse *)cfi.icb.impUse)->impUse = cpu_to_le32(uniqueID & 0x00000000FFFFFFFFUL); if (!(++uniqueID & 0x00000000FFFFFFFFUL)) uniqueID += 16; lvhd->uniqueID = cpu_to_le64(uniqueID); mark_buffer_dirty(bh); } udf_write_fi(dir, &cfi, fi, &fibh, NULL, NULL); if (UDF_I(dir)->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) mark_inode_dirty(dir); if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); inc_nlink(inode); inode->i_ctime = current_fs_time(inode->i_sb); mark_inode_dirty(inode); ihold(inode); d_instantiate(dentry, inode); unlock_kernel(); return 0; } /* Anybody can rename anything with this: the permission checks are left to the * higher-level routines. */ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry, struct inode *new_dir, struct dentry *new_dentry) { struct inode *old_inode = old_dentry->d_inode; struct inode *new_inode = new_dentry->d_inode; struct udf_fileident_bh ofibh, nfibh; struct fileIdentDesc *ofi = NULL, *nfi = NULL, *dir_fi = NULL; struct fileIdentDesc ocfi, ncfi; struct buffer_head *dir_bh = NULL; int retval = -ENOENT; struct kernel_lb_addr tloc; struct udf_inode_info *old_iinfo = UDF_I(old_inode); lock_kernel(); ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi); if (ofi) { if (ofibh.sbh != ofibh.ebh) brelse(ofibh.ebh); brelse(ofibh.sbh); } tloc = lelb_to_cpu(ocfi.icb.extLocation); if (!ofi || udf_get_lb_pblock(old_dir->i_sb, &tloc, 0) != old_inode->i_ino) goto end_rename; nfi = udf_find_entry(new_dir, &new_dentry->d_name, &nfibh, &ncfi); if (nfi) { if (!new_inode) { if (nfibh.sbh != nfibh.ebh) brelse(nfibh.ebh); brelse(nfibh.sbh); nfi = NULL; } } if (S_ISDIR(old_inode->i_mode)) { int offset = udf_ext0_offset(old_inode); if (new_inode) { retval = -ENOTEMPTY; if (!empty_dir(new_inode)) goto end_rename; } retval = -EIO; if (old_iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) { dir_fi = udf_get_fileident( old_iinfo->i_ext.i_data - (old_iinfo->i_efe ? 
sizeof(struct extendedFileEntry) : sizeof(struct fileEntry)), old_inode->i_sb->s_blocksize, &offset); } else { dir_bh = udf_bread(old_inode, 0, 0, &retval); if (!dir_bh) goto end_rename; dir_fi = udf_get_fileident(dir_bh->b_data, old_inode->i_sb->s_blocksize, &offset); } if (!dir_fi) goto end_rename; tloc = lelb_to_cpu(dir_fi->icb.extLocation); if (udf_get_lb_pblock(old_inode->i_sb, &tloc, 0) != old_dir->i_ino) goto end_rename; retval = -EMLINK; if (!new_inode && new_dir->i_nlink >= (256 << sizeof(new_dir->i_nlink)) - 1) goto end_rename; } if (!nfi) { nfi = udf_add_entry(new_dir, new_dentry, &nfibh, &ncfi, &retval); if (!nfi) goto end_rename; } /* * Like most other Unix systems, set the ctime for inodes on a * rename. */ old_inode->i_ctime = current_fs_time(old_inode->i_sb); mark_inode_dirty(old_inode); /* * ok, that's it */ ncfi.fileVersionNum = ocfi.fileVersionNum; ncfi.fileCharacteristics = ocfi.fileCharacteristics; memcpy(&(ncfi.icb), &(ocfi.icb), sizeof(struct long_ad)); udf_write_fi(new_dir, &ncfi, nfi, &nfibh, NULL, NULL); /* The old fid may have moved - find it again */ ofi = udf_find_entry(old_dir, &old_dentry->d_name, &ofibh, &ocfi); udf_delete_entry(old_dir, ofi, &ofibh, &ocfi); if (new_inode) { new_inode->i_ctime = current_fs_time(new_inode->i_sb); inode_dec_link_count(new_inode); } old_dir->i_ctime = old_dir->i_mtime = current_fs_time(old_dir->i_sb); mark_inode_dirty(old_dir); if (dir_fi) { dir_fi->icb.extLocation = cpu_to_lelb(UDF_I(new_dir)->i_location); udf_update_tag((char *)dir_fi, (sizeof(struct fileIdentDesc) + le16_to_cpu(dir_fi->lengthOfImpUse) + 3) & ~3); if (old_iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB) mark_inode_dirty(old_inode); else mark_buffer_dirty_inode(dir_bh, old_inode); inode_dec_link_count(old_dir); if (new_inode) inode_dec_link_count(new_inode); else { inc_nlink(new_dir); mark_inode_dirty(new_dir); } } if (ofi) { if (ofibh.sbh != ofibh.ebh) brelse(ofibh.ebh); brelse(ofibh.sbh); } retval = 0; end_rename: brelse(dir_bh); if (nfi) { if (nfibh.sbh != nfibh.ebh) brelse(nfibh.ebh); brelse(nfibh.sbh); } unlock_kernel(); return retval; } static struct dentry *udf_get_parent(struct dentry *child) { struct kernel_lb_addr tloc; struct inode *inode = NULL; struct qstr dotdot = {.name = "..", .len = 2}; struct fileIdentDesc cfi; struct udf_fileident_bh fibh; lock_kernel(); if (!udf_find_entry(child->d_inode, &dotdot, &fibh, &cfi)) goto out_unlock; if (fibh.sbh != fibh.ebh) brelse(fibh.ebh); brelse(fibh.sbh); tloc = lelb_to_cpu(cfi.icb.extLocation); inode = udf_iget(child->d_inode->i_sb, &tloc); if (!inode) goto out_unlock; unlock_kernel(); return d_obtain_alias(inode); out_unlock: unlock_kernel(); return ERR_PTR(-EACCES); } static struct dentry *udf_nfs_get_inode(struct super_block *sb, u32 block, u16 partref, __u32 generation) { struct inode *inode; struct kernel_lb_addr loc; if (block == 0) return ERR_PTR(-ESTALE); loc.logicalBlockNum = block; loc.partitionReferenceNum = partref; inode = udf_iget(sb, &loc); if (inode == NULL) return ERR_PTR(-ENOMEM); if (generation && inode->i_generation != generation) { iput(inode); return ERR_PTR(-ESTALE); } return d_obtain_alias(inode); } static struct dentry *udf_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { if ((fh_len != 3 && fh_len != 5) || (fh_type != FILEID_UDF_WITH_PARENT && fh_type != FILEID_UDF_WITHOUT_PARENT)) return NULL; return udf_nfs_get_inode(sb, fid->udf.block, fid->udf.partref, fid->udf.generation); } static struct dentry *udf_fh_to_parent(struct super_block *sb, struct fid 
*fid, int fh_len, int fh_type) { if (fh_len != 5 || fh_type != FILEID_UDF_WITH_PARENT) return NULL; return udf_nfs_get_inode(sb, fid->udf.parent_block, fid->udf.parent_partref, fid->udf.parent_generation); } static int udf_encode_fh(struct dentry *de, __u32 *fh, int *lenp, int connectable) { int len = *lenp; struct inode *inode = de->d_inode; struct kernel_lb_addr location = UDF_I(inode)->i_location; struct fid *fid = (struct fid *)fh; int type = FILEID_UDF_WITHOUT_PARENT; if (len < 3 || (connectable && len < 5)) return 255; *lenp = 3; fid->udf.block = location.logicalBlockNum; fid->udf.partref = location.partitionReferenceNum; fid->udf.generation = inode->i_generation; if (connectable && !S_ISDIR(inode->i_mode)) { spin_lock(&de->d_lock); inode = de->d_parent->d_inode; location = UDF_I(inode)->i_location; fid->udf.parent_block = location.logicalBlockNum; fid->udf.parent_partref = location.partitionReferenceNum; fid->udf.parent_generation = inode->i_generation; spin_unlock(&de->d_lock); *lenp = 5; type = FILEID_UDF_WITH_PARENT; } return type; } const struct export_operations udf_export_ops = { .encode_fh = udf_encode_fh, .fh_to_dentry = udf_fh_to_dentry, .fh_to_parent = udf_fh_to_parent, .get_parent = udf_get_parent, }; const struct inode_operations udf_dir_inode_operations = { .lookup = udf_lookup, .create = udf_create, .link = udf_link, .unlink = udf_unlink, .symlink = udf_symlink, .mkdir = udf_mkdir, .rmdir = udf_rmdir, .mknod = udf_mknod, .rename = udf_rename, }; const struct inode_operations udf_symlink_inode_operations = { .readlink = generic_readlink, .follow_link = page_follow_link_light, .put_link = page_put_link, };
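The helpers above (udf_write_fi, udf_find_entry, udf_add_entry) all rely on one convention that is easy to misread: when a file identifier descriptor straddles two directory blocks, fibh->soffset is negative and byte offsets are taken relative to the start of the second buffer head, so a negative offset addresses the tail of the first block. Below is a minimal user-space sketch of that split-copy rule, not driver code; split_copy, blk0/blk1 and the 8-byte block size are illustrative assumptions.

#include <stdio.h>
#include <string.h>

#define BLKSIZE 8	/* illustrative stand-in for sb->s_blocksize */

/*
 * Copy 'len' bytes of 'src' to a field whose position is given relative to
 * the start of the second block; offset < 0 means the field begins 'offset'
 * bytes before the end of the first block (the UDF fibh convention).
 */
static void split_copy(char *blk0, char *blk1, int offset,
		       const char *src, int len)
{
	if (offset >= 0)
		memcpy(blk1 + offset, src, len);	   /* entirely in 2nd block */
	else if (offset + len <= 0)
		memcpy(blk0 + BLKSIZE + offset, src, len); /* entirely in 1st block */
	else {
		memcpy(blk0 + BLKSIZE + offset, src, -offset); /* tail of 1st */
		memcpy(blk1, src - offset, len + offset);      /* head of 2nd */
	}
}

int main(void)
{
	char blk0[BLKSIZE + 1] = "........", blk1[BLKSIZE + 1] = "........";

	/* A 6-byte name starting 3 bytes before the block boundary. */
	split_copy(blk0, blk1, -3, "NAME01", 6);
	printf("%s|%s\n", blk0, blk1);	/* prints .....NAM|E01..... */
	return 0;
}

This three-way case split is exactly why udf_write_fi() issues up to two memcpy/memset calls per field (impuse, fileident, padding) instead of one.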
gpl-2.0
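The kernel/sysctl.c record that follows is mostly a declarative set of ctl_table arrays: each leaf entry binds a /proc/sys file name to a kernel variable, its size, permission mode, and a proc handler, with extra1/extra2 often serving as min/max clamps. The sketch below shows the same registration pattern as a minimal out-of-tree module, assuming the 3.10-era register_sysctl_table()/.child API used in the file; the demo_* names are illustrative, not part of the file.

#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/errno.h>

static int demo_value;			/* backing variable for the knob */
static int demo_min, demo_max = 100;	/* clamp range for writes */

static struct ctl_table demo_table[] = {
	{
		.procname	= "demo_value",	/* /proc/sys/kernel/demo_value */
		.data		= &demo_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &demo_min,	/* reject writes below 0 */
		.extra2		= &demo_max,	/* reject writes above 100 */
	},
	{ }	/* ctl_table arrays are zero-terminated */
};

static struct ctl_table demo_root[] = {
	{
		.procname	= "kernel",	/* merged under /proc/sys/kernel */
		.mode		= 0555,
		.child		= demo_table,
	},
	{ }
};

static struct ctl_table_header *demo_header;

static int __init demo_sysctl_init(void)
{
	demo_header = register_sysctl_table(demo_root);
	return demo_header ? 0 : -ENOMEM;
}

static void __exit demo_sysctl_exit(void)
{
	unregister_sysctl_table(demo_header);
}

module_init(demo_sysctl_init);
module_exit(demo_sysctl_exit);
MODULE_LICENSE("GPL");

Reads and writes of /proc/sys/kernel/demo_value then go through proc_dointvec_minmax(), which is why most entries in the tables below need no custom handler of their own.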
lostemp/android-kernel-v3.10
kernel/sysctl.c
55
62058
/* * sysctl.c: General linux system control interface * * Begun 24 March 1995, Stephen Tweedie * Added /proc support, Dec 1995 * Added bdflush entry and intvec min/max checking, 2/23/96, Tom Dyas. * Added hooks for /proc/sys/net (minor, minor patch), 96/4/1, Mike Shaver. * Added kernel/java-{interpreter,appletviewer}, 96/5/10, Mike Shaver. * Dynamic registration fixes, Stephen Tweedie. * Added kswapd-interval, ctrl-alt-del, printk stuff, 1/8/97, Chris Horn. * Made sysctl support optional via CONFIG_SYSCTL, 1/10/97, Chris * Horn. * Added proc_doulongvec_ms_jiffies_minmax, 09/08/99, Carlos H. Bauer. * Added proc_doulongvec_minmax, 09/08/99, Carlos H. Bauer. * Changed linked lists to use list.h instead of lists.h, 02/24/00, Bill * Wendling. * The list_for_each() macro wasn't appropriate for the sysctl loop. * Removed it and replaced it with older style, 03/23/00, Bill Wendling */ #include <linux/module.h> #include <linux/mm.h> #include <linux/swap.h> #include <linux/slab.h> #include <linux/sysctl.h> #include <linux/bitmap.h> #include <linux/signal.h> #include <linux/printk.h> #include <linux/proc_fs.h> #include <linux/security.h> #include <linux/ctype.h> #include <linux/kmemcheck.h> #include <linux/kmemleak.h> #include <linux/fs.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/kobject.h> #include <linux/net.h> #include <linux/sysrq.h> #include <linux/highuid.h> #include <linux/writeback.h> #include <linux/ratelimit.h> #include <linux/compaction.h> #include <linux/hugetlb.h> #include <linux/initrd.h> #include <linux/key.h> #include <linux/times.h> #include <linux/limits.h> #include <linux/dcache.h> #include <linux/dnotify.h> #include <linux/syscalls.h> #include <linux/vmstat.h> #include <linux/nfs_fs.h> #include <linux/acpi.h> #include <linux/reboot.h> #include <linux/ftrace.h> #include <linux/perf_event.h> #include <linux/kprobes.h> #include <linux/pipe_fs_i.h> #include <linux/oom.h> #include <linux/kmod.h> #include <linux/capability.h> #include <linux/binfmts.h> #include <linux/sched/sysctl.h> #include <asm/uaccess.h> #include <asm/processor.h> #ifdef CONFIG_X86 #include <asm/nmi.h> #include <asm/stacktrace.h> #include <asm/io.h> #endif #ifdef CONFIG_SPARC #include <asm/setup.h> #endif #ifdef CONFIG_BSD_PROCESS_ACCT #include <linux/acct.h> #endif #ifdef CONFIG_RT_MUTEXES #include <linux/rtmutex.h> #endif #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_LOCK_STAT) #include <linux/lockdep.h> #endif #ifdef CONFIG_CHR_DEV_SG #include <scsi/sg.h> #endif #ifdef CONFIG_LOCKUP_DETECTOR #include <linux/nmi.h> #endif #if defined(CONFIG_SYSCTL) /* External variables not in a header file. 
*/ extern int sysctl_overcommit_memory; extern int sysctl_overcommit_ratio; extern int max_threads; extern int suid_dumpable; #ifdef CONFIG_COREDUMP extern int core_uses_pid; extern char core_pattern[]; extern unsigned int core_pipe_limit; #endif extern int pid_max; extern int extra_free_kbytes; extern int min_free_order_shift; extern int pid_max_min, pid_max_max; extern int percpu_pagelist_fraction; extern int compat_log; extern int latencytop_enabled; extern int sysctl_nr_open_min, sysctl_nr_open_max; #ifndef CONFIG_MMU extern int sysctl_nr_trim_pages; #endif #ifdef CONFIG_BLOCK extern int blk_iopoll_enabled; #endif /* Constants used for minimum and maximum */ #ifdef CONFIG_LOCKUP_DETECTOR static int sixty = 60; static int neg_one = -1; #endif static int zero; static int __maybe_unused one = 1; static int __maybe_unused two = 2; static int __maybe_unused three = 3; static unsigned long one_ul = 1; static int one_hundred = 100; #ifdef CONFIG_PRINTK static int ten_thousand = 10000; #endif /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */ static unsigned long dirty_bytes_min = 2 * PAGE_SIZE; /* this is needed for the proc_dointvec_minmax for [fs_]overflow UID and GID */ static int maxolduid = 65535; static int minolduid; static int min_percpu_pagelist_fract = 8; static int ngroups_max = NGROUPS_MAX; static const int cap_last_cap = CAP_LAST_CAP; #ifdef CONFIG_INOTIFY_USER #include <linux/inotify.h> #endif #ifdef CONFIG_SPARC #endif #ifdef CONFIG_SPARC64 extern int sysctl_tsb_ratio; #endif #ifdef __hppa__ extern int pwrsw_enabled; #endif #ifdef CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW extern int unaligned_enabled; #endif #ifdef CONFIG_IA64 extern int unaligned_dump_stack; #endif #ifdef CONFIG_SYSCTL_ARCH_UNALIGN_NO_WARN extern int no_unaligned_warning; #endif #ifdef CONFIG_PROC_SYSCTL static int proc_do_cad_pid(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); static int proc_taint(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #endif #ifdef CONFIG_PRINTK static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #endif static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #ifdef CONFIG_COREDUMP static int proc_dostring_coredump(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos); #endif #ifdef CONFIG_MAGIC_SYSRQ /* Note: sysrq code uses its own private copy */ static int __sysrq_enabled = SYSRQ_DEFAULT_ENABLE; static int sysrq_sysctl_handler(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int error; error = proc_dointvec(table, write, buffer, lenp, ppos); if (error) return error; if (write) sysrq_toggle_support(__sysrq_enabled); return 0; } #endif static struct ctl_table kern_table[]; static struct ctl_table vm_table[]; static struct ctl_table fs_table[]; static struct ctl_table debug_table[]; static struct ctl_table dev_table[]; extern struct ctl_table random_table[]; #ifdef CONFIG_EPOLL extern struct ctl_table epoll_table[]; #endif #ifdef HAVE_ARCH_PICK_MMAP_LAYOUT int sysctl_legacy_va_layout; #endif /* The default sysctl tables: */ static struct ctl_table sysctl_base_table[] = { { .procname = "kernel", .mode = 0555, .child = kern_table, }, { .procname = "vm", .mode = 0555, .child = vm_table, }, { .procname = "fs", .mode = 0555, .child = fs_table, }, { .procname = "debug", .mode = 0555, .child = 
debug_table, }, { .procname = "dev", .mode = 0555, .child = dev_table, }, { } }; #ifdef CONFIG_SCHED_DEBUG static int min_sched_granularity_ns = 100000; /* 100 usecs */ static int max_sched_granularity_ns = NSEC_PER_SEC; /* 1 second */ static int min_wakeup_granularity_ns; /* 0 usecs */ static int max_wakeup_granularity_ns = NSEC_PER_SEC; /* 1 second */ #ifdef CONFIG_SMP static int min_sched_tunable_scaling = SCHED_TUNABLESCALING_NONE; static int max_sched_tunable_scaling = SCHED_TUNABLESCALING_END-1; #endif /* CONFIG_SMP */ #endif /* CONFIG_SCHED_DEBUG */ #ifdef CONFIG_COMPACTION static int min_extfrag_threshold; static int max_extfrag_threshold = 1000; #endif static struct ctl_table kern_table[] = { { .procname = "sched_child_runs_first", .data = &sysctl_sched_child_runs_first, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, #ifdef CONFIG_SCHED_DEBUG { .procname = "sched_min_granularity_ns", .data = &sysctl_sched_min_granularity, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sched_proc_update_handler, .extra1 = &min_sched_granularity_ns, .extra2 = &max_sched_granularity_ns, }, { .procname = "sched_latency_ns", .data = &sysctl_sched_latency, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sched_proc_update_handler, .extra1 = &min_sched_granularity_ns, .extra2 = &max_sched_granularity_ns, }, { .procname = "sched_wakeup_granularity_ns", .data = &sysctl_sched_wakeup_granularity, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sched_proc_update_handler, .extra1 = &min_wakeup_granularity_ns, .extra2 = &max_wakeup_granularity_ns, }, #ifdef CONFIG_SMP { .procname = "sched_tunable_scaling", .data = &sysctl_sched_tunable_scaling, .maxlen = sizeof(enum sched_tunable_scaling), .mode = 0644, .proc_handler = sched_proc_update_handler, .extra1 = &min_sched_tunable_scaling, .extra2 = &max_sched_tunable_scaling, }, { .procname = "sched_migration_cost_ns", .data = &sysctl_sched_migration_cost, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "sched_nr_migrate", .data = &sysctl_sched_nr_migrate, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "sched_time_avg_ms", .data = &sysctl_sched_time_avg, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "sched_shares_window_ns", .data = &sysctl_sched_shares_window, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "timer_migration", .data = &sysctl_timer_migration, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #endif /* CONFIG_SMP */ #ifdef CONFIG_NUMA_BALANCING { .procname = "numa_balancing_scan_delay_ms", .data = &sysctl_numa_balancing_scan_delay, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "numa_balancing_scan_period_min_ms", .data = &sysctl_numa_balancing_scan_period_min, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "numa_balancing_scan_period_reset", .data = &sysctl_numa_balancing_scan_period_reset, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "numa_balancing_scan_period_max_ms", .data = &sysctl_numa_balancing_scan_period_max, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "numa_balancing_scan_size_mb", .data = &sysctl_numa_balancing_scan_size, 
.maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif /* CONFIG_NUMA_BALANCING */ #endif /* CONFIG_SCHED_DEBUG */ { .procname = "sched_rt_period_us", .data = &sysctl_sched_rt_period, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = sched_rt_handler, }, { .procname = "sched_rt_runtime_us", .data = &sysctl_sched_rt_runtime, .maxlen = sizeof(int), .mode = 0644, .proc_handler = sched_rt_handler, }, { .procname = "sched_rr_timeslice_ms", .data = &sched_rr_timeslice, .maxlen = sizeof(int), .mode = 0644, .proc_handler = sched_rr_handler, }, #ifdef CONFIG_SCHED_AUTOGROUP { .procname = "sched_autogroup_enabled", .data = &sysctl_sched_autogroup_enabled, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #endif #ifdef CONFIG_CFS_BANDWIDTH { .procname = "sched_cfs_bandwidth_slice_us", .data = &sysctl_sched_cfs_bandwidth_slice, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &one, }, #endif #ifdef CONFIG_PROVE_LOCKING { .procname = "prove_locking", .data = &prove_locking, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_LOCK_STAT { .procname = "lock_stat", .data = &lock_stat, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "panic", .data = &panic_timeout, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #ifdef CONFIG_COREDUMP { .procname = "core_uses_pid", .data = &core_uses_pid, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "core_pattern", .data = core_pattern, .maxlen = CORENAME_MAX_SIZE, .mode = 0644, .proc_handler = proc_dostring_coredump, }, { .procname = "core_pipe_limit", .data = &core_pipe_limit, .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_PROC_SYSCTL { .procname = "tainted", .maxlen = sizeof(long), .mode = 0644, .proc_handler = proc_taint, }, #endif #ifdef CONFIG_LATENCYTOP { .procname = "latencytop", .data = &latencytop_enabled, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_BLK_DEV_INITRD { .procname = "real-root-dev", .data = &real_root_dev, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "print-fatal-signals", .data = &print_fatal_signals, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #ifdef CONFIG_SPARC { .procname = "reboot-cmd", .data = reboot_command, .maxlen = 256, .mode = 0644, .proc_handler = proc_dostring, }, { .procname = "stop-a", .data = &stop_a_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "scons-poweroff", .data = &scons_pwroff, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_SPARC64 { .procname = "tsb-ratio", .data = &sysctl_tsb_ratio, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef __hppa__ { .procname = "soft-power", .data = &pwrsw_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_SYSCTL_ARCH_UNALIGN_ALLOW { .procname = "unaligned-trap", .data = &unaligned_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "ctrl-alt-del", .data = &C_A_D, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #ifdef CONFIG_FUNCTION_TRACER { .procname = "ftrace_enabled", 
.data = &ftrace_enabled, .maxlen = sizeof(int), .mode = 0644, .proc_handler = ftrace_enable_sysctl, }, #endif #ifdef CONFIG_STACK_TRACER { .procname = "stack_tracer_enabled", .data = &stack_tracer_enabled, .maxlen = sizeof(int), .mode = 0644, .proc_handler = stack_trace_sysctl, }, #endif #ifdef CONFIG_TRACING { .procname = "ftrace_dump_on_oops", .data = &ftrace_dump_on_oops, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_MODULES { .procname = "modprobe", .data = &modprobe_path, .maxlen = KMOD_PATH_LEN, .mode = 0644, .proc_handler = proc_dostring, }, { .procname = "modules_disabled", .data = &modules_disabled, .maxlen = sizeof(int), .mode = 0644, /* only handle a transition from default "0" to "1" */ .proc_handler = proc_dointvec_minmax, .extra1 = &one, .extra2 = &one, }, #endif { .procname = "hotplug", .data = &uevent_helper, .maxlen = UEVENT_HELPER_PATH_LEN, .mode = 0644, .proc_handler = proc_dostring, }, #ifdef CONFIG_CHR_DEV_SG { .procname = "sg-big-buff", .data = &sg_big_buff, .maxlen = sizeof (int), .mode = 0444, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_BSD_PROCESS_ACCT { .procname = "acct", .data = &acct_parm, .maxlen = 3*sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_MAGIC_SYSRQ { .procname = "sysrq", .data = &__sysrq_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = sysrq_sysctl_handler, }, #endif #ifdef CONFIG_PROC_SYSCTL { .procname = "cad_pid", .data = NULL, .maxlen = sizeof (int), .mode = 0600, .proc_handler = proc_do_cad_pid, }, #endif { .procname = "threads-max", .data = &max_threads, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "random", .mode = 0555, .child = random_table, }, { .procname = "usermodehelper", .mode = 0555, .child = usermodehelper_table, }, { .procname = "overflowuid", .data = &overflowuid, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &minolduid, .extra2 = &maxolduid, }, { .procname = "overflowgid", .data = &overflowgid, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &minolduid, .extra2 = &maxolduid, }, #ifdef CONFIG_S390 #ifdef CONFIG_MATHEMU { .procname = "ieee_emulation_warnings", .data = &sysctl_ieee_emulation_warnings, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "userprocess_debug", .data = &show_unhandled_signals, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "pid_max", .data = &pid_max, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &pid_max_min, .extra2 = &pid_max_max, }, { .procname = "panic_on_oops", .data = &panic_on_oops, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #if defined CONFIG_PRINTK { .procname = "printk", .data = &console_loglevel, .maxlen = 4*sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "printk_ratelimit", .data = &printk_ratelimit_state.interval, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "printk_ratelimit_burst", .data = &printk_ratelimit_state.burst, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "printk_delay", .data = &printk_delay_msec, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &ten_thousand, }, { .procname = "dmesg_restrict", .data = &dmesg_restrict, .maxlen = sizeof(int), .mode 
= 0644, .proc_handler = proc_dointvec_minmax_sysadmin, .extra1 = &zero, .extra2 = &one, }, { .procname = "kptr_restrict", .data = &kptr_restrict, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax_sysadmin, .extra1 = &zero, .extra2 = &two, }, #endif { .procname = "ngroups_max", .data = &ngroups_max, .maxlen = sizeof (int), .mode = 0444, .proc_handler = proc_dointvec, }, { .procname = "cap_last_cap", .data = (void *)&cap_last_cap, .maxlen = sizeof(int), .mode = 0444, .proc_handler = proc_dointvec, }, #if defined(CONFIG_LOCKUP_DETECTOR) { .procname = "watchdog", .data = &watchdog_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dowatchdog, .extra1 = &zero, .extra2 = &one, }, { .procname = "watchdog_thresh", .data = &watchdog_thresh, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dowatchdog, .extra1 = &neg_one, .extra2 = &sixty, }, { .procname = "softlockup_panic", .data = &softlockup_panic, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, { .procname = "nmi_watchdog", .data = &watchdog_enabled, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dowatchdog, .extra1 = &zero, .extra2 = &one, }, #endif #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_X86) { .procname = "unknown_nmi_panic", .data = &unknown_nmi_panic, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #if defined(CONFIG_X86) { .procname = "panic_on_unrecovered_nmi", .data = &panic_on_unrecovered_nmi, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "panic_on_io_nmi", .data = &panic_on_io_nmi, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #ifdef CONFIG_DEBUG_STACKOVERFLOW { .procname = "panic_on_stackoverflow", .data = &sysctl_panic_on_stackoverflow, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "bootloader_type", .data = &bootloader_type, .maxlen = sizeof (int), .mode = 0444, .proc_handler = proc_dointvec, }, { .procname = "bootloader_version", .data = &bootloader_version, .maxlen = sizeof (int), .mode = 0444, .proc_handler = proc_dointvec, }, { .procname = "kstack_depth_to_print", .data = &kstack_depth_to_print, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "io_delay_type", .data = &io_delay_type, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #if defined(CONFIG_MMU) { .procname = "randomize_va_space", .data = &randomize_va_space, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #if defined(CONFIG_S390) && defined(CONFIG_SMP) { .procname = "spin_retry", .data = &spin_retry, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #if defined(CONFIG_ACPI_SLEEP) && defined(CONFIG_X86) { .procname = "acpi_video_flags", .data = &acpi_realmode_flags, .maxlen = sizeof (unsigned long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, #endif #ifdef CONFIG_SYSCTL_ARCH_UNALIGN_NO_WARN { .procname = "ignore-unaligned-usertrap", .data = &no_unaligned_warning, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_IA64 { .procname = "unaligned-dump-stack", .data = &unaligned_dump_stack, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_DETECT_HUNG_TASK { .procname = "hung_task_panic", .data = &sysctl_hung_task_panic, .maxlen = sizeof(int), .mode = 0644, .proc_handler = 
proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, { .procname = "hung_task_check_count", .data = &sysctl_hung_task_check_count, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, { .procname = "hung_task_timeout_secs", .data = &sysctl_hung_task_timeout_secs, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = proc_dohung_task_timeout_secs, }, { .procname = "hung_task_warnings", .data = &sysctl_hung_task_warnings, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, #endif #ifdef CONFIG_COMPAT { .procname = "compat-log", .data = &compat_log, .maxlen = sizeof (int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_RT_MUTEXES { .procname = "max_lock_depth", .data = &max_lock_depth, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { .procname = "poweroff_cmd", .data = &poweroff_cmd, .maxlen = POWEROFF_CMD_PATH_LEN, .mode = 0644, .proc_handler = proc_dostring, }, #ifdef CONFIG_KEYS { .procname = "keys", .mode = 0555, .child = key_sysctls, }, #endif #ifdef CONFIG_RCU_TORTURE_TEST { .procname = "rcutorture_runnable", .data = &rcutorture_runnable, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_PERF_EVENTS /* * User-space scripts rely on the existence of this file * as a feature check for perf_events being enabled. * * So it's an ABI, do not remove! */ { .procname = "perf_event_paranoid", .data = &sysctl_perf_event_paranoid, .maxlen = sizeof(sysctl_perf_event_paranoid), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "perf_event_mlock_kb", .data = &sysctl_perf_event_mlock, .maxlen = sizeof(sysctl_perf_event_mlock), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "perf_event_max_sample_rate", .data = &sysctl_perf_event_sample_rate, .maxlen = sizeof(sysctl_perf_event_sample_rate), .mode = 0644, .proc_handler = perf_proc_update_handler, }, #endif #ifdef CONFIG_KMEMCHECK { .procname = "kmemcheck", .data = &kmemcheck_enabled, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_BLOCK { .procname = "blk_iopoll", .data = &blk_iopoll_enabled, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif { } }; static struct ctl_table vm_table[] = { { .procname = "overcommit_memory", .data = &sysctl_overcommit_memory, .maxlen = sizeof(sysctl_overcommit_memory), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &two, }, { .procname = "panic_on_oom", .data = &sysctl_panic_on_oom, .maxlen = sizeof(sysctl_panic_on_oom), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &two, }, { .procname = "oom_kill_allocating_task", .data = &sysctl_oom_kill_allocating_task, .maxlen = sizeof(sysctl_oom_kill_allocating_task), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "oom_dump_tasks", .data = &sysctl_oom_dump_tasks, .maxlen = sizeof(sysctl_oom_dump_tasks), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "overcommit_ratio", .data = &sysctl_overcommit_ratio, .maxlen = sizeof(sysctl_overcommit_ratio), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "page-cluster", .data = &page_cluster, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, { .procname = "dirty_background_ratio", .data = &dirty_background_ratio, .maxlen = sizeof(dirty_background_ratio), .mode = 0644, .proc_handler = dirty_background_ratio_handler, 
.extra1 = &zero, .extra2 = &one_hundred, }, { .procname = "dirty_background_bytes", .data = &dirty_background_bytes, .maxlen = sizeof(dirty_background_bytes), .mode = 0644, .proc_handler = dirty_background_bytes_handler, .extra1 = &one_ul, }, { .procname = "dirty_ratio", .data = &vm_dirty_ratio, .maxlen = sizeof(vm_dirty_ratio), .mode = 0644, .proc_handler = dirty_ratio_handler, .extra1 = &zero, .extra2 = &one_hundred, }, { .procname = "dirty_bytes", .data = &vm_dirty_bytes, .maxlen = sizeof(vm_dirty_bytes), .mode = 0644, .proc_handler = dirty_bytes_handler, .extra1 = &dirty_bytes_min, }, { .procname = "dirty_writeback_centisecs", .data = &dirty_writeback_interval, .maxlen = sizeof(dirty_writeback_interval), .mode = 0644, .proc_handler = dirty_writeback_centisecs_handler, }, { .procname = "dirty_expire_centisecs", .data = &dirty_expire_interval, .maxlen = sizeof(dirty_expire_interval), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, { .procname = "nr_pdflush_threads", .mode = 0444 /* read-only */, .proc_handler = pdflush_proc_obsolete, }, { .procname = "swappiness", .data = &vm_swappiness, .maxlen = sizeof(vm_swappiness), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one_hundred, }, #ifdef CONFIG_HUGETLB_PAGE { .procname = "nr_hugepages", .data = NULL, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = hugetlb_sysctl_handler, .extra1 = (void *)&hugetlb_zero, .extra2 = (void *)&hugetlb_infinity, }, #ifdef CONFIG_NUMA { .procname = "nr_hugepages_mempolicy", .data = NULL, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = &hugetlb_mempolicy_sysctl_handler, .extra1 = (void *)&hugetlb_zero, .extra2 = (void *)&hugetlb_infinity, }, #endif { .procname = "hugetlb_shm_group", .data = &sysctl_hugetlb_shm_group, .maxlen = sizeof(gid_t), .mode = 0644, .proc_handler = proc_dointvec, }, { .procname = "hugepages_treat_as_movable", .data = &hugepages_treat_as_movable, .maxlen = sizeof(int), .mode = 0644, .proc_handler = hugetlb_treat_movable_handler, }, { .procname = "nr_overcommit_hugepages", .data = NULL, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = hugetlb_overcommit_handler, .extra1 = (void *)&hugetlb_zero, .extra2 = (void *)&hugetlb_infinity, }, #endif { .procname = "lowmem_reserve_ratio", .data = &sysctl_lowmem_reserve_ratio, .maxlen = sizeof(sysctl_lowmem_reserve_ratio), .mode = 0644, .proc_handler = lowmem_reserve_ratio_sysctl_handler, }, { .procname = "drop_caches", .data = &sysctl_drop_caches, .maxlen = sizeof(int), .mode = 0644, .proc_handler = drop_caches_sysctl_handler, .extra1 = &one, .extra2 = &three, }, #ifdef CONFIG_COMPACTION { .procname = "compact_memory", .data = &sysctl_compact_memory, .maxlen = sizeof(int), .mode = 0200, .proc_handler = sysctl_compaction_handler, }, { .procname = "extfrag_threshold", .data = &sysctl_extfrag_threshold, .maxlen = sizeof(int), .mode = 0644, .proc_handler = sysctl_extfrag_handler, .extra1 = &min_extfrag_threshold, .extra2 = &max_extfrag_threshold, }, #endif /* CONFIG_COMPACTION */ { .procname = "min_free_kbytes", .data = &min_free_kbytes, .maxlen = sizeof(min_free_kbytes), .mode = 0644, .proc_handler = min_free_kbytes_sysctl_handler, .extra1 = &zero, }, { .procname = "extra_free_kbytes", .data = &extra_free_kbytes, .maxlen = sizeof(extra_free_kbytes), .mode = 0644, .proc_handler = min_free_kbytes_sysctl_handler, .extra1 = &zero, }, { .procname = "min_free_order_shift", .data = &min_free_order_shift, .maxlen = sizeof(min_free_order_shift), .mode = 0644, 
.proc_handler = &proc_dointvec }, { .procname = "percpu_pagelist_fraction", .data = &percpu_pagelist_fraction, .maxlen = sizeof(percpu_pagelist_fraction), .mode = 0644, .proc_handler = percpu_pagelist_fraction_sysctl_handler, .extra1 = &min_percpu_pagelist_fract, }, #ifdef CONFIG_MMU { .procname = "max_map_count", .data = &sysctl_max_map_count, .maxlen = sizeof(sysctl_max_map_count), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, #else { .procname = "nr_trim_pages", .data = &sysctl_nr_trim_pages, .maxlen = sizeof(sysctl_nr_trim_pages), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, }, #endif { .procname = "laptop_mode", .data = &laptop_mode, .maxlen = sizeof(laptop_mode), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "block_dump", .data = &block_dump, .maxlen = sizeof(block_dump), .mode = 0644, .proc_handler = proc_dointvec, .extra1 = &zero, }, { .procname = "vfs_cache_pressure", .data = &sysctl_vfs_cache_pressure, .maxlen = sizeof(sysctl_vfs_cache_pressure), .mode = 0644, .proc_handler = proc_dointvec, .extra1 = &zero, }, #ifdef HAVE_ARCH_PICK_MMAP_LAYOUT { .procname = "legacy_va_layout", .data = &sysctl_legacy_va_layout, .maxlen = sizeof(sysctl_legacy_va_layout), .mode = 0644, .proc_handler = proc_dointvec, .extra1 = &zero, }, #endif #ifdef CONFIG_NUMA { .procname = "zone_reclaim_mode", .data = &zone_reclaim_mode, .maxlen = sizeof(zone_reclaim_mode), .mode = 0644, .proc_handler = proc_dointvec, .extra1 = &zero, }, { .procname = "min_unmapped_ratio", .data = &sysctl_min_unmapped_ratio, .maxlen = sizeof(sysctl_min_unmapped_ratio), .mode = 0644, .proc_handler = sysctl_min_unmapped_ratio_sysctl_handler, .extra1 = &zero, .extra2 = &one_hundred, }, { .procname = "min_slab_ratio", .data = &sysctl_min_slab_ratio, .maxlen = sizeof(sysctl_min_slab_ratio), .mode = 0644, .proc_handler = sysctl_min_slab_ratio_sysctl_handler, .extra1 = &zero, .extra2 = &one_hundred, }, #endif #ifdef CONFIG_SMP { .procname = "stat_interval", .data = &sysctl_stat_interval, .maxlen = sizeof(sysctl_stat_interval), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, #endif #ifdef CONFIG_MMU { .procname = "mmap_min_addr", .data = &dac_mmap_min_addr, .maxlen = sizeof(unsigned long), .mode = 0644, .proc_handler = mmap_min_addr_handler, }, #endif #ifdef CONFIG_NUMA { .procname = "numa_zonelist_order", .data = &numa_zonelist_order, .maxlen = NUMA_ZONELIST_ORDER_LEN, .mode = 0644, .proc_handler = numa_zonelist_order_handler, }, #endif #if (defined(CONFIG_X86_32) && !defined(CONFIG_UML))|| \ (defined(CONFIG_SUPERH) && defined(CONFIG_VSYSCALL)) { .procname = "vdso_enabled", .data = &vdso_enabled, .maxlen = sizeof(vdso_enabled), .mode = 0644, .proc_handler = proc_dointvec, .extra1 = &zero, }, #endif #ifdef CONFIG_HIGHMEM { .procname = "highmem_is_dirtyable", .data = &vm_highmem_is_dirtyable, .maxlen = sizeof(vm_highmem_is_dirtyable), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #endif { .procname = "scan_unevictable_pages", .data = &scan_unevictable_pages, .maxlen = sizeof(scan_unevictable_pages), .mode = 0644, .proc_handler = scan_unevictable_handler, }, #ifdef CONFIG_MEMORY_FAILURE { .procname = "memory_failure_early_kill", .data = &sysctl_memory_failure_early_kill, .maxlen = sizeof(sysctl_memory_failure_early_kill), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, { .procname = "memory_failure_recovery", .data = &sysctl_memory_failure_recovery, .maxlen = 
sizeof(sysctl_memory_failure_recovery), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, #endif { .procname = "user_reserve_kbytes", .data = &sysctl_user_reserve_kbytes, .maxlen = sizeof(sysctl_user_reserve_kbytes), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, { .procname = "admin_reserve_kbytes", .data = &sysctl_admin_reserve_kbytes, .maxlen = sizeof(sysctl_admin_reserve_kbytes), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, { } }; #if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE) static struct ctl_table binfmt_misc_table[] = { { } }; #endif static struct ctl_table fs_table[] = { { .procname = "inode-nr", .data = &inodes_stat, .maxlen = 2*sizeof(int), .mode = 0444, .proc_handler = proc_nr_inodes, }, { .procname = "inode-state", .data = &inodes_stat, .maxlen = 7*sizeof(int), .mode = 0444, .proc_handler = proc_nr_inodes, }, { .procname = "file-nr", .data = &files_stat, .maxlen = sizeof(files_stat), .mode = 0444, .proc_handler = proc_nr_files, }, { .procname = "file-max", .data = &files_stat.max_files, .maxlen = sizeof(files_stat.max_files), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, { .procname = "nr_open", .data = &sysctl_nr_open, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &sysctl_nr_open_min, .extra2 = &sysctl_nr_open_max, }, { .procname = "dentry-state", .data = &dentry_stat, .maxlen = 6*sizeof(int), .mode = 0444, .proc_handler = proc_nr_dentry, }, { .procname = "overflowuid", .data = &fs_overflowuid, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &minolduid, .extra2 = &maxolduid, }, { .procname = "overflowgid", .data = &fs_overflowgid, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax, .extra1 = &minolduid, .extra2 = &maxolduid, }, #ifdef CONFIG_FILE_LOCKING { .procname = "leases-enable", .data = &leases_enable, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_DNOTIFY { .procname = "dir-notify-enable", .data = &dir_notify_enable, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_MMU #ifdef CONFIG_FILE_LOCKING { .procname = "lease-break-time", .data = &lease_break_time, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec, }, #endif #ifdef CONFIG_AIO { .procname = "aio-nr", .data = &aio_nr, .maxlen = sizeof(aio_nr), .mode = 0444, .proc_handler = proc_doulongvec_minmax, }, { .procname = "aio-max-nr", .data = &aio_max_nr, .maxlen = sizeof(aio_max_nr), .mode = 0644, .proc_handler = proc_doulongvec_minmax, }, #endif /* CONFIG_AIO */ #ifdef CONFIG_INOTIFY_USER { .procname = "inotify", .mode = 0555, .child = inotify_table, }, #endif #ifdef CONFIG_EPOLL { .procname = "epoll", .mode = 0555, .child = epoll_table, }, #endif #endif { .procname = "protected_symlinks", .data = &sysctl_protected_symlinks, .maxlen = sizeof(int), .mode = 0600, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, { .procname = "protected_hardlinks", .data = &sysctl_protected_hardlinks, .maxlen = sizeof(int), .mode = 0600, .proc_handler = proc_dointvec_minmax, .extra1 = &zero, .extra2 = &one, }, { .procname = "suid_dumpable", .data = &suid_dumpable, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec_minmax_coredump, .extra1 = &zero, .extra2 = &two, }, #if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE) { .procname = "binfmt_misc", .mode = 0555, .child = binfmt_misc_table, }, 
#endif { .procname = "pipe-max-size", .data = &pipe_max_size, .maxlen = sizeof(int), .mode = 0644, .proc_handler = &pipe_proc_fn, .extra1 = &pipe_min_size, }, { } }; static struct ctl_table debug_table[] = { #ifdef CONFIG_SYSCTL_EXCEPTION_TRACE { .procname = "exception-trace", .data = &show_unhandled_signals, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_dointvec }, #endif #if defined(CONFIG_OPTPROBES) { .procname = "kprobes-optimization", .data = &sysctl_kprobes_optimization, .maxlen = sizeof(int), .mode = 0644, .proc_handler = proc_kprobes_optimization_handler, .extra1 = &zero, .extra2 = &one, }, #endif { } }; static struct ctl_table dev_table[] = { { } }; int __init sysctl_init(void) { struct ctl_table_header *hdr; hdr = register_sysctl_table(sysctl_base_table); kmemleak_not_leak(hdr); return 0; } #endif /* CONFIG_SYSCTL */ /* * /proc/sys support */ #ifdef CONFIG_PROC_SYSCTL static int _proc_do_string(void* data, int maxlen, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { size_t len; char __user *p; char c; if (!data || !maxlen || !*lenp) { *lenp = 0; return 0; } if (write) { len = 0; p = buffer; while (len < *lenp) { if (get_user(c, p++)) return -EFAULT; if (c == 0 || c == '\n') break; len++; } if (len >= maxlen) len = maxlen-1; if(copy_from_user(data, buffer, len)) return -EFAULT; ((char *) data)[len] = 0; *ppos += *lenp; } else { len = strlen(data); if (len > maxlen) len = maxlen; if (*ppos > len) { *lenp = 0; return 0; } data += *ppos; len -= *ppos; if (len > *lenp) len = *lenp; if (len) if(copy_to_user(buffer, data, len)) return -EFAULT; if (len < *lenp) { if(put_user('\n', ((char __user *) buffer) + len)) return -EFAULT; len++; } *lenp = len; *ppos += len; } return 0; } /** * proc_dostring - read a string sysctl * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes a string from/to the user buffer. If the kernel * buffer provided is not large enough to hold the string, the * string is truncated. The copied string is %NULL-terminated. * If the string is being read by the user process, it is copied * and a newline '\n' is added. It is truncated if the buffer is * not large enough. * * Returns 0 on success. */ int proc_dostring(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return _proc_do_string(table->data, table->maxlen, write, buffer, lenp, ppos); } static size_t proc_skip_spaces(char **buf) { size_t ret; char *tmp = skip_spaces(*buf); ret = tmp - *buf; *buf = tmp; return ret; } static void proc_skip_char(char **buf, size_t *size, const char v) { while (*size) { if (**buf != v) break; (*size)--; (*buf)++; } } #define TMPBUFLEN 22 /** * proc_get_long - reads an ASCII formatted integer from a user buffer * * @buf: a kernel buffer * @size: size of the kernel buffer * @val: this is where the number will be stored * @neg: set to %TRUE if number is negative * @perm_tr: a vector which contains the allowed trailers * @perm_tr_len: size of the perm_tr vector * @tr: pointer to store the trailer character * * In case of success %0 is returned and @buf and @size are updated with * the amount of bytes read. If @tr is non-NULL and a trailing * character exists (size is non-zero after returning from this * function), @tr is updated with the trailing character. 
*/ static int proc_get_long(char **buf, size_t *size, unsigned long *val, bool *neg, const char *perm_tr, unsigned perm_tr_len, char *tr) { int len; char *p, tmp[TMPBUFLEN]; if (!*size) return -EINVAL; len = *size; if (len > TMPBUFLEN - 1) len = TMPBUFLEN - 1; memcpy(tmp, *buf, len); tmp[len] = 0; p = tmp; if (*p == '-' && *size > 1) { *neg = true; p++; } else *neg = false; if (!isdigit(*p)) return -EINVAL; *val = simple_strtoul(p, &p, 0); len = p - tmp; /* We don't know if the next char is whitespace thus we may accept * invalid integers (e.g. 1234...a) or two integers instead of one * (e.g. 123...1). So lets not allow such large numbers. */ if (len == TMPBUFLEN - 1) return -EINVAL; if (len < *size && perm_tr_len && !memchr(perm_tr, *p, perm_tr_len)) return -EINVAL; if (tr && (len < *size)) *tr = *p; *buf += len; *size -= len; return 0; } /** * proc_put_long - converts an integer to a decimal ASCII formatted string * * @buf: the user buffer * @size: the size of the user buffer * @val: the integer to be converted * @neg: sign of the number, %TRUE for negative * * In case of success %0 is returned and @buf and @size are updated with * the amount of bytes written. */ static int proc_put_long(void __user **buf, size_t *size, unsigned long val, bool neg) { int len; char tmp[TMPBUFLEN], *p = tmp; sprintf(p, "%s%lu", neg ? "-" : "", val); len = strlen(tmp); if (len > *size) len = *size; if (copy_to_user(*buf, tmp, len)) return -EFAULT; *size -= len; *buf += len; return 0; } #undef TMPBUFLEN static int proc_put_char(void __user **buf, size_t *size, char c) { if (*size) { char __user **buffer = (char __user **)buf; if (put_user(c, *buffer)) return -EFAULT; (*size)--, (*buffer)++; *buf = *buffer; } return 0; } static int do_proc_dointvec_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { if (write) { *valp = *negp ? 
-*lvalp : *lvalp; } else { int val = *valp; if (val < 0) { *negp = true; *lvalp = (unsigned long)-val; } else { *negp = false; *lvalp = (unsigned long)val; } } return 0; } static const char proc_wspace_sep[] = { ' ', '\t', '\n' }; static int __do_proc_dointvec(void *tbl_data, struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos, int (*conv)(bool *negp, unsigned long *lvalp, int *valp, int write, void *data), void *data) { int *i, vleft, first = 1, err = 0; unsigned long page = 0; size_t left; char *kbuf; if (!tbl_data || !table->maxlen || !*lenp || (*ppos && !write)) { *lenp = 0; return 0; } i = (int *) tbl_data; vleft = table->maxlen / sizeof(*i); left = *lenp; if (!conv) conv = do_proc_dointvec_conv; if (write) { if (left > PAGE_SIZE - 1) left = PAGE_SIZE - 1; page = __get_free_page(GFP_TEMPORARY); kbuf = (char *) page; if (!kbuf) return -ENOMEM; if (copy_from_user(kbuf, buffer, left)) { err = -EFAULT; goto free; } kbuf[left] = 0; } for (; left && vleft--; i++, first=0) { unsigned long lval; bool neg; if (write) { left -= proc_skip_spaces(&kbuf); if (!left) break; err = proc_get_long(&kbuf, &left, &lval, &neg, proc_wspace_sep, sizeof(proc_wspace_sep), NULL); if (err) break; if (conv(&neg, &lval, i, 1, data)) { err = -EINVAL; break; } } else { if (conv(&neg, &lval, i, 0, data)) { err = -EINVAL; break; } if (!first) err = proc_put_char(&buffer, &left, '\t'); if (err) break; err = proc_put_long(&buffer, &left, lval, neg); if (err) break; } } if (!write && !first && left && !err) err = proc_put_char(&buffer, &left, '\n'); if (write && !err && left) left -= proc_skip_spaces(&kbuf); free: if (write) { free_page(page); if (first) return err ? : -EINVAL; } *lenp -= left; *ppos += *lenp; return err; } static int do_proc_dointvec(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos, int (*conv)(bool *negp, unsigned long *lvalp, int *valp, int write, void *data), void *data) { return __do_proc_dointvec(table->data, table, write, buffer, lenp, ppos, conv, data); } /** * proc_dointvec - read a vector of integers * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes up to table->maxlen/sizeof(unsigned int) integer * values from/to the user buffer, treated as an ASCII string. * * Returns 0 on success. */ int proc_dointvec(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_dointvec(table,write,buffer,lenp,ppos, NULL,NULL); } /* * Taint values can only be increased * This means we can safely use a temporary. */ static int proc_taint(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct ctl_table t; unsigned long tmptaint = get_taint(); int err; if (write && !capable(CAP_SYS_ADMIN)) return -EPERM; t = *table; t.data = &tmptaint; err = proc_doulongvec_minmax(&t, write, buffer, lenp, ppos); if (err < 0) return err; if (write) { /* * Poor man's atomic or. 
Not worth adding a primitive * to everyone's atomic.h for this */ int i; for (i = 0; i < BITS_PER_LONG && tmptaint >> i; i++) { if ((tmptaint >> i) & 1) add_taint(i, LOCKDEP_STILL_OK); } } return err; } #ifdef CONFIG_PRINTK static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { if (write && !capable(CAP_SYS_ADMIN)) return -EPERM; return proc_dointvec_minmax(table, write, buffer, lenp, ppos); } #endif struct do_proc_dointvec_minmax_conv_param { int *min; int *max; }; static int do_proc_dointvec_minmax_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { struct do_proc_dointvec_minmax_conv_param *param = data; if (write) { int val = *negp ? -*lvalp : *lvalp; if ((param->min && *param->min > val) || (param->max && *param->max < val)) return -EINVAL; *valp = val; } else { int val = *valp; if (val < 0) { *negp = true; *lvalp = (unsigned long)-val; } else { *negp = false; *lvalp = (unsigned long)val; } } return 0; } /** * proc_dointvec_minmax - read a vector of integers with min/max values * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes up to table->maxlen/sizeof(unsigned int) integer * values from/to the user buffer, treated as an ASCII string. * * This routine will ensure the values are within the range specified by * table->extra1 (min) and table->extra2 (max). * * Returns 0 on success. */ int proc_dointvec_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct do_proc_dointvec_minmax_conv_param param = { .min = (int *) table->extra1, .max = (int *) table->extra2, }; return do_proc_dointvec(table, write, buffer, lenp, ppos, do_proc_dointvec_minmax_conv, &param); } static void validate_coredump_safety(void) { #ifdef CONFIG_COREDUMP if (suid_dumpable == SUID_DUMP_ROOT && core_pattern[0] != '/' && core_pattern[0] != '|') { printk(KERN_WARNING "Unsafe core_pattern used with "\ "suid_dumpable=2. 
Pipe handler or fully qualified "\ "core dump path required.\n"); } #endif } static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int error = proc_dointvec_minmax(table, write, buffer, lenp, ppos); if (!error) validate_coredump_safety(); return error; } #ifdef CONFIG_COREDUMP static int proc_dostring_coredump(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int error = proc_dostring(table, write, buffer, lenp, ppos); if (!error) validate_coredump_safety(); return error; } #endif static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos, unsigned long convmul, unsigned long convdiv) { unsigned long *i, *min, *max; int vleft, first = 1, err = 0; unsigned long page = 0; size_t left; char *kbuf; if (!data || !table->maxlen || !*lenp || (*ppos && !write)) { *lenp = 0; return 0; } i = (unsigned long *) data; min = (unsigned long *) table->extra1; max = (unsigned long *) table->extra2; vleft = table->maxlen / sizeof(unsigned long); left = *lenp; if (write) { if (left > PAGE_SIZE - 1) left = PAGE_SIZE - 1; page = __get_free_page(GFP_TEMPORARY); kbuf = (char *) page; if (!kbuf) return -ENOMEM; if (copy_from_user(kbuf, buffer, left)) { err = -EFAULT; goto free; } kbuf[left] = 0; } for (; left && vleft--; i++, first = 0) { unsigned long val; if (write) { bool neg; left -= proc_skip_spaces(&kbuf); err = proc_get_long(&kbuf, &left, &val, &neg, proc_wspace_sep, sizeof(proc_wspace_sep), NULL); if (err) break; if (neg) continue; if ((min && val < *min) || (max && val > *max)) continue; *i = val; } else { val = convdiv * (*i) / convmul; if (!first) err = proc_put_char(&buffer, &left, '\t'); err = proc_put_long(&buffer, &left, val, false); if (err) break; } } if (!write && !first && left && !err) err = proc_put_char(&buffer, &left, '\n'); if (write && !err) left -= proc_skip_spaces(&kbuf); free: if (write) { free_page(page); if (first) return err ? : -EINVAL; } *lenp -= left; *ppos += *lenp; return err; } static int do_proc_doulongvec_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos, unsigned long convmul, unsigned long convdiv) { return __do_proc_doulongvec_minmax(table->data, table, write, buffer, lenp, ppos, convmul, convdiv); } /** * proc_doulongvec_minmax - read a vector of long integers with min/max values * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes up to table->maxlen/sizeof(unsigned long) unsigned long * values from/to the user buffer, treated as an ASCII string. * * This routine will ensure the values are within the range specified by * table->extra1 (min) and table->extra2 (max). * * Returns 0 on success. */ int proc_doulongvec_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_doulongvec_minmax(table, write, buffer, lenp, ppos, 1l, 1l); } /** * proc_doulongvec_ms_jiffies_minmax - read a vector of millisecond values with min/max values * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes up to table->maxlen/sizeof(unsigned long) unsigned long * values from/to the user buffer, treated as an ASCII string. 
The values * are treated as milliseconds, and converted to jiffies when they are stored. * * This routine will ensure the values are within the range specified by * table->extra1 (min) and table->extra2 (max). * * Returns 0 on success. */ int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_doulongvec_minmax(table, write, buffer, lenp, ppos, HZ, 1000l); } static int do_proc_dointvec_jiffies_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { if (write) { if (*lvalp > LONG_MAX / HZ) return 1; *valp = *negp ? -(*lvalp*HZ) : (*lvalp*HZ); } else { int val = *valp; unsigned long lval; if (val < 0) { *negp = true; lval = (unsigned long)-val; } else { *negp = false; lval = (unsigned long)val; } *lvalp = lval / HZ; } return 0; } static int do_proc_dointvec_userhz_jiffies_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { if (write) { if (USER_HZ < HZ && *lvalp > (LONG_MAX / HZ) * USER_HZ) return 1; *valp = clock_t_to_jiffies(*negp ? -*lvalp : *lvalp); } else { int val = *valp; unsigned long lval; if (val < 0) { *negp = true; lval = (unsigned long)-val; } else { *negp = false; lval = (unsigned long)val; } *lvalp = jiffies_to_clock_t(lval); } return 0; } static int do_proc_dointvec_ms_jiffies_conv(bool *negp, unsigned long *lvalp, int *valp, int write, void *data) { if (write) { *valp = msecs_to_jiffies(*negp ? -*lvalp : *lvalp); } else { int val = *valp; unsigned long lval; if (val < 0) { *negp = true; lval = (unsigned long)-val; } else { *negp = false; lval = (unsigned long)val; } *lvalp = jiffies_to_msecs(lval); } return 0; } /** * proc_dointvec_jiffies - read a vector of integers as seconds * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes up to table->maxlen/sizeof(unsigned int) integer * values from/to the user buffer, treated as an ASCII string. * The values read are assumed to be in seconds, and are converted into * jiffies. * * Returns 0 on success. */ int proc_dointvec_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_dointvec(table,write,buffer,lenp,ppos, do_proc_dointvec_jiffies_conv,NULL); } /** * proc_dointvec_userhz_jiffies - read a vector of integers as 1/USER_HZ seconds * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: pointer to the file position * * Reads/writes up to table->maxlen/sizeof(unsigned int) integer * values from/to the user buffer, treated as an ASCII string. * The values read are assumed to be in 1/USER_HZ seconds, and * are converted into jiffies. * * Returns 0 on success. */ int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_dointvec(table,write,buffer,lenp,ppos, do_proc_dointvec_userhz_jiffies_conv,NULL); } /** * proc_dointvec_ms_jiffies - read a vector of integers as milliseconds * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * Reads/writes up to table->maxlen/sizeof(unsigned int) integer * values from/to the user buffer, treated as an ASCII string.
* The values read are assumed to be in 1/1000 seconds, and * are converted into jiffies. * * Returns 0 on success. */ int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return do_proc_dointvec(table, write, buffer, lenp, ppos, do_proc_dointvec_ms_jiffies_conv, NULL); } static int proc_do_cad_pid(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { struct pid *new_pid; pid_t tmp; int r; tmp = pid_vnr(cad_pid); r = __do_proc_dointvec(&tmp, table, write, buffer, lenp, ppos, NULL, NULL); if (r || !write) return r; new_pid = find_get_pid(tmp); if (!new_pid) return -ESRCH; put_pid(xchg(&cad_pid, new_pid)); return 0; } /** * proc_do_large_bitmap - read/write from/to a large bitmap * @table: the sysctl table * @write: %TRUE if this is a write to the sysctl file * @buffer: the user buffer * @lenp: the size of the user buffer * @ppos: file position * * The bitmap is stored at table->data and the bitmap length (in bits) * in table->maxlen. * * We use a range comma separated format (e.g. 1,3-4,10-10) so that * large bitmaps may be represented in a compact manner. Writing into * the file will clear the bitmap then update it with the given input. * * Returns 0 on success. */ int proc_do_large_bitmap(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { int err = 0; bool first = 1; size_t left = *lenp; unsigned long bitmap_len = table->maxlen; unsigned long *bitmap = (unsigned long *) table->data; unsigned long *tmp_bitmap = NULL; char tr_a[] = { '-', ',', '\n' }, tr_b[] = { ',', '\n', 0 }, c; if (!bitmap_len || !left || (*ppos && !write)) { *lenp = 0; return 0; } if (write) { unsigned long page = 0; char *kbuf; if (left > PAGE_SIZE - 1) left = PAGE_SIZE - 1; page = __get_free_page(GFP_TEMPORARY); kbuf = (char *) page; if (!kbuf) return -ENOMEM; if (copy_from_user(kbuf, buffer, left)) { free_page(page); return -EFAULT; } kbuf[left] = 0; tmp_bitmap = kzalloc(BITS_TO_LONGS(bitmap_len) * sizeof(unsigned long), GFP_KERNEL); if (!tmp_bitmap) { free_page(page); return -ENOMEM; } proc_skip_char(&kbuf, &left, '\n'); while (!err && left) { unsigned long val_a, val_b; bool neg; err = proc_get_long(&kbuf, &left, &val_a, &neg, tr_a, sizeof(tr_a), &c); if (err) break; if (val_a >= bitmap_len || neg) { err = -EINVAL; break; } val_b = val_a; if (left) { kbuf++; left--; } if (c == '-') { err = proc_get_long(&kbuf, &left, &val_b, &neg, tr_b, sizeof(tr_b), &c); if (err) break; if (val_b >= bitmap_len || neg || val_a > val_b) { err = -EINVAL; break; } if (left) { kbuf++; left--; } } bitmap_set(tmp_bitmap, val_a, val_b - val_a + 1); first = 0; proc_skip_char(&kbuf, &left, '\n'); } free_page(page); } else { unsigned long bit_a, bit_b = 0; while (left) { bit_a = find_next_bit(bitmap, bitmap_len, bit_b); if (bit_a >= bitmap_len) break; bit_b = find_next_zero_bit(bitmap, bitmap_len, bit_a + 1) - 1; if (!first) { err = proc_put_char(&buffer, &left, ','); if (err) break; } err = proc_put_long(&buffer, &left, bit_a, false); if (err) break; if (bit_a != bit_b) { err = proc_put_char(&buffer, &left, '-'); if (err) break; err = proc_put_long(&buffer, &left, bit_b, false); if (err) break; } first = 0; bit_b++; } if (!err) err = proc_put_char(&buffer, &left, '\n'); } if (!err) { if (write) { if (*ppos) bitmap_or(bitmap, bitmap, tmp_bitmap, bitmap_len); else bitmap_copy(bitmap, tmp_bitmap, bitmap_len); } kfree(tmp_bitmap); *lenp -= left; *ppos += *lenp; return 0; } else { kfree(tmp_bitmap); return err; } } 
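/*
 * A minimal usage sketch (not from the original file): wiring a bitmap to
 * proc_do_large_bitmap() as described in its kernel-doc above. The bitmap,
 * its size and the "example_bitmap" procname are hypothetical; only the
 * handler and the ctl_table layout come from this file. Note that .maxlen
 * holds the bitmap length in bits, not bytes. Writing "1,3-4,10-10" to the
 * resulting /proc/sys file would set bits 1, 3, 4 and 10.
 */
#define EXAMPLE_NBITS 256
static unsigned long example_bits[BITS_TO_LONGS(EXAMPLE_NBITS)];

static struct ctl_table example_bitmap_table[] = {
	{
		.procname	= "example_bitmap",
		.data		= example_bits,
		.maxlen		= EXAMPLE_NBITS,	/* length in bits */
		.mode		= 0644,
		.proc_handler	= proc_do_large_bitmap,
	},
	{ }
};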
#else /* CONFIG_PROC_SYSCTL */ int proc_dostring(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_dointvec(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_dointvec_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_dointvec_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_dointvec_userhz_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_dointvec_ms_jiffies(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_doulongvec_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } int proc_doulongvec_ms_jiffies_minmax(struct ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) { return -ENOSYS; } #endif /* CONFIG_PROC_SYSCTL */ /* * No sense putting this after each symbol definition, twice, * exception granted :-) */ EXPORT_SYMBOL(proc_dointvec); EXPORT_SYMBOL(proc_dointvec_jiffies); EXPORT_SYMBOL(proc_dointvec_minmax); EXPORT_SYMBOL(proc_dointvec_userhz_jiffies); EXPORT_SYMBOL(proc_dointvec_ms_jiffies); EXPORT_SYMBOL(proc_dostring); EXPORT_SYMBOL(proc_doulongvec_minmax); EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
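/*
 * A minimal registration sketch (not from the original file): exposing an
 * integer knob through the handlers exported above. Every name here
 * (example_value, "example", "example_knob") is hypothetical; the
 * .extra1/.extra2 clamping is exactly what proc_dointvec_minmax() documents,
 * and register_sysctl_table() is used the same way as in sysctl_init().
 */
static int example_value = 1;
static int example_min;		/* 0 */
static int example_max = 100;

static struct ctl_table example_table[] = {
	{
		.procname	= "example_knob",
		.data		= &example_value,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= &example_min,	/* writes below 0 fail with -EINVAL */
		.extra2		= &example_max,	/* writes above 100 fail with -EINVAL */
	},
	{ }
};

static struct ctl_table example_root[] = {
	{
		.procname	= "example",
		.mode		= 0555,
		.child		= example_table,
	},
	{ }
};

/* e.g. in an init function: register_sysctl_table(example_root); */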
gpl-2.0
pantoniou/linux-beagle-track-mainline
drivers/platform/x86/hp-wmi.c
55
25226
/* * HP WMI hotkeys * * Copyright (C) 2008 Red Hat <mjg@redhat.com> * Copyright (C) 2010, 2011 Anssi Hannula <anssi.hannula@iki.fi> * * Portions based on wistron_btns.c: * Copyright (C) 2005 Miloslav Trmac <mitr@volny.cz> * Copyright (C) 2005 Bernhard Rosenkraenzer <bero@arklinux.org> * Copyright (C) 2005 Dmitry Torokhov <dtor@mail.ru> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/slab.h> #include <linux/types.h> #include <linux/input.h> #include <linux/input/sparse-keymap.h> #include <linux/platform_device.h> #include <linux/acpi.h> #include <linux/rfkill.h> #include <linux/string.h> MODULE_AUTHOR("Matthew Garrett <mjg59@srcf.ucam.org>"); MODULE_DESCRIPTION("HP laptop WMI hotkeys driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("wmi:95F24279-4D7B-4334-9387-ACCDC67EF61C"); MODULE_ALIAS("wmi:5FB7F034-2C63-45e9-BE91-3D44E2C707E4"); #define HPWMI_EVENT_GUID "95F24279-4D7B-4334-9387-ACCDC67EF61C" #define HPWMI_BIOS_GUID "5FB7F034-2C63-45e9-BE91-3D44E2C707E4" enum hp_wmi_radio { HPWMI_WIFI = 0x0, HPWMI_BLUETOOTH = 0x1, HPWMI_WWAN = 0x2, HPWMI_GPS = 0x3, }; enum hp_wmi_event_ids { HPWMI_DOCK_EVENT = 0x01, HPWMI_PARK_HDD = 0x02, HPWMI_SMART_ADAPTER = 0x03, HPWMI_BEZEL_BUTTON = 0x04, HPWMI_WIRELESS = 0x05, HPWMI_CPU_BATTERY_THROTTLE = 0x06, HPWMI_LOCK_SWITCH = 0x07, HPWMI_LID_SWITCH = 0x08, HPWMI_SCREEN_ROTATION = 0x09, HPWMI_COOLSENSE_SYSTEM_MOBILE = 0x0A, HPWMI_COOLSENSE_SYSTEM_HOT = 0x0B, HPWMI_PROXIMITY_SENSOR = 0x0C, HPWMI_BACKLIT_KB_BRIGHTNESS = 0x0D, HPWMI_PEAKSHIFT_PERIOD = 0x0F, HPWMI_BATTERY_CHARGE_PERIOD = 0x10, }; struct bios_args { u32 signature; u32 command; u32 commandtype; u32 datasize; u32 data; }; enum hp_wmi_commandtype { HPWMI_DISPLAY_QUERY = 0x01, HPWMI_HDDTEMP_QUERY = 0x02, HPWMI_ALS_QUERY = 0x03, HPWMI_HARDWARE_QUERY = 0x04, HPWMI_WIRELESS_QUERY = 0x05, HPWMI_BATTERY_QUERY = 0x07, HPWMI_BIOS_QUERY = 0x09, HPWMI_FEATURE_QUERY = 0x0b, HPWMI_HOTKEY_QUERY = 0x0c, HPWMI_FEATURE2_QUERY = 0x0d, HPWMI_WIRELESS2_QUERY = 0x1b, HPWMI_POSTCODEERROR_QUERY = 0x2a, }; enum hp_wmi_command { HPWMI_READ = 0x01, HPWMI_WRITE = 0x02, HPWMI_ODM = 0x03, }; enum hp_wmi_hardware_mask { HPWMI_DOCK_MASK = 0x01, HPWMI_TABLET_MASK = 0x04, }; #define BIOS_ARGS_INIT(write, ctype, size) \ (struct bios_args) { .signature = 0x55434553, \ .command = (write) ? 
0x2 : 0x1, \ .commandtype = (ctype), \ .datasize = (size), \ .data = 0 } struct bios_return { u32 sigpass; u32 return_code; }; enum hp_return_value { HPWMI_RET_WRONG_SIGNATURE = 0x02, HPWMI_RET_UNKNOWN_COMMAND = 0x03, HPWMI_RET_UNKNOWN_CMDTYPE = 0x04, HPWMI_RET_INVALID_PARAMETERS = 0x05, }; enum hp_wireless2_bits { HPWMI_POWER_STATE = 0x01, HPWMI_POWER_SOFT = 0x02, HPWMI_POWER_BIOS = 0x04, HPWMI_POWER_HARD = 0x08, }; #define IS_HWBLOCKED(x) ((x & (HPWMI_POWER_BIOS | HPWMI_POWER_HARD)) \ != (HPWMI_POWER_BIOS | HPWMI_POWER_HARD)) #define IS_SWBLOCKED(x) !(x & HPWMI_POWER_SOFT) struct bios_rfkill2_device_state { u8 radio_type; u8 bus_type; u16 vendor_id; u16 product_id; u16 subsys_vendor_id; u16 subsys_product_id; u8 rfkill_id; u8 power; u8 unknown[4]; }; /* 7 devices fit into the 128 byte buffer */ #define HPWMI_MAX_RFKILL2_DEVICES 7 struct bios_rfkill2_state { u8 unknown[7]; u8 count; u8 pad[8]; struct bios_rfkill2_device_state device[HPWMI_MAX_RFKILL2_DEVICES]; }; static const struct key_entry hp_wmi_keymap[] = { { KE_KEY, 0x02, { KEY_BRIGHTNESSUP } }, { KE_KEY, 0x03, { KEY_BRIGHTNESSDOWN } }, { KE_KEY, 0x20e6, { KEY_PROG1 } }, { KE_KEY, 0x20e8, { KEY_MEDIA } }, { KE_KEY, 0x2142, { KEY_MEDIA } }, { KE_KEY, 0x213b, { KEY_INFO } }, { KE_KEY, 0x2169, { KEY_ROTATE_DISPLAY } }, { KE_KEY, 0x216a, { KEY_SETUP } }, { KE_KEY, 0x231b, { KEY_HELP } }, { KE_END, 0 } }; static struct input_dev *hp_wmi_input_dev; static struct platform_device *hp_wmi_platform_dev; static struct rfkill *wifi_rfkill; static struct rfkill *bluetooth_rfkill; static struct rfkill *wwan_rfkill; struct rfkill2_device { u8 id; int num; struct rfkill *rfkill; }; static int rfkill2_count; static struct rfkill2_device rfkill2[HPWMI_MAX_RFKILL2_DEVICES]; /* * hp_wmi_perform_query * * query: The commandtype (enum hp_wmi_commandtype) * write: The command (enum hp_wmi_command) * buffer: Buffer used as input and/or output * insize: Size of input buffer * outsize: Size of output buffer * * returns zero on success * an HP WMI query specific error code (which is positive) * -EINVAL if the query was not successful at all * -EINVAL if the output buffer size exceeds buffersize * * Note: The buffersize must at least be the maximum of the input and output * size. E.g. Battery info query is defined to have 1 byte input * and 128 byte output. 
The caller would do: * buffer = kzalloc(128, GFP_KERNEL); * ret = hp_wmi_perform_query(HPWMI_BATTERY_QUERY, HPWMI_READ, buffer, 1, 128) */ static int hp_wmi_perform_query(int query, enum hp_wmi_command command, void *buffer, int insize, int outsize) { struct bios_return *bios_return; int actual_outsize; union acpi_object *obj; struct bios_args args = { .signature = 0x55434553, .command = command, .commandtype = query, .datasize = insize, .data = 0, }; struct acpi_buffer input = { sizeof(struct bios_args), &args }; struct acpi_buffer output = { ACPI_ALLOCATE_BUFFER, NULL }; int ret = 0; if (WARN_ON(insize > sizeof(args.data))) return -EINVAL; memcpy(&args.data, buffer, insize); wmi_evaluate_method(HPWMI_BIOS_GUID, 0, 0x3, &input, &output); obj = output.pointer; if (!obj) return -EINVAL; if (obj->type != ACPI_TYPE_BUFFER) { ret = -EINVAL; goto out_free; } bios_return = (struct bios_return *)obj->buffer.pointer; ret = bios_return->return_code; if (ret) { if (ret != HPWMI_RET_UNKNOWN_CMDTYPE) pr_warn("query 0x%x returned error 0x%x\n", query, ret); goto out_free; } /* Ignore output data of zero size */ if (!outsize) goto out_free; actual_outsize = min(outsize, (int)(obj->buffer.length - sizeof(*bios_return))); memcpy(buffer, obj->buffer.pointer + sizeof(*bios_return), actual_outsize); memset(buffer + actual_outsize, 0, outsize - actual_outsize); out_free: kfree(obj); return ret; } static int hp_wmi_read_int(int query) { int val = 0, ret; ret = hp_wmi_perform_query(query, HPWMI_READ, &val, sizeof(val), sizeof(val)); if (ret) return ret < 0 ? ret : -EINVAL; return val; } static int hp_wmi_hw_state(int mask) { int state = hp_wmi_read_int(HPWMI_HARDWARE_QUERY); if (state < 0) return state; return !!(state & mask); } static int __init hp_wmi_bios_2008_later(void) { int state = 0; int ret = hp_wmi_perform_query(HPWMI_FEATURE_QUERY, HPWMI_READ, &state, sizeof(state), sizeof(state)); if (!ret) return 1; return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO; } static int __init hp_wmi_bios_2009_later(void) { int state = 0; int ret = hp_wmi_perform_query(HPWMI_FEATURE2_QUERY, HPWMI_READ, &state, sizeof(state), sizeof(state)); if (!ret) return 1; return (ret == HPWMI_RET_UNKNOWN_CMDTYPE) ? 0 : -ENXIO; } static int __init hp_wmi_enable_hotkeys(void) { int value = 0x6e; int ret = hp_wmi_perform_query(HPWMI_BIOS_QUERY, HPWMI_WRITE, &value, sizeof(value), 0); return ret <= 0 ? ret : -EINVAL; } static int hp_wmi_set_block(void *data, bool blocked) { enum hp_wmi_radio r = (enum hp_wmi_radio) data; int query = BIT(r + 8) | ((!blocked) << r); int ret; ret = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, HPWMI_WRITE, &query, sizeof(query), 0); return ret <= 0 ?
ret : -EINVAL; } static const struct rfkill_ops hp_wmi_rfkill_ops = { .set_block = hp_wmi_set_block, }; static bool hp_wmi_get_sw_state(enum hp_wmi_radio r) { int mask = 0x200 << (r * 8); int wireless = hp_wmi_read_int(HPWMI_WIRELESS_QUERY); /* TBD: Pass error */ WARN_ONCE(wireless < 0, "error executing HPWMI_WIRELESS_QUERY"); return !(wireless & mask); } static bool hp_wmi_get_hw_state(enum hp_wmi_radio r) { int mask = 0x800 << (r * 8); int wireless = hp_wmi_read_int(HPWMI_WIRELESS_QUERY); /* TBD: Pass error */ WARN_ONCE(wireless < 0, "error executing HPWMI_WIRELESS_QUERY"); return !(wireless & mask); } static int hp_wmi_rfkill2_set_block(void *data, bool blocked) { int rfkill_id = (int)(long)data; char buffer[4] = { 0x01, 0x00, rfkill_id, !blocked }; int ret; ret = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_WRITE, buffer, sizeof(buffer), 0); return ret <= 0 ? ret : -EINVAL; } static const struct rfkill_ops hp_wmi_rfkill2_ops = { .set_block = hp_wmi_rfkill2_set_block, }; static int hp_wmi_rfkill2_refresh(void) { struct bios_rfkill2_state state; int err, i; err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state, 0, sizeof(state)); if (err) return err; for (i = 0; i < rfkill2_count; i++) { int num = rfkill2[i].num; struct bios_rfkill2_device_state *devstate; devstate = &state.device[num]; if (num >= state.count || devstate->rfkill_id != rfkill2[i].id) { pr_warn("power configuration of the wireless devices unexpectedly changed\n"); continue; } rfkill_set_states(rfkill2[i].rfkill, IS_SWBLOCKED(devstate->power), IS_HWBLOCKED(devstate->power)); } return 0; } static ssize_t display_show(struct device *dev, struct device_attribute *attr, char *buf) { int value = hp_wmi_read_int(HPWMI_DISPLAY_QUERY); if (value < 0) return value; return sprintf(buf, "%d\n", value); } static ssize_t hddtemp_show(struct device *dev, struct device_attribute *attr, char *buf) { int value = hp_wmi_read_int(HPWMI_HDDTEMP_QUERY); if (value < 0) return value; return sprintf(buf, "%d\n", value); } static ssize_t als_show(struct device *dev, struct device_attribute *attr, char *buf) { int value = hp_wmi_read_int(HPWMI_ALS_QUERY); if (value < 0) return value; return sprintf(buf, "%d\n", value); } static ssize_t dock_show(struct device *dev, struct device_attribute *attr, char *buf) { int value = hp_wmi_hw_state(HPWMI_DOCK_MASK); if (value < 0) return value; return sprintf(buf, "%d\n", value); } static ssize_t tablet_show(struct device *dev, struct device_attribute *attr, char *buf) { int value = hp_wmi_hw_state(HPWMI_TABLET_MASK); if (value < 0) return value; return sprintf(buf, "%d\n", value); } static ssize_t postcode_show(struct device *dev, struct device_attribute *attr, char *buf) { /* Get the POST error code of previous boot failure. */ int value = hp_wmi_read_int(HPWMI_POSTCODEERROR_QUERY); if (value < 0) return value; return sprintf(buf, "0x%x\n", value); } static ssize_t als_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { u32 tmp = simple_strtoul(buf, NULL, 10); int ret = hp_wmi_perform_query(HPWMI_ALS_QUERY, HPWMI_WRITE, &tmp, sizeof(tmp), sizeof(tmp)); if (ret) return ret < 0 ? ret : -EINVAL; return count; } static ssize_t postcode_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { long unsigned int tmp2; int ret; u32 tmp; ret = kstrtoul(buf, 10, &tmp2); if (!ret && tmp2 != 1) ret = -EINVAL; if (ret) goto out; /* Clear the POST error code. It is kept until until cleared. 
*/ tmp = (u32) tmp2; ret = hp_wmi_perform_query(HPWMI_POSTCODEERROR_QUERY, HPWMI_WRITE, &tmp, sizeof(tmp), sizeof(tmp)); out: if (ret) return ret < 0 ? ret : -EINVAL; return count; } static DEVICE_ATTR_RO(display); static DEVICE_ATTR_RO(hddtemp); static DEVICE_ATTR_RW(als); static DEVICE_ATTR_RO(dock); static DEVICE_ATTR_RO(tablet); static DEVICE_ATTR_RW(postcode); static void hp_wmi_notify(u32 value, void *context) { struct acpi_buffer response = { ACPI_ALLOCATE_BUFFER, NULL }; u32 event_id, event_data; union acpi_object *obj; acpi_status status; u32 *location; int key_code; status = wmi_get_event_data(value, &response); if (status != AE_OK) { pr_info("bad event status 0x%x\n", status); return; } obj = (union acpi_object *)response.pointer; if (!obj) return; if (obj->type != ACPI_TYPE_BUFFER) { pr_info("Unknown response received %d\n", obj->type); kfree(obj); return; } /* * Depending on ACPI version the concatenation of id and event data * inside _WED function will result in a 8 or 16 byte buffer. */ location = (u32 *)obj->buffer.pointer; if (obj->buffer.length == 8) { event_id = *location; event_data = *(location + 1); } else if (obj->buffer.length == 16) { event_id = *location; event_data = *(location + 2); } else { pr_info("Unknown buffer length %d\n", obj->buffer.length); kfree(obj); return; } kfree(obj); switch (event_id) { case HPWMI_DOCK_EVENT: if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit)) input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_hw_state(HPWMI_DOCK_MASK)); if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit)) input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, hp_wmi_hw_state(HPWMI_TABLET_MASK)); input_sync(hp_wmi_input_dev); break; case HPWMI_PARK_HDD: break; case HPWMI_SMART_ADAPTER: break; case HPWMI_BEZEL_BUTTON: key_code = hp_wmi_read_int(HPWMI_HOTKEY_QUERY); if (key_code < 0) break; if (!sparse_keymap_report_event(hp_wmi_input_dev, key_code, 1, true)) pr_info("Unknown key code - 0x%x\n", key_code); break; case HPWMI_WIRELESS: if (rfkill2_count) { hp_wmi_rfkill2_refresh(); break; } if (wifi_rfkill) rfkill_set_states(wifi_rfkill, hp_wmi_get_sw_state(HPWMI_WIFI), hp_wmi_get_hw_state(HPWMI_WIFI)); if (bluetooth_rfkill) rfkill_set_states(bluetooth_rfkill, hp_wmi_get_sw_state(HPWMI_BLUETOOTH), hp_wmi_get_hw_state(HPWMI_BLUETOOTH)); if (wwan_rfkill) rfkill_set_states(wwan_rfkill, hp_wmi_get_sw_state(HPWMI_WWAN), hp_wmi_get_hw_state(HPWMI_WWAN)); break; case HPWMI_CPU_BATTERY_THROTTLE: pr_info("Unimplemented CPU throttle because of 3 Cell battery event detected\n"); break; case HPWMI_LOCK_SWITCH: break; case HPWMI_LID_SWITCH: break; case HPWMI_SCREEN_ROTATION: break; case HPWMI_COOLSENSE_SYSTEM_MOBILE: break; case HPWMI_COOLSENSE_SYSTEM_HOT: break; case HPWMI_PROXIMITY_SENSOR: break; case HPWMI_BACKLIT_KB_BRIGHTNESS: break; case HPWMI_PEAKSHIFT_PERIOD: break; case HPWMI_BATTERY_CHARGE_PERIOD: break; default: pr_info("Unknown event_id - %d - 0x%x\n", event_id, event_data); break; } } static int __init hp_wmi_input_setup(void) { acpi_status status; int err, val; hp_wmi_input_dev = input_allocate_device(); if (!hp_wmi_input_dev) return -ENOMEM; hp_wmi_input_dev->name = "HP WMI hotkeys"; hp_wmi_input_dev->phys = "wmi/input0"; hp_wmi_input_dev->id.bustype = BUS_HOST; __set_bit(EV_SW, hp_wmi_input_dev->evbit); /* Dock */ val = hp_wmi_hw_state(HPWMI_DOCK_MASK); if (!(val < 0)) { __set_bit(SW_DOCK, hp_wmi_input_dev->swbit); input_report_switch(hp_wmi_input_dev, SW_DOCK, val); } /* Tablet mode */ val = hp_wmi_hw_state(HPWMI_TABLET_MASK); if (!(val < 0)) { 
__set_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit); input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, val); } err = sparse_keymap_setup(hp_wmi_input_dev, hp_wmi_keymap, NULL); if (err) goto err_free_dev; /* Set initial hardware state */ input_sync(hp_wmi_input_dev); if (!hp_wmi_bios_2009_later() && hp_wmi_bios_2008_later()) hp_wmi_enable_hotkeys(); status = wmi_install_notify_handler(HPWMI_EVENT_GUID, hp_wmi_notify, NULL); if (ACPI_FAILURE(status)) { err = -EIO; goto err_free_dev; } err = input_register_device(hp_wmi_input_dev); if (err) goto err_uninstall_notifier; return 0; err_uninstall_notifier: wmi_remove_notify_handler(HPWMI_EVENT_GUID); err_free_dev: input_free_device(hp_wmi_input_dev); return err; } static void hp_wmi_input_destroy(void) { wmi_remove_notify_handler(HPWMI_EVENT_GUID); input_unregister_device(hp_wmi_input_dev); } static void cleanup_sysfs(struct platform_device *device) { device_remove_file(&device->dev, &dev_attr_display); device_remove_file(&device->dev, &dev_attr_hddtemp); device_remove_file(&device->dev, &dev_attr_als); device_remove_file(&device->dev, &dev_attr_dock); device_remove_file(&device->dev, &dev_attr_tablet); device_remove_file(&device->dev, &dev_attr_postcode); } static int __init hp_wmi_rfkill_setup(struct platform_device *device) { int err, wireless; wireless = hp_wmi_read_int(HPWMI_WIRELESS_QUERY); if (wireless < 0) return wireless; err = hp_wmi_perform_query(HPWMI_WIRELESS_QUERY, HPWMI_WRITE, &wireless, sizeof(wireless), 0); if (err) return err; if (wireless & 0x1) { wifi_rfkill = rfkill_alloc("hp-wifi", &device->dev, RFKILL_TYPE_WLAN, &hp_wmi_rfkill_ops, (void *) HPWMI_WIFI); if (!wifi_rfkill) return -ENOMEM; rfkill_init_sw_state(wifi_rfkill, hp_wmi_get_sw_state(HPWMI_WIFI)); rfkill_set_hw_state(wifi_rfkill, hp_wmi_get_hw_state(HPWMI_WIFI)); err = rfkill_register(wifi_rfkill); if (err) goto register_wifi_error; } if (wireless & 0x2) { bluetooth_rfkill = rfkill_alloc("hp-bluetooth", &device->dev, RFKILL_TYPE_BLUETOOTH, &hp_wmi_rfkill_ops, (void *) HPWMI_BLUETOOTH); if (!bluetooth_rfkill) { err = -ENOMEM; goto register_bluetooth_error; } rfkill_init_sw_state(bluetooth_rfkill, hp_wmi_get_sw_state(HPWMI_BLUETOOTH)); rfkill_set_hw_state(bluetooth_rfkill, hp_wmi_get_hw_state(HPWMI_BLUETOOTH)); err = rfkill_register(bluetooth_rfkill); if (err) goto register_bluetooth_error; } if (wireless & 0x4) { wwan_rfkill = rfkill_alloc("hp-wwan", &device->dev, RFKILL_TYPE_WWAN, &hp_wmi_rfkill_ops, (void *) HPWMI_WWAN); if (!wwan_rfkill) { err = -ENOMEM; goto register_wwan_error; } rfkill_init_sw_state(wwan_rfkill, hp_wmi_get_sw_state(HPWMI_WWAN)); rfkill_set_hw_state(wwan_rfkill, hp_wmi_get_hw_state(HPWMI_WWAN)); err = rfkill_register(wwan_rfkill); if (err) goto register_wwan_error; } return 0; register_wwan_error: rfkill_destroy(wwan_rfkill); wwan_rfkill = NULL; if (bluetooth_rfkill) rfkill_unregister(bluetooth_rfkill); register_bluetooth_error: rfkill_destroy(bluetooth_rfkill); bluetooth_rfkill = NULL; if (wifi_rfkill) rfkill_unregister(wifi_rfkill); register_wifi_error: rfkill_destroy(wifi_rfkill); wifi_rfkill = NULL; return err; } static int __init hp_wmi_rfkill2_setup(struct platform_device *device) { struct bios_rfkill2_state state; int err, i; err = hp_wmi_perform_query(HPWMI_WIRELESS2_QUERY, HPWMI_READ, &state, 0, sizeof(state)); if (err) return err < 0 ? 
err : -EINVAL; if (state.count > HPWMI_MAX_RFKILL2_DEVICES) { pr_warn("unable to parse 0x1b query output\n"); return -EINVAL; } for (i = 0; i < state.count; i++) { struct rfkill *rfkill; enum rfkill_type type; char *name; switch (state.device[i].radio_type) { case HPWMI_WIFI: type = RFKILL_TYPE_WLAN; name = "hp-wifi"; break; case HPWMI_BLUETOOTH: type = RFKILL_TYPE_BLUETOOTH; name = "hp-bluetooth"; break; case HPWMI_WWAN: type = RFKILL_TYPE_WWAN; name = "hp-wwan"; break; case HPWMI_GPS: type = RFKILL_TYPE_GPS; name = "hp-gps"; break; default: pr_warn("unknown device type 0x%x\n", state.device[i].radio_type); continue; } if (!state.device[i].vendor_id) { pr_warn("zero device %d while %d reported\n", i, state.count); continue; } rfkill = rfkill_alloc(name, &device->dev, type, &hp_wmi_rfkill2_ops, (void *)(long)i); if (!rfkill) { err = -ENOMEM; goto fail; } rfkill2[rfkill2_count].id = state.device[i].rfkill_id; rfkill2[rfkill2_count].num = i; rfkill2[rfkill2_count].rfkill = rfkill; rfkill_init_sw_state(rfkill, IS_SWBLOCKED(state.device[i].power)); rfkill_set_hw_state(rfkill, IS_HWBLOCKED(state.device[i].power)); if (!(state.device[i].power & HPWMI_POWER_BIOS)) pr_info("device %s blocked by BIOS\n", name); err = rfkill_register(rfkill); if (err) { rfkill_destroy(rfkill); goto fail; } rfkill2_count++; } return 0; fail: for (; rfkill2_count > 0; rfkill2_count--) { rfkill_unregister(rfkill2[rfkill2_count - 1].rfkill); rfkill_destroy(rfkill2[rfkill2_count - 1].rfkill); } return err; } static int __init hp_wmi_bios_setup(struct platform_device *device) { int err; /* clear detected rfkill devices */ wifi_rfkill = NULL; bluetooth_rfkill = NULL; wwan_rfkill = NULL; rfkill2_count = 0; if (hp_wmi_rfkill_setup(device)) hp_wmi_rfkill2_setup(device); err = device_create_file(&device->dev, &dev_attr_display); if (err) goto add_sysfs_error; err = device_create_file(&device->dev, &dev_attr_hddtemp); if (err) goto add_sysfs_error; err = device_create_file(&device->dev, &dev_attr_als); if (err) goto add_sysfs_error; err = device_create_file(&device->dev, &dev_attr_dock); if (err) goto add_sysfs_error; err = device_create_file(&device->dev, &dev_attr_tablet); if (err) goto add_sysfs_error; err = device_create_file(&device->dev, &dev_attr_postcode); if (err) goto add_sysfs_error; return 0; add_sysfs_error: cleanup_sysfs(device); return err; } static int __exit hp_wmi_bios_remove(struct platform_device *device) { int i; cleanup_sysfs(device); for (i = 0; i < rfkill2_count; i++) { rfkill_unregister(rfkill2[i].rfkill); rfkill_destroy(rfkill2[i].rfkill); } if (wifi_rfkill) { rfkill_unregister(wifi_rfkill); rfkill_destroy(wifi_rfkill); } if (bluetooth_rfkill) { rfkill_unregister(bluetooth_rfkill); rfkill_destroy(bluetooth_rfkill); } if (wwan_rfkill) { rfkill_unregister(wwan_rfkill); rfkill_destroy(wwan_rfkill); } return 0; } static int hp_wmi_resume_handler(struct device *device) { /* * Hardware state may have changed while suspended, so trigger * input events for the current state. As this is a switch, * the input layer will only actually pass it on if the state * changed. 
*/ if (hp_wmi_input_dev) { if (test_bit(SW_DOCK, hp_wmi_input_dev->swbit)) input_report_switch(hp_wmi_input_dev, SW_DOCK, hp_wmi_hw_state(HPWMI_DOCK_MASK)); if (test_bit(SW_TABLET_MODE, hp_wmi_input_dev->swbit)) input_report_switch(hp_wmi_input_dev, SW_TABLET_MODE, hp_wmi_hw_state(HPWMI_TABLET_MASK)); input_sync(hp_wmi_input_dev); } if (rfkill2_count) hp_wmi_rfkill2_refresh(); if (wifi_rfkill) rfkill_set_states(wifi_rfkill, hp_wmi_get_sw_state(HPWMI_WIFI), hp_wmi_get_hw_state(HPWMI_WIFI)); if (bluetooth_rfkill) rfkill_set_states(bluetooth_rfkill, hp_wmi_get_sw_state(HPWMI_BLUETOOTH), hp_wmi_get_hw_state(HPWMI_BLUETOOTH)); if (wwan_rfkill) rfkill_set_states(wwan_rfkill, hp_wmi_get_sw_state(HPWMI_WWAN), hp_wmi_get_hw_state(HPWMI_WWAN)); return 0; } static const struct dev_pm_ops hp_wmi_pm_ops = { .resume = hp_wmi_resume_handler, .restore = hp_wmi_resume_handler, }; static struct platform_driver hp_wmi_driver = { .driver = { .name = "hp-wmi", .pm = &hp_wmi_pm_ops, }, .remove = __exit_p(hp_wmi_bios_remove), }; static int __init hp_wmi_init(void) { int event_capable = wmi_has_guid(HPWMI_EVENT_GUID); int bios_capable = wmi_has_guid(HPWMI_BIOS_GUID); int err; if (!bios_capable && !event_capable) return -ENODEV; if (event_capable) { err = hp_wmi_input_setup(); if (err) return err; } if (bios_capable) { hp_wmi_platform_dev = platform_device_register_simple("hp-wmi", -1, NULL, 0); if (IS_ERR(hp_wmi_platform_dev)) { err = PTR_ERR(hp_wmi_platform_dev); goto err_destroy_input; } err = platform_driver_probe(&hp_wmi_driver, hp_wmi_bios_setup); if (err) goto err_unregister_device; } return 0; err_unregister_device: platform_device_unregister(hp_wmi_platform_dev); err_destroy_input: if (event_capable) hp_wmi_input_destroy(); return err; } module_init(hp_wmi_init); static void __exit hp_wmi_exit(void) { if (wmi_has_guid(HPWMI_EVENT_GUID)) hp_wmi_input_destroy(); if (hp_wmi_platform_dev) { platform_device_unregister(hp_wmi_platform_dev); platform_driver_unregister(&hp_wmi_driver); } } module_exit(hp_wmi_exit);
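/*
 * A worked version (not from the original file) of the battery query
 * example given in the hp_wmi_perform_query() kernel-doc above: 1 byte of
 * input, 128 bytes of output, and a buffer sized for the larger of the
 * two sizes. The function name is hypothetical; the query constants and
 * the call signature are taken from this file. A positive return value is
 * an HP WMI error code, a negative one a kernel errno.
 */
static int example_battery_query(void)
{
	char *buffer;
	int ret;

	buffer = kzalloc(128, GFP_KERNEL);
	if (!buffer)
		return -ENOMEM;

	ret = hp_wmi_perform_query(HPWMI_BATTERY_QUERY, HPWMI_READ,
				   buffer, 1, 128);
	kfree(buffer);
	return ret;
}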
gpl-2.0
noobnl/android_kernel_samsung_d2-jb_2.5.1
drivers/misc/inv_mpu/mpu-dev.c
55
68578
/* $License: Copyright (C) 2011 InvenSense Corporation, All Rights Reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. $ */ #include <linux/i2c.h> #include <linux/i2c-dev.h> #include <linux/interrupt.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/stat.h> #include <linux/irq.h> #include <linux/gpio.h> #include <linux/signal.h> #include <linux/miscdevice.h> #include <linux/slab.h> #include <linux/version.h> #include <linux/pm.h> #include <linux/mutex.h> #include <linux/suspend.h> #include <linux/poll.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/mm.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/uaccess.h> #include <linux/io.h> #include <linux/hrtimer.h> #include "mpuirq.h" #include "slaveirq.h" #include "mlsl.h" #include "mldl_cfg.h" #include <linux/mpu_411.h> #include "accel/mpu6050.h" #include "mpu-dev.h" #ifdef CONFIG_INPUT_YAS_MAGNETOMETER #include "compass/yas530_ext.h" #endif #ifdef CONFIG_SENSORS_AK8975 #include "compass/ak89753.h" #endif #include <linux/akm8975.h> #ifdef CONFIG_HAS_EARLYSUSPEND #include <linux/earlysuspend.h> #endif #define MAG_VENDOR "AKM" #define MAG_PART_ID "AK8963C" #define MPU_VENDOR "INVENSENSE" #define MPU_PART_ID "MPU-6050" #define MPU_EARLY_SUSPEND_IN_DRIVER 1 #define CALIBRATION_FILE_PATH "/efs/calibration_data" #define CALIBRATION_DATA_AMOUNT 100 struct acc_data cal_data = {0, 0, 0}; /* Platform data for the MPU */ struct mpu_private_data { struct miscdevice dev; struct i2c_client *client; /* mldl_cfg data */ struct mldl_cfg mldl_cfg; struct mpu_ram mpu_ram; struct mpu_gyro_cfg mpu_gyro_cfg; struct mpu_offsets mpu_offsets; struct mpu_chip_info mpu_chip_info; struct inv_mpu_cfg inv_mpu_cfg; struct inv_mpu_state inv_mpu_state; struct mutex mutex; wait_queue_head_t mpu_event_wait; struct completion completion; struct timer_list timeout; struct notifier_block nb; struct mpuirq_data mpu_pm_event; int response_timeout; /* In seconds */ unsigned long event; int pid; struct module *slave_modules[EXT_SLAVE_NUM_TYPES]; struct { atomic_t enable; unsigned char is_activated; unsigned char turned_by_mpu_accel; } mpu_accel; struct hrtimer activate_timer; int activate_timeout; #ifdef CONFIG_HAS_EARLYSUSPEND struct early_suspend early_suspend; #endif }; static struct i2c_client *this_client; #define IDEAL_X 0 #define IDEAL_Y 0 #define IDEAL_Z 1024 struct mpu_private_data *mpu_private_data; void mpu_accel_enable_set(int enable) { struct mpu_private_data *mpu = (struct mpu_private_data *) i2c_get_clientdata(this_client); struct i2c_client *client = mpu->client; struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES]; struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave; int ii; for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) { if (!pdata_slave[ii]) slave_adapter[ii] = NULL; else slave_adapter[ii] = 
i2c_get_adapter(pdata_slave[ii]->adapt_num); } slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter; if (enable) { mpu->mpu_accel.is_activated = !(mldl_cfg->inv_mpu_state->status & MPU_ACCEL_IS_SUSPENDED); (void)inv_mpu_resume(mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], INV_THREE_AXIS_ACCEL); mpu->mpu_accel.turned_by_mpu_accel = 1; } else { if (!mpu->mpu_accel.is_activated && mpu->mpu_accel.turned_by_mpu_accel) { (void)inv_mpu_suspend(mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], INV_THREE_AXIS_ACCEL); mpu->mpu_accel.turned_by_mpu_accel = 0; } } atomic_set(&mpu->mpu_accel.enable, enable); } int read_accel_raw_xyz(struct acc_data *acc) { s16 x, y, z; struct mpu_private_data *mpu = (struct mpu_private_data *) i2c_get_clientdata(this_client); struct i2c_client *client = mpu->client; struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; int retval = 0; unsigned char data[6]; struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES]; struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave; int cal_div = (mldl_cfg->mpu_chip_info->accel_sens_trim) / 1024; int ii; for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) { if (!pdata_slave[ii]) slave_adapter[ii] = NULL; else slave_adapter[ii] = i2c_get_adapter(pdata_slave[ii]->adapt_num); } slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter; retval = inv_serial_read(slave_adapter[EXT_SLAVE_TYPE_ACCEL], 0x68, 0x3B, 6, data); x = (s16)((data[0] << 8) | data[1]) / cal_div; y = (s16)((data[2] << 8) | data[3]) / cal_div; z = (s16)((data[4] << 8) | data[5]) / cal_div; acc->x = x; acc->y = y; acc->z = z; return 0; } static int accel_open_calibration(void) { struct file *cal_filp = NULL; int err = 0; mm_segment_t old_fs; old_fs = get_fs(); set_fs(KERNEL_DS); cal_filp = filp_open(CALIBRATION_FILE_PATH, O_RDONLY, 0666); if (IS_ERR(cal_filp)) { pr_err("%s: Can't open calibration file\n", __func__); set_fs(old_fs); err = PTR_ERR(cal_filp); return err; } err = cal_filp->f_op->read(cal_filp, (char *)&cal_data, 3 * sizeof(s16), &cal_filp->f_pos); if (err != 3 * sizeof(s16)) { pr_err("%s: Can't read the cal data from file\n", __func__); err = -EIO; } printk(KERN_INFO"%s: (%u,%u,%u)\n", __func__, cal_data.x, cal_data.y, cal_data.z); filp_close(cal_filp, current->files); set_fs(old_fs); return err; } static int accel_do_calibrate(int enable) { struct mpu_private_data *mpu = (struct mpu_private_data *) i2c_get_clientdata(this_client); struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; struct acc_data data = { 0, }; struct file *cal_filp = NULL; int sum[3] = { 0, }; int err = 0; int i; mm_segment_t old_fs; int cal_div = (mldl_cfg->mpu_chip_info->accel_sens_trim) / 1024; int ideal_z = IDEAL_Z * mldl_cfg->pdata->orientation[8]; /*mutex_lock(&mpu->mutex);*/ mpu_accel_enable_set(1); mdelay(1); for (i = 0; i < CALIBRATION_DATA_AMOUNT; i++) { err = read_accel_raw_xyz(&data); if (err < 0) { pr_err("%s: accel_read_accel_raw_xyz() " "failed in the %dth loop\n", __func__, i); return err; } sum[0] += data.x; sum[1] += data.y; sum[2] += data.z; } mpu_accel_enable_set(0); mdelay(1); /*mutex_unlock(&mpu->mutex);*/ if (enable) { cal_data.x = ((sum[0] / CALIBRATION_DATA_AMOUNT) - IDEAL_X) * cal_div; cal_data.y = ((sum[1] / CALIBRATION_DATA_AMOUNT) - IDEAL_Y) * cal_div; cal_data.z = ((sum[2] / CALIBRATION_DATA_AMOUNT) - ideal_z) * cal_div; } else { 
		cal_data.x = 0;
		cal_data.y = 0;
		cal_data.z = 0;
	}

	printk(KERN_INFO "%s: cal data (%d,%d,%d)\n", __func__,
		cal_data.x, cal_data.y, cal_data.z);

	old_fs = get_fs();
	set_fs(KERNEL_DS);

	cal_filp = filp_open(CALIBRATION_FILE_PATH,
			O_CREAT | O_TRUNC | O_WRONLY, 0666);
	if (IS_ERR(cal_filp)) {
		pr_err("%s: Can't open calibration file\n", __func__);
		set_fs(old_fs);
		err = PTR_ERR(cal_filp);
		return err;
	}

	err = cal_filp->f_op->write(cal_filp, (char *)&cal_data,
			3 * sizeof(s16), &cal_filp->f_pos);
	if (err != 3 * sizeof(s16)) {
		pr_err("%s: Can't write the cal data to file\n", __func__);
		err = -EIO;
	}

	filp_close(cal_filp, current->files);
	set_fs(old_fs);

	return err;
}

static void mpu_pm_timeout(u_long data)
{
	struct mpu_private_data *mpu = (struct mpu_private_data *)data;
	struct i2c_client *client = mpu->client;

	dev_dbg(&client->adapter->dev, "%s\n", __func__);
	complete(&mpu->completion);
}

static int mpu_early_notifier_callback(struct mpu_private_data *mpu,
				       unsigned long event, void *unused)
{
	struct i2c_client *client = mpu->client;
	struct timeval event_time;

	dev_dbg(&client->adapter->dev, "%s: %ld\n", __func__, event);

	/* Prevent the file handle from being closed before we
	   initialize the completion event */
	printk(KERN_INFO "[%s] event = %lu\n", __func__, event);
	mutex_lock(&mpu->mutex);
	if (!(mpu->pid) ||
	    (event != PM_SUSPEND_PREPARE && event != PM_POST_SUSPEND)) {
		mutex_unlock(&mpu->mutex);
		return NOTIFY_OK;
	}

	if (event == PM_SUSPEND_PREPARE)
		mpu->event |= MPU_PM_EVENT_SUSPEND_PREPARE;
	if (event == PM_POST_SUSPEND)
		mpu->event |= MPU_PM_EVENT_POST_SUSPEND;

	do_gettimeofday(&event_time);
	mpu->mpu_pm_event.interruptcount++;
	mpu->mpu_pm_event.irqtime =
		(((long long)event_time.tv_sec) << 32) + event_time.tv_usec;
	mpu->mpu_pm_event.data_type = MPUIRQ_DATA_TYPE_PM_EVENT;
	mpu->mpu_pm_event.data = mpu->event;

	if (mpu->response_timeout > 0) {
		mpu->timeout.expires = jiffies + mpu->response_timeout * HZ;
		add_timer(&mpu->timeout);
	}
	INIT_COMPLETION(mpu->completion);
	mutex_unlock(&mpu->mutex);

	wake_up_interruptible(&mpu->mpu_event_wait);
	wait_for_completion(&mpu->completion);
	del_timer_sync(&mpu->timeout);
	dev_dbg(&client->adapter->dev, "%s: %ld DONE\n", __func__, event);
	return NOTIFY_OK;
}

static int mpu_dev_open(struct inode *inode, struct file *file)
{
	struct mpu_private_data *mpu =
		container_of(file->private_data, struct mpu_private_data, dev);
	struct i2c_client *client = mpu->client;
	int result;
	int ii;

	dev_dbg(&client->adapter->dev, "%s\n", __func__);
	dev_dbg(&client->adapter->dev, "current->pid %d\n", current->pid);

	accel_open_calibration();

	result = mutex_lock_interruptible(&mpu->mutex);
	if (result) {
		dev_err(&client->adapter->dev,
			"%s: mutex_lock_interruptible returned %d\n",
			__func__, result);
		return result;
	}
	if (mpu->pid) {
		mutex_unlock(&mpu->mutex);
		return -EBUSY;
	}
	mpu->pid = current->pid;

	/* Reset the sensors to the default */
	for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++)
		__module_get(mpu->slave_modules[ii]);
	mutex_unlock(&mpu->mutex);

	return 0;
}

/* close function - called when the "file" /dev/mpu is closed in userspace */
static int mpu_release(struct inode *inode, struct file *file)
{
	struct mpu_private_data *mpu =
		container_of(file->private_data, struct mpu_private_data, dev);
	struct i2c_client *client = mpu->client;
	struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
	int result = 0;
	int ii;
	struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES];
	struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave;

	for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) {
		if (!pdata_slave[ii])
			slave_adapter[ii] =
NULL; else slave_adapter[ii] = i2c_get_adapter(pdata_slave[ii]->adapt_num); } slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter; mutex_lock(&mpu->mutex); mldl_cfg->inv_mpu_cfg->requested_sensors = 0; result = inv_mpu_suspend(mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], INV_ALL_SENSORS); mpu->pid = 0; for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) module_put(mpu->slave_modules[ii]); mutex_unlock(&mpu->mutex); complete(&mpu->completion); dev_dbg(&client->adapter->dev, "mpu_release\n"); return result; } /* read function called when from /dev/mpu is read. Read from the FIFO */ static ssize_t mpu_read(struct file *file, char __user *buf, size_t count, loff_t *offset) { struct mpu_private_data *mpu = container_of(file->private_data, struct mpu_private_data, dev); struct i2c_client *client = mpu->client; size_t len = sizeof(mpu->mpu_pm_event) + sizeof(unsigned long); int err; if (!mpu->event && (!(file->f_flags & O_NONBLOCK))) wait_event_interruptible(mpu->mpu_event_wait, mpu->event); if (!mpu->event || !buf || count < sizeof(mpu->mpu_pm_event)) return 0; err = copy_to_user(buf, &mpu->mpu_pm_event, sizeof(mpu->mpu_pm_event)); if (err) { dev_err(&client->adapter->dev, "Copy to user returned %d\n", err); return -EFAULT; } mpu->event = 0; return len; } static unsigned int mpu_poll(struct file *file, struct poll_table_struct *poll) { struct mpu_private_data *mpu = container_of(file->private_data, struct mpu_private_data, dev); int mask = 0; poll_wait(file, &mpu->mpu_event_wait, poll); if (mpu->event) mask |= POLLIN | POLLRDNORM; return mask; } static int mpu_dev_ioctl_get_ext_slave_platform_data( struct i2c_client *client, struct ext_slave_platform_data __user *arg) { struct mpu_private_data *mpu = (struct mpu_private_data *)i2c_get_clientdata(client); struct ext_slave_platform_data *pdata_slave; struct ext_slave_platform_data local_pdata_slave; if (copy_from_user(&local_pdata_slave, arg, sizeof(local_pdata_slave))) return -EFAULT; if (local_pdata_slave.type >= EXT_SLAVE_NUM_TYPES) return -EINVAL; pdata_slave = mpu->mldl_cfg.pdata_slave[local_pdata_slave.type]; /* All but private data and irq_data */ if (!pdata_slave) return -ENODEV; if (copy_to_user(arg, pdata_slave, sizeof(*pdata_slave))) return -EFAULT; return 0; } static int mpu_dev_ioctl_get_mpu_platform_data( struct i2c_client *client, struct mpu_platform_data __user *arg) { struct mpu_private_data *mpu = (struct mpu_private_data *)i2c_get_clientdata(client); struct mpu_platform_data *pdata = mpu->mldl_cfg.pdata; if (copy_to_user(arg, pdata, sizeof(*pdata))) return -EFAULT; return 0; } static int mpu_dev_ioctl_get_ext_slave_descr( struct i2c_client *client, struct ext_slave_descr __user *arg) { struct mpu_private_data *mpu = (struct mpu_private_data *)i2c_get_clientdata(client); struct ext_slave_descr *slave; struct ext_slave_descr local_slave; if (copy_from_user(&local_slave, arg, sizeof(local_slave))) return -EFAULT; if (local_slave.type >= EXT_SLAVE_NUM_TYPES) return -EINVAL; slave = mpu->mldl_cfg.slave[local_slave.type]; /* All but private data and irq_data */ if (!slave) return -ENODEV; if (copy_to_user(arg, slave, sizeof(*slave))) return -EFAULT; return 0; } /** * slave_config() - Pass a requested slave configuration to the slave sensor * * @adapter the adaptor to use to communicate with the slave * @mldl_cfg the mldl configuration structuer * @slave pointer to the slave descriptor * @usr_config The configuration 
to pass to the slave sensor * * returns 0 or non-zero error code */ static int inv_mpu_config(struct mldl_cfg *mldl_cfg, void *gyro_adapter, struct ext_slave_config __user *usr_config) { int retval = 0; struct ext_slave_config config; retval = copy_from_user(&config, usr_config, sizeof(config)); if (retval) return -EFAULT; if (config.len && config.data) { void *data; data = kmalloc(config.len, GFP_KERNEL); if (!data) return -ENOMEM; retval = copy_from_user(data, (void __user *)config.data, config.len); if (retval) { retval = -EFAULT; kfree(data); return retval; } config.data = data; } retval = gyro_config(gyro_adapter, mldl_cfg, &config); kfree(config.data); return retval; } static int inv_mpu_get_config(struct mldl_cfg *mldl_cfg, void *gyro_adapter, struct ext_slave_config __user *usr_config) { int retval = 0; struct ext_slave_config config; void *user_data; retval = copy_from_user(&config, usr_config, sizeof(config)); if (retval) return -EFAULT; user_data = config.data; if (config.len && config.data) { void *data; data = kmalloc(config.len, GFP_KERNEL); if (!data) return -ENOMEM; retval = copy_from_user(data, (void __user *)config.data, config.len); if (retval) { retval = -EFAULT; kfree(data); return retval; } config.data = data; } retval = gyro_get_config(gyro_adapter, mldl_cfg, &config); if (!retval) retval = copy_to_user((unsigned char __user *)user_data, config.data, config.len); kfree(config.data); return retval; } static int slave_config(struct mldl_cfg *mldl_cfg, void *gyro_adapter, void *slave_adapter, struct ext_slave_descr *slave, struct ext_slave_platform_data *pdata, struct ext_slave_config __user *usr_config) { int retval = 0; struct ext_slave_config config; if ((!slave) || (!slave->config)) return -ENODEV; retval = copy_from_user(&config, usr_config, sizeof(config)); if (retval) return -EFAULT; if (config.len && config.data) { void *data; data = kmalloc(config.len, GFP_KERNEL); if (!data) return -ENOMEM; retval = copy_from_user(data, (void __user *)config.data, config.len); if (retval) { retval = -EFAULT; kfree(data); return retval; } config.data = data; } retval = inv_mpu_slave_config(mldl_cfg, gyro_adapter, slave_adapter, &config, slave, pdata); kfree(config.data); return retval; } static int slave_get_config(struct mldl_cfg *mldl_cfg, void *gyro_adapter, void *slave_adapter, struct ext_slave_descr *slave, struct ext_slave_platform_data *pdata, struct ext_slave_config __user *usr_config) { int retval = 0; struct ext_slave_config config; void *user_data; if (!(slave) || !(slave->get_config)) return -ENODEV; retval = copy_from_user(&config, usr_config, sizeof(config)); if (retval) return -EFAULT; user_data = config.data; if (config.len && config.data) { void *data; data = kmalloc(config.len, GFP_KERNEL); if (!data) return -ENOMEM; retval = copy_from_user(data, (void __user *)config.data, config.len); if (retval) { retval = -EFAULT; kfree(data); return retval; } config.data = data; } retval = inv_mpu_get_slave_config(mldl_cfg, gyro_adapter, slave_adapter, &config, slave, pdata); if (retval) { kfree(config.data); return retval; } retval = copy_to_user((unsigned char __user *)user_data, config.data, config.len); kfree(config.data); return retval; } static int inv_slave_read(struct mldl_cfg *mldl_cfg, void *gyro_adapter, void *slave_adapter, struct ext_slave_descr *slave, struct ext_slave_platform_data *pdata, void __user *usr_data) { int retval; unsigned char *data; data = kzalloc(slave->read_len, GFP_KERNEL); if (!data) return -EFAULT; retval = inv_mpu_slave_read(mldl_cfg, 
gyro_adapter, slave_adapter, slave, pdata, data); if ((!retval) && (copy_to_user((unsigned char __user *)usr_data, data, slave->read_len))) retval = -EFAULT; kfree(data); return retval; } static int mpu_handle_mlsl(void *sl_handle, unsigned char addr, unsigned int cmd, struct mpu_read_write __user *usr_msg) { int retval = 0; struct mpu_read_write msg; unsigned char *user_data; retval = copy_from_user(&msg, usr_msg, sizeof(msg)); if (retval) return -EFAULT; user_data = msg.data; if (msg.length && msg.data) { unsigned char *data; data = kmalloc(msg.length, GFP_KERNEL); if (!data) return -ENOMEM; retval = copy_from_user(data, (void __user *)msg.data, msg.length); if (retval) { retval = -EFAULT; kfree(data); return retval; } msg.data = data; } else { return -EPERM; } switch (cmd) { case MPU_READ: retval = inv_serial_read(sl_handle, addr, (unsigned char)msg.address, msg.length, msg.data); break; case MPU_WRITE: retval = inv_serial_write(sl_handle, addr, msg.length, msg.data); break; case MPU_READ_MEM: retval = inv_serial_read_mem(sl_handle, addr, msg.address, msg.length, msg.data); break; case MPU_WRITE_MEM: retval = inv_serial_write_mem(sl_handle, addr, msg.address, msg.length, msg.data); break; case MPU_READ_FIFO: retval = inv_serial_read_fifo(sl_handle, addr, msg.length, msg.data); break; case MPU_WRITE_FIFO: retval = inv_serial_write_fifo(sl_handle, addr, msg.length, msg.data); break; }; if (retval) { dev_err(&((struct i2c_adapter *)sl_handle)->dev, "%s: i2c %d error %d\n", __func__, cmd, retval); kfree(msg.data); return retval; } retval = copy_to_user((unsigned char __user *)user_data, msg.data, msg.length); kfree(msg.data); return retval; } static enum hrtimer_restart mpu_actiavte_sensors_callback(struct hrtimer *timer) { struct mpu_private_data *mpu = mpu_private_data; struct i2c_client *client = mpu->client; struct timeval event_time; dev_dbg(&client->adapter->dev, "%s\n", __func__); /* Prevent the file handle from being closed before we initialize the completion event */ mpu->event |= MPU_KN_EVENT_ENABLE_SENSORS; do_gettimeofday(&event_time); mpu->mpu_pm_event.interruptcount++; mpu->mpu_pm_event.irqtime = (((long long)event_time.tv_sec) << 32) + event_time.tv_usec; mpu->mpu_pm_event.data_type = MPUIRQ_DATA_TYPE_PM_EVENT; mpu->mpu_pm_event.data = mpu->event; wake_up_interruptible(&mpu->mpu_event_wait); return HRTIMER_NORESTART; } static long mpu_dev_ioctl_activate_sensors(struct mpu_private_data *mpu) { ktime_t ktime; struct i2c_client *client = mpu->client; dev_dbg(&client->adapter->dev, "%s\n", __func__); hrtimer_cancel(&mpu->activate_timer); ktime = ktime_set(mpu->activate_timeout / 1000, (mpu->activate_timeout % 1000) * 1000000); return hrtimer_start(&mpu->activate_timer, ktime, HRTIMER_MODE_REL); } /* ioctl - I/O control */ static long mpu_dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct mpu_private_data *mpu = container_of(file->private_data, struct mpu_private_data, dev); struct i2c_client *client = mpu->client; struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; int retval = 0; struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES]; struct ext_slave_descr **slave = mldl_cfg->slave; struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave; int ii; for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) { if (!pdata_slave[ii]) slave_adapter[ii] = NULL; else slave_adapter[ii] = i2c_get_adapter(pdata_slave[ii]->adapt_num); } slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter; retval = mutex_lock_interruptible(&mpu->mutex); if (retval) { 
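/* the wait for the device mutex was interrupted by a signal: report it
 * and bail out without touching the hardware */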
dev_err(&client->adapter->dev, "%s: mutex_lock_interruptible returned %d\n", __func__, retval); return retval; } switch (cmd) { case MPU_GET_EXT_SLAVE_PLATFORM_DATA: retval = mpu_dev_ioctl_get_ext_slave_platform_data( client, (struct ext_slave_platform_data __user *)arg); break; case MPU_GET_MPU_PLATFORM_DATA: retval = mpu_dev_ioctl_get_mpu_platform_data( client, (struct mpu_platform_data __user *)arg); break; case MPU_GET_EXT_SLAVE_DESCR: retval = mpu_dev_ioctl_get_ext_slave_descr( client, (struct ext_slave_descr __user *)arg); break; case MPU_READ: case MPU_WRITE: case MPU_READ_MEM: case MPU_WRITE_MEM: case MPU_READ_FIFO: case MPU_WRITE_FIFO: retval = mpu_handle_mlsl( slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], mldl_cfg->mpu_chip_info->addr, cmd, (struct mpu_read_write __user *)arg); break; case MPU_CONFIG_GYRO: retval = inv_mpu_config( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], (struct ext_slave_config __user *)arg); break; case MPU_CONFIG_ACCEL: retval = slave_config( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave[EXT_SLAVE_TYPE_ACCEL], pdata_slave[EXT_SLAVE_TYPE_ACCEL], (struct ext_slave_config __user *)arg); break; case MPU_CONFIG_COMPASS: retval = slave_config( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave[EXT_SLAVE_TYPE_COMPASS], pdata_slave[EXT_SLAVE_TYPE_COMPASS], (struct ext_slave_config __user *)arg); break; case MPU_CONFIG_PRESSURE: retval = slave_config( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], slave[EXT_SLAVE_TYPE_PRESSURE], pdata_slave[EXT_SLAVE_TYPE_PRESSURE], (struct ext_slave_config __user *)arg); break; case MPU_GET_CONFIG_GYRO: retval = inv_mpu_get_config( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], (struct ext_slave_config __user *)arg); break; case MPU_GET_CONFIG_ACCEL: retval = slave_get_config( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave[EXT_SLAVE_TYPE_ACCEL], pdata_slave[EXT_SLAVE_TYPE_ACCEL], (struct ext_slave_config __user *)arg); break; case MPU_GET_CONFIG_COMPASS: retval = slave_get_config( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave[EXT_SLAVE_TYPE_COMPASS], pdata_slave[EXT_SLAVE_TYPE_COMPASS], (struct ext_slave_config __user *)arg); break; case MPU_GET_CONFIG_PRESSURE: retval = slave_get_config( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], slave[EXT_SLAVE_TYPE_PRESSURE], pdata_slave[EXT_SLAVE_TYPE_PRESSURE], (struct ext_slave_config __user *)arg); break; case MPU_SUSPEND: retval = inv_mpu_suspend( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], arg); break; case MPU_RESUME: retval = inv_mpu_resume( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], arg); break; case MPU_PM_EVENT_HANDLED: dev_dbg(&client->adapter->dev, "%s: %d\n", __func__, cmd); complete(&mpu->completion); break; case MPU_READ_ACCEL: retval = inv_slave_read( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave[EXT_SLAVE_TYPE_ACCEL], pdata_slave[EXT_SLAVE_TYPE_ACCEL], (unsigned char __user *)arg); break; case MPU_READ_COMPASS: retval = inv_slave_read( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], 
slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave[EXT_SLAVE_TYPE_COMPASS], pdata_slave[EXT_SLAVE_TYPE_COMPASS], (unsigned char __user *)arg); break; case MPU_READ_PRESSURE: retval = inv_slave_read( mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], slave[EXT_SLAVE_TYPE_PRESSURE], pdata_slave[EXT_SLAVE_TYPE_PRESSURE], (unsigned char __user *)arg); break; case MPU_GET_REQUESTED_SENSORS: if (copy_to_user( (__u32 __user *)arg, &mldl_cfg->inv_mpu_cfg->requested_sensors, sizeof(mldl_cfg->inv_mpu_cfg->requested_sensors))) retval = -EFAULT; break; case MPU_SET_REQUESTED_SENSORS: mldl_cfg->inv_mpu_cfg->requested_sensors = arg; break; case MPU_GET_IGNORE_SYSTEM_SUSPEND: if (copy_to_user( (unsigned char __user *)arg, &mldl_cfg->inv_mpu_cfg->ignore_system_suspend, sizeof(mldl_cfg->inv_mpu_cfg->ignore_system_suspend))) retval = -EFAULT; break; case MPU_SET_IGNORE_SYSTEM_SUSPEND: mldl_cfg->inv_mpu_cfg->ignore_system_suspend = arg; break; case MPU_GET_MLDL_STATUS: if (copy_to_user( (unsigned char __user *)arg, &mldl_cfg->inv_mpu_state->status, sizeof(mldl_cfg->inv_mpu_state->status))) retval = -EFAULT; break; case MPU_GET_I2C_SLAVES_ENABLED: if (copy_to_user( (unsigned char __user *)arg, &mldl_cfg->inv_mpu_state->i2c_slaves_enabled, sizeof(mldl_cfg->inv_mpu_state->i2c_slaves_enabled))) retval = -EFAULT; break; case MPU_READ_ACCEL_OFFSET: { retval = copy_to_user((signed short __user *)arg, &cal_data, sizeof(cal_data)); if (INV_SUCCESS != retval) dev_err(&client->adapter->dev, "%s: cmd %x, arg %lu\n", __func__, cmd, arg); } break; case MPU_ACTIVATE_SENSORS: mpu_dev_ioctl_activate_sensors(mpu); break; default: dev_err(&client->adapter->dev, "%s: Unknown cmd %x, arg %lu\n", __func__, cmd, arg); retval = -EINVAL; }; mutex_unlock(&mpu->mutex); dev_dbg(&client->adapter->dev, "%s: %08x, %08lx, %d\n", __func__, cmd, arg, retval); if (retval > 0) retval = -retval; return retval; } #ifdef CONFIG_HAS_EARLYSUSPEND void mpu_dev_early_suspend(struct early_suspend *h) { struct mpu_private_data *mpu = (struct mpu_private_data *)i2c_get_clientdata(this_client); struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES]; struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave; int ii; printk(KERN_INFO"[@@@@@%s@@@@@]\n", __func__); mpu_early_notifier_callback(mpu, PM_SUSPEND_PREPARE, NULL); for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) { if (!pdata_slave[ii]) slave_adapter[ii] = NULL; else slave_adapter[ii] = i2c_get_adapter(pdata_slave[ii]->adapt_num); } slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = this_client->adapter; mutex_lock(&mpu->mutex); if (!mldl_cfg->inv_mpu_cfg->ignore_system_suspend) { (void)inv_mpu_suspend(mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], INV_ALL_SENSORS); } mutex_unlock(&mpu->mutex); } void mpu_dev_early_resume(struct early_suspend *h) { struct mpu_private_data *mpu = (struct mpu_private_data *)i2c_get_clientdata(this_client); struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES]; struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave; int ii; printk(KERN_INFO"[@@@@@%s@@@@@]\n", __func__); for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) { if (!pdata_slave[ii]) slave_adapter[ii] = NULL; else slave_adapter[ii] = i2c_get_adapter(pdata_slave[ii]->adapt_num); } slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = this_client->adapter; 
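/* power the chip back up only if userspace still holds /dev/mpu open and
 * has not asked the driver to ignore system suspend; only the sensors
 * that were requested before the suspend are resumed */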
mutex_lock(&mpu->mutex); if (mpu->pid && !mldl_cfg->inv_mpu_cfg->ignore_system_suspend) { (void)inv_mpu_resume(mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], mldl_cfg->inv_mpu_cfg->requested_sensors); } mutex_unlock(&mpu->mutex); mpu_early_notifier_callback(mpu, PM_POST_SUSPEND, NULL); } #endif void mpu_shutdown(struct i2c_client *client) { struct mpu_private_data *mpu = (struct mpu_private_data *)i2c_get_clientdata(client); struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES]; struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave; int ii; for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) { if (!pdata_slave[ii]) slave_adapter[ii] = NULL; else slave_adapter[ii] = i2c_get_adapter(pdata_slave[ii]->adapt_num); } slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter; mutex_lock(&mpu->mutex); (void)inv_mpu_suspend(mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], INV_ALL_SENSORS); mutex_unlock(&mpu->mutex); dev_dbg(&client->adapter->dev, "%s\n", __func__); } int mpu_dev_suspend(struct i2c_client *client, pm_message_t mesg) { struct mpu_private_data *mpu = (struct mpu_private_data *)i2c_get_clientdata(client); struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES]; struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave; int ii; printk(KERN_INFO"@@@@@%s@@@@@\n", __func__); for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) { if (!pdata_slave[ii]) slave_adapter[ii] = NULL; else slave_adapter[ii] = i2c_get_adapter(pdata_slave[ii]->adapt_num); } slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter; mutex_lock(&mpu->mutex); if (!mldl_cfg->inv_mpu_cfg->ignore_system_suspend) { dev_dbg(&client->adapter->dev, "%s: suspending on event %d\n", __func__, mesg.event); (void)inv_mpu_suspend(mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], INV_ALL_SENSORS); } else { dev_dbg(&client->adapter->dev, "%s: Already suspended %d\n", __func__, mesg.event); } mutex_unlock(&mpu->mutex); if (mldl_cfg->pdata->poweron) mldl_cfg->pdata->poweron(0); return 0; } int mpu_dev_resume(struct i2c_client *client) { struct mpu_private_data *mpu = (struct mpu_private_data *)i2c_get_clientdata(client); struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES]; struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave; int ii; printk(KERN_INFO"@@@@@%s@@@@@\n", __func__); if (mldl_cfg->pdata->poweron) mldl_cfg->pdata->poweron(1); for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) { if (!pdata_slave[ii]) slave_adapter[ii] = NULL; else slave_adapter[ii] = i2c_get_adapter(pdata_slave[ii]->adapt_num); } slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter; mutex_lock(&mpu->mutex); if (mpu->pid && !mldl_cfg->inv_mpu_cfg->ignore_system_suspend) { (void)inv_mpu_resume(mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], mldl_cfg->inv_mpu_cfg->requested_sensors); dev_dbg(&client->adapter->dev, "%s for pid %d\n", __func__, mpu->pid); } mutex_unlock(&mpu->mutex); return 0; } /* define which file operations are supported */ 
static const struct file_operations mpu_fops = { .owner = THIS_MODULE, .read = mpu_read, .poll = mpu_poll, .unlocked_ioctl = mpu_dev_ioctl, .open = mpu_dev_open, .release = mpu_release, }; int inv_mpu_register_slave(struct module *slave_module, struct i2c_client *slave_client, struct ext_slave_platform_data *slave_pdata, struct ext_slave_descr *(*get_slave_descr)(void)) { struct mpu_private_data *mpu = mpu_private_data; struct mldl_cfg *mldl_cfg; struct ext_slave_descr *slave_descr; struct ext_slave_platform_data **pdata_slave; char *irq_name = NULL; int result = 0; if (!slave_client || !slave_pdata || !get_slave_descr) return -EINVAL; if (!mpu) { dev_err(&slave_client->adapter->dev, "%s: Null mpu_private_data\n", __func__); return -EINVAL; } mldl_cfg = &mpu->mldl_cfg; pdata_slave = mldl_cfg->pdata_slave; slave_descr = get_slave_descr(); if (!slave_descr) { dev_err(&slave_client->adapter->dev, "%s: Null ext_slave_descr\n", __func__); return -EINVAL; } mutex_lock(&mpu->mutex); if (mpu->pid) { mutex_unlock(&mpu->mutex); return -EBUSY; } if (pdata_slave[slave_descr->type]) { result = -EBUSY; goto out_unlock_mutex; } slave_pdata->address = slave_client->addr; slave_pdata->irq = slave_client->irq; slave_pdata->adapt_num = i2c_adapter_id(slave_client->adapter); dev_info(&slave_client->adapter->dev, "%s: +%s Type %d: Addr: %2x IRQ: %2d, Adapt: %2d\n", __func__, slave_descr->name, slave_descr->type, slave_pdata->address, slave_pdata->irq, slave_pdata->adapt_num); switch (slave_descr->type) { case EXT_SLAVE_TYPE_ACCEL: irq_name = "accelirq"; break; case EXT_SLAVE_TYPE_COMPASS: irq_name = "compassirq"; break; case EXT_SLAVE_TYPE_PRESSURE: irq_name = "pressureirq"; break; default: irq_name = "none"; }; if (slave_descr->type == EXT_SLAVE_TYPE_COMPASS && slave_descr->init) { int retry_cnt = 3; #if defined(CONFIG_MPU_SENSORS_AK8975_411) unsigned int reset = mldl_cfg->pdata->reset; #endif do { retry_cnt--; result = slave_descr->init(slave_client->adapter, slave_descr, slave_pdata); if (result) { dev_err(&slave_client->adapter->dev, "%s init failed %d, cnt %d\n", slave_descr->name, result, retry_cnt); #if defined(CONFIG_MPU_SENSORS_AK8975_411) gpio_direction_output(reset, 0); usleep_range(30, 30); gpio_set_value_cansleep(reset, 1); usleep_range(30, 30); #endif } else { break; } } while (retry_cnt); if (result) { dev_err(&slave_client->adapter->dev, "%s init failed %d, cnt %d\n", slave_descr->name, result, retry_cnt); goto out_unlock_mutex; } } else { if (slave_descr->init) { result = slave_descr->init(slave_client->adapter, slave_descr, slave_pdata); if (result) { dev_err(&slave_client->adapter->dev, "%s init failed %d\n", slave_descr->name, result); goto out_unlock_mutex; } } } if (slave_descr->type == EXT_SLAVE_TYPE_ACCEL && slave_descr->id == ACCEL_ID_MPU6050 && slave_descr->config) { /* pass a reference to the mldl_cfg data structure to the mpu6050 accel "class" */ struct ext_slave_config config; config.key = MPU_SLAVE_CONFIG_INTERNAL_REFERENCE; config.len = sizeof(struct mldl_cfg *); config.apply = true; config.data = mldl_cfg; result = slave_descr->config( slave_client->adapter, slave_descr, slave_pdata, &config); if (result) { LOG_RESULT_LOCATION(result); goto out_slavedescr_exit; } } pdata_slave[slave_descr->type] = slave_pdata; mpu->slave_modules[slave_descr->type] = slave_module; mldl_cfg->slave[slave_descr->type] = slave_descr; goto out_unlock_mutex; out_slavedescr_exit: if (slave_descr->exit) slave_descr->exit(slave_client->adapter, slave_descr, slave_pdata); out_unlock_mutex: 
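	/* the slave IRQ is hooked up outside the mutex; a failure here is
	 * only warned about, the slave itself stays registered */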
mutex_unlock(&mpu->mutex); if (!result && irq_name && (slave_pdata->irq > 0)) { int warn_result; dev_info(&slave_client->adapter->dev, "Installing %s irq using %d\n", irq_name, slave_pdata->irq); warn_result = slaveirq_init(slave_client->adapter, slave_pdata, irq_name); if (warn_result) dev_warn(&slave_client->adapter->dev, "%s irq assigned error: %d\n", slave_descr->name, warn_result); } else { dev_warn(&slave_client->adapter->dev, "%s irq not assigned: %d %d %d\n", slave_descr->name, result, (int)irq_name, slave_pdata->irq); } return result; } EXPORT_SYMBOL(inv_mpu_register_slave); void inv_mpu_unregister_slave(struct i2c_client *slave_client, struct ext_slave_platform_data *slave_pdata, struct ext_slave_descr *(*get_slave_descr)(void)) { struct mpu_private_data *mpu = mpu_private_data; struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; struct ext_slave_descr *slave_descr; int result; if (!slave_client || !slave_pdata || !get_slave_descr) return; dev_info(&slave_client->adapter->dev, "%s\n", __func__); if (slave_pdata->irq) slaveirq_exit(slave_pdata); slave_descr = get_slave_descr(); if (!slave_descr) return; mutex_lock(&mpu->mutex); if (slave_descr->exit) { result = slave_descr->exit(slave_client->adapter, slave_descr, slave_pdata); if (result) dev_err(&slave_client->adapter->dev, "Accel exit failed %d\n", result); } mldl_cfg->slave[slave_descr->type] = NULL; mldl_cfg->pdata_slave[slave_descr->type] = NULL; mpu->slave_modules[slave_descr->type] = NULL; mutex_unlock(&mpu->mutex); } EXPORT_SYMBOL(inv_mpu_unregister_slave); static unsigned short normal_i2c[] = { I2C_CLIENT_END }; static const struct i2c_device_id mpu_id[] = { {"mpu3050", 0}, {"mpu6050", 0}, {"mpu6050_no_accel", 0}, {} }; MODULE_DEVICE_TABLE(i2c, mpu_id); static int mpu3050_factory_on(struct i2c_client *client) { struct mpu_private_data *mpu = (struct mpu_private_data *)i2c_get_clientdata(this_client); struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; int prev_gyro_suspended = 0; struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES]; struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave; int ii; printk(KERN_INFO"@@@@@ %s : %d @@@@@\n", __func__, __LINE__); for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) { if (!pdata_slave[ii]) slave_adapter[ii] = NULL; else slave_adapter[ii] = i2c_get_adapter(pdata_slave[ii]->adapt_num); } slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter; mutex_lock(&mpu->mutex); if (1) { (void)inv_mpu_resume(mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], mldl_cfg->inv_mpu_cfg->requested_sensors); } mutex_unlock(&mpu->mutex); return prev_gyro_suspended; } static ssize_t mpu3050_power_on(struct device *dev, struct device_attribute *attr, char *buf) { int count = 0; dev_dbg(dev, "this_client = %d\n", (int)this_client); count = sprintf(buf, "%d\n", (this_client != NULL ? 
		1 : 0));
	return count;
}

static ssize_t mpu3050_get_temp(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	int count = 0;
	short int temperature = 0;
	unsigned char data[2];
	struct mpu_private_data *mpu =
		(struct mpu_private_data *)i2c_get_clientdata(this_client);
	struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
	struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES];
	struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave;
	int ii;

	for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) {
		if (!pdata_slave[ii])
			slave_adapter[ii] = NULL;
		else
			slave_adapter[ii] =
				i2c_get_adapter(pdata_slave[ii]->adapt_num);
	}
	slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = this_client->adapter;

	mpu3050_factory_on(this_client);

	/*MPUREG_TEMP_OUT_H, 27 0x1b */
	/*MPUREG_TEMP_OUT_L, 28 0x1c */
	/* TEMP_OUT_H/L: 16-bit temperature data (2's complement data format) */
	inv_serial_read(slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE],
			DEFAULT_MPU_SLAVEADDR, MPUREG_TEMP_OUT_H, 2, data);
	temperature = (short)(((data[0]) << 8) | data[1]);
	temperature = (((temperature + 521) / 340) + 35);
	printk(KERN_INFO "read temperature = %d\n", temperature);

	count = sprintf(buf, "%d\n", temperature);
	return count;
}

static ssize_t mpu3050_acc_read(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	s16 x, y, z, temp;
	int count = 0;
	struct mpu_private_data *mpu =
		(struct mpu_private_data *)i2c_get_clientdata(this_client);
	struct i2c_client *client = mpu->client;
	struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
	int retval = 0;
	unsigned char data[6];
	struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES];
	struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave;
	int ii;

	for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) {
		if (!pdata_slave[ii])
			slave_adapter[ii] = NULL;
		else
			slave_adapter[ii] =
				i2c_get_adapter(pdata_slave[ii]->adapt_num);
	}
	slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter;

	retval = inv_serial_read(slave_adapter[EXT_SLAVE_TYPE_ACCEL],
				 0x68, 0x3B, 6, data);

	x = (s16)(((data[0] << 8) | data[1]) - cal_data.x);/*CAL_DIV;*/
	y = (s16)(((data[2] << 8) | data[3]) - cal_data.y);/*CAL_DIV;*/
	z = (s16)(((data[4] << 8) | data[5]) - cal_data.z);/*CAL_DIV;*/

	/* map the raw axes through the mount matrix in the platform data */
	if (mldl_cfg->pdata->orientation[0]) {
		x *= mldl_cfg->pdata->orientation[0];
		y *= mldl_cfg->pdata->orientation[4];
	} else {
		temp = x * mldl_cfg->pdata->orientation[1];
		x = y * mldl_cfg->pdata->orientation[3];
		y = temp;
	}
	z *= mldl_cfg->pdata->orientation[8];

	count = sprintf(buf, "%d, %d, %d\n", x, y, z);
	return count;
}

static ssize_t accel_calibration_show(struct device *dev,
	struct device_attribute *attr, char *buf)
{
	int count = 0;
	int ret = 1;

	pr_info("%d %d %d\n", cal_data.x, cal_data.y, cal_data.z);
	if (!cal_data.x && !cal_data.y && !cal_data.z)
		ret = -1;
	count = sprintf(buf, "%d %d %d %d\n",
			ret, cal_data.x, cal_data.y, cal_data.z);
	return count;
}

static ssize_t accel_calibration_store(struct device *dev,
	struct device_attribute *attr, const char *buf, size_t size)
{
	int err;
	int enable = 0;

	err = kstrtoint(buf, 10, &enable);
	if (err) {
		pr_err("ERROR: %s got bad char\n", __func__);
		return -EINVAL;
	}

	err = accel_do_calibrate(enable);
	if (err < 0) {
		pr_err("%s: accel_do_calibrate() failed\n", __func__);
		return err;
	}
	return size;
}

static int mpu_alert_factory_on(struct i2c_client *client)
{
	struct mpu_private_data *mpu =
		(struct mpu_private_data *)i2c_get_clientdata(this_client);
	struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg;
	int prev_gyro_suspended = 0;
	struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES];
	struct ext_slave_platform_data **pdata_slave =
mldl_cfg->pdata_slave; int ii; printk(KERN_INFO"@@@@@ %s : %d @@@@@\n", __func__, __LINE__); for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) { if (!pdata_slave[ii]) slave_adapter[ii] = NULL; else slave_adapter[ii] = i2c_get_adapter(pdata_slave[ii]->adapt_num); } slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter; mutex_lock(&mpu->mutex); inv_mpu_resume(mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave_adapter[EXT_SLAVE_TYPE_PRESSURE], INV_THREE_AXIS_ACCEL); mutex_unlock(&mpu->mutex); return prev_gyro_suspended; } static ssize_t accel_reactive_alert_store(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { int err = 0; bool onoff = false, factory_test = false; struct mpu_private_data *mpu = (struct mpu_private_data *) i2c_get_clientdata(this_client); struct i2c_client *client = mpu->client; struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; struct i2c_adapter *slave_adapter; struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave; unsigned char reg_data = 0; slave_adapter = i2c_get_adapter(pdata_slave[EXT_SLAVE_TYPE_ACCEL]->adapt_num); if (sysfs_streq(buf, "1")) { onoff = true; } else if (sysfs_streq(buf, "0")) { onoff = false; } else if (sysfs_streq(buf, "2")) { onoff = true; factory_test = true; err = inv_serial_read(slave_adapter, 0x68, MPUREG_INT_ENABLE, sizeof(reg_data), &reg_data); if (err) pr_err("%s: read INT failed\n", __func__); reg_data |= BIT_RAW_RDY_EN; err = inv_serial_single_write(slave_adapter, 0x68, MPUREG_INT_ENABLE, reg_data); if (err) pr_err("%s: i2c write INT reg failed\n", __func__); err = inv_serial_read(slave_adapter, 0x68, MPUREG_INT_ENABLE, sizeof(reg_data), &reg_data); if (err) pr_err("%s: read INT failed\n", __func__); mpu_alert_factory_on(this_client); err = inv_serial_single_write(slave_adapter, 0x68, MPUREG_SMPLRT_DIV, (unsigned char)19); if (err) pr_err("%s: set_DIV\n", __func__); } else { pr_err("%s: invalid value %d\n", __func__, *buf); return -EINVAL; } if (onoff && !mldl_cfg->inv_mpu_state->accel_reactive) { pr_info("reactive alert is on.\n"); enable_irq_wake(client->irq); } else if (!onoff && mldl_cfg->inv_mpu_state->accel_reactive) { pr_info("reactive alert is off.\n"); disable_irq_wake(client->irq); err = inv_serial_read(slave_adapter, 0x68, MPUREG_INT_ENABLE, sizeof(reg_data), &reg_data); if (err) pr_err("%s: read INT failed\n", __func__); reg_data &= ~BIT_MOT_EN; err = inv_serial_single_write(slave_adapter, 0x68, MPUREG_INT_ENABLE, reg_data); if (err) pr_err("%s: i2c write INT reg failed\n", __func__); } mldl_cfg->inv_mpu_state->use_accel_reactive = onoff; mldl_cfg->inv_mpu_state->accel_reactive = onoff; mldl_cfg->inv_mpu_state->reactive_factory = factory_test; return count; } static ssize_t accel_reactive_alert_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mpu_private_data *mpu = (struct mpu_private_data *) i2c_get_clientdata(this_client); struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; if (mldl_cfg->inv_mpu_state->use_accel_reactive && !mldl_cfg->inv_mpu_state->accel_reactive) return sprintf(buf, "%d\n", 1); else return sprintf(buf, "%d\n", 0); } static ssize_t mpu_vendor_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%s\n", MPU_VENDOR); } static ssize_t mpu_name_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%s\n", MPU_PART_ID); } #if defined(CONFIG_MPU_SENSORS_AK8975_411) static int 
akm8975_wait_for_data_ready(struct i2c_adapter *sl_adapter) { int err; u8 buf; int count = 10; while (1) { msleep(20); err = inv_serial_read(sl_adapter, 0x0C, AK8975_REG_ST1, sizeof(buf), &buf); if (err) { pr_err("%s: read data over i2c failed\n", __func__); return -EIO; } if (buf&0x1) break; count--; if (!count) break; } return 0; } static ssize_t ak8975_adc(struct device *dev, struct device_attribute *attr, char *strbuf) { struct mpu_private_data *mpu = (struct mpu_private_data *) i2c_get_clientdata(this_client); struct i2c_client *client = mpu->client; struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; u8 buf[8]; s16 x, y, z; int err, success; struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES]; struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave; int ii; if (pdata_slave[EXT_SLAVE_TYPE_COMPASS] == NULL) return snprintf(strbuf, PAGE_SIZE, "%s, %d, %d, %d\n", "NG", 0, 0, 0); for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) { if (!pdata_slave[ii]) slave_adapter[ii] = NULL; else slave_adapter[ii] = i2c_get_adapter(pdata_slave[ii]->adapt_num); } slave_adapter[EXT_SLAVE_TYPE_COMPASS] = client->adapter; mutex_lock(&mpu->mutex); /* start ADC conversion */ err = inv_serial_single_write(slave_adapter[EXT_SLAVE_TYPE_COMPASS], 0x0C, AK8975_REG_CNTL, AK8975_MODE_SNG_MEASURE); if (err) pr_err("ak8975_adc write err:%d\n", err); /* wait for ADC conversion to complete */ err = akm8975_wait_for_data_ready (slave_adapter[EXT_SLAVE_TYPE_COMPASS]); if (err) { pr_err("%s: wait for data ready failed\n", __func__); return err; } msleep(20);/*msleep(10);*/ /* get the value and report it */ err = inv_serial_read(slave_adapter[EXT_SLAVE_TYPE_COMPASS], 0x0C, AK8975_REG_ST1, sizeof(buf), buf); if (err) { pr_err("%s: read data over i2c failed %d\n", __func__, err); mutex_unlock(&mpu->mutex); return -EIO; } mutex_unlock(&mpu->mutex); /* buf[0] is status1, buf[7] is status2 */ if ((buf[0] == 0) | (buf[7] == 1)) success = 0; else success = 1; x = buf[1] | (buf[2] << 8); y = buf[3] | (buf[4] << 8); z = buf[5] | (buf[6] << 8); pr_err("%s: raw x = %d, y = %d, z = %d\n", __func__, x, y, z); return snprintf(strbuf, PAGE_SIZE, "%s,%d,%d,%d\n", (success ? "OK" : "NG"), x, y, z); } static ssize_t ak8975_check_cntl(struct device *dev, struct device_attribute *attr, char *buf) { struct mpu_private_data *mpu = (struct mpu_private_data *) i2c_get_clientdata(this_client); struct i2c_client *client = mpu->client; struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES]; struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave; int ii, err; u8 data; if (pdata_slave[EXT_SLAVE_TYPE_COMPASS] == NULL) return snprintf(buf, PAGE_SIZE, "%s\n", "NG"); for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) { if (!pdata_slave[ii]) slave_adapter[ii] = NULL; else slave_adapter[ii] = i2c_get_adapter(pdata_slave[ii]->adapt_num); } slave_adapter[EXT_SLAVE_TYPE_COMPASS] = client->adapter; mutex_lock(&mpu->mutex); err = inv_serial_single_write(slave_adapter[EXT_SLAVE_TYPE_COMPASS], 0x0C, AK8975_REG_CNTL, AK8975_MODE_POWER_DOWN); if (err) { pr_err("ak8975_adc write err:%d\n", err); mutex_unlock(&mpu->mutex); return -EIO; } err = inv_serial_read(slave_adapter[EXT_SLAVE_TYPE_COMPASS], 0x0C, AK8975_REG_CNTL, sizeof(data), &data); if (err) { pr_err("%s: read data over i2c failed %d\n", __func__, err); mutex_unlock(&mpu->mutex); return -EIO; } mutex_unlock(&mpu->mutex); return snprintf(buf, PAGE_SIZE, "%s\n", data == AK8975_MODE_POWER_DOWN ? 
"OK" : "NG"); } static ssize_t akm8975_rawdata_show(struct device *dev, struct device_attribute *attr, char *buf) { struct mpu_private_data *mpu = (struct mpu_private_data *) i2c_get_clientdata(this_client); struct i2c_client *client = mpu->client; struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; short x = 0, y = 0, z = 0; int err; u8 data[8]; struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES]; struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave; int ii; for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) { if (!pdata_slave[ii]) slave_adapter[ii] = NULL; else slave_adapter[ii] = i2c_get_adapter(pdata_slave[ii]->adapt_num); } slave_adapter[EXT_SLAVE_TYPE_COMPASS] = client->adapter; mutex_lock(&mpu->mutex); err = inv_serial_single_write(slave_adapter[EXT_SLAVE_TYPE_COMPASS], 0x0C, AK8975_REG_CNTL, AK8975_MODE_SNG_MEASURE); if (err) { pr_err("ak8975_adc write err:%d\n", err); mutex_unlock(&mpu->mutex); goto done; } err = akm8975_wait_for_data_ready (slave_adapter[EXT_SLAVE_TYPE_COMPASS]); if (err) { mutex_unlock(&mpu->mutex); goto done; } /* get the value and report it */ err = inv_serial_read(slave_adapter[EXT_SLAVE_TYPE_COMPASS], 0x0C, AK8975_REG_ST1, sizeof(data), data); if (err) { pr_err("%s: read data over i2c failed %d\n", __func__, err); mutex_unlock(&mpu->mutex); return -EIO; } mutex_unlock(&mpu->mutex); if (err) { pr_err("%s: failed to read %d bytes of mag data\n", __func__, sizeof(data)); goto done; } if (data[0] & 0x01) { x = (data[2] << 8) + data[1]; y = (data[4] << 8) + data[3]; z = (data[6] << 8) + data[5]; } else pr_err("%s: invalid raw data(st1 = %d)\n", __func__, data[0] & 0x01); done: return snprintf(buf, PAGE_SIZE, "%d,%d,%d\n", x, y, z); } struct ak8975_config { char asa[COMPASS_NUM_AXES]; /* axis sensitivity adjustment */ }; struct ak8975_private_data { struct ak8975_config init; }; static ssize_t ak8975c_get_status(struct device *dev, struct device_attribute *attr, char *buf) { struct mpu_private_data *mpu = (struct mpu_private_data *) i2c_get_clientdata(this_client); struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave; int success; struct ak8975_private_data *private_data; if (pdata_slave[EXT_SLAVE_TYPE_COMPASS] == NULL) return snprintf(buf, PAGE_SIZE, "%s\n", "NG"); private_data = (struct ak8975_private_data *) pdata_slave[EXT_SLAVE_TYPE_COMPASS]->private_data; if ((private_data->init.asa[0] == 0) | (private_data->init.asa[0] == 0xff) | (private_data->init.asa[1] == 0) | (private_data->init.asa[1] == 0xff) | (private_data->init.asa[2] == 0) | (private_data->init.asa[2] == 0xff)) success = 0; else success = 1; return snprintf(buf, PAGE_SIZE, "%s\n", (success ? 
"OK" : "NG")); } int ak8975c_selftest(struct i2c_adapter *slave_adapter, struct ak8975_private_data *private_data, int *sf) { int err; u8 data; u8 buf[6]; int count = 20; s16 x, y, z; /* set ATSC self test bit to 1 */ err = inv_serial_single_write(slave_adapter, 0x0C, AK8975_REG_ASTC, 0x40); /* start self test */ err = inv_serial_single_write(slave_adapter, 0x0C, AK8975_REG_CNTL, AK8975_MODE_SELF_TEST); /* wait for data ready */ while (1) { msleep(20); err = inv_serial_read(slave_adapter, 0x0C, AK8975_REG_ST1, sizeof(data), &data); if (data == 1) break; count--; if (!count) break; } err = inv_serial_read(slave_adapter, 0x0C, AK8975_REG_HXL, sizeof(buf), buf); /* set ATSC self test bit to 0 */ err = inv_serial_single_write(slave_adapter, 0x0C, AK8975_REG_ASTC, 0x00); x = buf[0] | (buf[1] << 8); y = buf[2] | (buf[3] << 8); z = buf[4] | (buf[5] << 8); /* Hadj = (H*(Asa+128))/256 */ x = (x*(private_data->init.asa[0] + 128)) >> 8; y = (y*(private_data->init.asa[1] + 128)) >> 8; z = (z*(private_data->init.asa[2] + 128)) >> 8; pr_info("%s: self test x = %d, y = %d, z = %d\n", __func__, x, y, z); if ((x >= -200) && (x <= 200)) pr_info("%s: x passed self test, expect -200<=x<=200\n", __func__); else pr_info("%s: x failed self test, expect -200<=x<=200\n", __func__); if ((y >= -200) && (y <= 200)) pr_info("%s: y passed self test, expect -200<=y<=200\n", __func__); else pr_info("%s: y failed self test, expect -200<=y<=200\n", __func__); if ((z >= -3200) && (z <= -800)) pr_info("%s: z passed self test, expect -3200<=z<=-800\n", __func__); else pr_info("%s: z failed self test, expect -3200<=z<=-800\n", __func__); sf[0] = x; sf[1] = y; sf[2] = z; if (((x >= -200) && (x <= 200)) && ((y >= -200) && (y <= 200)) && ((z >= -3200) && (z <= -800))) return 1; else return 0; } static ssize_t ak8975c_get_selftest(struct device *dev, struct device_attribute *attr, char *buf) { struct mpu_private_data *mpu = (struct mpu_private_data *) i2c_get_clientdata(this_client); struct i2c_client *client = mpu->client; struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES]; struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave; struct ak8975_private_data *private_data; int ii, success; int sf[3] = {0,}; int retry = 3; if (pdata_slave[EXT_SLAVE_TYPE_COMPASS] == NULL) return snprintf(buf, PAGE_SIZE, "%d, %d, %d, %d\n", 0, 0, 0, 0); private_data = (struct ak8975_private_data *) pdata_slave[EXT_SLAVE_TYPE_COMPASS]->private_data; for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) { if (!pdata_slave[ii]) slave_adapter[ii] = NULL; else slave_adapter[ii] = i2c_get_adapter(pdata_slave[ii]->adapt_num); } slave_adapter[EXT_SLAVE_TYPE_COMPASS] = client->adapter; do { retry--; success = ak8975c_selftest( slave_adapter[EXT_SLAVE_TYPE_COMPASS], private_data, sf); if (success) break; } while (retry > 0); return snprintf(buf, PAGE_SIZE, "%d, %d, %d, %d\n", success, sf[0], sf[1], sf[2]); } static ssize_t akm_vendor_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%s\n", MAG_VENDOR); } static ssize_t akm_name_show(struct device *dev, struct device_attribute *attr, char *buf) { return sprintf(buf, "%s\n", MAG_PART_ID); } #endif static DEVICE_ATTR(power_on, S_IRUGO, mpu3050_power_on, NULL); static DEVICE_ATTR(temperature, S_IRUGO, mpu3050_get_temp, NULL); static DEVICE_ATTR(calibration, S_IRUGO|S_IWUSR|S_IWGRP, accel_calibration_show, accel_calibration_store); static DEVICE_ATTR(raw_data, S_IRUGO, mpu3050_acc_read, NULL); static 
DEVICE_ATTR(reactive_alert, S_IRUGO|S_IWUSR|S_IWGRP, accel_reactive_alert_show, accel_reactive_alert_store); static DEVICE_ATTR(vendor, S_IRUGO, mpu_vendor_show, NULL); static DEVICE_ATTR(name, S_IRUGO, mpu_name_show, NULL); #if defined(CONFIG_MPU_SENSORS_AK8975_411) static DEVICE_ATTR(adc, S_IRUGO, ak8975_adc, NULL); static DEVICE_ATTR(dac, S_IRUGO, ak8975_check_cntl, NULL); static DEVICE_ATTR(status, S_IRUGO, ak8975c_get_status, NULL); static DEVICE_ATTR(selftest, S_IRUGO, ak8975c_get_selftest, NULL); static struct device_attribute dev_attr_mag_rawdata = __ATTR(raw_data, S_IRUGO, akm8975_rawdata_show, NULL); static struct device_attribute dev_attr_mag_vendor = __ATTR(vendor, S_IRUGO, akm_vendor_show, NULL); static struct device_attribute dev_attr_mag_name = __ATTR(name, S_IRUGO, akm_name_show, NULL); #endif static struct device_attribute *gyro_sensor_attrs[] = { &dev_attr_power_on, &dev_attr_temperature, &dev_attr_vendor, &dev_attr_name, /* &dev_attr_selftest,*/ NULL, }; static struct device_attribute *accel_sensor_attrs[] = { &dev_attr_raw_data, &dev_attr_calibration, &dev_attr_reactive_alert, &dev_attr_vendor, &dev_attr_name, NULL, }; #if defined(CONFIG_MPU_SENSORS_AK8975_411) static struct device_attribute *magnetic_sensor_attrs[] = { &dev_attr_adc, &dev_attr_mag_rawdata, &dev_attr_dac, &dev_attr_status, &dev_attr_selftest, &dev_attr_mag_vendor, &dev_attr_mag_name, NULL, }; #endif static struct device *gsensorcal; static struct device *gyro_sensor_device; static struct device *accel_sensor_device; #if defined(CONFIG_MPU_SENSORS_AK8975_411) static struct device *magnetic_sensor_device; #endif int mpu_probe(struct i2c_client *client, const struct i2c_device_id *devid) { struct mpu_platform_data *pdata; struct mpu_private_data *mpu; struct mldl_cfg *mldl_cfg; int res = 0; int ii = 0; dev_info(&client->adapter->dev, "%s: %d\n", __func__, ii++); if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) { res = -ENODEV; goto out_check_functionality_failed; } mpu = kzalloc(sizeof(struct mpu_private_data), GFP_KERNEL); if (!mpu) { res = -ENOMEM; goto out_alloc_data_failed; } mldl_cfg = &mpu->mldl_cfg; mldl_cfg->mpu_ram = &mpu->mpu_ram; mldl_cfg->mpu_gyro_cfg = &mpu->mpu_gyro_cfg; mldl_cfg->mpu_offsets = &mpu->mpu_offsets; mldl_cfg->mpu_chip_info = &mpu->mpu_chip_info; mldl_cfg->inv_mpu_cfg = &mpu->inv_mpu_cfg; mldl_cfg->inv_mpu_state = &mpu->inv_mpu_state; mldl_cfg->mpu_ram->length = MPU_MEM_NUM_RAM_BANKS * MPU_MEM_BANK_SIZE; mldl_cfg->mpu_ram->ram = kzalloc(mldl_cfg->mpu_ram->length, GFP_KERNEL); if (!mldl_cfg->mpu_ram->ram) { res = -ENOMEM; goto out_alloc_ram_failed; } mpu_private_data = mpu; i2c_set_clientdata(client, mpu); this_client = client; mpu->client = client; init_waitqueue_head(&mpu->mpu_event_wait); mutex_init(&mpu->mutex); init_completion(&mpu->completion); mpu->response_timeout = 1; /* Seconds */ mpu->timeout.function = mpu_pm_timeout; mpu->timeout.data = (u_long) mpu; init_timer(&mpu->timeout); mpu->activate_timeout = 10; hrtimer_init(&mpu->activate_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); mpu->activate_timer.function = mpu_actiavte_sensors_callback; pdata = (struct mpu_platform_data *)client->dev.platform_data; if (!pdata) { dev_err(&client->adapter->dev, "Missing platform data for mpu\n"); goto out_whoami_failed; } mldl_cfg->pdata = pdata; if (mldl_cfg->pdata->poweron) mldl_cfg->pdata->poweron(1); mldl_cfg->mpu_chip_info->addr = client->addr; res = inv_mpu_open(&mpu->mldl_cfg, client->adapter, NULL, NULL, NULL); if (res) { dev_err(&client->adapter->dev, "Unable to open 
%s %d\n", MPU_NAME, res); res = -ENODEV; goto out_whoami_failed; } mpu->dev.minor = MISC_DYNAMIC_MINOR; mpu->dev.name = "mpu"; mpu->dev.fops = &mpu_fops; res = misc_register(&mpu->dev); if (res < 0) { dev_err(&client->adapter->dev, "ERROR: misc_register returned %d\n", res); goto out_misc_register_failed; } if (client->irq) { dev_info(&client->adapter->dev, "Installing irq using %d\n", client->irq); res = mpuirq_init(client, mldl_cfg); if (res) goto out_mpuirq_failed; } else { dev_warn(&client->adapter->dev, "Missing %s IRQ\n", MPU_NAME); } if (!strcmp(mpu_id[1].name, devid->name)) { /* Special case to re-use the inv_mpu_register_slave */ struct ext_slave_platform_data *slave_pdata; slave_pdata = kzalloc(sizeof(*slave_pdata), GFP_KERNEL); if (!slave_pdata) { res = -ENOMEM; goto out_slave_pdata_kzalloc_failed; } slave_pdata->bus = EXT_SLAVE_BUS_PRIMARY; for (ii = 0; ii < 9; ii++) slave_pdata->orientation[ii] = pdata->orientation[ii]; res = inv_mpu_register_slave( NULL, client, slave_pdata, mpu6050_get_slave_descr); if (res) { /* if inv_mpu_register_slave fails there are no pointer references to the memory allocated to slave_pdata */ kfree(slave_pdata); goto out_slave_pdata_kzalloc_failed; } } #ifdef CONFIG_INPUT_YAS_MAGNETOMETER { __s8 orientation[9] = { 1, 0, 0, 0, 1, 0, 0, 0, 1 }; /* Special case to re-use the inv_mpu_register_slave */ struct ext_slave_platform_data *slave_pdata; slave_pdata = kzalloc(sizeof(*slave_pdata), GFP_KERNEL); if (!slave_pdata) { res = -ENOMEM; goto out_slave_pdata_kzalloc_failed; } slave_pdata->bus = EXT_SLAVE_BUS_PRIMARY; for (ii = 0; ii < 9; ii++) slave_pdata->orientation[ii] = orientation[ii]; res = inv_mpu_register_slave( NULL, client, slave_pdata, yas530_ext_get_slave_descr); if (res) { /* if inv_mpu_register_slave fails there are no pointer references to the memory allocated to slave_pdata */ kfree(slave_pdata); goto out_slave_pdata_kzalloc_failed; } } #endif /*CONFIG_INPUT_YAS_MAGNETOMETER*/ res = sensors_register(gyro_sensor_device, NULL, gyro_sensor_attrs, "gyro_sensor"); if (res) { printk(KERN_ERR "%s: cound not register gyro sensor device(%d).\n", __func__, res); } res = sensors_register(accel_sensor_device, NULL, accel_sensor_attrs, "accelerometer_sensor"); if (res) { printk(KERN_ERR "%s: cound not register accelerometer sensor device(%d).\n", __func__, res); goto out_sensor_register_failed; } #if defined(CONFIG_MPU_SENSORS_AK8975_411) res = sensors_register(magnetic_sensor_device, NULL, magnetic_sensor_attrs, "magnetic_sensor"); if (res) { printk(KERN_ERR "%s: cound not register magnetic sensor device(%d).\n", __func__, res); goto out_sensor_register_failed; } #endif gsensorcal = device_create(sec_class, NULL, 0, mpu, "gsensorcal"); if (IS_ERR(gsensorcal)) printk(KERN_ERR "Failed to create device!"); if (device_create_file(gsensorcal, &dev_attr_calibration) < 0) { printk(KERN_ERR "Failed to create device file(%s)!\n", dev_attr_calibration.attr.name); goto out_gsensorcal_failed; } #ifdef CONFIG_HAS_EARLYSUSPEND mpu->early_suspend.level = EARLY_SUSPEND_LEVEL_DISABLE_FB + 1; mpu->early_suspend.suspend = mpu_dev_early_suspend; mpu->early_suspend.resume = mpu_dev_early_resume; register_early_suspend(&mpu->early_suspend); #endif return res; out_gsensorcal_failed: out_sensor_register_failed: out_slave_pdata_kzalloc_failed: if (client->irq) mpuirq_exit(); out_mpuirq_failed: misc_deregister(&mpu->dev); out_misc_register_failed: inv_mpu_close(&mpu->mldl_cfg, client->adapter, NULL, NULL, NULL); out_whoami_failed: kfree(mldl_cfg->mpu_ram->ram); mpu_private_data 
= NULL; out_alloc_ram_failed: kfree(mpu); out_alloc_data_failed: out_check_functionality_failed: dev_err(&client->adapter->dev, "%s failed %d\n", __func__, res); return res; } static int mpu_remove(struct i2c_client *client) { struct mpu_private_data *mpu = i2c_get_clientdata(client); struct i2c_adapter *slave_adapter[EXT_SLAVE_NUM_TYPES]; struct mldl_cfg *mldl_cfg = &mpu->mldl_cfg; struct ext_slave_platform_data **pdata_slave = mldl_cfg->pdata_slave; int ii; for (ii = 0; ii < EXT_SLAVE_NUM_TYPES; ii++) { if (!pdata_slave[ii]) slave_adapter[ii] = NULL; else slave_adapter[ii] = i2c_get_adapter(pdata_slave[ii]->adapt_num); } slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE] = client->adapter; dev_dbg(&client->adapter->dev, "%s\n", __func__); inv_mpu_close(mldl_cfg, slave_adapter[EXT_SLAVE_TYPE_GYROSCOPE], slave_adapter[EXT_SLAVE_TYPE_ACCEL], slave_adapter[EXT_SLAVE_TYPE_COMPASS], slave_adapter[EXT_SLAVE_TYPE_PRESSURE]); if (mldl_cfg->slave[EXT_SLAVE_TYPE_ACCEL] && (mldl_cfg->slave[EXT_SLAVE_TYPE_ACCEL]->id == ACCEL_ID_MPU6050)) { struct ext_slave_platform_data *slave_pdata = mldl_cfg->pdata_slave[EXT_SLAVE_TYPE_ACCEL]; inv_mpu_unregister_slave( client, mldl_cfg->pdata_slave[EXT_SLAVE_TYPE_ACCEL], mpu6050_get_slave_descr); kfree(slave_pdata); } if (client->irq) mpuirq_exit(); misc_deregister(&mpu->dev); kfree(mpu->mldl_cfg.mpu_ram->ram); kfree(mpu); return 0; } static struct i2c_driver mpu_driver = { .class = I2C_CLASS_HWMON, .probe = mpu_probe, .remove = mpu_remove, .id_table = mpu_id, .driver = { .owner = THIS_MODULE, .name = MPU_NAME, }, .address_list = normal_i2c, .shutdown = mpu_shutdown, /* optional */ .suspend = mpu_dev_suspend, /* optional */ .resume = mpu_dev_resume, /* optional */ }; static int __init mpu_init(void) { int res = i2c_add_driver(&mpu_driver); pr_info("%s: Probe name %s\n", __func__, MPU_NAME); if (res) pr_err("%s failed\n", __func__); return res; } static void __exit mpu_exit(void) { pr_info("%s\n", __func__); i2c_del_driver(&mpu_driver); } module_init(mpu_init); module_exit(mpu_exit); MODULE_AUTHOR("Invensense Corporation"); MODULE_DESCRIPTION("User space character device interface for MPU"); MODULE_LICENSE("GPL"); MODULE_ALIAS(MPU_NAME);
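/*
 * Illustrative userspace sketch (not part of this driver): one plausible
 * way a client could drive the /dev/mpu character device defined above.
 * The ioctl names, sensor masks and struct mpuirq_data are taken from the
 * driver's own headers (linux/mpu_411.h and mpuirq.h in this tree); the
 * exact header paths and the minimal error handling are assumptions of
 * this sketch.  Kept inside #if 0 so it never enters the kernel build.
 */
#if 0
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/mpu_411.h>	/* MPU_* ioctls, INV_* masks, mpuirq_data */

int main(void)
{
	struct mpuirq_data event;
	struct pollfd pfd;
	int fd = open("/dev/mpu", O_RDWR);

	if (fd < 0)
		return 1;

	/* request the accelerometer only, then power that path up
	 * (MPU_RESUME passes the sensor mask straight to inv_mpu_resume) */
	ioctl(fd, MPU_SET_REQUESTED_SENSORS, INV_THREE_AXIS_ACCEL);
	ioctl(fd, MPU_RESUME, INV_THREE_AXIS_ACCEL);

	/* mpu_poll() reports POLLIN whenever mpu->event is set, and
	 * mpu_read() hands back the queued struct mpuirq_data */
	pfd.fd = fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, -1) > 0 &&
	    read(fd, &event, sizeof(event)) > 0 &&
	    (event.data & MPU_PM_EVENT_SUSPEND_PREPARE))
		/* releases the wait_for_completion() in the PM callback */
		ioctl(fd, MPU_PM_EVENT_HANDLED, 0);

	ioctl(fd, MPU_SUSPEND, INV_ALL_SENSORS);
	close(fd);
	return 0;
}
#endif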
gpl-2.0
mehrvarz/msm-kitkat-tm-usbhost-charge
arch/arm/mach-msm/board-8064-camera.c
311
17697
/* Copyright (c) 2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/i2c.h> #include <linux/gpio.h> #include <asm/mach-types.h> #include <mach/board.h> #include <mach/msm_bus_board.h> #include <mach/gpiomux.h> #include "devices.h" #include "board-8064.h" #ifdef CONFIG_MSM_CAMERA static struct gpiomux_setting cam_settings[] = { { .func = GPIOMUX_FUNC_GPIO, /*suspend*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_DOWN, }, { .func = GPIOMUX_FUNC_1, /*active 1*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_GPIO, /*active 2*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_2, /*active 3*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_5, /*active 4*/ .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }, { .func = GPIOMUX_FUNC_6, /*active 5*/ .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }, { .func = GPIOMUX_FUNC_2, /*active 6*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_UP, }, { .func = GPIOMUX_FUNC_3, /*active 7*/ .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_UP, }, { .func = GPIOMUX_FUNC_GPIO, /*i2c suspend*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_KEEPER, }, { .func = GPIOMUX_FUNC_9, /*active 9*/ .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_A, /*active 10*/ .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_6, /*active 11*/ .drv = GPIOMUX_DRV_8MA, .pull = GPIOMUX_PULL_NONE, }, { .func = GPIOMUX_FUNC_4, /*active 12*/ .drv = GPIOMUX_DRV_2MA, .pull = GPIOMUX_PULL_NONE, }, }; static struct msm_gpiomux_config apq8064_cam_common_configs[] = { { .gpio = 1, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[2], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 2, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[12], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 3, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[2], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 4, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[3], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 5, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[1], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 34, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[2], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 107, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[2], [GPIOMUX_SUSPENDED] = &cam_settings[0], }, }, { .gpio = 10, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[9], [GPIOMUX_SUSPENDED] = &cam_settings[8], }, }, { .gpio = 11, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[10], [GPIOMUX_SUSPENDED] = &cam_settings[8], }, }, { .gpio = 12, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[11], [GPIOMUX_SUSPENDED] = &cam_settings[8], }, }, { .gpio = 13, .settings = { [GPIOMUX_ACTIVE] = &cam_settings[11], [GPIOMUX_SUSPENDED] = &cam_settings[8], }, }, }; #define VFE_CAMIF_TIMER1_GPIO 3 #define VFE_CAMIF_TIMER2_GPIO 1 static struct msm_camera_sensor_flash_src msm_flash_src = { .flash_sr_type = MSM_CAMERA_FLASH_SRC_EXT, ._fsrc.ext_driver_src.led_en = VFE_CAMIF_TIMER1_GPIO, 
._fsrc.ext_driver_src.led_flash_en = VFE_CAMIF_TIMER2_GPIO, ._fsrc.ext_driver_src.flash_id = MAM_CAMERA_EXT_LED_FLASH_SC628A, }; static struct msm_gpiomux_config apq8064_cam_2d_configs[] = { }; static struct msm_bus_vectors cam_init_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, }; static struct msm_bus_vectors cam_preview_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 27648000, .ib = 110592000, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, }; static struct msm_bus_vectors cam_video_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 274406400, .ib = 561807360, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 206807040, .ib = 488816640, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, }; static struct msm_bus_vectors cam_snapshot_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 274423680, .ib = 1097694720, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 540000000, .ib = 1350000000, }, }; static struct msm_bus_vectors cam_zsl_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 302071680, .ib = 1208286720, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 0, .ib = 0, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 540000000, .ib = 1350000000, }, }; static struct msm_bus_vectors cam_video_ls_vectors[] = { { .src = MSM_BUS_MASTER_VFE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 348192000, .ib = 617103360, }, { .src = MSM_BUS_MASTER_VPE, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 206807040, .ib = 488816640, }, { .src = MSM_BUS_MASTER_JPEG_ENC, .dst = MSM_BUS_SLAVE_EBI_CH0, .ab = 540000000, .ib = 1350000000, }, }; static struct msm_bus_paths cam_bus_client_config[] = { { ARRAY_SIZE(cam_init_vectors), cam_init_vectors, }, { ARRAY_SIZE(cam_preview_vectors), cam_preview_vectors, }, { ARRAY_SIZE(cam_video_vectors), cam_video_vectors, }, { ARRAY_SIZE(cam_snapshot_vectors), cam_snapshot_vectors, }, { ARRAY_SIZE(cam_zsl_vectors), cam_zsl_vectors, }, { ARRAY_SIZE(cam_video_ls_vectors), cam_video_ls_vectors, }, }; static struct msm_bus_scale_pdata cam_bus_client_pdata = { cam_bus_client_config, ARRAY_SIZE(cam_bus_client_config), .name = "msm_camera", }; static struct msm_camera_device_platform_data msm_camera_csi_device_data[] = { { .csid_core = 0, .is_vpe = 1, .cam_bus_scale_table = &cam_bus_client_pdata, }, { .csid_core = 1, .is_vpe = 1, .cam_bus_scale_table = &cam_bus_client_pdata, }, }; static struct camera_vreg_t apq_8064_back_cam_vreg[] = { {"cam_vdig", REG_LDO, 1200000, 1200000, 105000}, {"cam_vio", REG_VS, 0, 0, 0}, {"cam_vana", REG_LDO, 2800000, 2850000, 85600}, {"cam_vaf", REG_LDO, 2800000, 2850000, 300000}, }; static struct camera_vreg_t apq_8064_front_cam_vreg[] = { {"cam_vio", REG_VS, 0, 0, 0}, {"cam_vana", REG_LDO, 2800000, 2850000, 85600}, {"cam_vdig", REG_LDO, 1200000, 1200000, 105000}, {"cam_vaf", REG_LDO, 2800000, 2850000, 300000}, }; #define CAML_RSTN PM8921_GPIO_PM_TO_SYS(28) #define CAMR_RSTN 34 static struct gpio apq8064_common_cam_gpio[] = { }; static 
struct gpio apq8064_back_cam_gpio[] = { {5, GPIOF_DIR_IN, "CAMIF_MCLK"}, {CAML_RSTN, GPIOF_DIR_OUT, "CAM_RESET"}, }; static struct msm_gpio_set_tbl apq8064_back_cam_gpio_set_tbl[] = { {CAML_RSTN, GPIOF_OUT_INIT_LOW, 10000}, {CAML_RSTN, GPIOF_OUT_INIT_HIGH, 10000}, }; static struct msm_camera_gpio_conf apq8064_back_cam_gpio_conf = { .cam_gpiomux_conf_tbl = apq8064_cam_2d_configs, .cam_gpiomux_conf_tbl_size = ARRAY_SIZE(apq8064_cam_2d_configs), .cam_gpio_common_tbl = apq8064_common_cam_gpio, .cam_gpio_common_tbl_size = ARRAY_SIZE(apq8064_common_cam_gpio), .cam_gpio_req_tbl = apq8064_back_cam_gpio, .cam_gpio_req_tbl_size = ARRAY_SIZE(apq8064_back_cam_gpio), .cam_gpio_set_tbl = apq8064_back_cam_gpio_set_tbl, .cam_gpio_set_tbl_size = ARRAY_SIZE(apq8064_back_cam_gpio_set_tbl), }; static struct gpio apq8064_front_cam_gpio[] = { {4, GPIOF_DIR_IN, "CAMIF_MCLK"}, {12, GPIOF_DIR_IN, "CAMIF_I2C_DATA"}, {13, GPIOF_DIR_IN, "CAMIF_I2C_CLK"}, {CAMR_RSTN, GPIOF_DIR_OUT, "CAM_RESET"}, }; static struct msm_gpio_set_tbl apq8064_front_cam_gpio_set_tbl[] = { {CAMR_RSTN, GPIOF_OUT_INIT_LOW, 10000}, {CAMR_RSTN, GPIOF_OUT_INIT_HIGH, 10000}, }; static struct msm_camera_gpio_conf apq8064_front_cam_gpio_conf = { .cam_gpiomux_conf_tbl = apq8064_cam_2d_configs, .cam_gpiomux_conf_tbl_size = ARRAY_SIZE(apq8064_cam_2d_configs), .cam_gpio_common_tbl = apq8064_common_cam_gpio, .cam_gpio_common_tbl_size = ARRAY_SIZE(apq8064_common_cam_gpio), .cam_gpio_req_tbl = apq8064_front_cam_gpio, .cam_gpio_req_tbl_size = ARRAY_SIZE(apq8064_front_cam_gpio), .cam_gpio_set_tbl = apq8064_front_cam_gpio_set_tbl, .cam_gpio_set_tbl_size = ARRAY_SIZE(apq8064_front_cam_gpio_set_tbl), }; static struct msm_camera_i2c_conf apq8064_back_cam_i2c_conf = { .use_i2c_mux = 1, .mux_dev = &msm8960_device_i2c_mux_gsbi4, .i2c_mux_mode = MODE_L, }; static struct i2c_board_info msm_act_main_cam_i2c_info = { I2C_BOARD_INFO("msm_actuator", 0x11), }; static struct msm_actuator_info msm_act_main_cam_0_info = { .board_info = &msm_act_main_cam_i2c_info, .cam_name = MSM_ACTUATOR_MAIN_CAM_0, .bus_id = APQ_8064_GSBI4_QUP_I2C_BUS_ID, .vcm_pwd = 0, .vcm_enable = 0, }; static struct i2c_board_info msm_act_main_cam1_i2c_info = { I2C_BOARD_INFO("msm_actuator", 0x18), }; static struct msm_actuator_info msm_act_main_cam_1_info = { .board_info = &msm_act_main_cam1_i2c_info, .cam_name = MSM_ACTUATOR_MAIN_CAM_1, .bus_id = APQ_8064_GSBI4_QUP_I2C_BUS_ID, .vcm_pwd = 0, .vcm_enable = 0, }; static struct msm_camera_i2c_conf apq8064_front_cam_i2c_conf = { .use_i2c_mux = 1, .mux_dev = &msm8960_device_i2c_mux_gsbi4, .i2c_mux_mode = MODE_L, }; static struct msm_camera_sensor_flash_data flash_imx074 = { .flash_type = MSM_CAMERA_FLASH_LED, .flash_src = &msm_flash_src }; static struct msm_camera_csi_lane_params imx074_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0xF, }; static struct msm_camera_sensor_platform_info sensor_board_info_imx074 = { .mount_angle = 90, .cam_vreg = apq_8064_back_cam_vreg, .num_vreg = ARRAY_SIZE(apq_8064_back_cam_vreg), .gpio_conf = &apq8064_back_cam_gpio_conf, .i2c_conf = &apq8064_back_cam_i2c_conf, .csi_lane_params = &imx074_csi_lane_params, }; static struct i2c_board_info imx074_eeprom_i2c_info = { I2C_BOARD_INFO("imx074_eeprom", 0x34 << 1), }; static struct msm_eeprom_info imx074_eeprom_info = { .board_info = &imx074_eeprom_i2c_info, .bus_id = APQ_8064_GSBI4_QUP_I2C_BUS_ID, }; static struct msm_camera_sensor_info msm_camera_sensor_imx074_data = { .sensor_name = "imx074", .pdata = &msm_camera_csi_device_data[0], .flash_data = &flash_imx074, 
.sensor_platform_info = &sensor_board_info_imx074, .csi_if = 1, .camera_type = BACK_CAMERA_2D, .sensor_type = BAYER_SENSOR, .actuator_info = &msm_act_main_cam_0_info, .eeprom_info = &imx074_eeprom_info, }; static struct msm_camera_csi_lane_params imx091_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0xF, }; static struct camera_vreg_t apq_8064_imx091_vreg[] = { {"cam_vana", REG_LDO, 2800000, 2850000, 85600}, {"cam_vaf", REG_LDO, 2800000, 2850000, 300000}, {"cam_vdig", REG_LDO, 1200000, 1200000, 105000}, {"cam_vio", REG_VS, 0, 0, 0}, }; static struct msm_camera_sensor_flash_data flash_imx091 = { .flash_type = MSM_CAMERA_FLASH_NONE, }; static struct msm_camera_sensor_platform_info sensor_board_info_imx091 = { .mount_angle = 0, .cam_vreg = apq_8064_imx091_vreg, .num_vreg = ARRAY_SIZE(apq_8064_imx091_vreg), .gpio_conf = &apq8064_back_cam_gpio_conf, .i2c_conf = &apq8064_back_cam_i2c_conf, .csi_lane_params = &imx091_csi_lane_params, }; static struct i2c_board_info imx091_eeprom_i2c_info = { I2C_BOARD_INFO("imx091_eeprom", 0x21), }; static struct msm_eeprom_info imx091_eeprom_info = { .board_info = &imx091_eeprom_i2c_info, .bus_id = APQ_8064_GSBI4_QUP_I2C_BUS_ID, }; static struct msm_camera_sensor_info msm_camera_sensor_imx091_data = { .sensor_name = "imx091", .pdata = &msm_camera_csi_device_data[0], .flash_data = &flash_imx091, .sensor_platform_info = &sensor_board_info_imx091, .csi_if = 1, .camera_type = BACK_CAMERA_2D, .sensor_type = BAYER_SENSOR, .actuator_info = &msm_act_main_cam_1_info, .eeprom_info = &imx091_eeprom_info, }; static struct camera_vreg_t apq_8064_s5k3l1yx_vreg[] = { {"cam_vdig", REG_LDO, 1200000, 1200000, 105000}, {"cam_vana", REG_LDO, 2800000, 2850000, 85600}, {"cam_vio", REG_VS, 0, 0, 0}, {"cam_vaf", REG_LDO, 2800000, 2850000, 300000}, }; static struct msm_camera_sensor_flash_data flash_s5k3l1yx = { .flash_type = MSM_CAMERA_FLASH_NONE, }; static struct msm_camera_csi_lane_params s5k3l1yx_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0xF, }; static struct msm_camera_sensor_platform_info sensor_board_info_s5k3l1yx = { .mount_angle = 90, .cam_vreg = apq_8064_s5k3l1yx_vreg, .num_vreg = ARRAY_SIZE(apq_8064_s5k3l1yx_vreg), .gpio_conf = &apq8064_back_cam_gpio_conf, .i2c_conf = &apq8064_back_cam_i2c_conf, .csi_lane_params = &s5k3l1yx_csi_lane_params, }; static struct msm_camera_sensor_info msm_camera_sensor_s5k3l1yx_data = { .sensor_name = "s5k3l1yx", .pdata = &msm_camera_csi_device_data[0], .flash_data = &flash_s5k3l1yx, .sensor_platform_info = &sensor_board_info_s5k3l1yx, .csi_if = 1, .camera_type = BACK_CAMERA_2D, .sensor_type = BAYER_SENSOR, }; static struct camera_vreg_t apq_8064_mt9m114_vreg[] = { {"cam_vio", REG_VS, 0, 0, 0}, {"cam_vdig", REG_LDO, 1200000, 1200000, 105000}, {"cam_vana", REG_LDO, 2800000, 2850000, 85600}, {"cam_vaf", REG_LDO, 2800000, 2850000, 300000}, }; static struct msm_camera_sensor_flash_data flash_mt9m114 = { .flash_type = MSM_CAMERA_FLASH_NONE }; static struct msm_camera_csi_lane_params mt9m114_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0x1, }; static struct msm_camera_sensor_platform_info sensor_board_info_mt9m114 = { .mount_angle = 90, .cam_vreg = apq_8064_mt9m114_vreg, .num_vreg = ARRAY_SIZE(apq_8064_mt9m114_vreg), .gpio_conf = &apq8064_front_cam_gpio_conf, .i2c_conf = &apq8064_front_cam_i2c_conf, .csi_lane_params = &mt9m114_csi_lane_params, }; static struct msm_camera_sensor_info msm_camera_sensor_mt9m114_data = { .sensor_name = "mt9m114", .pdata = &msm_camera_csi_device_data[1], .flash_data = 
&flash_mt9m114, .sensor_platform_info = &sensor_board_info_mt9m114, .csi_if = 1, .camera_type = FRONT_CAMERA_2D, .sensor_type = YUV_SENSOR, }; static struct msm_camera_sensor_flash_data flash_ov2720 = { .flash_type = MSM_CAMERA_FLASH_NONE, }; static struct msm_camera_csi_lane_params ov2720_csi_lane_params = { .csi_lane_assign = 0xE4, .csi_lane_mask = 0x3, }; static struct msm_camera_sensor_platform_info sensor_board_info_ov2720 = { .mount_angle = 0, .cam_vreg = apq_8064_front_cam_vreg, .num_vreg = ARRAY_SIZE(apq_8064_front_cam_vreg), .gpio_conf = &apq8064_front_cam_gpio_conf, .i2c_conf = &apq8064_front_cam_i2c_conf, .csi_lane_params = &ov2720_csi_lane_params, }; static struct msm_camera_sensor_info msm_camera_sensor_ov2720_data = { .sensor_name = "ov2720", .pdata = &msm_camera_csi_device_data[1], .flash_data = &flash_ov2720, .sensor_platform_info = &sensor_board_info_ov2720, .csi_if = 1, .camera_type = FRONT_CAMERA_2D, .sensor_type = BAYER_SENSOR, }; static struct platform_device msm_camera_server = { .name = "msm_cam_server", .id = 0, }; void __init apq8064_init_cam(void) { msm_gpiomux_install(apq8064_cam_common_configs, ARRAY_SIZE(apq8064_cam_common_configs)); if (machine_is_apq8064_cdp()) { sensor_board_info_imx074.mount_angle = 0; sensor_board_info_mt9m114.mount_angle = 0; } else if (machine_is_apq8064_liquid()) sensor_board_info_imx074.mount_angle = 180; platform_device_register(&msm_camera_server); platform_device_register(&msm8960_device_i2c_mux_gsbi4); platform_device_register(&msm8960_device_csiphy0); platform_device_register(&msm8960_device_csiphy1); platform_device_register(&msm8960_device_csid0); platform_device_register(&msm8960_device_csid1); platform_device_register(&msm8960_device_ispif); platform_device_register(&msm8960_device_vfe); platform_device_register(&msm8960_device_vpe); } #ifdef CONFIG_I2C static struct i2c_board_info apq8064_camera_i2c_boardinfo[] = { { I2C_BOARD_INFO("imx074", 0x1A), .platform_data = &msm_camera_sensor_imx074_data, }, { I2C_BOARD_INFO("mt9m114", 0x48), .platform_data = &msm_camera_sensor_mt9m114_data, }, { I2C_BOARD_INFO("ov2720", 0x6C), .platform_data = &msm_camera_sensor_ov2720_data, }, { I2C_BOARD_INFO("sc628a", 0x6E), }, { I2C_BOARD_INFO("imx091", 0x34), .platform_data = &msm_camera_sensor_imx091_data, }, { I2C_BOARD_INFO("s5k3l1yx", 0x20), .platform_data = &msm_camera_sensor_s5k3l1yx_data, }, }; struct msm_camera_board_info apq8064_camera_board_info = { .board_info = apq8064_camera_i2c_boardinfo, .num_i2c_board_info = ARRAY_SIZE(apq8064_camera_i2c_boardinfo), }; #endif #endif
gpl-2.0
bonezuk/linux
drivers/rpmsg/virtio_rpmsg_bus.c
823
32570
/* * Virtio-based remote processor messaging bus * * Copyright (C) 2011 Texas Instruments, Inc. * Copyright (C) 2011 Google, Inc. * * Ohad Ben-Cohen <ohad@wizery.com> * Brian Swetland <swetland@google.com> * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/kernel.h> #include <linux/module.h> #include <linux/virtio.h> #include <linux/virtio_ids.h> #include <linux/virtio_config.h> #include <linux/scatterlist.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/idr.h> #include <linux/jiffies.h> #include <linux/sched.h> #include <linux/wait.h> #include <linux/rpmsg.h> #include <linux/mutex.h> /** * struct virtproc_info - virtual remote processor state * @vdev: the virtio device * @rvq: rx virtqueue * @svq: tx virtqueue * @rbufs: kernel address of rx buffers * @sbufs: kernel address of tx buffers * @num_bufs: total number of buffers for rx and tx * @last_sbuf: index of last tx buffer used * @bufs_dma: dma base addr of the buffers * @tx_lock: protects svq, sbufs and sleepers, to allow concurrent senders. * sending a message might require waking up a dozing remote * processor, which involves sleeping, hence the mutex. * @endpoints: idr of local endpoints, allows fast retrieval * @endpoints_lock: lock of the endpoints set * @sendq: wait queue of sending contexts waiting for a tx buffer * @sleepers: number of senders that are waiting for a tx buffer * @ns_ept: the bus's name service endpoint * * This structure stores the rpmsg state of a given virtio remote processor * device (there might be several virtio proc devices for each physical * remote processor). */ struct virtproc_info { struct virtio_device *vdev; struct virtqueue *rvq, *svq; void *rbufs, *sbufs; unsigned int num_bufs; int last_sbuf; dma_addr_t bufs_dma; struct mutex tx_lock; struct idr endpoints; struct mutex endpoints_lock; wait_queue_head_t sendq; atomic_t sleepers; struct rpmsg_endpoint *ns_ept; }; /** * struct rpmsg_channel_info - internal channel info representation * @name: name of service * @src: local address * @dst: destination address */ struct rpmsg_channel_info { char name[RPMSG_NAME_SIZE]; u32 src; u32 dst; }; #define to_rpmsg_channel(d) container_of(d, struct rpmsg_channel, dev) #define to_rpmsg_driver(d) container_of(d, struct rpmsg_driver, drv) /* * We're allocating buffers of 512 bytes each for communications. The * number of buffers will be computed from the number of buffers supported * by the vring, up to a maximum of 512 buffers (256 in each direction). * * Each buffer will have 16 bytes for the msg header and 496 bytes for * the payload. * * This will utilize a maximum total space of 256KB for the buffers. * * We might also want to add support for user-provided buffers in time. * This will allow bigger buffer size flexibility, and can also be used * to achieve zero-copy messaging. * * Note that these numbers are purely a decision of this driver - we * can change this without changing anything in the firmware of the remote * processor. 
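* * Concretely, with the defines below that works out to 512 buffers * of 512 bytes each: 256KB in total, half (128KB) dedicated to rx * and half (128KB) to tx, with at most 512 - 16 = 496 payload bytes * per message. 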
*/ #define MAX_RPMSG_NUM_BUFS (512) #define RPMSG_BUF_SIZE (512) /* * Local addresses are dynamically allocated on-demand. * We do not dynamically assign addresses from the low 1024 range, * in order to reserve that address range for predefined services. */ #define RPMSG_RESERVED_ADDRESSES (1024) /* Address 53 is reserved for advertising remote services */ #define RPMSG_NS_ADDR (53) /* sysfs show configuration fields */ #define rpmsg_show_attr(field, path, format_string) \ static ssize_t \ field##_show(struct device *dev, \ struct device_attribute *attr, char *buf) \ { \ struct rpmsg_channel *rpdev = to_rpmsg_channel(dev); \ \ return sprintf(buf, format_string, rpdev->path); \ } /* for more info, see Documentation/ABI/testing/sysfs-bus-rpmsg */ rpmsg_show_attr(name, id.name, "%s\n"); rpmsg_show_attr(src, src, "0x%x\n"); rpmsg_show_attr(dst, dst, "0x%x\n"); rpmsg_show_attr(announce, announce ? "true" : "false", "%s\n"); /* * Unique (and free running) index for rpmsg devices. * * Yeah, we're not recycling those numbers (yet?). will be easy * to change if/when we want to. */ static unsigned int rpmsg_dev_index; static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, char *buf) { struct rpmsg_channel *rpdev = to_rpmsg_channel(dev); return sprintf(buf, RPMSG_DEVICE_MODALIAS_FMT "\n", rpdev->id.name); } static struct device_attribute rpmsg_dev_attrs[] = { __ATTR_RO(name), __ATTR_RO(modalias), __ATTR_RO(dst), __ATTR_RO(src), __ATTR_RO(announce), __ATTR_NULL }; /* rpmsg devices and drivers are matched using the service name */ static inline int rpmsg_id_match(const struct rpmsg_channel *rpdev, const struct rpmsg_device_id *id) { return strncmp(id->name, rpdev->id.name, RPMSG_NAME_SIZE) == 0; } /* match rpmsg channel and rpmsg driver */ static int rpmsg_dev_match(struct device *dev, struct device_driver *drv) { struct rpmsg_channel *rpdev = to_rpmsg_channel(dev); struct rpmsg_driver *rpdrv = to_rpmsg_driver(drv); const struct rpmsg_device_id *ids = rpdrv->id_table; unsigned int i; for (i = 0; ids[i].name[0]; i++) if (rpmsg_id_match(rpdev, &ids[i])) return 1; return 0; } static int rpmsg_uevent(struct device *dev, struct kobj_uevent_env *env) { struct rpmsg_channel *rpdev = to_rpmsg_channel(dev); return add_uevent_var(env, "MODALIAS=" RPMSG_DEVICE_MODALIAS_FMT, rpdev->id.name); } /** * __ept_release() - deallocate an rpmsg endpoint * @kref: the ept's reference count * * This function deallocates an ept, and is invoked when its @kref refcount * drops to zero. * * Never invoke this function directly! */ static void __ept_release(struct kref *kref) { struct rpmsg_endpoint *ept = container_of(kref, struct rpmsg_endpoint, refcount); /* * At this point no one holds a reference to ept anymore, * so we can directly free it */ kfree(ept); } /* for more info, see below documentation of rpmsg_create_ept() */ static struct rpmsg_endpoint *__rpmsg_create_ept(struct virtproc_info *vrp, struct rpmsg_channel *rpdev, rpmsg_rx_cb_t cb, void *priv, u32 addr) { int id_min, id_max, id; struct rpmsg_endpoint *ept; struct device *dev = rpdev ? &rpdev->dev : &vrp->vdev->dev; ept = kzalloc(sizeof(*ept), GFP_KERNEL); if (!ept) { dev_err(dev, "failed to kzalloc a new ept\n"); return NULL; } kref_init(&ept->refcount); mutex_init(&ept->cb_lock); ept->rpdev = rpdev; ept->cb = cb; ept->priv = priv; /* do we need to allocate a local address ? 
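* if so, ask idr_alloc() below for any id at or above * RPMSG_RESERVED_ADDRESSES (an upper bound of 0 means "no limit"); * otherwise pin the allocation to exactly [addr, addr + 1), so we * either get the requested address or fail. 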
*/ if (addr == RPMSG_ADDR_ANY) { id_min = RPMSG_RESERVED_ADDRESSES; id_max = 0; } else { id_min = addr; id_max = addr + 1; } mutex_lock(&vrp->endpoints_lock); /* bind the endpoint to an rpmsg address (and allocate one if needed) */ id = idr_alloc(&vrp->endpoints, ept, id_min, id_max, GFP_KERNEL); if (id < 0) { dev_err(dev, "idr_alloc failed: %d\n", id); goto free_ept; } ept->addr = id; mutex_unlock(&vrp->endpoints_lock); return ept; free_ept: mutex_unlock(&vrp->endpoints_lock); kref_put(&ept->refcount, __ept_release); return NULL; } /** * rpmsg_create_ept() - create a new rpmsg_endpoint * @rpdev: rpmsg channel device * @cb: rx callback handler * @priv: private data for the driver's use * @addr: local rpmsg address to bind with @cb * * Every rpmsg address in the system is bound to an rx callback (so when * inbound messages arrive, they are dispatched by the rpmsg bus using the * appropriate callback handler) by means of an rpmsg_endpoint struct. * * This function allows drivers to create such an endpoint, and by that, * bind a callback, and possibly some private data too, to an rpmsg address * (either one that is known in advance, or one that will be dynamically * assigned for them). * * Simple rpmsg drivers need not call rpmsg_create_ept, because an endpoint * is already created for them when they are probed by the rpmsg bus * (using the rx callback provided when they registered to the rpmsg bus). * * So things should just work for simple drivers: they already have an * endpoint, their rx callback is bound to their rpmsg address, and when * relevant inbound messages arrive (i.e. messages whose dst address * equals the src address of their rpmsg channel), the driver's handler * is invoked to process it. * * That said, more complicated drivers might need to allocate * additional rpmsg addresses, and bind them to different rx callbacks. * To accomplish that, those drivers need to call this function. * * Drivers should provide their @rpdev channel (so the new endpoint would belong * to the same remote processor their channel belongs to), an rx callback * function, an optional private data (which is provided back when the * rx callback is invoked), and an address they want to bind with the * callback. If @addr is RPMSG_ADDR_ANY, then rpmsg_create_ept will * dynamically assign them an available rpmsg address (drivers should have * a very good reason why not to always use RPMSG_ADDR_ANY here). * * Returns a pointer to the endpoint on success, or NULL on error. */ struct rpmsg_endpoint *rpmsg_create_ept(struct rpmsg_channel *rpdev, rpmsg_rx_cb_t cb, void *priv, u32 addr) { return __rpmsg_create_ept(rpdev->vrp, rpdev, cb, priv, addr); } EXPORT_SYMBOL(rpmsg_create_ept); /** * __rpmsg_destroy_ept() - destroy an existing rpmsg endpoint * @vrp: virtproc which owns this ept * @ept: endpoint to destroy * * An internal function which destroys an ept without assuming it is * bound to an rpmsg channel. This is needed for handling the internal * name service endpoint, which isn't bound to an rpmsg channel. * See also __rpmsg_create_ept(). 
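* * A driver-side sketch of the public create/destroy pairing (my_rx_cb * and my_priv are illustrative names only, not part of this file): * * ept = rpmsg_create_ept(rpdev, my_rx_cb, my_priv, RPMSG_ADDR_ANY); * if (!ept) * return -ENOMEM; * ... * rpmsg_destroy_ept(ept); 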
*/ static void __rpmsg_destroy_ept(struct virtproc_info *vrp, struct rpmsg_endpoint *ept) { /* make sure new inbound messages can't find this ept anymore */ mutex_lock(&vrp->endpoints_lock); idr_remove(&vrp->endpoints, ept->addr); mutex_unlock(&vrp->endpoints_lock); /* make sure in-flight inbound messages won't invoke cb anymore */ mutex_lock(&ept->cb_lock); ept->cb = NULL; mutex_unlock(&ept->cb_lock); kref_put(&ept->refcount, __ept_release); } /** * rpmsg_destroy_ept() - destroy an existing rpmsg endpoint * @ept: endpoint to destroy * * Should be used by drivers to destroy an rpmsg endpoint previously * created with rpmsg_create_ept(). */ void rpmsg_destroy_ept(struct rpmsg_endpoint *ept) { __rpmsg_destroy_ept(ept->rpdev->vrp, ept); } EXPORT_SYMBOL(rpmsg_destroy_ept); /* * when an rpmsg driver is probed with a channel, we seamlessly create * an endpoint for it, binding its rx callback to a unique local rpmsg * address. * * if we need to, we also announce this channel to the remote * processor (needed in case the driver is exposing an rpmsg service). */ static int rpmsg_dev_probe(struct device *dev) { struct rpmsg_channel *rpdev = to_rpmsg_channel(dev); struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver); struct virtproc_info *vrp = rpdev->vrp; struct rpmsg_endpoint *ept; int err; ept = rpmsg_create_ept(rpdev, rpdrv->callback, NULL, rpdev->src); if (!ept) { dev_err(dev, "failed to create endpoint\n"); err = -ENOMEM; goto out; } rpdev->ept = ept; rpdev->src = ept->addr; err = rpdrv->probe(rpdev); if (err) { dev_err(dev, "%s: failed: %d\n", __func__, err); rpmsg_destroy_ept(ept); goto out; } /* need to tell remote processor's name service about this channel ? */ if (rpdev->announce && virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) { struct rpmsg_ns_msg nsm; strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE); nsm.addr = rpdev->src; nsm.flags = RPMSG_NS_CREATE; err = rpmsg_sendto(rpdev, &nsm, sizeof(nsm), RPMSG_NS_ADDR); if (err) dev_err(dev, "failed to announce service %d\n", err); } out: return err; } static int rpmsg_dev_remove(struct device *dev) { struct rpmsg_channel *rpdev = to_rpmsg_channel(dev); struct rpmsg_driver *rpdrv = to_rpmsg_driver(rpdev->dev.driver); struct virtproc_info *vrp = rpdev->vrp; int err = 0; /* tell remote processor's name service we're removing this channel */ if (rpdev->announce && virtio_has_feature(vrp->vdev, VIRTIO_RPMSG_F_NS)) { struct rpmsg_ns_msg nsm; strncpy(nsm.name, rpdev->id.name, RPMSG_NAME_SIZE); nsm.addr = rpdev->src; nsm.flags = RPMSG_NS_DESTROY; err = rpmsg_sendto(rpdev, &nsm, sizeof(nsm), RPMSG_NS_ADDR); if (err) dev_err(dev, "failed to announce service %d\n", err); } rpdrv->remove(rpdev); rpmsg_destroy_ept(rpdev->ept); return err; } static struct bus_type rpmsg_bus = { .name = "rpmsg", .match = rpmsg_dev_match, .dev_attrs = rpmsg_dev_attrs, .uevent = rpmsg_uevent, .probe = rpmsg_dev_probe, .remove = rpmsg_dev_remove, }; /** * register_rpmsg_driver() - register an rpmsg driver with the rpmsg bus * @rpdrv: pointer to a struct rpmsg_driver * * Returns 0 on success, and an appropriate error value on failure. */ int register_rpmsg_driver(struct rpmsg_driver *rpdrv) { rpdrv->drv.bus = &rpmsg_bus; return driver_register(&rpdrv->drv); } EXPORT_SYMBOL(register_rpmsg_driver); /** * unregister_rpmsg_driver() - unregister an rpmsg driver from the rpmsg bus * @rpdrv: pointer to a struct rpmsg_driver 
*/ void unregister_rpmsg_driver(struct rpmsg_driver *rpdrv) { driver_unregister(&rpdrv->drv); } EXPORT_SYMBOL(unregister_rpmsg_driver); static void rpmsg_release_device(struct device *dev) { struct rpmsg_channel *rpdev = to_rpmsg_channel(dev); kfree(rpdev); } /* * match an rpmsg channel with a channel info struct. * this is used to make sure we're not creating rpmsg devices for channels * that already exist. */ static int rpmsg_channel_match(struct device *dev, void *data) { struct rpmsg_channel_info *chinfo = data; struct rpmsg_channel *rpdev = to_rpmsg_channel(dev); if (chinfo->src != RPMSG_ADDR_ANY && chinfo->src != rpdev->src) return 0; if (chinfo->dst != RPMSG_ADDR_ANY && chinfo->dst != rpdev->dst) return 0; if (strncmp(chinfo->name, rpdev->id.name, RPMSG_NAME_SIZE)) return 0; /* found a match ! */ return 1; } /* * create an rpmsg channel using its name and address info. * this function will be used to create both static and dynamic * channels. */ static struct rpmsg_channel *rpmsg_create_channel(struct virtproc_info *vrp, struct rpmsg_channel_info *chinfo) { struct rpmsg_channel *rpdev; struct device *tmp, *dev = &vrp->vdev->dev; int ret; /* make sure a similar channel doesn't already exist */ tmp = device_find_child(dev, chinfo, rpmsg_channel_match); if (tmp) { /* decrement the matched device's refcount back */ put_device(tmp); dev_err(dev, "channel %s:%x:%x already exists\n", chinfo->name, chinfo->src, chinfo->dst); return NULL; } rpdev = kzalloc(sizeof(struct rpmsg_channel), GFP_KERNEL); if (!rpdev) { pr_err("kzalloc failed\n"); return NULL; } rpdev->vrp = vrp; rpdev->src = chinfo->src; rpdev->dst = chinfo->dst; /* * rpmsg server channels have a predefined local address (for now), * and their existence needs to be announced remotely */ rpdev->announce = rpdev->src != RPMSG_ADDR_ANY ? true : false; strncpy(rpdev->id.name, chinfo->name, RPMSG_NAME_SIZE); /* very simple device indexing plumbing which is enough for now */ dev_set_name(&rpdev->dev, "rpmsg%d", rpmsg_dev_index++); rpdev->dev.parent = &vrp->vdev->dev; rpdev->dev.bus = &rpmsg_bus; rpdev->dev.release = rpmsg_release_device; ret = device_register(&rpdev->dev); if (ret) { dev_err(dev, "device_register failed: %d\n", ret); put_device(&rpdev->dev); return NULL; } return rpdev; } /* * find an existing channel using its name + address properties, * and destroy it */ static int rpmsg_destroy_channel(struct virtproc_info *vrp, struct rpmsg_channel_info *chinfo) { struct virtio_device *vdev = vrp->vdev; struct device *dev; dev = device_find_child(&vdev->dev, chinfo, rpmsg_channel_match); if (!dev) return -EINVAL; device_unregister(dev); put_device(dev); return 0; } /* super simple buffer "allocator" that is just enough for now */ static void *get_a_tx_buf(struct virtproc_info *vrp) { unsigned int len; void *ret; /* support multiple concurrent senders */ mutex_lock(&vrp->tx_lock); /* * either pick the next unused tx buffer * (half of our buffers are used for sending messages) */ if (vrp->last_sbuf < vrp->num_bufs / 2) ret = vrp->sbufs + RPMSG_BUF_SIZE * vrp->last_sbuf++; /* or recycle a used one */ else ret = virtqueue_get_buf(vrp->svq, &len); mutex_unlock(&vrp->tx_lock); return ret; } /** * rpmsg_upref_sleepers() - enable "tx-complete" interrupts, if needed * @vrp: virtual remote processor state * * This function is called before a sender is blocked, waiting for * a tx buffer to become available. * * If we already have blocking senders, this function merely increases * the "sleepers" reference count, and exits. 
* * Otherwise, if this is the first sender to block, we also enable * virtio's tx callbacks, so we'd be immediately notified when a tx * buffer is consumed (we rely on virtio's tx callback in order * to wake up sleeping senders as soon as a tx buffer is used by the * remote processor). */ static void rpmsg_upref_sleepers(struct virtproc_info *vrp) { /* support multiple concurrent senders */ mutex_lock(&vrp->tx_lock); /* are we the first sleeping context waiting for tx buffers ? */ if (atomic_inc_return(&vrp->sleepers) == 1) /* enable "tx-complete" interrupts before dozing off */ virtqueue_enable_cb(vrp->svq); mutex_unlock(&vrp->tx_lock); } /** * rpmsg_downref_sleepers() - disable "tx-complete" interrupts, if needed * @vrp: virtual remote processor state * * This function is called after a sender, that waited for a tx buffer * to become available, is unblocked. * * If we still have blocking senders, this function merely decreases * the "sleepers" reference count, and exits. * * Otherwise, if there are no more blocking senders, we also disable * virtio's tx callbacks, to avoid the overhead incurred with handling * those (now redundant) interrupts. */ static void rpmsg_downref_sleepers(struct virtproc_info *vrp) { /* support multiple concurrent senders */ mutex_lock(&vrp->tx_lock); /* are we the last sleeping context waiting for tx buffers ? */ if (atomic_dec_and_test(&vrp->sleepers)) /* disable "tx-complete" interrupts */ virtqueue_disable_cb(vrp->svq); mutex_unlock(&vrp->tx_lock); } /** * rpmsg_send_offchannel_raw() - send a message across to the remote processor * @rpdev: the rpmsg channel * @src: source address * @dst: destination address * @data: payload of message * @len: length of payload * @wait: indicates whether caller should block in case no TX buffers available * * This function is the base implementation for all of the rpmsg sending API. * * It will send @data of length @len to @dst, and say it's from @src. The * message will be sent to the remote processor which the @rpdev channel * belongs to. * * The message is sent using one of the TX buffers that are available for * communication with this remote processor. * * If @wait is true, the caller will be blocked until either a TX buffer is * available, or 15 seconds elapses (we don't want callers to * sleep indefinitely due to misbehaving remote processors), and in that * case -ERESTARTSYS is returned. The number '15' itself was picked * arbitrarily; there's little point in asking drivers to provide a timeout * value themselves. * * Otherwise, if @wait is false, and there are no TX buffers available, * the function will immediately fail, and -ENOMEM will be returned. * * Normally drivers shouldn't use this function directly; instead, drivers * should use the appropriate rpmsg_{try}send{to, _offchannel} API * (see include/linux/rpmsg.h). * * Returns 0 on success and an appropriate error value on failure. */ int rpmsg_send_offchannel_raw(struct rpmsg_channel *rpdev, u32 src, u32 dst, void *data, int len, bool wait) { struct virtproc_info *vrp = rpdev->vrp; struct device *dev = &rpdev->dev; struct scatterlist sg; struct rpmsg_hdr *msg; int err; /* bcasting isn't allowed */ if (src == RPMSG_ADDR_ANY || dst == RPMSG_ADDR_ANY) { dev_err(dev, "invalid addr (src 0x%x, dst 0x%x)\n", src, dst); return -EINVAL; } /* * We currently use fixed-sized buffers, and therefore the payload * length is limited. 
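* (with the defaults above that is at most 512 - 16 = 496 bytes of * payload per message, as enforced right below). 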
* * One of the possible improvements here is either to support * user-provided buffers (and then we can also support zero-copy * messaging), or to improve the buffer allocator, to support * variable-length buffer sizes. */ if (len > RPMSG_BUF_SIZE - sizeof(struct rpmsg_hdr)) { dev_err(dev, "message is too big (%d)\n", len); return -EMSGSIZE; } /* grab a buffer */ msg = get_a_tx_buf(vrp); if (!msg && !wait) return -ENOMEM; /* no free buffer ? wait for one (but bail after 15 seconds) */ while (!msg) { /* enable "tx-complete" interrupts, if not already enabled */ rpmsg_upref_sleepers(vrp); /* * sleep until a free buffer is available or 15 secs elapse. * the timeout period is not configurable because there's * little point in asking drivers to specify that. * if later this happens to be required, it'd be easy to add. */ err = wait_event_interruptible_timeout(vrp->sendq, (msg = get_a_tx_buf(vrp)), msecs_to_jiffies(15000)); /* disable "tx-complete" interrupts if we're the last sleeper */ rpmsg_downref_sleepers(vrp); /* timeout ? */ if (!err) { dev_err(dev, "timeout waiting for a tx buffer\n"); return -ERESTARTSYS; } } msg->len = len; msg->flags = 0; msg->src = src; msg->dst = dst; msg->reserved = 0; memcpy(msg->data, data, len); dev_dbg(dev, "TX From 0x%x, To 0x%x, Len %d, Flags %d, Reserved %d\n", msg->src, msg->dst, msg->len, msg->flags, msg->reserved); print_hex_dump(KERN_DEBUG, "rpmsg_virtio TX: ", DUMP_PREFIX_NONE, 16, 1, msg, sizeof(*msg) + msg->len, true); sg_init_one(&sg, msg, sizeof(*msg) + len); mutex_lock(&vrp->tx_lock); /* add message to the remote processor's virtqueue */ err = virtqueue_add_outbuf(vrp->svq, &sg, 1, msg, GFP_KERNEL); if (err) { /* * need to reclaim the buffer here, otherwise it's lost * (memory won't leak, but rpmsg won't use it again for TX). * this will wait for a buffer management overhaul. */ dev_err(dev, "virtqueue_add_outbuf failed: %d\n", err); goto out; } /* tell the remote processor it has a pending message to read */ virtqueue_kick(vrp->svq); out: mutex_unlock(&vrp->tx_lock); return err; } EXPORT_SYMBOL(rpmsg_send_offchannel_raw); static int rpmsg_recv_single(struct virtproc_info *vrp, struct device *dev, struct rpmsg_hdr *msg, unsigned int len) { struct rpmsg_endpoint *ept; struct scatterlist sg; int err; dev_dbg(dev, "From: 0x%x, To: 0x%x, Len: %d, Flags: %d, Reserved: %d\n", msg->src, msg->dst, msg->len, msg->flags, msg->reserved); print_hex_dump(KERN_DEBUG, "rpmsg_virtio RX: ", DUMP_PREFIX_NONE, 16, 1, msg, sizeof(*msg) + msg->len, true); /* * We currently use fixed-sized buffers, so trivially sanitize * the reported payload length. 
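* a corrupt header claiming, say, msg->len == 0xffff on a 512 byte * buffer must be caught here before anyone indexes into the payload. 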
*/ if (len > RPMSG_BUF_SIZE || msg->len > (len - sizeof(struct rpmsg_hdr))) { dev_warn(dev, "inbound msg too big: (%d, %d)\n", len, msg->len); return -EINVAL; } /* use the dst addr to fetch the callback of the appropriate user */ mutex_lock(&vrp->endpoints_lock); ept = idr_find(&vrp->endpoints, msg->dst); /* let's make sure no one deallocates ept while we use it */ if (ept) kref_get(&ept->refcount); mutex_unlock(&vrp->endpoints_lock); if (ept) { /* make sure ept->cb doesn't go away while we use it */ mutex_lock(&ept->cb_lock); if (ept->cb) ept->cb(ept->rpdev, msg->data, msg->len, ept->priv, msg->src); mutex_unlock(&ept->cb_lock); /* farewell, ept, we don't need you anymore */ kref_put(&ept->refcount, __ept_release); } else dev_warn(dev, "msg received with no recipient\n"); /* publish the real size of the buffer */ sg_init_one(&sg, msg, RPMSG_BUF_SIZE); /* add the buffer back to the remote processor's virtqueue */ err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, msg, GFP_KERNEL); if (err < 0) { dev_err(dev, "failed to add a virtqueue buffer: %d\n", err); return err; } return 0; } /* called when an rx buffer is used, and it's time to digest a message */ static void rpmsg_recv_done(struct virtqueue *rvq) { struct virtproc_info *vrp = rvq->vdev->priv; struct device *dev = &rvq->vdev->dev; struct rpmsg_hdr *msg; unsigned int len, msgs_received = 0; int err; msg = virtqueue_get_buf(rvq, &len); if (!msg) { dev_err(dev, "uhm, incoming signal, but no used buffer ?\n"); return; } while (msg) { err = rpmsg_recv_single(vrp, dev, msg, len); if (err) break; msgs_received++; msg = virtqueue_get_buf(rvq, &len); } dev_dbg(dev, "Received %u messages\n", msgs_received); /* tell the remote processor we added another available rx buffer */ if (msgs_received) virtqueue_kick(vrp->rvq); } /* * This is invoked whenever the remote processor completed processing * a TX msg we just sent it, and the buffer is put back to the used ring. * * Normally, though, we suppress this "tx complete" interrupt in order to * avoid the incurred overhead. */ static void rpmsg_xmit_done(struct virtqueue *svq) { struct virtproc_info *vrp = svq->vdev->priv; dev_dbg(&svq->vdev->dev, "%s\n", __func__); /* wake up potential senders that are waiting for a tx buffer */ wake_up_interruptible(&vrp->sendq); } /* invoked when a name service announcement arrives */ static void rpmsg_ns_cb(struct rpmsg_channel *rpdev, void *data, int len, void *priv, u32 src) { struct rpmsg_ns_msg *msg = data; struct rpmsg_channel *newch; struct rpmsg_channel_info chinfo; struct virtproc_info *vrp = priv; struct device *dev = &vrp->vdev->dev; int ret; print_hex_dump(KERN_DEBUG, "NS announcement: ", DUMP_PREFIX_NONE, 16, 1, data, len, true); if (len != sizeof(*msg)) { dev_err(dev, "malformed ns msg (%d)\n", len); return; } /* * the name service ept does _not_ belong to a real rpmsg channel, * and is handled by the rpmsg bus itself. * for sanity reasons, make sure a valid rpdev has _not_ sneaked * in somehow. */ if (rpdev) { dev_err(dev, "anomaly: ns ept has an rpdev handle\n"); return; } /* don't trust the remote processor for null terminating the name */ msg->name[RPMSG_NAME_SIZE - 1] = '\0'; dev_info(dev, "%sing channel %s addr 0x%x\n", msg->flags & RPMSG_NS_DESTROY ? 
"destroy" : "creat", msg->name, msg->addr); strncpy(chinfo.name, msg->name, sizeof(chinfo.name)); chinfo.src = RPMSG_ADDR_ANY; chinfo.dst = msg->addr; if (msg->flags & RPMSG_NS_DESTROY) { ret = rpmsg_destroy_channel(vrp, &chinfo); if (ret) dev_err(dev, "rpmsg_destroy_channel failed: %d\n", ret); } else { newch = rpmsg_create_channel(vrp, &chinfo); if (!newch) dev_err(dev, "rpmsg_create_channel failed\n"); } } static int rpmsg_probe(struct virtio_device *vdev) { vq_callback_t *vq_cbs[] = { rpmsg_recv_done, rpmsg_xmit_done }; const char *names[] = { "input", "output" }; struct virtqueue *vqs[2]; struct virtproc_info *vrp; void *bufs_va; int err = 0, i; size_t total_buf_space; bool notify; vrp = kzalloc(sizeof(*vrp), GFP_KERNEL); if (!vrp) return -ENOMEM; vrp->vdev = vdev; idr_init(&vrp->endpoints); mutex_init(&vrp->endpoints_lock); mutex_init(&vrp->tx_lock); init_waitqueue_head(&vrp->sendq); /* We expect two virtqueues, rx and tx (and in this order) */ err = vdev->config->find_vqs(vdev, 2, vqs, vq_cbs, names); if (err) goto free_vrp; vrp->rvq = vqs[0]; vrp->svq = vqs[1]; /* we expect symmetric tx/rx vrings */ WARN_ON(virtqueue_get_vring_size(vrp->rvq) != virtqueue_get_vring_size(vrp->svq)); /* we need less buffers if vrings are small */ if (virtqueue_get_vring_size(vrp->rvq) < MAX_RPMSG_NUM_BUFS / 2) vrp->num_bufs = virtqueue_get_vring_size(vrp->rvq) * 2; else vrp->num_bufs = MAX_RPMSG_NUM_BUFS; total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE; /* allocate coherent memory for the buffers */ bufs_va = dma_alloc_coherent(vdev->dev.parent->parent, total_buf_space, &vrp->bufs_dma, GFP_KERNEL); if (!bufs_va) { err = -ENOMEM; goto vqs_del; } dev_dbg(&vdev->dev, "buffers: va %p, dma 0x%llx\n", bufs_va, (unsigned long long)vrp->bufs_dma); /* half of the buffers is dedicated for RX */ vrp->rbufs = bufs_va; /* and half is dedicated for TX */ vrp->sbufs = bufs_va + total_buf_space / 2; /* set up the receive buffers */ for (i = 0; i < vrp->num_bufs / 2; i++) { struct scatterlist sg; void *cpu_addr = vrp->rbufs + i * RPMSG_BUF_SIZE; sg_init_one(&sg, cpu_addr, RPMSG_BUF_SIZE); err = virtqueue_add_inbuf(vrp->rvq, &sg, 1, cpu_addr, GFP_KERNEL); WARN_ON(err); /* sanity check; this can't really happen */ } /* suppress "tx-complete" interrupts */ virtqueue_disable_cb(vrp->svq); vdev->priv = vrp; /* if supported by the remote processor, enable the name service */ if (virtio_has_feature(vdev, VIRTIO_RPMSG_F_NS)) { /* a dedicated endpoint handles the name service msgs */ vrp->ns_ept = __rpmsg_create_ept(vrp, NULL, rpmsg_ns_cb, vrp, RPMSG_NS_ADDR); if (!vrp->ns_ept) { dev_err(&vdev->dev, "failed to create the ns ept\n"); err = -ENOMEM; goto free_coherent; } } /* * Prepare to kick but don't notify yet - we can't do this before * device is ready. */ notify = virtqueue_kick_prepare(vrp->rvq); /* From this point on, we can notify and get callbacks. */ virtio_device_ready(vdev); /* tell the remote processor it can start sending messages */ /* * this might be concurrent with callbacks, but we are only * doing notify, not a full kick here, so that's ok. 
*/ if (notify) virtqueue_notify(vrp->rvq); dev_info(&vdev->dev, "rpmsg host is online\n"); return 0; free_coherent: dma_free_coherent(vdev->dev.parent->parent, total_buf_space, bufs_va, vrp->bufs_dma); vqs_del: vdev->config->del_vqs(vrp->vdev); free_vrp: kfree(vrp); return err; } static int rpmsg_remove_device(struct device *dev, void *data) { device_unregister(dev); return 0; } static void rpmsg_remove(struct virtio_device *vdev) { struct virtproc_info *vrp = vdev->priv; size_t total_buf_space = vrp->num_bufs * RPMSG_BUF_SIZE; int ret; vdev->config->reset(vdev); ret = device_for_each_child(&vdev->dev, NULL, rpmsg_remove_device); if (ret) dev_warn(&vdev->dev, "can't remove rpmsg device: %d\n", ret); if (vrp->ns_ept) __rpmsg_destroy_ept(vrp, vrp->ns_ept); idr_destroy(&vrp->endpoints); vdev->config->del_vqs(vrp->vdev); dma_free_coherent(vdev->dev.parent->parent, total_buf_space, vrp->rbufs, vrp->bufs_dma); kfree(vrp); } static struct virtio_device_id id_table[] = { { VIRTIO_ID_RPMSG, VIRTIO_DEV_ANY_ID }, { 0 }, }; static unsigned int features[] = { VIRTIO_RPMSG_F_NS, }; static struct virtio_driver virtio_ipc_driver = { .feature_table = features, .feature_table_size = ARRAY_SIZE(features), .driver.name = KBUILD_MODNAME, .driver.owner = THIS_MODULE, .id_table = id_table, .probe = rpmsg_probe, .remove = rpmsg_remove, }; static int __init rpmsg_init(void) { int ret; ret = bus_register(&rpmsg_bus); if (ret) { pr_err("failed to register rpmsg bus: %d\n", ret); return ret; } ret = register_virtio_driver(&virtio_ipc_driver); if (ret) { pr_err("failed to register virtio driver: %d\n", ret); bus_unregister(&rpmsg_bus); } return ret; } subsys_initcall(rpmsg_init); static void __exit rpmsg_fini(void) { unregister_virtio_driver(&virtio_ipc_driver); bus_unregister(&rpmsg_bus); } module_exit(rpmsg_fini); MODULE_DEVICE_TABLE(virtio, id_table); MODULE_DESCRIPTION("Virtio-based remote processor messaging bus"); MODULE_LICENSE("GPL v2");
gpl-2.0
haolong886/rd_linux-2.6.32
drivers/video/nvidia/nv_backlight.c
823
3484
/* * Backlight code for nVidia based graphic cards * * Copyright 2004 Antonino Daplas <adaplas@pol.net> * Copyright (c) 2006 Michael Hanselmann <linux-kernel@hansmi.ch> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/backlight.h> #include <linux/fb.h> #include <linux/pci.h> #ifdef CONFIG_PMAC_BACKLIGHT #include <asm/backlight.h> #endif #include "nv_local.h" #include "nv_type.h" #include "nv_proto.h" /* We do not have any information about which values are allowed, thus * we used safe values. */ #define MIN_LEVEL 0x158 #define MAX_LEVEL 0x534 #define LEVEL_STEP ((MAX_LEVEL - MIN_LEVEL) / FB_BACKLIGHT_MAX) static int nvidia_bl_get_level_brightness(struct nvidia_par *par, int level) { struct fb_info *info = pci_get_drvdata(par->pci_dev); int nlevel; /* Get and convert the value */ /* No locking of bl_curve since we read a single value */ nlevel = MIN_LEVEL + info->bl_curve[level] * LEVEL_STEP; if (nlevel < 0) nlevel = 0; else if (nlevel < MIN_LEVEL) nlevel = MIN_LEVEL; else if (nlevel > MAX_LEVEL) nlevel = MAX_LEVEL; return nlevel; } static int nvidia_bl_update_status(struct backlight_device *bd) { struct nvidia_par *par = bl_get_data(bd); u32 tmp_pcrt, tmp_pmc, fpcontrol; int level; if (!par->FlatPanel) return 0; if (bd->props.power != FB_BLANK_UNBLANK || bd->props.fb_blank != FB_BLANK_UNBLANK) level = 0; else level = bd->props.brightness; tmp_pmc = NV_RD32(par->PMC, 0x10F0) & 0x0000FFFF; tmp_pcrt = NV_RD32(par->PCRTC0, 0x081C) & 0xFFFFFFFC; fpcontrol = NV_RD32(par->PRAMDAC, 0x0848) & 0xCFFFFFCC; if (level > 0) { tmp_pcrt |= 0x1; tmp_pmc |= (1 << 31); /* backlight bit */ tmp_pmc |= nvidia_bl_get_level_brightness(par, level) << 16; fpcontrol |= par->fpSyncs; } else fpcontrol |= 0x20000022; NV_WR32(par->PCRTC0, 0x081C, tmp_pcrt); NV_WR32(par->PMC, 0x10F0, tmp_pmc); NV_WR32(par->PRAMDAC, 0x848, fpcontrol); return 0; } static int nvidia_bl_get_brightness(struct backlight_device *bd) { return bd->props.brightness; } static struct backlight_ops nvidia_bl_ops = { .get_brightness = nvidia_bl_get_brightness, .update_status = nvidia_bl_update_status, }; void nvidia_bl_init(struct nvidia_par *par) { struct fb_info *info = pci_get_drvdata(par->pci_dev); struct backlight_device *bd; char name[12]; if (!par->FlatPanel) return; #ifdef CONFIG_PMAC_BACKLIGHT if (!machine_is(powermac) || !pmac_has_backlight_type("mnca")) return; #endif snprintf(name, sizeof(name), "nvidiabl%d", info->node); bd = backlight_device_register(name, info->dev, par, &nvidia_bl_ops); if (IS_ERR(bd)) { info->bl_dev = NULL; printk(KERN_WARNING "nvidia: Backlight registration failed\n"); goto error; } info->bl_dev = bd; fb_bl_default_curve(info, 0, 0x158 * FB_BACKLIGHT_MAX / MAX_LEVEL, 0x534 * FB_BACKLIGHT_MAX / MAX_LEVEL); bd->props.max_brightness = FB_BACKLIGHT_LEVELS - 1; bd->props.brightness = bd->props.max_brightness; bd->props.power = FB_BLANK_UNBLANK; backlight_update_status(bd); printk("nvidia: Backlight initialized (%s)\n", name); return; error: return; } void nvidia_bl_exit(struct nvidia_par *par) { struct fb_info *info = pci_get_drvdata(par->pci_dev); struct backlight_device *bd = info->bl_dev; backlight_device_unregister(bd); printk("nvidia: Backlight unloaded\n"); }
gpl-2.0
bl4ckic3/linux
drivers/media/rc/ir-rc5-decoder.c
1079
5565
/* ir-rc5-decoder.c - decoder for RC5(x) and StreamZap protocols * * Copyright (C) 2010 by Mauro Carvalho Chehab * Copyright (C) 2010 by Jarod Wilson <jarod@redhat.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * This decoder handles the 14 bit RC5 protocol, 15 bit "StreamZap" protocol * and 20 bit RC5x protocol. */ #include "rc-core-priv.h" #include <linux/module.h> #define RC5_NBITS 14 #define RC5_SZ_NBITS 15 #define RC5X_NBITS 20 #define CHECK_RC5X_NBITS 8 #define RC5_UNIT 888888 /* ns */ #define RC5_BIT_START (1 * RC5_UNIT) #define RC5_BIT_END (1 * RC5_UNIT) #define RC5X_SPACE (4 * RC5_UNIT) #define RC5_TRAILER (10 * RC5_UNIT) /* In reality, approx 100 */ enum rc5_state { STATE_INACTIVE, STATE_BIT_START, STATE_BIT_END, STATE_CHECK_RC5X, STATE_FINISHED, }; /** * ir_rc5_decode() - Decode one RC-5 pulse or space * @dev: the struct rc_dev descriptor of the device * @ev: the struct ir_raw_event descriptor of the pulse/space * * This function returns -EINVAL if the pulse violates the state machine */ static int ir_rc5_decode(struct rc_dev *dev, struct ir_raw_event ev) { struct rc5_dec *data = &dev->raw->rc5; u8 toggle; u32 scancode; enum rc_type protocol; if (!(dev->enabled_protocols & (RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ))) return 0; if (!is_timing_event(ev)) { if (ev.reset) data->state = STATE_INACTIVE; return 0; } if (!geq_margin(ev.duration, RC5_UNIT, RC5_UNIT / 2)) goto out; again: IR_dprintk(2, "RC5(x/sz) decode started at state %i (%uus %s)\n", data->state, TO_US(ev.duration), TO_STR(ev.pulse)); if (!geq_margin(ev.duration, RC5_UNIT, RC5_UNIT / 2)) return 0; switch (data->state) { case STATE_INACTIVE: if (!ev.pulse) break; data->state = STATE_BIT_START; data->count = 1; decrease_duration(&ev, RC5_BIT_START); goto again; case STATE_BIT_START: if (!ev.pulse && geq_margin(ev.duration, RC5_TRAILER, RC5_UNIT / 2)) { data->state = STATE_FINISHED; goto again; } if (!eq_margin(ev.duration, RC5_BIT_START, RC5_UNIT / 2)) break; data->bits <<= 1; if (!ev.pulse) data->bits |= 1; data->count++; data->state = STATE_BIT_END; return 0; case STATE_BIT_END: if (!is_transition(&ev, &dev->raw->prev_ev)) break; if (data->count == CHECK_RC5X_NBITS) data->state = STATE_CHECK_RC5X; else data->state = STATE_BIT_START; decrease_duration(&ev, RC5_BIT_END); goto again; case STATE_CHECK_RC5X: if (!ev.pulse && geq_margin(ev.duration, RC5X_SPACE, RC5_UNIT / 2)) { data->is_rc5x = true; decrease_duration(&ev, RC5X_SPACE); } else data->is_rc5x = false; data->state = STATE_BIT_START; goto again; case STATE_FINISHED: if (ev.pulse) break; if (data->is_rc5x && data->count == RC5X_NBITS) { /* RC5X */ u8 xdata, command, system; if (!(dev->enabled_protocols & RC_BIT_RC5X)) { data->state = STATE_INACTIVE; return 0; } xdata = (data->bits & 0x0003F) >> 0; command = (data->bits & 0x00FC0) >> 6; system = (data->bits & 0x1F000) >> 12; toggle = (data->bits & 0x20000) ? 1 : 0; command += (data->bits & 0x01000) ? 
0 : 0x40; scancode = system << 16 | command << 8 | xdata; protocol = RC_TYPE_RC5X; } else if (!data->is_rc5x && data->count == RC5_NBITS) { /* RC5 */ u8 command, system; if (!(dev->enabled_protocols & RC_BIT_RC5)) { data->state = STATE_INACTIVE; return 0; } command = (data->bits & 0x0003F) >> 0; system = (data->bits & 0x007C0) >> 6; toggle = (data->bits & 0x00800) ? 1 : 0; command += (data->bits & 0x01000) ? 0 : 0x40; scancode = system << 8 | command; protocol = RC_TYPE_RC5; } else if (!data->is_rc5x && data->count == RC5_SZ_NBITS) { /* RC5 StreamZap */ u8 command, system; if (!(dev->enabled_protocols & RC_BIT_RC5_SZ)) { data->state = STATE_INACTIVE; return 0; } command = (data->bits & 0x0003F) >> 0; system = (data->bits & 0x02FC0) >> 6; toggle = (data->bits & 0x01000) ? 1 : 0; scancode = system << 6 | command; protocol = RC_TYPE_RC5_SZ; } else break; IR_dprintk(1, "RC5(x/sz) scancode 0x%06x (p: %u, t: %u)\n", scancode, protocol, toggle); rc_keydown(dev, protocol, scancode, toggle); data->state = STATE_INACTIVE; return 0; } out: IR_dprintk(1, "RC5(x/sz) decode failed at state %i count %d (%uus %s)\n", data->state, data->count, TO_US(ev.duration), TO_STR(ev.pulse)); data->state = STATE_INACTIVE; return -EINVAL; } static struct ir_raw_handler rc5_handler = { .protocols = RC_BIT_RC5 | RC_BIT_RC5X | RC_BIT_RC5_SZ, .decode = ir_rc5_decode, }; static int __init ir_rc5_decode_init(void) { ir_raw_handler_register(&rc5_handler); printk(KERN_INFO "IR RC5(x/sz) protocol handler initialized\n"); return 0; } static void __exit ir_rc5_decode_exit(void) { ir_raw_handler_unregister(&rc5_handler); } module_init(ir_rc5_decode_init); module_exit(ir_rc5_decode_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mauro Carvalho Chehab and Jarod Wilson"); MODULE_AUTHOR("Red Hat Inc. (http://www.redhat.com)"); MODULE_DESCRIPTION("RC5(x/sz) IR protocol decoder");
gpl-2.0
Ca1ne/Enoch-Sense-Kernel
drivers/staging/westbridge/astoria/api/src/cyaslowlevel.c
2103
33890
/* Cypress West Bridge API source file (cyaslowlevel.c) ## =========================== ## Copyright (C) 2010 Cypress Semiconductor ## ## This program is free software; you can redistribute it and/or ## modify it under the terms of the GNU General Public License ## as published by the Free Software Foundation; either version 2 ## of the License, or (at your option) any later version. ## ## This program is distributed in the hope that it will be useful, ## but WITHOUT ANY WARRANTY; without even the implied warranty of ## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ## GNU General Public License for more details. ## ## You should have received a copy of the GNU General Public License ## along with this program; if not, write to the Free Software ## Foundation, Inc., 51 Franklin Street, Fifth Floor ## Boston, MA 02110-1301, USA. ## =========================== */ #include "../../include/linux/westbridge/cyashal.h" #include "../../include/linux/westbridge/cyascast.h" #include "../../include/linux/westbridge/cyasdevice.h" #include "../../include/linux/westbridge/cyaslowlevel.h" #include "../../include/linux/westbridge/cyasintr.h" #include "../../include/linux/westbridge/cyaserr.h" #include "../../include/linux/westbridge/cyasregs.h" static const uint32_t cy_as_low_level_timeout_count = 65536 * 4; /* Forward declaration */ static cy_as_return_status_t cy_as_send_one(cy_as_device *dev_p, cy_as_ll_request_response *req_p); /* * This array holds the size of the largest request we will ever recevie from * the West Bridge device per context. The size is in 16 bit words. Note a * size of 0xffff indicates that there will be no requests on this context * from West Bridge. */ static uint16_t max_request_length[CY_RQT_CONTEXT_COUNT] = { 8, /* CY_RQT_GENERAL_RQT_CONTEXT - CY_RQT_INITIALIZATION_COMPLETE */ 8, /* CY_RQT_RESOURCE_RQT_CONTEXT - none */ 8, /* CY_RQT_STORAGE_RQT_CONTEXT - CY_RQT_MEDIA_CHANGED */ 128, /* CY_RQT_USB_RQT_CONTEXT - CY_RQT_USB_EVENT */ 8 /* CY_RQT_TUR_RQT_CONTEXT - CY_RQT_TURBO_CMD_FROM_HOST */ }; /* * For the given context, this function removes the request node at the head * of the queue from the context. This is called after all processing has * occurred on the given request and response and we are ready to remove this * entry from the queue. */ static void cy_as_ll_remove_request_queue_head(cy_as_device *dev_p, cy_as_context *ctxt_p) { uint32_t mask, state; cy_as_ll_request_list_node *node_p; (void)dev_p; cy_as_hal_assert(ctxt_p->request_queue_p != 0); mask = cy_as_hal_disable_interrupts(); node_p = ctxt_p->request_queue_p; ctxt_p->request_queue_p = node_p->next; cy_as_hal_enable_interrupts(mask); node_p->callback = 0; node_p->rqt = 0; node_p->resp = 0; /* * note that the caller allocates and destroys the request and * response. generally the destroy happens in the callback for * async requests and after the wait returns for sync. the * request and response may not actually be destroyed but may be * managed in other ways as well. it is the responsibilty of * the caller to deal with these in any case. the caller can do * this in the request/response callback function. */ state = cy_as_hal_disable_interrupts(); cy_as_hal_c_b_free(node_p); cy_as_hal_enable_interrupts(state); } /* * For the context given, this function sends the next request to * West Bridge via the mailbox register, if the next request is * ready to be sent and has not already been sent. 
*/ static void cy_as_ll_send_next_request(cy_as_device *dev_p, cy_as_context *ctxt_p) { cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS; /* * ret == ret is equivalent to while (1) but eliminates compiler * warnings for some compilers. */ while (ret == ret) { cy_as_ll_request_list_node *node_p = ctxt_p->request_queue_p; if (node_p == 0) break; if (cy_as_request_get_node_state(node_p) != CY_AS_REQUEST_LIST_STATE_QUEUED) break; cy_as_request_set_node_state(node_p, CY_AS_REQUEST_LIST_STATE_WAITING); ret = cy_as_send_one(dev_p, node_p->rqt); if (ret == CY_AS_ERROR_SUCCESS) break; /* * if an error occurs in sending the request, tell the requester * about the error and remove the request from the queue. */ cy_as_request_set_node_state(node_p, CY_AS_REQUEST_LIST_STATE_RECEIVED); node_p->callback(dev_p, ctxt_p->number, node_p->rqt, node_p->resp, ret); cy_as_ll_remove_request_queue_head(dev_p, ctxt_p); /* * this falls through to the while loop to send the next request * since the previous request did not get sent. */ } } /* * This method removes an entry from the request queue of a given context. * The entry is removed only if it is not in transit. */ cy_as_remove_request_result_t cy_as_ll_remove_request(cy_as_device *dev_p, cy_as_context *ctxt_p, cy_as_ll_request_response *req_p, cy_bool force) { uint32_t imask; cy_as_ll_request_list_node *node_p; cy_as_ll_request_list_node *tmp_p; uint32_t state; imask = cy_as_hal_disable_interrupts(); if (ctxt_p->request_queue_p != 0 && ctxt_p->request_queue_p->rqt == req_p) { node_p = ctxt_p->request_queue_p; if ((cy_as_request_get_node_state(node_p) == CY_AS_REQUEST_LIST_STATE_WAITING) && (!force)) { cy_as_hal_enable_interrupts(imask); return cy_as_remove_request_in_transit; } ctxt_p->request_queue_p = node_p->next; } else { tmp_p = ctxt_p->request_queue_p; while (tmp_p != 0 && tmp_p->next != 0 && tmp_p->next->rqt != req_p) tmp_p = tmp_p->next; if (tmp_p == 0 || tmp_p->next == 0) { cy_as_hal_enable_interrupts(imask); return cy_as_remove_request_not_found; } node_p = tmp_p->next; tmp_p->next = node_p->next; } if (node_p->callback) node_p->callback(dev_p, ctxt_p->number, node_p->rqt, node_p->resp, CY_AS_ERROR_CANCELED); state = cy_as_hal_disable_interrupts(); cy_as_hal_c_b_free(node_p); cy_as_hal_enable_interrupts(state); cy_as_hal_enable_interrupts(imask); return cy_as_remove_request_sucessful; } void cy_as_ll_remove_all_requests(cy_as_device *dev_p, cy_as_context *ctxt_p) { cy_as_ll_request_list_node *node = ctxt_p->request_queue_p; while (node) { if (cy_as_request_get_node_state(ctxt_p->request_queue_p) != CY_AS_REQUEST_LIST_STATE_RECEIVED) cy_as_ll_remove_request(dev_p, ctxt_p, node->rqt, cy_true); node = node->next; } } static cy_bool cy_as_ll_is_in_queue(cy_as_context *ctxt_p, cy_as_ll_request_response *req_p) { uint32_t mask; cy_as_ll_request_list_node *node_p; mask = cy_as_hal_disable_interrupts(); node_p = ctxt_p->request_queue_p; while (node_p) { if (node_p->rqt == req_p) { cy_as_hal_enable_interrupts(mask); return cy_true; } node_p = node_p->next; } cy_as_hal_enable_interrupts(mask); return cy_false; } /* * This is the handler for mailbox data when we are trying to send data * to the West Bridge firmware. The firmware may be trying to send us * data and we need to queue this data to allow the firmware to move * forward and be in a state to receive our request. Here we just queue * the data and it is processed at a later time by the mailbox interrupt * handler. 
*/ void cy_as_ll_queue_mailbox_data(cy_as_device *dev_p) { cy_as_context *ctxt_p; uint8_t context; uint16_t data[4]; int32_t i; /* Read the data from mailbox 0 to determine what to do with the data */ for (i = 3; i >= 0; i--) data[i] = cy_as_hal_read_register(dev_p->tag, cy_cast_int2U_int16(CY_AS_MEM_P0_MAILBOX0 + i)); context = cy_as_mbox_get_context(data[0]); if (context >= CY_RQT_CONTEXT_COUNT) { cy_as_hal_print_message("mailbox request/response received " "with invalid context value (%d)\n", context); return; } ctxt_p = dev_p->context[context]; /* * if we have queued too much data, drop future data. */ cy_as_hal_assert(ctxt_p->queue_index * sizeof(uint16_t) + sizeof(data) <= sizeof(ctxt_p->data_queue)); for (i = 0; i < 4; i++) ctxt_p->data_queue[ctxt_p->queue_index++] = data[i]; cy_as_hal_assert((ctxt_p->queue_index % 4) == 0); dev_p->ll_queued_data = cy_true; } void cy_as_mail_box_process_data(cy_as_device *dev_p, uint16_t *data) { cy_as_context *ctxt_p; uint8_t context; uint16_t *len_p; cy_as_ll_request_response *rec_p; uint8_t st; uint16_t src, dest; context = cy_as_mbox_get_context(data[0]); if (context >= CY_RQT_CONTEXT_COUNT) { cy_as_hal_print_message("mailbox request/response received " "with invalid context value (%d)\n", context); return; } ctxt_p = dev_p->context[context]; if (cy_as_mbox_is_request(data[0])) { cy_as_hal_assert(ctxt_p->req_p != 0); rec_p = ctxt_p->req_p; len_p = &ctxt_p->request_length; } else { if (ctxt_p->request_queue_p == 0 || cy_as_request_get_node_state(ctxt_p->request_queue_p) != CY_AS_REQUEST_LIST_STATE_WAITING) { cy_as_hal_print_message("mailbox response received on " "context that was not expecting a response\n"); cy_as_hal_print_message(" context: %d\n", context); cy_as_hal_print_message(" contents: 0x%04x 0x%04x " "0x%04x 0x%04x\n", data[0], data[1], data[2], data[3]); if (ctxt_p->request_queue_p != 0) cy_as_hal_print_message(" state: 0x%02x\n", ctxt_p->request_queue_p->state); return; } /* Make sure the request has an associated response */ cy_as_hal_assert(ctxt_p->request_queue_p->resp != 0); rec_p = ctxt_p->request_queue_p->resp; len_p = &ctxt_p->request_queue_p->length; } if (rec_p->stored == 0) { /* * this is the first cycle of the response */ cy_as_ll_request_response__set_code(rec_p, cy_as_mbox_get_code(data[0])); cy_as_ll_request_response__set_context(rec_p, context); if (cy_as_mbox_is_last(data[0])) { /* This is a single cycle response */ *len_p = rec_p->length; st = 1; } else { /* Ensure that enough memory has been * reserved for the response. */ cy_as_hal_assert(rec_p->length >= data[1]); *len_p = (data[1] < rec_p->length) ? data[1] : rec_p->length; st = 2; } } else st = 1; /* Trasnfer the data from the mailboxes to the response */ while (rec_p->stored < *len_p && st < 4) rec_p->data[rec_p->stored++] = data[st++]; if (cy_as_mbox_is_last(data[0])) { /* NB: The call-back that is made below can cause the * addition of more data in this queue, thus causing * a recursive overflow of the queue. this is prevented * by removing the request entry that is currently * being passed up from the data queue. if this is done, * the queue only needs to be as long as two request * entries from west bridge. 
*/ if ((ctxt_p->rqt_index > 0) && (ctxt_p->rqt_index <= ctxt_p->queue_index)) { dest = 0; src = ctxt_p->rqt_index; while (src < ctxt_p->queue_index) ctxt_p->data_queue[dest++] = ctxt_p->data_queue[src++]; ctxt_p->rqt_index = 0; ctxt_p->queue_index = dest; cy_as_hal_assert((ctxt_p->queue_index % 4) == 0); } if (ctxt_p->request_queue_p != 0 && rec_p == ctxt_p->request_queue_p->resp) { /* * if this is the last cycle of the response, call the * callback and reset for the next response. */ cy_as_ll_request_response *resp_p = ctxt_p->request_queue_p->resp; resp_p->length = ctxt_p->request_queue_p->length; cy_as_request_set_node_state(ctxt_p->request_queue_p, CY_AS_REQUEST_LIST_STATE_RECEIVED); cy_as_device_set_in_callback(dev_p); ctxt_p->request_queue_p->callback(dev_p, context, ctxt_p->request_queue_p->rqt, resp_p, CY_AS_ERROR_SUCCESS); cy_as_device_clear_in_callback(dev_p); cy_as_ll_remove_request_queue_head(dev_p, ctxt_p); cy_as_ll_send_next_request(dev_p, ctxt_p); } else { /* Send the request to the appropriate * module to handle */ cy_as_ll_request_response *request_p = ctxt_p->req_p; ctxt_p->req_p = 0; if (ctxt_p->request_callback) { cy_as_device_set_in_callback(dev_p); ctxt_p->request_callback(dev_p, context, request_p, 0, CY_AS_ERROR_SUCCESS); cy_as_device_clear_in_callback(dev_p); } cy_as_ll_init_request(request_p, 0, context, request_p->length); ctxt_p->req_p = request_p; } } } /* * This is the handler for processing queued mailbox data */ void cy_as_mail_box_queued_data_handler(cy_as_device *dev_p) { uint16_t i; /* * if more data gets queued in between our entering this call * and the end of the iteration on all contexts; we should * continue processing the queued data. */ while (dev_p->ll_queued_data) { dev_p->ll_queued_data = cy_false; for (i = 0; i < CY_RQT_CONTEXT_COUNT; i++) { uint16_t offset; cy_as_context *ctxt_p = dev_p->context[i]; cy_as_hal_assert((ctxt_p->queue_index % 4) == 0); offset = 0; while (offset < ctxt_p->queue_index) { ctxt_p->rqt_index = offset + 4; cy_as_mail_box_process_data(dev_p, ctxt_p->data_queue + offset); offset = ctxt_p->rqt_index; } ctxt_p->queue_index = 0; } } } /* * This is the handler for the mailbox interrupt. This function reads * data from the mailbox registers until a complete request or response * is received. When a complete request is received, the callback * associated with requests on that context is called. When a complete * response is recevied, the callback associated with the request that * generated the response is called. */ void cy_as_mail_box_interrupt_handler(cy_as_device *dev_p) { cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE); /* * queue the mailbox data to preserve * order for later processing. 
*/ cy_as_ll_queue_mailbox_data(dev_p); /* * process what was queued and anything that may be pending */ cy_as_mail_box_queued_data_handler(dev_p); } cy_as_return_status_t cy_as_ll_start(cy_as_device *dev_p) { uint16_t i; if (cy_as_device_is_low_level_running(dev_p)) return CY_AS_ERROR_ALREADY_RUNNING; dev_p->ll_sending_rqt = cy_false; dev_p->ll_abort_curr_rqt = cy_false; for (i = 0; i < CY_RQT_CONTEXT_COUNT; i++) { dev_p->context[i] = (cy_as_context *) cy_as_hal_alloc(sizeof(cy_as_context)); if (dev_p->context[i] == 0) return CY_AS_ERROR_OUT_OF_MEMORY; dev_p->context[i]->number = (uint8_t)i; dev_p->context[i]->request_callback = 0; dev_p->context[i]->request_queue_p = 0; dev_p->context[i]->last_node_p = 0; dev_p->context[i]->req_p = cy_as_ll_create_request(dev_p, 0, (uint8_t)i, max_request_length[i]); dev_p->context[i]->queue_index = 0; if (!cy_as_hal_create_sleep_channel (&dev_p->context[i]->channel)) return CY_AS_ERROR_CREATE_SLEEP_CHANNEL_FAILED; } cy_as_device_set_low_level_running(dev_p); return CY_AS_ERROR_SUCCESS; } /* * Shutdown the low level communications module. This operation will * also cancel any queued low level requests. */ cy_as_return_status_t cy_as_ll_stop(cy_as_device *dev_p) { uint8_t i; cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS; cy_as_context *ctxt_p; uint32_t mask; for (i = 0; i < CY_RQT_CONTEXT_COUNT; i++) { ctxt_p = dev_p->context[i]; if (!cy_as_hal_destroy_sleep_channel(&ctxt_p->channel)) return CY_AS_ERROR_DESTROY_SLEEP_CHANNEL_FAILED; /* * now, free any queued requests and assocaited responses */ while (ctxt_p->request_queue_p) { uint32_t state; cy_as_ll_request_list_node *node_p = ctxt_p->request_queue_p; /* Mark this pair as in a cancel operation */ cy_as_request_set_node_state(node_p, CY_AS_REQUEST_LIST_STATE_CANCELING); /* Tell the caller that we are canceling this request */ /* NB: The callback is responsible for destroying the * request and the response. we cannot count on the * contents of these two after calling the callback. */ node_p->callback(dev_p, i, node_p->rqt, node_p->resp, CY_AS_ERROR_CANCELED); /* Remove the pair from the queue */ mask = cy_as_hal_disable_interrupts(); ctxt_p->request_queue_p = node_p->next; cy_as_hal_enable_interrupts(mask); /* Free the list node */ state = cy_as_hal_disable_interrupts(); cy_as_hal_c_b_free(node_p); cy_as_hal_enable_interrupts(state); } cy_as_ll_destroy_request(dev_p, dev_p->context[i]->req_p); cy_as_hal_free(dev_p->context[i]); dev_p->context[i] = 0; } cy_as_device_set_low_level_stopped(dev_p); return ret; } void cy_as_ll_init_request(cy_as_ll_request_response *req_p, uint16_t code, uint16_t context, uint16_t length) { uint16_t totallen = sizeof(cy_as_ll_request_response) + (length - 1) * sizeof(uint16_t); cy_as_hal_mem_set(req_p, 0, totallen); req_p->length = length; cy_as_ll_request_response__set_code(req_p, code); cy_as_ll_request_response__set_context(req_p, context); cy_as_ll_request_response__set_request(req_p); } /* * Create a new request. */ cy_as_ll_request_response * cy_as_ll_create_request(cy_as_device *dev_p, uint16_t code, uint8_t context, uint16_t length) { cy_as_ll_request_response *req_p; uint32_t state; uint16_t totallen = sizeof(cy_as_ll_request_response) + (length - 1) * sizeof(uint16_t); (void)dev_p; state = cy_as_hal_disable_interrupts(); req_p = cy_as_hal_c_b_alloc(totallen); cy_as_hal_enable_interrupts(state); if (req_p) cy_as_ll_init_request(req_p, code, context, length); return req_p; } /* * Destroy a request. 
*/ void cy_as_ll_destroy_request(cy_as_device *dev_p, cy_as_ll_request_response *req_p) { uint32_t state; (void)dev_p; (void)req_p; state = cy_as_hal_disable_interrupts(); cy_as_hal_c_b_free(req_p); cy_as_hal_enable_interrupts(state); } void cy_as_ll_init_response(cy_as_ll_request_response *req_p, uint16_t length) { uint16_t totallen = sizeof(cy_as_ll_request_response) + (length - 1) * sizeof(uint16_t); cy_as_hal_mem_set(req_p, 0, totallen); req_p->length = length; cy_as_ll_request_response__set_response(req_p); } /* * Create a new response */ cy_as_ll_request_response * cy_as_ll_create_response(cy_as_device *dev_p, uint16_t length) { cy_as_ll_request_response *req_p; uint32_t state; uint16_t totallen = sizeof(cy_as_ll_request_response) + (length - 1) * sizeof(uint16_t); (void)dev_p; state = cy_as_hal_disable_interrupts(); req_p = cy_as_hal_c_b_alloc(totallen); cy_as_hal_enable_interrupts(state); if (req_p) cy_as_ll_init_response(req_p, length); return req_p; } /* * Destroy the new response */ void cy_as_ll_destroy_response(cy_as_device *dev_p, cy_as_ll_request_response *req_p) { uint32_t state; (void)dev_p; (void)req_p; state = cy_as_hal_disable_interrupts(); cy_as_hal_c_b_free(req_p); cy_as_hal_enable_interrupts(state); } static uint16_t cy_as_read_intr_status( cy_as_device *dev_p) { uint32_t mask; cy_bool bloop = cy_true; uint16_t v = 0, last = 0xffff; /* * before determining if the mailboxes are ready for more data, * we first check the mailbox interrupt to see if we need to * receive data. this prevents a dead-lock condition that can * occur when both sides are trying to receive data. */ while (last == last) { /* * disable interrupts to be sure we don't process the mailbox * here and have the interrupt routine try to read this data * as well. */ mask = cy_as_hal_disable_interrupts(); /* * see if there is data to be read. */ v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_P0_INTR_REG); if ((v & CY_AS_MEM_P0_INTR_REG_MBINT) == 0) { cy_as_hal_enable_interrupts(mask); break; } /* * queue the mailbox data for later processing. * this allows the firmware to move forward and * service the requst from the P port. */ cy_as_ll_queue_mailbox_data(dev_p); /* * enable interrupts again to service mailbox * interrupts appropriately */ cy_as_hal_enable_interrupts(mask); } /* * now, all data is received */ last = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_MCU_MB_STAT) & CY_AS_MEM_P0_MCU_MBNOTRD; while (bloop) { v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_MCU_MB_STAT) & CY_AS_MEM_P0_MCU_MBNOTRD; if (v == last) break; last = v; } return v; } /* * Send a single request or response using the mail box register. 
* This function does not deal with the internal queues at all, * but only sends the request or response across to the firmware */ static cy_as_return_status_t cy_as_send_one( cy_as_device *dev_p, cy_as_ll_request_response *req_p) { int i; uint16_t mb0, v; int32_t loopcount; uint32_t int_stat; #ifdef _DEBUG if (cy_as_ll_request_response__is_request(req_p)) { switch (cy_as_ll_request_response__get_context(req_p)) { case CY_RQT_GENERAL_RQT_CONTEXT: cy_as_hal_assert(req_p->length * 2 + 2 < CY_CTX_GEN_MAX_DATA_SIZE); break; case CY_RQT_RESOURCE_RQT_CONTEXT: cy_as_hal_assert(req_p->length * 2 + 2 < CY_CTX_RES_MAX_DATA_SIZE); break; case CY_RQT_STORAGE_RQT_CONTEXT: cy_as_hal_assert(req_p->length * 2 + 2 < CY_CTX_STR_MAX_DATA_SIZE); break; case CY_RQT_USB_RQT_CONTEXT: cy_as_hal_assert(req_p->length * 2 + 2 < CY_CTX_USB_MAX_DATA_SIZE); break; } } #endif /* Write the request to the mail box registers */ if (req_p->length > 3) { uint16_t length = req_p->length; int which = 0; int st = 1; dev_p->ll_sending_rqt = cy_true; while (which < length) { loopcount = cy_as_low_level_timeout_count; do { v = cy_as_read_intr_status(dev_p); } while (v && loopcount-- > 0); if (v) { cy_as_hal_print_message( ">>>>>> LOW LEVEL TIMEOUT " "%x %x %x %x\n", cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_MCU_MAILBOX0), cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_MCU_MAILBOX1), cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_MCU_MAILBOX2), cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_MCU_MAILBOX3)); return CY_AS_ERROR_TIMEOUT; } if (dev_p->ll_abort_curr_rqt) { dev_p->ll_sending_rqt = cy_false; dev_p->ll_abort_curr_rqt = cy_false; return CY_AS_ERROR_CANCELED; } int_stat = cy_as_hal_disable_interrupts(); /* * check again whether the mailbox is free. * it is possible that an ISR came in and * wrote into the mailboxes since we last * checked the status. */ v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_MCU_MB_STAT) & CY_AS_MEM_P0_MCU_MBNOTRD; if (v) { /* Go back to the original check since * the mailbox is not free. */ cy_as_hal_enable_interrupts(int_stat); continue; } if (which == 0) { cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_MCU_MAILBOX1, length); st = 2; } else { st = 1; } while ((which < length) && (st < 4)) { cy_as_hal_write_register(dev_p->tag, cy_cast_int2U_int16 (CY_AS_MEM_MCU_MAILBOX0 + st), req_p->data[which++]); st++; } mb0 = req_p->box0; if (which == length) { dev_p->ll_sending_rqt = cy_false; mb0 |= CY_AS_REQUEST_RESPONSE_LAST_MASK; } if (dev_p->ll_abort_curr_rqt) { dev_p->ll_sending_rqt = cy_false; dev_p->ll_abort_curr_rqt = cy_false; cy_as_hal_enable_interrupts(int_stat); return CY_AS_ERROR_CANCELED; } cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_MCU_MAILBOX0, mb0); /* Wait for the MBOX interrupt to be high */ cy_as_hal_sleep150(); cy_as_hal_enable_interrupts(int_stat); } } else { check_mailbox_availability: /* * wait for the mailbox registers to become available. this * should be a very quick wait as the firmware is designed * to accept requests at interrupt time and queue them for * future processing. 
*/ loopcount = cy_as_low_level_timeout_count; do { v = cy_as_read_intr_status(dev_p); } while (v && loopcount-- > 0); if (v) { cy_as_hal_print_message( ">>>>>> LOW LEVEL TIMEOUT %x %x %x %x\n", cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_MCU_MAILBOX0), cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_MCU_MAILBOX1), cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_MCU_MAILBOX2), cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_MCU_MAILBOX3)); return CY_AS_ERROR_TIMEOUT; } int_stat = cy_as_hal_disable_interrupts(); /* * check again whether the mailbox is free. it is * possible that an ISR came in and wrote into the * mailboxes since we last checked the status. */ v = cy_as_hal_read_register(dev_p->tag, CY_AS_MEM_MCU_MB_STAT) & CY_AS_MEM_P0_MCU_MBNOTRD; if (v) { /* Go back to the original check * since the mailbox is not free. */ cy_as_hal_enable_interrupts(int_stat); goto check_mailbox_availability; } /* Write the data associated with the request * into the mbox registers 1 - 3 */ v = 0; for (i = req_p->length - 1; i >= 0; i--) cy_as_hal_write_register(dev_p->tag, cy_cast_int2U_int16(CY_AS_MEM_MCU_MAILBOX1 + i), req_p->data[i]); /* Write the mbox register 0 to trigger the interrupt */ cy_as_hal_write_register(dev_p->tag, CY_AS_MEM_MCU_MAILBOX0, req_p->box0 | CY_AS_REQUEST_RESPONSE_LAST_MASK); cy_as_hal_sleep150(); cy_as_hal_enable_interrupts(int_stat); } return CY_AS_ERROR_SUCCESS; } /* * This function queues a single request to be sent to the firmware. */ extern cy_as_return_status_t cy_as_ll_send_request( cy_as_device *dev_p, /* The request to send */ cy_as_ll_request_response *req, /* Storage for a reply, must be sure * it is of sufficient size */ cy_as_ll_request_response *resp, /* If true, this is a synchronous request */ cy_bool sync, /* Callback to call when reply is received */ cy_as_response_callback cb ) { cy_as_context *ctxt_p; uint16_t box0 = req->box0; uint8_t context; cy_as_return_status_t ret = CY_AS_ERROR_SUCCESS; cy_as_ll_request_list_node *node_p; uint32_t mask, state; cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE); context = cy_as_mbox_get_context(box0); cy_as_hal_assert(context < CY_RQT_CONTEXT_COUNT); ctxt_p = dev_p->context[context]; /* Allocate the list node */ state = cy_as_hal_disable_interrupts(); node_p = cy_as_hal_c_b_alloc(sizeof(cy_as_ll_request_list_node)); cy_as_hal_enable_interrupts(state); if (node_p == 0) return CY_AS_ERROR_OUT_OF_MEMORY; /* Initialize the list node */ node_p->callback = cb; node_p->length = 0; node_p->next = 0; node_p->resp = resp; node_p->rqt = req; node_p->state = CY_AS_REQUEST_LIST_STATE_QUEUED; if (sync) cy_as_request_node_set_sync(node_p); /* Put the request into the queue */ mask = cy_as_hal_disable_interrupts(); if (ctxt_p->request_queue_p == 0) { /* Empty queue */ ctxt_p->request_queue_p = node_p; ctxt_p->last_node_p = node_p; } else { ctxt_p->last_node_p->next = node_p; ctxt_p->last_node_p = node_p; } cy_as_hal_enable_interrupts(mask); cy_as_ll_send_next_request(dev_p, ctxt_p); if (!cy_as_device_is_in_callback(dev_p)) { mask = cy_as_hal_disable_interrupts(); cy_as_mail_box_queued_data_handler(dev_p); cy_as_hal_enable_interrupts(mask); } return ret; } static void cy_as_ll_send_callback( cy_as_device *dev_p, uint8_t context, cy_as_ll_request_response *rqt, cy_as_ll_request_response *resp, cy_as_return_status_t ret) { (void)rqt; (void)resp; (void)ret; cy_as_hal_assert(dev_p->sig == CY_AS_DEVICE_HANDLE_SIGNATURE); /* * storage the state to return to the caller */ dev_p->ll_error = ret; /* * now wake the caller */ 
cy_as_hal_wake(&dev_p->context[context]->channel); } cy_as_return_status_t cy_as_ll_send_request_wait_reply( cy_as_device *dev_p, /* The request to send */ cy_as_ll_request_response *req, /* Storage for a reply, must be * sure it is of sufficient size */ cy_as_ll_request_response *resp ) { cy_as_return_status_t ret; uint8_t context; /* Larger 8 sec time-out to handle the init * delay for slower storage devices in USB FS. */ uint32_t loopcount = 800; cy_as_context *ctxt_p; /* Get the context for the request */ context = cy_as_ll_request_response__get_context(req); cy_as_hal_assert(context < CY_RQT_CONTEXT_COUNT); ctxt_p = dev_p->context[context]; ret = cy_as_ll_send_request(dev_p, req, resp, cy_true, cy_as_ll_send_callback); if (ret != CY_AS_ERROR_SUCCESS) return ret; while (loopcount-- > 0) { /* * sleep while we wait on the response. receiving the reply will * wake this thread. we will wait, at most 2 seconds (10 ms*200 * tries) before we timeout. note if the reply arrives, we will * not sleep the entire 10 ms, just til the reply arrives. */ cy_as_hal_sleep_on(&ctxt_p->channel, 10); /* * if the request has left the queue, it means the request has * been sent and the reply has been received. this means we can * return to the caller and be sure the reply has been received. */ if (!cy_as_ll_is_in_queue(ctxt_p, req)) return dev_p->ll_error; } /* Remove the QueueListNode for this request. */ cy_as_ll_remove_request(dev_p, ctxt_p, req, cy_true); return CY_AS_ERROR_TIMEOUT; } cy_as_return_status_t cy_as_ll_register_request_callback( cy_as_device *dev_p, uint8_t context, cy_as_response_callback cb) { cy_as_context *ctxt_p; cy_as_hal_assert(context < CY_RQT_CONTEXT_COUNT); ctxt_p = dev_p->context[context]; ctxt_p->request_callback = cb; return CY_AS_ERROR_SUCCESS; } void cy_as_ll_request_response__pack( cy_as_ll_request_response *req_p, uint32_t offset, uint32_t length, void *data_p) { uint16_t dt; uint8_t *dp = (uint8_t *)data_p; while (length > 1) { dt = ((*dp++) << 8); dt |= (*dp++); cy_as_ll_request_response__set_word(req_p, offset, dt); offset++; length -= 2; } if (length == 1) { dt = (*dp << 8); cy_as_ll_request_response__set_word(req_p, offset, dt); } } void cy_as_ll_request_response__unpack( cy_as_ll_request_response *req_p, uint32_t offset, uint32_t length, void *data_p) { uint8_t *dp = (uint8_t *)data_p; while (length-- > 0) { uint16_t val = cy_as_ll_request_response__get_word (req_p, offset++); *dp++ = (uint8_t)((val >> 8) & 0xff); if (length) { length--; *dp++ = (uint8_t)(val & 0xff); } } } extern cy_as_return_status_t cy_as_ll_send_status_response( cy_as_device *dev_p, uint8_t context, uint16_t code, uint8_t clear_storage) { cy_as_return_status_t ret; cy_as_ll_request_response resp; cy_as_ll_request_response *resp_p = &resp; cy_as_hal_mem_set(resp_p, 0, sizeof(resp)); resp_p->length = 1; cy_as_ll_request_response__set_response(resp_p); cy_as_ll_request_response__set_context(resp_p, context); if (clear_storage) cy_as_ll_request_response__set_clear_storage_flag(resp_p); cy_as_ll_request_response__set_code(resp_p, CY_RESP_SUCCESS_FAILURE); cy_as_ll_request_response__set_word(resp_p, 0, code); ret = cy_as_send_one(dev_p, resp_p); return ret; } extern cy_as_return_status_t cy_as_ll_send_data_response( cy_as_device *dev_p, uint8_t context, uint16_t code, uint16_t length, void *data) { cy_as_ll_request_response *resp_p; uint16_t wlen; uint8_t respbuf[256]; if (length > 192) return CY_AS_ERROR_INVALID_SIZE; /* Word length for bytes */ wlen = length / 2; /* If byte length odd, add one more */ 
if (length % 2) wlen++; /* One for the length of field */ wlen++; resp_p = (cy_as_ll_request_response *)respbuf; cy_as_hal_mem_set(resp_p, 0, sizeof(respbuf)); resp_p->length = wlen; cy_as_ll_request_response__set_context(resp_p, context); cy_as_ll_request_response__set_code(resp_p, code); cy_as_ll_request_response__set_word(resp_p, 0, length); cy_as_ll_request_response__pack(resp_p, 1, length, data); return cy_as_send_one(dev_p, resp_p); } static cy_bool cy_as_ll_is_e_p_transfer_related_request(cy_as_ll_request_response *rqt_p, cy_as_end_point_number_t ep) { uint16_t v; uint8_t type = cy_as_ll_request_response__get_code(rqt_p); if (cy_as_ll_request_response__get_context(rqt_p) != CY_RQT_USB_RQT_CONTEXT) return cy_false; /* * when cancelling outstanding EP0 data transfers, any pending * setup ACK requests also need to be cancelled. */ if ((ep == 0) && (type == CY_RQT_ACK_SETUP_PACKET)) return cy_true; if (type != CY_RQT_USB_EP_DATA) return cy_false; v = cy_as_ll_request_response__get_word(rqt_p, 0); if ((cy_as_end_point_number_t)((v >> 13) & 1) != ep) return cy_false; return cy_true; } cy_as_return_status_t cy_as_ll_remove_ep_data_requests(cy_as_device *dev_p, cy_as_end_point_number_t ep) { cy_as_context *ctxt_p; cy_as_ll_request_list_node *node_p; uint32_t imask; /* * first, remove any queued requests */ ctxt_p = dev_p->context[CY_RQT_USB_RQT_CONTEXT]; if (ctxt_p) { for (node_p = ctxt_p->request_queue_p; node_p; node_p = node_p->next) { if (cy_as_ll_is_e_p_transfer_related_request (node_p->rqt, ep)) { cy_as_ll_remove_request(dev_p, ctxt_p, node_p->rqt, cy_false); break; } } /* * now, deal with any request that may be in transit */ imask = cy_as_hal_disable_interrupts(); if (ctxt_p->request_queue_p != 0 && cy_as_ll_is_e_p_transfer_related_request (ctxt_p->request_queue_p->rqt, ep) && cy_as_request_get_node_state(ctxt_p->request_queue_p) == CY_AS_REQUEST_LIST_STATE_WAITING) { cy_as_hal_print_message("need to remove an in-transit " "request to antioch\n"); /* * if the request has not been fully sent to west bridge * yet, abort sending. otherwise, terminate the request * with a CANCELED status. firmware will already have * terminated this transfer. */ if (dev_p->ll_sending_rqt) dev_p->ll_abort_curr_rqt = cy_true; else { uint32_t state; node_p = ctxt_p->request_queue_p; if (node_p->callback) node_p->callback(dev_p, ctxt_p->number, node_p->rqt, node_p->resp, CY_AS_ERROR_CANCELED); ctxt_p->request_queue_p = node_p->next; state = cy_as_hal_disable_interrupts(); cy_as_hal_c_b_free(node_p); cy_as_hal_enable_interrupts(state); } } cy_as_hal_enable_interrupts(imask); } return CY_AS_ERROR_SUCCESS; }
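/*
 * Illustration (not Cypress API): a self-contained sketch of the
 * byte<->word packing convention used by
 * cy_as_ll_request_response__pack()/__unpack() above. Bytes are packed
 * big-endian into 16-bit mailbox words, with an odd trailing byte left
 * in the high half of the last word. The pack16()/unpack16() helpers
 * are hypothetical names introduced for this example.
 */
#include <stdint.h>
#include <stddef.h>

static void pack16(uint16_t *words, const uint8_t *bytes, size_t len)
{
	size_t w = 0;

	while (len > 1) {
		words[w++] = (uint16_t)((bytes[0] << 8) | bytes[1]);
		bytes += 2;
		len -= 2;
	}
	if (len == 1)
		words[w] = (uint16_t)(bytes[0] << 8);	/* odd byte, high half */
}

static void unpack16(const uint16_t *words, uint8_t *bytes, size_t len)
{
	while (len-- > 0) {
		uint16_t val = *words++;

		*bytes++ = (uint8_t)(val >> 8);
		if (len) {
			len--;
			*bytes++ = (uint8_t)(val & 0xff);
		}
	}
}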
gpl-2.0
kumajaya/android_kernel_samsung_universal5422
drivers/net/wireless/p54/p54spi.c
2103
17405
/* * Copyright (C) 2008 Christian Lamparter <chunkeey@web.de> * Copyright 2008 Johannes Berg <johannes@sipsolutions.net> * * This driver is a port from stlc45xx: * Copyright (C) 2008 Nokia Corporation and/or its subsidiary(-ies). * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA * 02110-1301 USA */ #include <linux/module.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/firmware.h> #include <linux/delay.h> #include <linux/irq.h> #include <linux/spi/spi.h> #include <linux/etherdevice.h> #include <linux/gpio.h> #include <linux/slab.h> #include "p54spi.h" #include "p54.h" #include "lmac.h" #ifdef CONFIG_P54_SPI_DEFAULT_EEPROM #include "p54spi_eeprom.h" #endif /* CONFIG_P54_SPI_DEFAULT_EEPROM */ MODULE_FIRMWARE("3826.arm"); /* * gpios should be handled in board files and provided via platform data, * but because it's currently impossible for p54spi to have a header file * in include/linux, let's use module paramaters for now */ static int p54spi_gpio_power = 97; module_param(p54spi_gpio_power, int, 0444); MODULE_PARM_DESC(p54spi_gpio_power, "gpio number for power line"); static int p54spi_gpio_irq = 87; module_param(p54spi_gpio_irq, int, 0444); MODULE_PARM_DESC(p54spi_gpio_irq, "gpio number for irq line"); static void p54spi_spi_read(struct p54s_priv *priv, u8 address, void *buf, size_t len) { struct spi_transfer t[2]; struct spi_message m; __le16 addr; /* We first push the address */ addr = cpu_to_le16(address << 8 | SPI_ADRS_READ_BIT_15); spi_message_init(&m); memset(t, 0, sizeof(t)); t[0].tx_buf = &addr; t[0].len = sizeof(addr); spi_message_add_tail(&t[0], &m); t[1].rx_buf = buf; t[1].len = len; spi_message_add_tail(&t[1], &m); spi_sync(priv->spi, &m); } static void p54spi_spi_write(struct p54s_priv *priv, u8 address, const void *buf, size_t len) { struct spi_transfer t[3]; struct spi_message m; __le16 addr; /* We first push the address */ addr = cpu_to_le16(address << 8); spi_message_init(&m); memset(t, 0, sizeof(t)); t[0].tx_buf = &addr; t[0].len = sizeof(addr); spi_message_add_tail(&t[0], &m); t[1].tx_buf = buf; t[1].len = len & ~1; spi_message_add_tail(&t[1], &m); if (len % 2) { __le16 last_word; last_word = cpu_to_le16(((u8 *)buf)[len - 1]); t[2].tx_buf = &last_word; t[2].len = sizeof(last_word); spi_message_add_tail(&t[2], &m); } spi_sync(priv->spi, &m); } static u32 p54spi_read32(struct p54s_priv *priv, u8 addr) { __le32 val; p54spi_spi_read(priv, addr, &val, sizeof(val)); return le32_to_cpu(val); } static inline void p54spi_write16(struct p54s_priv *priv, u8 addr, __le16 val) { p54spi_spi_write(priv, addr, &val, sizeof(val)); } static inline void p54spi_write32(struct p54s_priv *priv, u8 addr, __le32 val) { p54spi_spi_write(priv, addr, &val, sizeof(val)); } static int p54spi_wait_bit(struct p54s_priv *priv, u16 reg, u32 bits) { int i; for (i = 0; i < 2000; i++) { u32 buffer = p54spi_read32(priv, reg); if ((buffer & bits) == bits) return 1; } return 0; } static int 
p54spi_spi_write_dma(struct p54s_priv *priv, __le32 base, const void *buf, size_t len) { if (!p54spi_wait_bit(priv, SPI_ADRS_DMA_WRITE_CTRL, HOST_ALLOWED)) { dev_err(&priv->spi->dev, "spi_write_dma not allowed " "to DMA write.\n"); return -EAGAIN; } p54spi_write16(priv, SPI_ADRS_DMA_WRITE_CTRL, cpu_to_le16(SPI_DMA_WRITE_CTRL_ENABLE)); p54spi_write16(priv, SPI_ADRS_DMA_WRITE_LEN, cpu_to_le16(len)); p54spi_write32(priv, SPI_ADRS_DMA_WRITE_BASE, base); p54spi_spi_write(priv, SPI_ADRS_DMA_DATA, buf, len); return 0; } static int p54spi_request_firmware(struct ieee80211_hw *dev) { struct p54s_priv *priv = dev->priv; int ret; /* FIXME: should driver use it's own struct device? */ ret = request_firmware(&priv->firmware, "3826.arm", &priv->spi->dev); if (ret < 0) { dev_err(&priv->spi->dev, "request_firmware() failed: %d", ret); return ret; } ret = p54_parse_firmware(dev, priv->firmware); if (ret) { release_firmware(priv->firmware); return ret; } return 0; } static int p54spi_request_eeprom(struct ieee80211_hw *dev) { struct p54s_priv *priv = dev->priv; const struct firmware *eeprom; int ret; /* * allow users to customize their eeprom. */ ret = request_firmware(&eeprom, "3826.eeprom", &priv->spi->dev); if (ret < 0) { #ifdef CONFIG_P54_SPI_DEFAULT_EEPROM dev_info(&priv->spi->dev, "loading default eeprom...\n"); ret = p54_parse_eeprom(dev, (void *) p54spi_eeprom, sizeof(p54spi_eeprom)); #else dev_err(&priv->spi->dev, "Failed to request user eeprom\n"); #endif /* CONFIG_P54_SPI_DEFAULT_EEPROM */ } else { dev_info(&priv->spi->dev, "loading user eeprom...\n"); ret = p54_parse_eeprom(dev, (void *) eeprom->data, (int)eeprom->size); release_firmware(eeprom); } return ret; } static int p54spi_upload_firmware(struct ieee80211_hw *dev) { struct p54s_priv *priv = dev->priv; unsigned long fw_len, _fw_len; unsigned int offset = 0; int err = 0; u8 *fw; fw_len = priv->firmware->size; fw = kmemdup(priv->firmware->data, fw_len, GFP_KERNEL); if (!fw) return -ENOMEM; /* stop the device */ p54spi_write16(priv, SPI_ADRS_DEV_CTRL_STAT, cpu_to_le16( SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_HOST_RESET | SPI_CTRL_STAT_START_HALTED)); msleep(TARGET_BOOT_SLEEP); p54spi_write16(priv, SPI_ADRS_DEV_CTRL_STAT, cpu_to_le16( SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_START_HALTED)); msleep(TARGET_BOOT_SLEEP); while (fw_len > 0) { _fw_len = min_t(long, fw_len, SPI_MAX_PACKET_SIZE); err = p54spi_spi_write_dma(priv, cpu_to_le32( ISL38XX_DEV_FIRMWARE_ADDR + offset), (fw + offset), _fw_len); if (err < 0) goto out; fw_len -= _fw_len; offset += _fw_len; } BUG_ON(fw_len != 0); /* enable host interrupts */ p54spi_write32(priv, SPI_ADRS_HOST_INT_EN, cpu_to_le32(SPI_HOST_INTS_DEFAULT)); /* boot the device */ p54spi_write16(priv, SPI_ADRS_DEV_CTRL_STAT, cpu_to_le16( SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_HOST_RESET | SPI_CTRL_STAT_RAM_BOOT)); msleep(TARGET_BOOT_SLEEP); p54spi_write16(priv, SPI_ADRS_DEV_CTRL_STAT, cpu_to_le16( SPI_CTRL_STAT_HOST_OVERRIDE | SPI_CTRL_STAT_RAM_BOOT)); msleep(TARGET_BOOT_SLEEP); out: kfree(fw); return err; } static void p54spi_power_off(struct p54s_priv *priv) { disable_irq(gpio_to_irq(p54spi_gpio_irq)); gpio_set_value(p54spi_gpio_power, 0); } static void p54spi_power_on(struct p54s_priv *priv) { gpio_set_value(p54spi_gpio_power, 1); enable_irq(gpio_to_irq(p54spi_gpio_irq)); /* * need to wait a while before device can be accessed, the length * is just a guess */ msleep(10); } static inline void p54spi_int_ack(struct p54s_priv *priv, u32 val) { p54spi_write32(priv, SPI_ADRS_HOST_INT_ACK, cpu_to_le32(val)); } 
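/*
 * Illustrative sketch only: the chunked-upload pattern used by
 * p54spi_upload_firmware() above, reduced to plain C. CHUNK stands in
 * for SPI_MAX_PACKET_SIZE and write_chunk() is a hypothetical stand-in
 * for p54spi_spi_write_dma(); neither is driver API.
 */
#define CHUNK 256

static int upload_in_chunks(const unsigned char *fw, unsigned long fw_len,
			    unsigned long dev_base,
			    int (*write_chunk)(unsigned long dev_addr,
					       const void *buf,
					       unsigned long len))
{
	unsigned long offset = 0;

	while (fw_len > 0) {
		unsigned long n = fw_len < CHUNK ? fw_len : CHUNK;
		int err = write_chunk(dev_base + offset, fw + offset, n);

		if (err < 0)
			return err;	/* give up on the first failed chunk */
		fw_len -= n;
		offset += n;
	}
	return 0;
}
#undef CHUNK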
static int p54spi_wakeup(struct p54s_priv *priv) { /* wake the chip */ p54spi_write32(priv, SPI_ADRS_ARM_INTERRUPTS, cpu_to_le32(SPI_TARGET_INT_WAKEUP)); /* And wait for the READY interrupt */ if (!p54spi_wait_bit(priv, SPI_ADRS_HOST_INTERRUPTS, SPI_HOST_INT_READY)) { dev_err(&priv->spi->dev, "INT_READY timeout\n"); return -EBUSY; } p54spi_int_ack(priv, SPI_HOST_INT_READY); return 0; } static inline void p54spi_sleep(struct p54s_priv *priv) { p54spi_write32(priv, SPI_ADRS_ARM_INTERRUPTS, cpu_to_le32(SPI_TARGET_INT_SLEEP)); } static void p54spi_int_ready(struct p54s_priv *priv) { p54spi_write32(priv, SPI_ADRS_HOST_INT_EN, cpu_to_le32( SPI_HOST_INT_UPDATE | SPI_HOST_INT_SW_UPDATE)); switch (priv->fw_state) { case FW_STATE_BOOTING: priv->fw_state = FW_STATE_READY; complete(&priv->fw_comp); break; case FW_STATE_RESETTING: priv->fw_state = FW_STATE_READY; /* TODO: reinitialize state */ break; default: break; } } static int p54spi_rx(struct p54s_priv *priv) { struct sk_buff *skb; u16 len; u16 rx_head[2]; #define READAHEAD_SZ (sizeof(rx_head)-sizeof(u16)) if (p54spi_wakeup(priv) < 0) return -EBUSY; /* Read data size and first data word in one SPI transaction * This is workaround for firmware/DMA bug, * when first data word gets lost under high load. */ p54spi_spi_read(priv, SPI_ADRS_DMA_DATA, rx_head, sizeof(rx_head)); len = rx_head[0]; if (len == 0) { p54spi_sleep(priv); dev_err(&priv->spi->dev, "rx request of zero bytes\n"); return 0; } /* Firmware may insert up to 4 padding bytes after the lmac header, * but it does not amend the size of SPI data transfer. * Such packets has correct data size in header, thus referencing * past the end of allocated skb. Reserve extra 4 bytes for this case */ skb = dev_alloc_skb(len + 4); if (!skb) { p54spi_sleep(priv); dev_err(&priv->spi->dev, "could not alloc skb"); return -ENOMEM; } if (len <= READAHEAD_SZ) { memcpy(skb_put(skb, len), rx_head + 1, len); } else { memcpy(skb_put(skb, READAHEAD_SZ), rx_head + 1, READAHEAD_SZ); p54spi_spi_read(priv, SPI_ADRS_DMA_DATA, skb_put(skb, len - READAHEAD_SZ), len - READAHEAD_SZ); } p54spi_sleep(priv); /* Put additional bytes to compensate for the possible * alignment-caused truncation */ skb_put(skb, 4); if (p54_rx(priv->hw, skb) == 0) dev_kfree_skb(skb); return 0; } static irqreturn_t p54spi_interrupt(int irq, void *config) { struct spi_device *spi = config; struct p54s_priv *priv = spi_get_drvdata(spi); ieee80211_queue_work(priv->hw, &priv->work); return IRQ_HANDLED; } static int p54spi_tx_frame(struct p54s_priv *priv, struct sk_buff *skb) { struct p54_hdr *hdr = (struct p54_hdr *) skb->data; int ret = 0; if (p54spi_wakeup(priv) < 0) return -EBUSY; ret = p54spi_spi_write_dma(priv, hdr->req_id, skb->data, skb->len); if (ret < 0) goto out; if (!p54spi_wait_bit(priv, SPI_ADRS_HOST_INTERRUPTS, SPI_HOST_INT_WR_READY)) { dev_err(&priv->spi->dev, "WR_READY timeout\n"); ret = -EAGAIN; goto out; } p54spi_int_ack(priv, SPI_HOST_INT_WR_READY); if (FREE_AFTER_TX(skb)) p54_free_skb(priv->hw, skb); out: p54spi_sleep(priv); return ret; } static int p54spi_wq_tx(struct p54s_priv *priv) { struct p54s_tx_info *entry; struct sk_buff *skb; struct ieee80211_tx_info *info; struct p54_tx_info *minfo; struct p54s_tx_info *dinfo; unsigned long flags; int ret = 0; spin_lock_irqsave(&priv->tx_lock, flags); while (!list_empty(&priv->tx_pending)) { entry = list_entry(priv->tx_pending.next, struct p54s_tx_info, tx_list); list_del_init(&entry->tx_list); spin_unlock_irqrestore(&priv->tx_lock, flags); dinfo = container_of((void *) entry, struct 
p54s_tx_info, tx_list); minfo = container_of((void *) dinfo, struct p54_tx_info, data); info = container_of((void *) minfo, struct ieee80211_tx_info, rate_driver_data); skb = container_of((void *) info, struct sk_buff, cb); ret = p54spi_tx_frame(priv, skb); if (ret < 0) { p54_free_skb(priv->hw, skb); return ret; } spin_lock_irqsave(&priv->tx_lock, flags); } spin_unlock_irqrestore(&priv->tx_lock, flags); return ret; } static void p54spi_op_tx(struct ieee80211_hw *dev, struct sk_buff *skb) { struct p54s_priv *priv = dev->priv; struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); struct p54_tx_info *mi = (struct p54_tx_info *) info->rate_driver_data; struct p54s_tx_info *di = (struct p54s_tx_info *) mi->data; unsigned long flags; BUILD_BUG_ON(sizeof(*di) > sizeof((mi->data))); spin_lock_irqsave(&priv->tx_lock, flags); list_add_tail(&di->tx_list, &priv->tx_pending); spin_unlock_irqrestore(&priv->tx_lock, flags); ieee80211_queue_work(priv->hw, &priv->work); } static void p54spi_work(struct work_struct *work) { struct p54s_priv *priv = container_of(work, struct p54s_priv, work); u32 ints; int ret; mutex_lock(&priv->mutex); if (priv->fw_state == FW_STATE_OFF) goto out; ints = p54spi_read32(priv, SPI_ADRS_HOST_INTERRUPTS); if (ints & SPI_HOST_INT_READY) { p54spi_int_ready(priv); p54spi_int_ack(priv, SPI_HOST_INT_READY); } if (priv->fw_state != FW_STATE_READY) goto out; if (ints & SPI_HOST_INT_UPDATE) { p54spi_int_ack(priv, SPI_HOST_INT_UPDATE); ret = p54spi_rx(priv); if (ret < 0) goto out; } if (ints & SPI_HOST_INT_SW_UPDATE) { p54spi_int_ack(priv, SPI_HOST_INT_SW_UPDATE); ret = p54spi_rx(priv); if (ret < 0) goto out; } ret = p54spi_wq_tx(priv); out: mutex_unlock(&priv->mutex); } static int p54spi_op_start(struct ieee80211_hw *dev) { struct p54s_priv *priv = dev->priv; unsigned long timeout; int ret = 0; if (mutex_lock_interruptible(&priv->mutex)) { ret = -EINTR; goto out; } priv->fw_state = FW_STATE_BOOTING; p54spi_power_on(priv); ret = p54spi_upload_firmware(dev); if (ret < 0) { p54spi_power_off(priv); goto out_unlock; } mutex_unlock(&priv->mutex); timeout = msecs_to_jiffies(2000); timeout = wait_for_completion_interruptible_timeout(&priv->fw_comp, timeout); if (!timeout) { dev_err(&priv->spi->dev, "firmware boot failed"); p54spi_power_off(priv); ret = -1; goto out; } if (mutex_lock_interruptible(&priv->mutex)) { ret = -EINTR; p54spi_power_off(priv); goto out; } WARN_ON(priv->fw_state != FW_STATE_READY); out_unlock: mutex_unlock(&priv->mutex); out: return ret; } static void p54spi_op_stop(struct ieee80211_hw *dev) { struct p54s_priv *priv = dev->priv; unsigned long flags; mutex_lock(&priv->mutex); WARN_ON(priv->fw_state != FW_STATE_READY); p54spi_power_off(priv); spin_lock_irqsave(&priv->tx_lock, flags); INIT_LIST_HEAD(&priv->tx_pending); spin_unlock_irqrestore(&priv->tx_lock, flags); priv->fw_state = FW_STATE_OFF; mutex_unlock(&priv->mutex); cancel_work_sync(&priv->work); } static int p54spi_probe(struct spi_device *spi) { struct p54s_priv *priv = NULL; struct ieee80211_hw *hw; int ret = -EINVAL; hw = p54_init_common(sizeof(*priv)); if (!hw) { dev_err(&spi->dev, "could not alloc ieee80211_hw"); return -ENOMEM; } priv = hw->priv; priv->hw = hw; spi_set_drvdata(spi, priv); priv->spi = spi; spi->bits_per_word = 16; spi->max_speed_hz = 24000000; ret = spi_setup(spi); if (ret < 0) { dev_err(&priv->spi->dev, "spi_setup failed"); goto err_free; } ret = gpio_request(p54spi_gpio_power, "p54spi power"); if (ret < 0) { dev_err(&priv->spi->dev, "power GPIO request failed: %d", ret); goto err_free; } ret 
= gpio_request(p54spi_gpio_irq, "p54spi irq"); if (ret < 0) { dev_err(&priv->spi->dev, "irq GPIO request failed: %d", ret); goto err_free_gpio_power; } gpio_direction_output(p54spi_gpio_power, 0); gpio_direction_input(p54spi_gpio_irq); ret = request_irq(gpio_to_irq(p54spi_gpio_irq), p54spi_interrupt, IRQF_DISABLED, "p54spi", priv->spi); if (ret < 0) { dev_err(&priv->spi->dev, "request_irq() failed"); goto err_free_gpio_irq; } irq_set_irq_type(gpio_to_irq(p54spi_gpio_irq), IRQ_TYPE_EDGE_RISING); disable_irq(gpio_to_irq(p54spi_gpio_irq)); INIT_WORK(&priv->work, p54spi_work); init_completion(&priv->fw_comp); INIT_LIST_HEAD(&priv->tx_pending); mutex_init(&priv->mutex); spin_lock_init(&priv->tx_lock); SET_IEEE80211_DEV(hw, &spi->dev); priv->common.open = p54spi_op_start; priv->common.stop = p54spi_op_stop; priv->common.tx = p54spi_op_tx; ret = p54spi_request_firmware(hw); if (ret < 0) goto err_free_common; ret = p54spi_request_eeprom(hw); if (ret) goto err_free_common; ret = p54_register_common(hw, &priv->spi->dev); if (ret) goto err_free_common; return 0; err_free_common: free_irq(gpio_to_irq(p54spi_gpio_irq), spi); err_free_gpio_irq: gpio_free(p54spi_gpio_irq); err_free_gpio_power: gpio_free(p54spi_gpio_power); err_free: p54_free_common(priv->hw); return ret; } static int p54spi_remove(struct spi_device *spi) { struct p54s_priv *priv = spi_get_drvdata(spi); p54_unregister_common(priv->hw); free_irq(gpio_to_irq(p54spi_gpio_irq), spi); gpio_free(p54spi_gpio_power); gpio_free(p54spi_gpio_irq); release_firmware(priv->firmware); mutex_destroy(&priv->mutex); p54_free_common(priv->hw); return 0; } static struct spi_driver p54spi_driver = { .driver = { .name = "p54spi", .owner = THIS_MODULE, }, .probe = p54spi_probe, .remove = p54spi_remove, }; static int __init p54spi_init(void) { int ret; ret = spi_register_driver(&p54spi_driver); if (ret < 0) { printk(KERN_ERR "failed to register SPI driver: %d", ret); goto out; } out: return ret; } static void __exit p54spi_exit(void) { spi_unregister_driver(&p54spi_driver); } module_init(p54spi_init); module_exit(p54spi_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Christian Lamparter <chunkeey@web.de>"); MODULE_ALIAS("spi:cx3110x"); MODULE_ALIAS("spi:p54spi"); MODULE_ALIAS("spi:stlc45xx");
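/*
 * Illustrative sketch only: the read-ahead split performed by
 * p54spi_rx() above. The first SPI transaction returns the 16-bit
 * length plus one 16-bit data word, so the payload copy must take up
 * to READAHEAD bytes from that head buffer and fetch only the
 * remainder from the device. read_rest() is a hypothetical stand-in
 * for the second p54spi_spi_read() call.
 */
#include <string.h>
#include <stdint.h>
#include <stddef.h>

#define READAHEAD (sizeof(uint16_t))	/* one data word rides along with the length */

static void fill_payload(uint8_t *dst, size_t len, const uint16_t *rx_head,
			 void (*read_rest)(uint8_t *buf, size_t remaining))
{
	if (len <= READAHEAD) {
		memcpy(dst, rx_head + 1, len);		/* everything was read ahead */
	} else {
		memcpy(dst, rx_head + 1, READAHEAD);	/* copy the read-ahead word */
		read_rest(dst + READAHEAD, len - READAHEAD);
	}
}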
gpl-2.0
Anik1199/kernel_sprout
drivers/pinctrl/sh-pfc/pfc-r8a7740.c
2103
100832
/* * R8A7740 processor support * * Copyright (C) 2011 Renesas Solutions Corp. * Copyright (C) 2011 Kuninori Morimoto <kuninori.morimoto.gx@renesas.com> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License as * published by the Free Software Foundation; version 2 of the * License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/kernel.h> #include <mach/r8a7740.h> #include <mach/irqs.h> #include "sh_pfc.h" #define CPU_ALL_PORT(fn, pfx, sfx) \ PORT_10(fn, pfx, sfx), PORT_90(fn, pfx, sfx), \ PORT_10(fn, pfx##10, sfx), PORT_90(fn, pfx##1, sfx), \ PORT_10(fn, pfx##20, sfx), \ PORT_1(fn, pfx##210, sfx), PORT_1(fn, pfx##211, sfx) enum { PINMUX_RESERVED = 0, /* PORT0_DATA -> PORT211_DATA */ PINMUX_DATA_BEGIN, PORT_ALL(DATA), PINMUX_DATA_END, /* PORT0_IN -> PORT211_IN */ PINMUX_INPUT_BEGIN, PORT_ALL(IN), PINMUX_INPUT_END, /* PORT0_IN_PU -> PORT211_IN_PU */ PINMUX_INPUT_PULLUP_BEGIN, PORT_ALL(IN_PU), PINMUX_INPUT_PULLUP_END, /* PORT0_IN_PD -> PORT211_IN_PD */ PINMUX_INPUT_PULLDOWN_BEGIN, PORT_ALL(IN_PD), PINMUX_INPUT_PULLDOWN_END, /* PORT0_OUT -> PORT211_OUT */ PINMUX_OUTPUT_BEGIN, PORT_ALL(OUT), PINMUX_OUTPUT_END, PINMUX_FUNCTION_BEGIN, PORT_ALL(FN_IN), /* PORT0_FN_IN -> PORT211_FN_IN */ PORT_ALL(FN_OUT), /* PORT0_FN_OUT -> PORT211_FN_OUT */ PORT_ALL(FN0), /* PORT0_FN0 -> PORT211_FN0 */ PORT_ALL(FN1), /* PORT0_FN1 -> PORT211_FN1 */ PORT_ALL(FN2), /* PORT0_FN2 -> PORT211_FN2 */ PORT_ALL(FN3), /* PORT0_FN3 -> PORT211_FN3 */ PORT_ALL(FN4), /* PORT0_FN4 -> PORT211_FN4 */ PORT_ALL(FN5), /* PORT0_FN5 -> PORT211_FN5 */ PORT_ALL(FN6), /* PORT0_FN6 -> PORT211_FN6 */ PORT_ALL(FN7), /* PORT0_FN7 -> PORT211_FN7 */ MSEL1CR_31_0, MSEL1CR_31_1, MSEL1CR_30_0, MSEL1CR_30_1, MSEL1CR_29_0, MSEL1CR_29_1, MSEL1CR_28_0, MSEL1CR_28_1, MSEL1CR_27_0, MSEL1CR_27_1, MSEL1CR_26_0, MSEL1CR_26_1, MSEL1CR_16_0, MSEL1CR_16_1, MSEL1CR_15_0, MSEL1CR_15_1, MSEL1CR_14_0, MSEL1CR_14_1, MSEL1CR_13_0, MSEL1CR_13_1, MSEL1CR_12_0, MSEL1CR_12_1, MSEL1CR_9_0, MSEL1CR_9_1, MSEL1CR_7_0, MSEL1CR_7_1, MSEL1CR_6_0, MSEL1CR_6_1, MSEL1CR_5_0, MSEL1CR_5_1, MSEL1CR_4_0, MSEL1CR_4_1, MSEL1CR_3_0, MSEL1CR_3_1, MSEL1CR_2_0, MSEL1CR_2_1, MSEL1CR_0_0, MSEL1CR_0_1, MSEL3CR_15_0, MSEL3CR_15_1, /* Trace / Debug ? 
*/ MSEL3CR_6_0, MSEL3CR_6_1, MSEL4CR_19_0, MSEL4CR_19_1, MSEL4CR_18_0, MSEL4CR_18_1, MSEL4CR_15_0, MSEL4CR_15_1, MSEL4CR_10_0, MSEL4CR_10_1, MSEL4CR_6_0, MSEL4CR_6_1, MSEL4CR_4_0, MSEL4CR_4_1, MSEL4CR_1_0, MSEL4CR_1_1, MSEL5CR_31_0, MSEL5CR_31_1, /* irq/fiq output */ MSEL5CR_30_0, MSEL5CR_30_1, MSEL5CR_29_0, MSEL5CR_29_1, MSEL5CR_27_0, MSEL5CR_27_1, MSEL5CR_25_0, MSEL5CR_25_1, MSEL5CR_23_0, MSEL5CR_23_1, MSEL5CR_21_0, MSEL5CR_21_1, MSEL5CR_19_0, MSEL5CR_19_1, MSEL5CR_17_0, MSEL5CR_17_1, MSEL5CR_15_0, MSEL5CR_15_1, MSEL5CR_14_0, MSEL5CR_14_1, MSEL5CR_13_0, MSEL5CR_13_1, MSEL5CR_12_0, MSEL5CR_12_1, MSEL5CR_11_0, MSEL5CR_11_1, MSEL5CR_10_0, MSEL5CR_10_1, MSEL5CR_8_0, MSEL5CR_8_1, MSEL5CR_7_0, MSEL5CR_7_1, MSEL5CR_6_0, MSEL5CR_6_1, MSEL5CR_5_0, MSEL5CR_5_1, MSEL5CR_4_0, MSEL5CR_4_1, MSEL5CR_3_0, MSEL5CR_3_1, MSEL5CR_2_0, MSEL5CR_2_1, MSEL5CR_0_0, MSEL5CR_0_1, PINMUX_FUNCTION_END, PINMUX_MARK_BEGIN, /* IRQ */ IRQ0_PORT2_MARK, IRQ0_PORT13_MARK, IRQ1_MARK, IRQ2_PORT11_MARK, IRQ2_PORT12_MARK, IRQ3_PORT10_MARK, IRQ3_PORT14_MARK, IRQ4_PORT15_MARK, IRQ4_PORT172_MARK, IRQ5_PORT0_MARK, IRQ5_PORT1_MARK, IRQ6_PORT121_MARK, IRQ6_PORT173_MARK, IRQ7_PORT120_MARK, IRQ7_PORT209_MARK, IRQ8_MARK, IRQ9_PORT118_MARK, IRQ9_PORT210_MARK, IRQ10_MARK, IRQ11_MARK, IRQ12_PORT42_MARK, IRQ12_PORT97_MARK, IRQ13_PORT64_MARK, IRQ13_PORT98_MARK, IRQ14_PORT63_MARK, IRQ14_PORT99_MARK, IRQ15_PORT62_MARK, IRQ15_PORT100_MARK, IRQ16_PORT68_MARK, IRQ16_PORT211_MARK, IRQ17_MARK, IRQ18_MARK, IRQ19_MARK, IRQ20_MARK, IRQ21_MARK, IRQ22_MARK, IRQ23_MARK, IRQ24_MARK, IRQ25_MARK, IRQ26_PORT58_MARK, IRQ26_PORT81_MARK, IRQ27_PORT57_MARK, IRQ27_PORT168_MARK, IRQ28_PORT56_MARK, IRQ28_PORT169_MARK, IRQ29_PORT50_MARK, IRQ29_PORT170_MARK, IRQ30_PORT49_MARK, IRQ30_PORT171_MARK, IRQ31_PORT41_MARK, IRQ31_PORT167_MARK, /* Function */ /* DBGT */ DBGMDT2_MARK, DBGMDT1_MARK, DBGMDT0_MARK, DBGMD10_MARK, DBGMD11_MARK, DBGMD20_MARK, DBGMD21_MARK, /* FSI-A */ FSIAISLD_PORT0_MARK, /* FSIAISLD Port 0/5 */ FSIAISLD_PORT5_MARK, FSIASPDIF_PORT9_MARK, /* FSIASPDIF Port 9/18 */ FSIASPDIF_PORT18_MARK, FSIAOSLD1_MARK, FSIAOSLD2_MARK, FSIAOLR_MARK, FSIAOBT_MARK, FSIAOSLD_MARK, FSIAOMC_MARK, FSIACK_MARK, FSIAILR_MARK, FSIAIBT_MARK, /* FSI-B */ FSIBCK_MARK, /* FMSI */ FMSISLD_PORT1_MARK, /* FMSISLD Port 1/6 */ FMSISLD_PORT6_MARK, FMSIILR_MARK, FMSIIBT_MARK, FMSIOLR_MARK, FMSIOBT_MARK, FMSICK_MARK, FMSOILR_MARK, FMSOIBT_MARK, FMSOOLR_MARK, FMSOOBT_MARK, FMSOSLD_MARK, FMSOCK_MARK, /* SCIFA0 */ SCIFA0_SCK_MARK, SCIFA0_CTS_MARK, SCIFA0_RTS_MARK, SCIFA0_RXD_MARK, SCIFA0_TXD_MARK, /* SCIFA1 */ SCIFA1_CTS_MARK, SCIFA1_SCK_MARK, SCIFA1_RXD_MARK, SCIFA1_TXD_MARK, SCIFA1_RTS_MARK, /* SCIFA2 */ SCIFA2_SCK_PORT22_MARK, /* SCIFA2_SCK Port 22/199 */ SCIFA2_SCK_PORT199_MARK, SCIFA2_RXD_MARK, SCIFA2_TXD_MARK, SCIFA2_CTS_MARK, SCIFA2_RTS_MARK, /* SCIFA3 */ SCIFA3_RTS_PORT105_MARK, /* MSEL5CR_8_0 */ SCIFA3_SCK_PORT116_MARK, SCIFA3_CTS_PORT117_MARK, SCIFA3_RXD_PORT174_MARK, SCIFA3_TXD_PORT175_MARK, SCIFA3_RTS_PORT161_MARK, /* MSEL5CR_8_1 */ SCIFA3_SCK_PORT158_MARK, SCIFA3_CTS_PORT162_MARK, SCIFA3_RXD_PORT159_MARK, SCIFA3_TXD_PORT160_MARK, /* SCIFA4 */ SCIFA4_RXD_PORT12_MARK, /* MSEL5CR[12:11] = 00 */ SCIFA4_TXD_PORT13_MARK, SCIFA4_RXD_PORT204_MARK, /* MSEL5CR[12:11] = 01 */ SCIFA4_TXD_PORT203_MARK, SCIFA4_RXD_PORT94_MARK, /* MSEL5CR[12:11] = 10 */ SCIFA4_TXD_PORT93_MARK, SCIFA4_SCK_PORT21_MARK, /* SCIFA4_SCK Port 21/205 */ SCIFA4_SCK_PORT205_MARK, /* SCIFA5 */ SCIFA5_TXD_PORT20_MARK, /* MSEL5CR[15:14] = 00 */ SCIFA5_RXD_PORT10_MARK, SCIFA5_RXD_PORT207_MARK, /* MSEL5CR[15:14] = 01 */ 
SCIFA5_TXD_PORT208_MARK, SCIFA5_TXD_PORT91_MARK, /* MSEL5CR[15:14] = 10 */ SCIFA5_RXD_PORT92_MARK, SCIFA5_SCK_PORT23_MARK, /* SCIFA5_SCK Port 23/206 */ SCIFA5_SCK_PORT206_MARK, /* SCIFA6 */ SCIFA6_SCK_MARK, SCIFA6_RXD_MARK, SCIFA6_TXD_MARK, /* SCIFA7 */ SCIFA7_TXD_MARK, SCIFA7_RXD_MARK, /* SCIFAB */ SCIFB_SCK_PORT190_MARK, /* MSEL5CR_17_0 */ SCIFB_RXD_PORT191_MARK, SCIFB_TXD_PORT192_MARK, SCIFB_RTS_PORT186_MARK, SCIFB_CTS_PORT187_MARK, SCIFB_SCK_PORT2_MARK, /* MSEL5CR_17_1 */ SCIFB_RXD_PORT3_MARK, SCIFB_TXD_PORT4_MARK, SCIFB_RTS_PORT172_MARK, SCIFB_CTS_PORT173_MARK, /* LCD0 */ LCDC0_SELECT_MARK, LCD0_D0_MARK, LCD0_D1_MARK, LCD0_D2_MARK, LCD0_D3_MARK, LCD0_D4_MARK, LCD0_D5_MARK, LCD0_D6_MARK, LCD0_D7_MARK, LCD0_D8_MARK, LCD0_D9_MARK, LCD0_D10_MARK, LCD0_D11_MARK, LCD0_D12_MARK, LCD0_D13_MARK, LCD0_D14_MARK, LCD0_D15_MARK, LCD0_D16_MARK, LCD0_D17_MARK, LCD0_DON_MARK, LCD0_VCPWC_MARK, LCD0_VEPWC_MARK, LCD0_DCK_MARK, LCD0_VSYN_MARK, /* for RGB */ LCD0_HSYN_MARK, LCD0_DISP_MARK, /* for RGB */ LCD0_WR_MARK, LCD0_RD_MARK, /* for SYS */ LCD0_CS_MARK, LCD0_RS_MARK, /* for SYS */ LCD0_D21_PORT158_MARK, LCD0_D23_PORT159_MARK, /* MSEL5CR_6_1 */ LCD0_D22_PORT160_MARK, LCD0_D20_PORT161_MARK, LCD0_D19_PORT162_MARK, LCD0_D18_PORT163_MARK, LCD0_LCLK_PORT165_MARK, LCD0_D18_PORT40_MARK, LCD0_D22_PORT0_MARK, /* MSEL5CR_6_0 */ LCD0_D23_PORT1_MARK, LCD0_D21_PORT2_MARK, LCD0_D20_PORT3_MARK, LCD0_D19_PORT4_MARK, LCD0_LCLK_PORT102_MARK, /* LCD1 */ LCDC1_SELECT_MARK, LCD1_D0_MARK, LCD1_D1_MARK, LCD1_D2_MARK, LCD1_D3_MARK, LCD1_D4_MARK, LCD1_D5_MARK, LCD1_D6_MARK, LCD1_D7_MARK, LCD1_D8_MARK, LCD1_D9_MARK, LCD1_D10_MARK, LCD1_D11_MARK, LCD1_D12_MARK, LCD1_D13_MARK, LCD1_D14_MARK, LCD1_D15_MARK, LCD1_D16_MARK, LCD1_D17_MARK, LCD1_D18_MARK, LCD1_D19_MARK, LCD1_D20_MARK, LCD1_D21_MARK, LCD1_D22_MARK, LCD1_D23_MARK, LCD1_DON_MARK, LCD1_VCPWC_MARK, LCD1_LCLK_MARK, LCD1_VEPWC_MARK, LCD1_DCK_MARK, LCD1_VSYN_MARK, /* for RGB */ LCD1_HSYN_MARK, LCD1_DISP_MARK, /* for RGB */ LCD1_RS_MARK, LCD1_CS_MARK, /* for SYS */ LCD1_RD_MARK, LCD1_WR_MARK, /* for SYS */ /* RSPI */ RSPI_SSL0_A_MARK, RSPI_SSL1_A_MARK, RSPI_SSL2_A_MARK, RSPI_SSL3_A_MARK, RSPI_CK_A_MARK, RSPI_MOSI_A_MARK, RSPI_MISO_A_MARK, /* VIO CKO */ VIO_CKO1_MARK, /* needs fixup */ VIO_CKO2_MARK, VIO_CKO_1_MARK, VIO_CKO_MARK, /* VIO0 */ VIO0_D0_MARK, VIO0_D1_MARK, VIO0_D2_MARK, VIO0_D3_MARK, VIO0_D4_MARK, VIO0_D5_MARK, VIO0_D6_MARK, VIO0_D7_MARK, VIO0_D8_MARK, VIO0_D9_MARK, VIO0_D10_MARK, VIO0_D11_MARK, VIO0_D12_MARK, VIO0_VD_MARK, VIO0_HD_MARK, VIO0_CLK_MARK, VIO0_FIELD_MARK, VIO0_D13_PORT26_MARK, /* MSEL5CR_27_0 */ VIO0_D14_PORT25_MARK, VIO0_D15_PORT24_MARK, VIO0_D13_PORT22_MARK, /* MSEL5CR_27_1 */ VIO0_D14_PORT95_MARK, VIO0_D15_PORT96_MARK, /* VIO1 */ VIO1_D0_MARK, VIO1_D1_MARK, VIO1_D2_MARK, VIO1_D3_MARK, VIO1_D4_MARK, VIO1_D5_MARK, VIO1_D6_MARK, VIO1_D7_MARK, VIO1_VD_MARK, VIO1_HD_MARK, VIO1_CLK_MARK, VIO1_FIELD_MARK, /* TPU0 */ TPU0TO0_MARK, TPU0TO1_MARK, TPU0TO3_MARK, TPU0TO2_PORT66_MARK, /* TPU0TO2 Port 66/202 */ TPU0TO2_PORT202_MARK, /* SSP1 0 */ STP0_IPD0_MARK, STP0_IPD1_MARK, STP0_IPD2_MARK, STP0_IPD3_MARK, STP0_IPD4_MARK, STP0_IPD5_MARK, STP0_IPD6_MARK, STP0_IPD7_MARK, STP0_IPEN_MARK, STP0_IPCLK_MARK, STP0_IPSYNC_MARK, /* SSP1 1 */ STP1_IPD1_MARK, STP1_IPD2_MARK, STP1_IPD3_MARK, STP1_IPD4_MARK, STP1_IPD5_MARK, STP1_IPD6_MARK, STP1_IPD7_MARK, STP1_IPCLK_MARK, STP1_IPSYNC_MARK, STP1_IPD0_PORT186_MARK, /* MSEL5CR_23_0 */ STP1_IPEN_PORT187_MARK, STP1_IPD0_PORT194_MARK, /* MSEL5CR_23_1 */ STP1_IPEN_PORT193_MARK, /* SIM */ SIM_RST_MARK, SIM_CLK_MARK, 
SIM_D_PORT22_MARK, /* SIM_D Port 22/199 */ SIM_D_PORT199_MARK, /* SDHI0 */ SDHI0_D0_MARK, SDHI0_D1_MARK, SDHI0_D2_MARK, SDHI0_D3_MARK, SDHI0_CD_MARK, SDHI0_WP_MARK, SDHI0_CMD_MARK, SDHI0_CLK_MARK, /* SDHI1 */ SDHI1_D0_MARK, SDHI1_D1_MARK, SDHI1_D2_MARK, SDHI1_D3_MARK, SDHI1_CD_MARK, SDHI1_WP_MARK, SDHI1_CMD_MARK, SDHI1_CLK_MARK, /* SDHI2 */ SDHI2_D0_MARK, SDHI2_D1_MARK, SDHI2_D2_MARK, SDHI2_D3_MARK, SDHI2_CLK_MARK, SDHI2_CMD_MARK, SDHI2_CD_PORT24_MARK, /* MSEL5CR_19_0 */ SDHI2_WP_PORT25_MARK, SDHI2_WP_PORT177_MARK, /* MSEL5CR_19_1 */ SDHI2_CD_PORT202_MARK, /* MSIOF2 */ MSIOF2_TXD_MARK, MSIOF2_RXD_MARK, MSIOF2_TSCK_MARK, MSIOF2_SS2_MARK, MSIOF2_TSYNC_MARK, MSIOF2_SS1_MARK, MSIOF2_MCK1_MARK, MSIOF2_MCK0_MARK, MSIOF2_RSYNC_MARK, MSIOF2_RSCK_MARK, /* KEYSC */ KEYIN4_MARK, KEYIN5_MARK, KEYIN6_MARK, KEYIN7_MARK, KEYOUT0_MARK, KEYOUT1_MARK, KEYOUT2_MARK, KEYOUT3_MARK, KEYOUT4_MARK, KEYOUT5_MARK, KEYOUT6_MARK, KEYOUT7_MARK, KEYIN0_PORT43_MARK, /* MSEL4CR_18_0 */ KEYIN1_PORT44_MARK, KEYIN2_PORT45_MARK, KEYIN3_PORT46_MARK, KEYIN0_PORT58_MARK, /* MSEL4CR_18_1 */ KEYIN1_PORT57_MARK, KEYIN2_PORT56_MARK, KEYIN3_PORT55_MARK, /* VOU */ DV_D0_MARK, DV_D1_MARK, DV_D2_MARK, DV_D3_MARK, DV_D4_MARK, DV_D5_MARK, DV_D6_MARK, DV_D7_MARK, DV_D8_MARK, DV_D9_MARK, DV_D10_MARK, DV_D11_MARK, DV_D12_MARK, DV_D13_MARK, DV_D14_MARK, DV_D15_MARK, DV_CLK_MARK, DV_VSYNC_MARK, DV_HSYNC_MARK, /* MEMC */ MEMC_AD0_MARK, MEMC_AD1_MARK, MEMC_AD2_MARK, MEMC_AD3_MARK, MEMC_AD4_MARK, MEMC_AD5_MARK, MEMC_AD6_MARK, MEMC_AD7_MARK, MEMC_AD8_MARK, MEMC_AD9_MARK, MEMC_AD10_MARK, MEMC_AD11_MARK, MEMC_AD12_MARK, MEMC_AD13_MARK, MEMC_AD14_MARK, MEMC_AD15_MARK, MEMC_CS0_MARK, MEMC_INT_MARK, MEMC_NWE_MARK, MEMC_NOE_MARK, MEMC_CS1_MARK, /* MSEL4CR_6_0 */ MEMC_ADV_MARK, MEMC_WAIT_MARK, MEMC_BUSCLK_MARK, MEMC_A1_MARK, /* MSEL4CR_6_1 */ MEMC_DREQ0_MARK, MEMC_DREQ1_MARK, MEMC_A0_MARK, /* MMC */ MMC0_D0_PORT68_MARK, MMC0_D1_PORT69_MARK, MMC0_D2_PORT70_MARK, MMC0_D3_PORT71_MARK, MMC0_D4_PORT72_MARK, MMC0_D5_PORT73_MARK, MMC0_D6_PORT74_MARK, MMC0_D7_PORT75_MARK, MMC0_CLK_PORT66_MARK, MMC0_CMD_PORT67_MARK, /* MSEL4CR_15_0 */ MMC1_D0_PORT149_MARK, MMC1_D1_PORT148_MARK, MMC1_D2_PORT147_MARK, MMC1_D3_PORT146_MARK, MMC1_D4_PORT145_MARK, MMC1_D5_PORT144_MARK, MMC1_D6_PORT143_MARK, MMC1_D7_PORT142_MARK, MMC1_CLK_PORT103_MARK, MMC1_CMD_PORT104_MARK, /* MSEL4CR_15_1 */ /* MSIOF0 */ MSIOF0_SS1_MARK, MSIOF0_SS2_MARK, MSIOF0_RXD_MARK, MSIOF0_TXD_MARK, MSIOF0_MCK0_MARK, MSIOF0_MCK1_MARK, MSIOF0_RSYNC_MARK, MSIOF0_RSCK_MARK, MSIOF0_TSCK_MARK, MSIOF0_TSYNC_MARK, /* MSIOF1 */ MSIOF1_RSCK_MARK, MSIOF1_RSYNC_MARK, MSIOF1_MCK0_MARK, MSIOF1_MCK1_MARK, MSIOF1_SS2_PORT116_MARK, MSIOF1_SS1_PORT117_MARK, MSIOF1_RXD_PORT118_MARK, MSIOF1_TXD_PORT119_MARK, MSIOF1_TSYNC_PORT120_MARK, MSIOF1_TSCK_PORT121_MARK, /* MSEL4CR_10_0 */ MSIOF1_SS1_PORT67_MARK, MSIOF1_TSCK_PORT72_MARK, MSIOF1_TSYNC_PORT73_MARK, MSIOF1_TXD_PORT74_MARK, MSIOF1_RXD_PORT75_MARK, MSIOF1_SS2_PORT202_MARK, /* MSEL4CR_10_1 */ /* GPIO */ GPO0_MARK, GPI0_MARK, GPO1_MARK, GPI1_MARK, /* USB0 */ USB0_OCI_MARK, USB0_PPON_MARK, VBUS_MARK, /* USB1 */ USB1_OCI_MARK, USB1_PPON_MARK, /* BBIF1 */ BBIF1_RXD_MARK, BBIF1_TXD_MARK, BBIF1_TSYNC_MARK, BBIF1_TSCK_MARK, BBIF1_RSCK_MARK, BBIF1_RSYNC_MARK, BBIF1_FLOW_MARK, BBIF1_RX_FLOW_N_MARK, /* BBIF2 */ BBIF2_TXD2_PORT5_MARK, /* MSEL5CR_0_0 */ BBIF2_RXD2_PORT60_MARK, BBIF2_TSYNC2_PORT6_MARK, BBIF2_TSCK2_PORT59_MARK, BBIF2_RXD2_PORT90_MARK, /* MSEL5CR_0_1 */ BBIF2_TXD2_PORT183_MARK, BBIF2_TSCK2_PORT89_MARK, BBIF2_TSYNC2_PORT184_MARK, /* BSC / FLCTL / PCMCIA */ CS0_MARK, CS2_MARK, 
CS4_MARK, CS5B_MARK, CS6A_MARK, CS5A_PORT105_MARK, /* CS5A PORT 19/105 */ CS5A_PORT19_MARK, IOIS16_MARK, /* ? */ A0_MARK, A1_MARK, A2_MARK, A3_MARK, A4_FOE_MARK, /* share with FLCTL */ A5_FCDE_MARK, /* share with FLCTL */ A6_MARK, A7_MARK, A8_MARK, A9_MARK, A10_MARK, A11_MARK, A12_MARK, A13_MARK, A14_MARK, A15_MARK, A16_MARK, A17_MARK, A18_MARK, A19_MARK, A20_MARK, A21_MARK, A22_MARK, A23_MARK, A24_MARK, A25_MARK, A26_MARK, D0_NAF0_MARK, D1_NAF1_MARK, D2_NAF2_MARK, /* share with FLCTL */ D3_NAF3_MARK, D4_NAF4_MARK, D5_NAF5_MARK, /* share with FLCTL */ D6_NAF6_MARK, D7_NAF7_MARK, D8_NAF8_MARK, /* share with FLCTL */ D9_NAF9_MARK, D10_NAF10_MARK, D11_NAF11_MARK, /* share with FLCTL */ D12_NAF12_MARK, D13_NAF13_MARK, D14_NAF14_MARK, /* share with FLCTL */ D15_NAF15_MARK, /* share with FLCTL */ D16_MARK, D17_MARK, D18_MARK, D19_MARK, D20_MARK, D21_MARK, D22_MARK, D23_MARK, D24_MARK, D25_MARK, D26_MARK, D27_MARK, D28_MARK, D29_MARK, D30_MARK, D31_MARK, WE0_FWE_MARK, /* share with FLCTL */ WE1_MARK, WE2_ICIORD_MARK, /* share with PCMCIA */ WE3_ICIOWR_MARK, /* share with PCMCIA */ CKO_MARK, BS_MARK, RDWR_MARK, RD_FSC_MARK, /* share with FLCTL */ WAIT_PORT177_MARK, /* WAIT Port 90/177 */ WAIT_PORT90_MARK, FCE0_MARK, FCE1_MARK, FRB_MARK, /* FLCTL */ /* IRDA */ IRDA_FIRSEL_MARK, IRDA_IN_MARK, IRDA_OUT_MARK, /* ATAPI */ IDE_D0_MARK, IDE_D1_MARK, IDE_D2_MARK, IDE_D3_MARK, IDE_D4_MARK, IDE_D5_MARK, IDE_D6_MARK, IDE_D7_MARK, IDE_D8_MARK, IDE_D9_MARK, IDE_D10_MARK, IDE_D11_MARK, IDE_D12_MARK, IDE_D13_MARK, IDE_D14_MARK, IDE_D15_MARK, IDE_A0_MARK, IDE_A1_MARK, IDE_A2_MARK, IDE_CS0_MARK, IDE_CS1_MARK, IDE_IOWR_MARK, IDE_IORD_MARK, IDE_IORDY_MARK, IDE_INT_MARK, IDE_RST_MARK, IDE_DIRECTION_MARK, IDE_EXBUF_ENB_MARK, IDE_IODACK_MARK, IDE_IODREQ_MARK, /* RMII */ RMII_CRS_DV_MARK, RMII_RX_ER_MARK, RMII_RXD0_MARK, RMII_RXD1_MARK, RMII_TX_EN_MARK, RMII_TXD0_MARK, RMII_MDC_MARK, RMII_TXD1_MARK, RMII_MDIO_MARK, RMII_REF50CK_MARK, /* for RMII */ RMII_REF125CK_MARK, /* for GMII */ /* GEther */ ET_TX_CLK_MARK, ET_TX_EN_MARK, ET_ETXD0_MARK, ET_ETXD1_MARK, ET_ETXD2_MARK, ET_ETXD3_MARK, ET_ETXD4_MARK, ET_ETXD5_MARK, /* for GEther */ ET_ETXD6_MARK, ET_ETXD7_MARK, /* for GEther */ ET_COL_MARK, ET_TX_ER_MARK, ET_RX_CLK_MARK, ET_RX_DV_MARK, ET_ERXD0_MARK, ET_ERXD1_MARK, ET_ERXD2_MARK, ET_ERXD3_MARK, ET_ERXD4_MARK, ET_ERXD5_MARK, /* for GEther */ ET_ERXD6_MARK, ET_ERXD7_MARK, /* for GEther */ ET_RX_ER_MARK, ET_CRS_MARK, ET_MDC_MARK, ET_MDIO_MARK, ET_LINK_MARK, ET_PHY_INT_MARK, ET_WOL_MARK, ET_GTX_CLK_MARK, /* DMA0 */ DREQ0_MARK, DACK0_MARK, /* DMA1 */ DREQ1_MARK, DACK1_MARK, /* SYSC */ RESETOUTS_MARK, RESETP_PULLUP_MARK, RESETP_PLAIN_MARK, /* IRREM */ IROUT_MARK, /* SDENC */ SDENC_CPG_MARK, SDENC_DV_CLKI_MARK, /* HDMI */ HDMI_HPD_MARK, HDMI_CEC_MARK, /* DEBUG */ EDEBGREQ_PULLUP_MARK, /* for JTAG */ EDEBGREQ_PULLDOWN_MARK, TRACEAUD_FROM_VIO_MARK, /* for TRACE/AUD */ TRACEAUD_FROM_LCDC0_MARK, TRACEAUD_FROM_MEMC_MARK, PINMUX_MARK_END, }; static const pinmux_enum_t pinmux_data[] = { /* specify valid pin states for each pin in GPIO mode */ /* I/O and Pull U/D */ PORT_DATA_IO_PD(0), PORT_DATA_IO_PD(1), PORT_DATA_IO_PD(2), PORT_DATA_IO_PD(3), PORT_DATA_IO_PD(4), PORT_DATA_IO_PD(5), PORT_DATA_IO_PD(6), PORT_DATA_IO(7), PORT_DATA_IO(8), PORT_DATA_IO(9), PORT_DATA_IO_PD(10), PORT_DATA_IO_PD(11), PORT_DATA_IO_PD(12), PORT_DATA_IO_PU_PD(13), PORT_DATA_IO_PD(14), PORT_DATA_IO_PD(15), PORT_DATA_IO_PD(16), PORT_DATA_IO_PD(17), PORT_DATA_IO(18), PORT_DATA_IO_PU(19), PORT_DATA_IO_PU_PD(20), PORT_DATA_IO_PD(21), PORT_DATA_IO_PU_PD(22), 
PORT_DATA_IO(23), PORT_DATA_IO_PU(24), PORT_DATA_IO_PU(25), PORT_DATA_IO_PU(26), PORT_DATA_IO_PU(27), PORT_DATA_IO_PU(28), PORT_DATA_IO_PU(29), PORT_DATA_IO_PU(30), PORT_DATA_IO_PD(31), PORT_DATA_IO_PD(32), PORT_DATA_IO_PD(33), PORT_DATA_IO_PD(34), PORT_DATA_IO_PU(35), PORT_DATA_IO_PU(36), PORT_DATA_IO_PD(37), PORT_DATA_IO_PU(38), PORT_DATA_IO_PD(39), PORT_DATA_IO_PU_PD(40), PORT_DATA_IO_PD(41), PORT_DATA_IO_PD(42), PORT_DATA_IO_PU_PD(43), PORT_DATA_IO_PU_PD(44), PORT_DATA_IO_PU_PD(45), PORT_DATA_IO_PU_PD(46), PORT_DATA_IO_PU_PD(47), PORT_DATA_IO_PU_PD(48), PORT_DATA_IO_PU_PD(49), PORT_DATA_IO_PU_PD(50), PORT_DATA_IO_PD(51), PORT_DATA_IO_PD(52), PORT_DATA_IO_PD(53), PORT_DATA_IO_PD(54), PORT_DATA_IO_PU_PD(55), PORT_DATA_IO_PU_PD(56), PORT_DATA_IO_PU_PD(57), PORT_DATA_IO_PU_PD(58), PORT_DATA_IO_PU_PD(59), PORT_DATA_IO_PU_PD(60), PORT_DATA_IO_PD(61), PORT_DATA_IO_PD(62), PORT_DATA_IO_PD(63), PORT_DATA_IO_PD(64), PORT_DATA_IO_PD(65), PORT_DATA_IO_PU_PD(66), PORT_DATA_IO_PU_PD(67), PORT_DATA_IO_PU_PD(68), PORT_DATA_IO_PU_PD(69), PORT_DATA_IO_PU_PD(70), PORT_DATA_IO_PU_PD(71), PORT_DATA_IO_PU_PD(72), PORT_DATA_IO_PU_PD(73), PORT_DATA_IO_PU_PD(74), PORT_DATA_IO_PU_PD(75), PORT_DATA_IO_PU_PD(76), PORT_DATA_IO_PU_PD(77), PORT_DATA_IO_PU_PD(78), PORT_DATA_IO_PU_PD(79), PORT_DATA_IO_PU_PD(80), PORT_DATA_IO_PU_PD(81), PORT_DATA_IO(82), PORT_DATA_IO_PU_PD(83), PORT_DATA_IO(84), PORT_DATA_IO_PD(85), PORT_DATA_IO_PD(86), PORT_DATA_IO_PD(87), PORT_DATA_IO_PD(88), PORT_DATA_IO_PD(89), PORT_DATA_IO_PD(90), PORT_DATA_IO_PU_PD(91), PORT_DATA_IO_PU_PD(92), PORT_DATA_IO_PU_PD(93), PORT_DATA_IO_PU_PD(94), PORT_DATA_IO_PU_PD(95), PORT_DATA_IO_PU_PD(96), PORT_DATA_IO_PU_PD(97), PORT_DATA_IO_PU_PD(98), PORT_DATA_IO_PU_PD(99), PORT_DATA_IO_PU_PD(100), PORT_DATA_IO(101), PORT_DATA_IO_PU(102), PORT_DATA_IO_PU_PD(103), PORT_DATA_IO_PU(104), PORT_DATA_IO_PU(105), PORT_DATA_IO_PU_PD(106), PORT_DATA_IO(107), PORT_DATA_IO(108), PORT_DATA_IO(109), PORT_DATA_IO(110), PORT_DATA_IO(111), PORT_DATA_IO(112), PORT_DATA_IO(113), PORT_DATA_IO_PU_PD(114), PORT_DATA_IO(115), PORT_DATA_IO_PD(116), PORT_DATA_IO_PD(117), PORT_DATA_IO_PD(118), PORT_DATA_IO_PD(119), PORT_DATA_IO_PD(120), PORT_DATA_IO_PD(121), PORT_DATA_IO_PD(122), PORT_DATA_IO_PD(123), PORT_DATA_IO_PD(124), PORT_DATA_IO(125), PORT_DATA_IO(126), PORT_DATA_IO(127), PORT_DATA_IO(128), PORT_DATA_IO(129), PORT_DATA_IO(130), PORT_DATA_IO(131), PORT_DATA_IO(132), PORT_DATA_IO(133), PORT_DATA_IO(134), PORT_DATA_IO(135), PORT_DATA_IO(136), PORT_DATA_IO(137), PORT_DATA_IO(138), PORT_DATA_IO(139), PORT_DATA_IO(140), PORT_DATA_IO(141), PORT_DATA_IO_PU(142), PORT_DATA_IO_PU(143), PORT_DATA_IO_PU(144), PORT_DATA_IO_PU(145), PORT_DATA_IO_PU(146), PORT_DATA_IO_PU(147), PORT_DATA_IO_PU(148), PORT_DATA_IO_PU(149), PORT_DATA_IO_PU(150), PORT_DATA_IO_PU(151), PORT_DATA_IO_PU(152), PORT_DATA_IO_PU(153), PORT_DATA_IO_PU(154), PORT_DATA_IO_PU(155), PORT_DATA_IO_PU(156), PORT_DATA_IO_PU(157), PORT_DATA_IO_PD(158), PORT_DATA_IO_PD(159), PORT_DATA_IO_PU_PD(160), PORT_DATA_IO_PD(161), PORT_DATA_IO_PD(162), PORT_DATA_IO_PD(163), PORT_DATA_IO_PD(164), PORT_DATA_IO_PD(165), PORT_DATA_IO_PU(166), PORT_DATA_IO_PU(167), PORT_DATA_IO_PU(168), PORT_DATA_IO_PU(169), PORT_DATA_IO_PU(170), PORT_DATA_IO_PU(171), PORT_DATA_IO_PD(172), PORT_DATA_IO_PD(173), PORT_DATA_IO_PD(174), PORT_DATA_IO_PD(175), PORT_DATA_IO_PU(176), PORT_DATA_IO_PU_PD(177), PORT_DATA_IO_PU(178), PORT_DATA_IO_PD(179), PORT_DATA_IO_PD(180), PORT_DATA_IO_PU(181), PORT_DATA_IO_PU(182), PORT_DATA_IO(183), PORT_DATA_IO_PD(184), 
PORT_DATA_IO_PD(185), PORT_DATA_IO_PD(186), PORT_DATA_IO_PD(187), PORT_DATA_IO_PD(188), PORT_DATA_IO_PD(189), PORT_DATA_IO_PD(190), PORT_DATA_IO_PD(191), PORT_DATA_IO_PD(192), PORT_DATA_IO_PU_PD(193), PORT_DATA_IO_PU_PD(194), PORT_DATA_IO_PD(195), PORT_DATA_IO_PU_PD(196), PORT_DATA_IO_PD(197), PORT_DATA_IO_PU_PD(198), PORT_DATA_IO_PU_PD(199), PORT_DATA_IO_PU_PD(200), PORT_DATA_IO_PU(201), PORT_DATA_IO_PU_PD(202), PORT_DATA_IO(203), PORT_DATA_IO_PU_PD(204), PORT_DATA_IO_PU_PD(205), PORT_DATA_IO_PU_PD(206), PORT_DATA_IO_PU_PD(207), PORT_DATA_IO_PU_PD(208), PORT_DATA_IO_PD(209), PORT_DATA_IO_PD(210), PORT_DATA_IO_PD(211), /* Port0 */ PINMUX_DATA(DBGMDT2_MARK, PORT0_FN1), PINMUX_DATA(FSIAISLD_PORT0_MARK, PORT0_FN2, MSEL5CR_3_0), PINMUX_DATA(FSIAOSLD1_MARK, PORT0_FN3), PINMUX_DATA(LCD0_D22_PORT0_MARK, PORT0_FN4, MSEL5CR_6_0), PINMUX_DATA(SCIFA7_RXD_MARK, PORT0_FN6), PINMUX_DATA(LCD1_D4_MARK, PORT0_FN7), PINMUX_DATA(IRQ5_PORT0_MARK, PORT0_FN0, MSEL1CR_5_0), /* Port1 */ PINMUX_DATA(DBGMDT1_MARK, PORT1_FN1), PINMUX_DATA(FMSISLD_PORT1_MARK, PORT1_FN2, MSEL5CR_5_0), PINMUX_DATA(FSIAOSLD2_MARK, PORT1_FN3), PINMUX_DATA(LCD0_D23_PORT1_MARK, PORT1_FN4, MSEL5CR_6_0), PINMUX_DATA(SCIFA7_TXD_MARK, PORT1_FN6), PINMUX_DATA(LCD1_D3_MARK, PORT1_FN7), PINMUX_DATA(IRQ5_PORT1_MARK, PORT1_FN0, MSEL1CR_5_1), /* Port2 */ PINMUX_DATA(DBGMDT0_MARK, PORT2_FN1), PINMUX_DATA(SCIFB_SCK_PORT2_MARK, PORT2_FN2, MSEL5CR_17_1), PINMUX_DATA(LCD0_D21_PORT2_MARK, PORT2_FN4, MSEL5CR_6_0), PINMUX_DATA(LCD1_D2_MARK, PORT2_FN7), PINMUX_DATA(IRQ0_PORT2_MARK, PORT2_FN0, MSEL1CR_0_1), /* Port3 */ PINMUX_DATA(DBGMD21_MARK, PORT3_FN1), PINMUX_DATA(SCIFB_RXD_PORT3_MARK, PORT3_FN2, MSEL5CR_17_1), PINMUX_DATA(LCD0_D20_PORT3_MARK, PORT3_FN4, MSEL5CR_6_0), PINMUX_DATA(LCD1_D1_MARK, PORT3_FN7), /* Port4 */ PINMUX_DATA(DBGMD20_MARK, PORT4_FN1), PINMUX_DATA(SCIFB_TXD_PORT4_MARK, PORT4_FN2, MSEL5CR_17_1), PINMUX_DATA(LCD0_D19_PORT4_MARK, PORT4_FN4, MSEL5CR_6_0), PINMUX_DATA(LCD1_D0_MARK, PORT4_FN7), /* Port5 */ PINMUX_DATA(DBGMD11_MARK, PORT5_FN1), PINMUX_DATA(BBIF2_TXD2_PORT5_MARK, PORT5_FN2, MSEL5CR_0_0), PINMUX_DATA(FSIAISLD_PORT5_MARK, PORT5_FN4, MSEL5CR_3_1), PINMUX_DATA(RSPI_SSL0_A_MARK, PORT5_FN6), PINMUX_DATA(LCD1_VCPWC_MARK, PORT5_FN7), /* Port6 */ PINMUX_DATA(DBGMD10_MARK, PORT6_FN1), PINMUX_DATA(BBIF2_TSYNC2_PORT6_MARK, PORT6_FN2, MSEL5CR_0_0), PINMUX_DATA(FMSISLD_PORT6_MARK, PORT6_FN4, MSEL5CR_5_1), PINMUX_DATA(RSPI_SSL1_A_MARK, PORT6_FN6), PINMUX_DATA(LCD1_VEPWC_MARK, PORT6_FN7), /* Port7 */ PINMUX_DATA(FSIAOLR_MARK, PORT7_FN1), /* Port8 */ PINMUX_DATA(FSIAOBT_MARK, PORT8_FN1), /* Port9 */ PINMUX_DATA(FSIAOSLD_MARK, PORT9_FN1), PINMUX_DATA(FSIASPDIF_PORT9_MARK, PORT9_FN2, MSEL5CR_4_0), /* Port10 */ PINMUX_DATA(FSIAOMC_MARK, PORT10_FN1), PINMUX_DATA(SCIFA5_RXD_PORT10_MARK, PORT10_FN3, MSEL5CR_14_0, MSEL5CR_15_0), PINMUX_DATA(IRQ3_PORT10_MARK, PORT10_FN0, MSEL1CR_3_0), /* Port11 */ PINMUX_DATA(FSIACK_MARK, PORT11_FN1), PINMUX_DATA(FSIBCK_MARK, PORT11_FN2), PINMUX_DATA(IRQ2_PORT11_MARK, PORT11_FN0, MSEL1CR_2_0), /* Port12 */ PINMUX_DATA(FSIAILR_MARK, PORT12_FN1), PINMUX_DATA(SCIFA4_RXD_PORT12_MARK, PORT12_FN2, MSEL5CR_12_0, MSEL5CR_11_0), PINMUX_DATA(LCD1_RS_MARK, PORT12_FN6), PINMUX_DATA(LCD1_DISP_MARK, PORT12_FN7), PINMUX_DATA(IRQ2_PORT12_MARK, PORT12_FN0, MSEL1CR_2_1), /* Port13 */ PINMUX_DATA(FSIAIBT_MARK, PORT13_FN1), PINMUX_DATA(SCIFA4_TXD_PORT13_MARK, PORT13_FN2, MSEL5CR_12_0, MSEL5CR_11_0), PINMUX_DATA(LCD1_RD_MARK, PORT13_FN7), PINMUX_DATA(IRQ0_PORT13_MARK, PORT13_FN0, MSEL1CR_0_0), /* Port14 */ PINMUX_DATA(FMSOILR_MARK, 
PORT14_FN1), PINMUX_DATA(FMSIILR_MARK, PORT14_FN2), PINMUX_DATA(VIO_CKO1_MARK, PORT14_FN3), PINMUX_DATA(LCD1_D23_MARK, PORT14_FN7), PINMUX_DATA(IRQ3_PORT14_MARK, PORT14_FN0, MSEL1CR_3_1), /* Port15 */ PINMUX_DATA(FMSOIBT_MARK, PORT15_FN1), PINMUX_DATA(FMSIIBT_MARK, PORT15_FN2), PINMUX_DATA(VIO_CKO2_MARK, PORT15_FN3), PINMUX_DATA(LCD1_D22_MARK, PORT15_FN7), PINMUX_DATA(IRQ4_PORT15_MARK, PORT15_FN0, MSEL1CR_4_0), /* Port16 */ PINMUX_DATA(FMSOOLR_MARK, PORT16_FN1), PINMUX_DATA(FMSIOLR_MARK, PORT16_FN2), /* Port17 */ PINMUX_DATA(FMSOOBT_MARK, PORT17_FN1), PINMUX_DATA(FMSIOBT_MARK, PORT17_FN2), /* Port18 */ PINMUX_DATA(FMSOSLD_MARK, PORT18_FN1), PINMUX_DATA(FSIASPDIF_PORT18_MARK, PORT18_FN2, MSEL5CR_4_1), /* Port19 */ PINMUX_DATA(FMSICK_MARK, PORT19_FN1), PINMUX_DATA(CS5A_PORT19_MARK, PORT19_FN7, MSEL5CR_2_1), PINMUX_DATA(IRQ10_MARK, PORT19_FN0), /* Port20 */ PINMUX_DATA(FMSOCK_MARK, PORT20_FN1), PINMUX_DATA(SCIFA5_TXD_PORT20_MARK, PORT20_FN3, MSEL5CR_15_0, MSEL5CR_14_0), PINMUX_DATA(IRQ1_MARK, PORT20_FN0), /* Port21 */ PINMUX_DATA(SCIFA1_CTS_MARK, PORT21_FN1), PINMUX_DATA(SCIFA4_SCK_PORT21_MARK, PORT21_FN2, MSEL5CR_10_0), PINMUX_DATA(TPU0TO1_MARK, PORT21_FN4), PINMUX_DATA(VIO1_FIELD_MARK, PORT21_FN5), PINMUX_DATA(STP0_IPD5_MARK, PORT21_FN6), PINMUX_DATA(LCD1_D10_MARK, PORT21_FN7), /* Port22 */ PINMUX_DATA(SCIFA2_SCK_PORT22_MARK, PORT22_FN1, MSEL5CR_7_0), PINMUX_DATA(SIM_D_PORT22_MARK, PORT22_FN4, MSEL5CR_21_0), PINMUX_DATA(VIO0_D13_PORT22_MARK, PORT22_FN7, MSEL5CR_27_1), /* Port23 */ PINMUX_DATA(SCIFA1_RTS_MARK, PORT23_FN1), PINMUX_DATA(SCIFA5_SCK_PORT23_MARK, PORT23_FN3, MSEL5CR_13_0), PINMUX_DATA(TPU0TO0_MARK, PORT23_FN4), PINMUX_DATA(VIO_CKO_1_MARK, PORT23_FN5), PINMUX_DATA(STP0_IPD2_MARK, PORT23_FN6), PINMUX_DATA(LCD1_D7_MARK, PORT23_FN7), /* Port24 */ PINMUX_DATA(VIO0_D15_PORT24_MARK, PORT24_FN1, MSEL5CR_27_0), PINMUX_DATA(VIO1_D7_MARK, PORT24_FN5), PINMUX_DATA(SCIFA6_SCK_MARK, PORT24_FN6), PINMUX_DATA(SDHI2_CD_PORT24_MARK, PORT24_FN7, MSEL5CR_19_0), /* Port25 */ PINMUX_DATA(VIO0_D14_PORT25_MARK, PORT25_FN1, MSEL5CR_27_0), PINMUX_DATA(VIO1_D6_MARK, PORT25_FN5), PINMUX_DATA(SCIFA6_RXD_MARK, PORT25_FN6), PINMUX_DATA(SDHI2_WP_PORT25_MARK, PORT25_FN7, MSEL5CR_19_0), /* Port26 */ PINMUX_DATA(VIO0_D13_PORT26_MARK, PORT26_FN1, MSEL5CR_27_0), PINMUX_DATA(VIO1_D5_MARK, PORT26_FN5), PINMUX_DATA(SCIFA6_TXD_MARK, PORT26_FN6), /* Port27 - Port39 Function */ PINMUX_DATA(VIO0_D7_MARK, PORT27_FN1), PINMUX_DATA(VIO0_D6_MARK, PORT28_FN1), PINMUX_DATA(VIO0_D5_MARK, PORT29_FN1), PINMUX_DATA(VIO0_D4_MARK, PORT30_FN1), PINMUX_DATA(VIO0_D3_MARK, PORT31_FN1), PINMUX_DATA(VIO0_D2_MARK, PORT32_FN1), PINMUX_DATA(VIO0_D1_MARK, PORT33_FN1), PINMUX_DATA(VIO0_D0_MARK, PORT34_FN1), PINMUX_DATA(VIO0_CLK_MARK, PORT35_FN1), PINMUX_DATA(VIO_CKO_MARK, PORT36_FN1), PINMUX_DATA(VIO0_HD_MARK, PORT37_FN1), PINMUX_DATA(VIO0_FIELD_MARK, PORT38_FN1), PINMUX_DATA(VIO0_VD_MARK, PORT39_FN1), /* Port38 IRQ */ PINMUX_DATA(IRQ25_MARK, PORT38_FN0), /* Port40 */ PINMUX_DATA(LCD0_D18_PORT40_MARK, PORT40_FN4, MSEL5CR_6_0), PINMUX_DATA(RSPI_CK_A_MARK, PORT40_FN6), PINMUX_DATA(LCD1_LCLK_MARK, PORT40_FN7), /* Port41 */ PINMUX_DATA(LCD0_D17_MARK, PORT41_FN1), PINMUX_DATA(MSIOF2_SS1_MARK, PORT41_FN2), PINMUX_DATA(IRQ31_PORT41_MARK, PORT41_FN0, MSEL1CR_31_1), /* Port42 */ PINMUX_DATA(LCD0_D16_MARK, PORT42_FN1), PINMUX_DATA(MSIOF2_MCK1_MARK, PORT42_FN2), PINMUX_DATA(IRQ12_PORT42_MARK, PORT42_FN0, MSEL1CR_12_1), /* Port43 */ PINMUX_DATA(LCD0_D15_MARK, PORT43_FN1), PINMUX_DATA(MSIOF2_MCK0_MARK, PORT43_FN2), PINMUX_DATA(KEYIN0_PORT43_MARK, PORT43_FN3, 
MSEL4CR_18_0), PINMUX_DATA(DV_D15_MARK, PORT43_FN6), /* Port44 */ PINMUX_DATA(LCD0_D14_MARK, PORT44_FN1), PINMUX_DATA(MSIOF2_RSYNC_MARK, PORT44_FN2), PINMUX_DATA(KEYIN1_PORT44_MARK, PORT44_FN3, MSEL4CR_18_0), PINMUX_DATA(DV_D14_MARK, PORT44_FN6), /* Port45 */ PINMUX_DATA(LCD0_D13_MARK, PORT45_FN1), PINMUX_DATA(MSIOF2_RSCK_MARK, PORT45_FN2), PINMUX_DATA(KEYIN2_PORT45_MARK, PORT45_FN3, MSEL4CR_18_0), PINMUX_DATA(DV_D13_MARK, PORT45_FN6), /* Port46 */ PINMUX_DATA(LCD0_D12_MARK, PORT46_FN1), PINMUX_DATA(KEYIN3_PORT46_MARK, PORT46_FN3, MSEL4CR_18_0), PINMUX_DATA(DV_D12_MARK, PORT46_FN6), /* Port47 */ PINMUX_DATA(LCD0_D11_MARK, PORT47_FN1), PINMUX_DATA(KEYIN4_MARK, PORT47_FN3), PINMUX_DATA(DV_D11_MARK, PORT47_FN6), /* Port48 */ PINMUX_DATA(LCD0_D10_MARK, PORT48_FN1), PINMUX_DATA(KEYIN5_MARK, PORT48_FN3), PINMUX_DATA(DV_D10_MARK, PORT48_FN6), /* Port49 */ PINMUX_DATA(LCD0_D9_MARK, PORT49_FN1), PINMUX_DATA(KEYIN6_MARK, PORT49_FN3), PINMUX_DATA(DV_D9_MARK, PORT49_FN6), PINMUX_DATA(IRQ30_PORT49_MARK, PORT49_FN0, MSEL1CR_30_1), /* Port50 */ PINMUX_DATA(LCD0_D8_MARK, PORT50_FN1), PINMUX_DATA(KEYIN7_MARK, PORT50_FN3), PINMUX_DATA(DV_D8_MARK, PORT50_FN6), PINMUX_DATA(IRQ29_PORT50_MARK, PORT50_FN0, MSEL1CR_29_1), /* Port51 */ PINMUX_DATA(LCD0_D7_MARK, PORT51_FN1), PINMUX_DATA(KEYOUT0_MARK, PORT51_FN3), PINMUX_DATA(DV_D7_MARK, PORT51_FN6), /* Port52 */ PINMUX_DATA(LCD0_D6_MARK, PORT52_FN1), PINMUX_DATA(KEYOUT1_MARK, PORT52_FN3), PINMUX_DATA(DV_D6_MARK, PORT52_FN6), /* Port53 */ PINMUX_DATA(LCD0_D5_MARK, PORT53_FN1), PINMUX_DATA(KEYOUT2_MARK, PORT53_FN3), PINMUX_DATA(DV_D5_MARK, PORT53_FN6), /* Port54 */ PINMUX_DATA(LCD0_D4_MARK, PORT54_FN1), PINMUX_DATA(KEYOUT3_MARK, PORT54_FN3), PINMUX_DATA(DV_D4_MARK, PORT54_FN6), /* Port55 */ PINMUX_DATA(LCD0_D3_MARK, PORT55_FN1), PINMUX_DATA(KEYOUT4_MARK, PORT55_FN3), PINMUX_DATA(KEYIN3_PORT55_MARK, PORT55_FN4, MSEL4CR_18_1), PINMUX_DATA(DV_D3_MARK, PORT55_FN6), /* Port56 */ PINMUX_DATA(LCD0_D2_MARK, PORT56_FN1), PINMUX_DATA(KEYOUT5_MARK, PORT56_FN3), PINMUX_DATA(KEYIN2_PORT56_MARK, PORT56_FN4, MSEL4CR_18_1), PINMUX_DATA(DV_D2_MARK, PORT56_FN6), PINMUX_DATA(IRQ28_PORT56_MARK, PORT56_FN0, MSEL1CR_28_1), /* Port57 */ PINMUX_DATA(LCD0_D1_MARK, PORT57_FN1), PINMUX_DATA(KEYOUT6_MARK, PORT57_FN3), PINMUX_DATA(KEYIN1_PORT57_MARK, PORT57_FN4, MSEL4CR_18_1), PINMUX_DATA(DV_D1_MARK, PORT57_FN6), PINMUX_DATA(IRQ27_PORT57_MARK, PORT57_FN0, MSEL1CR_27_1), /* Port58 */ PINMUX_DATA(LCD0_D0_MARK, PORT58_FN1), PINMUX_DATA(KEYOUT7_MARK, PORT58_FN3), PINMUX_DATA(KEYIN0_PORT58_MARK, PORT58_FN4, MSEL4CR_18_1), PINMUX_DATA(DV_D0_MARK, PORT58_FN6), PINMUX_DATA(IRQ26_PORT58_MARK, PORT58_FN0, MSEL1CR_26_1), /* Port59 */ PINMUX_DATA(LCD0_VCPWC_MARK, PORT59_FN1), PINMUX_DATA(BBIF2_TSCK2_PORT59_MARK, PORT59_FN2, MSEL5CR_0_0), PINMUX_DATA(RSPI_MOSI_A_MARK, PORT59_FN6), /* Port60 */ PINMUX_DATA(LCD0_VEPWC_MARK, PORT60_FN1), PINMUX_DATA(BBIF2_RXD2_PORT60_MARK, PORT60_FN2, MSEL5CR_0_0), PINMUX_DATA(RSPI_MISO_A_MARK, PORT60_FN6), /* Port61 */ PINMUX_DATA(LCD0_DON_MARK, PORT61_FN1), PINMUX_DATA(MSIOF2_TXD_MARK, PORT61_FN2), /* Port62 */ PINMUX_DATA(LCD0_DCK_MARK, PORT62_FN1), PINMUX_DATA(LCD0_WR_MARK, PORT62_FN4), PINMUX_DATA(DV_CLK_MARK, PORT62_FN6), PINMUX_DATA(IRQ15_PORT62_MARK, PORT62_FN0, MSEL1CR_15_1), /* Port63 */ PINMUX_DATA(LCD0_VSYN_MARK, PORT63_FN1), PINMUX_DATA(DV_VSYNC_MARK, PORT63_FN6), PINMUX_DATA(IRQ14_PORT63_MARK, PORT63_FN0, MSEL1CR_14_1), /* Port64 */ PINMUX_DATA(LCD0_HSYN_MARK, PORT64_FN1), PINMUX_DATA(LCD0_CS_MARK, PORT64_FN4), PINMUX_DATA(DV_HSYNC_MARK, PORT64_FN6), 
PINMUX_DATA(IRQ13_PORT64_MARK, PORT64_FN0, MSEL1CR_13_1), /* Port65 */ PINMUX_DATA(LCD0_DISP_MARK, PORT65_FN1), PINMUX_DATA(MSIOF2_TSCK_MARK, PORT65_FN2), PINMUX_DATA(LCD0_RS_MARK, PORT65_FN4), /* Port66 */ PINMUX_DATA(MEMC_INT_MARK, PORT66_FN1), PINMUX_DATA(TPU0TO2_PORT66_MARK, PORT66_FN3, MSEL5CR_25_0), PINMUX_DATA(MMC0_CLK_PORT66_MARK, PORT66_FN4, MSEL4CR_15_0), PINMUX_DATA(SDHI1_CLK_MARK, PORT66_FN6), /* Port67 - Port73 Function1 */ PINMUX_DATA(MEMC_CS0_MARK, PORT67_FN1), PINMUX_DATA(MEMC_AD8_MARK, PORT68_FN1), PINMUX_DATA(MEMC_AD9_MARK, PORT69_FN1), PINMUX_DATA(MEMC_AD10_MARK, PORT70_FN1), PINMUX_DATA(MEMC_AD11_MARK, PORT71_FN1), PINMUX_DATA(MEMC_AD12_MARK, PORT72_FN1), PINMUX_DATA(MEMC_AD13_MARK, PORT73_FN1), /* Port67 - Port73 Function2 */ PINMUX_DATA(MSIOF1_SS1_PORT67_MARK, PORT67_FN2, MSEL4CR_10_1), PINMUX_DATA(MSIOF1_RSCK_MARK, PORT68_FN2), PINMUX_DATA(MSIOF1_RSYNC_MARK, PORT69_FN2), PINMUX_DATA(MSIOF1_MCK0_MARK, PORT70_FN2), PINMUX_DATA(MSIOF1_MCK1_MARK, PORT71_FN2), PINMUX_DATA(MSIOF1_TSCK_PORT72_MARK, PORT72_FN2, MSEL4CR_10_1), PINMUX_DATA(MSIOF1_TSYNC_PORT73_MARK, PORT73_FN2, MSEL4CR_10_1), /* Port67 - Port73 Function4 */ PINMUX_DATA(MMC0_CMD_PORT67_MARK, PORT67_FN4, MSEL4CR_15_0), PINMUX_DATA(MMC0_D0_PORT68_MARK, PORT68_FN4, MSEL4CR_15_0), PINMUX_DATA(MMC0_D1_PORT69_MARK, PORT69_FN4, MSEL4CR_15_0), PINMUX_DATA(MMC0_D2_PORT70_MARK, PORT70_FN4, MSEL4CR_15_0), PINMUX_DATA(MMC0_D3_PORT71_MARK, PORT71_FN4, MSEL4CR_15_0), PINMUX_DATA(MMC0_D4_PORT72_MARK, PORT72_FN4, MSEL4CR_15_0), PINMUX_DATA(MMC0_D5_PORT73_MARK, PORT73_FN4, MSEL4CR_15_0), /* Port67 - Port73 Function6 */ PINMUX_DATA(SDHI1_CMD_MARK, PORT67_FN6), PINMUX_DATA(SDHI1_D0_MARK, PORT68_FN6), PINMUX_DATA(SDHI1_D1_MARK, PORT69_FN6), PINMUX_DATA(SDHI1_D2_MARK, PORT70_FN6), PINMUX_DATA(SDHI1_D3_MARK, PORT71_FN6), PINMUX_DATA(SDHI1_CD_MARK, PORT72_FN6), PINMUX_DATA(SDHI1_WP_MARK, PORT73_FN6), /* Port67 - Port71 IRQ */ PINMUX_DATA(IRQ20_MARK, PORT67_FN0), PINMUX_DATA(IRQ16_PORT68_MARK, PORT68_FN0, MSEL1CR_16_0), PINMUX_DATA(IRQ17_MARK, PORT69_FN0), PINMUX_DATA(IRQ18_MARK, PORT70_FN0), PINMUX_DATA(IRQ19_MARK, PORT71_FN0), /* Port74 */ PINMUX_DATA(MEMC_AD14_MARK, PORT74_FN1), PINMUX_DATA(MSIOF1_TXD_PORT74_MARK, PORT74_FN2, MSEL4CR_10_1), PINMUX_DATA(MMC0_D6_PORT74_MARK, PORT74_FN4, MSEL4CR_15_0), PINMUX_DATA(STP1_IPD7_MARK, PORT74_FN6), PINMUX_DATA(LCD1_D21_MARK, PORT74_FN7), /* Port75 */ PINMUX_DATA(MEMC_AD15_MARK, PORT75_FN1), PINMUX_DATA(MSIOF1_RXD_PORT75_MARK, PORT75_FN2, MSEL4CR_10_1), PINMUX_DATA(MMC0_D7_PORT75_MARK, PORT75_FN4, MSEL4CR_15_0), PINMUX_DATA(STP1_IPD6_MARK, PORT75_FN6), PINMUX_DATA(LCD1_D20_MARK, PORT75_FN7), /* Port76 - Port80 Function */ PINMUX_DATA(SDHI0_CMD_MARK, PORT76_FN1), PINMUX_DATA(SDHI0_D0_MARK, PORT77_FN1), PINMUX_DATA(SDHI0_D1_MARK, PORT78_FN1), PINMUX_DATA(SDHI0_D2_MARK, PORT79_FN1), PINMUX_DATA(SDHI0_D3_MARK, PORT80_FN1), /* Port81 */ PINMUX_DATA(SDHI0_CD_MARK, PORT81_FN1), PINMUX_DATA(IRQ26_PORT81_MARK, PORT81_FN0, MSEL1CR_26_0), /* Port82 - Port88 Function */ PINMUX_DATA(SDHI0_CLK_MARK, PORT82_FN1), PINMUX_DATA(SDHI0_WP_MARK, PORT83_FN1), PINMUX_DATA(RESETOUTS_MARK, PORT84_FN1), PINMUX_DATA(USB0_PPON_MARK, PORT85_FN1), PINMUX_DATA(USB0_OCI_MARK, PORT86_FN1), PINMUX_DATA(USB1_PPON_MARK, PORT87_FN1), PINMUX_DATA(USB1_OCI_MARK, PORT88_FN1), /* Port89 */ PINMUX_DATA(DREQ0_MARK, PORT89_FN1), PINMUX_DATA(BBIF2_TSCK2_PORT89_MARK, PORT89_FN2, MSEL5CR_0_1), PINMUX_DATA(RSPI_SSL3_A_MARK, PORT89_FN6), /* Port90 */ PINMUX_DATA(DACK0_MARK, PORT90_FN1), PINMUX_DATA(BBIF2_RXD2_PORT90_MARK, PORT90_FN2, 
MSEL5CR_0_1), PINMUX_DATA(RSPI_SSL2_A_MARK, PORT90_FN6), PINMUX_DATA(WAIT_PORT90_MARK, PORT90_FN7, MSEL5CR_2_1), /* Port91 */ PINMUX_DATA(MEMC_AD0_MARK, PORT91_FN1), PINMUX_DATA(BBIF1_RXD_MARK, PORT91_FN2), PINMUX_DATA(SCIFA5_TXD_PORT91_MARK, PORT91_FN3, MSEL5CR_15_1, MSEL5CR_14_0), PINMUX_DATA(LCD1_D5_MARK, PORT91_FN7), /* Port92 */ PINMUX_DATA(MEMC_AD1_MARK, PORT92_FN1), PINMUX_DATA(BBIF1_TSYNC_MARK, PORT92_FN2), PINMUX_DATA(SCIFA5_RXD_PORT92_MARK, PORT92_FN3, MSEL5CR_15_1, MSEL5CR_14_0), PINMUX_DATA(STP0_IPD1_MARK, PORT92_FN6), PINMUX_DATA(LCD1_D6_MARK, PORT92_FN7), /* Port93 */ PINMUX_DATA(MEMC_AD2_MARK, PORT93_FN1), PINMUX_DATA(BBIF1_TSCK_MARK, PORT93_FN2), PINMUX_DATA(SCIFA4_TXD_PORT93_MARK, PORT93_FN3, MSEL5CR_12_1, MSEL5CR_11_0), PINMUX_DATA(STP0_IPD3_MARK, PORT93_FN6), PINMUX_DATA(LCD1_D8_MARK, PORT93_FN7), /* Port94 */ PINMUX_DATA(MEMC_AD3_MARK, PORT94_FN1), PINMUX_DATA(BBIF1_TXD_MARK, PORT94_FN2), PINMUX_DATA(SCIFA4_RXD_PORT94_MARK, PORT94_FN3, MSEL5CR_12_1, MSEL5CR_11_0), PINMUX_DATA(STP0_IPD4_MARK, PORT94_FN6), PINMUX_DATA(LCD1_D9_MARK, PORT94_FN7), /* Port95 */ PINMUX_DATA(MEMC_CS1_MARK, PORT95_FN1, MSEL4CR_6_0), PINMUX_DATA(MEMC_A1_MARK, PORT95_FN1, MSEL4CR_6_1), PINMUX_DATA(SCIFA2_CTS_MARK, PORT95_FN2), PINMUX_DATA(SIM_RST_MARK, PORT95_FN4), PINMUX_DATA(VIO0_D14_PORT95_MARK, PORT95_FN7, MSEL5CR_27_1), PINMUX_DATA(IRQ22_MARK, PORT95_FN0), /* Port96 */ PINMUX_DATA(MEMC_ADV_MARK, PORT96_FN1, MSEL4CR_6_0), PINMUX_DATA(MEMC_DREQ0_MARK, PORT96_FN1, MSEL4CR_6_1), PINMUX_DATA(SCIFA2_RTS_MARK, PORT96_FN2), PINMUX_DATA(SIM_CLK_MARK, PORT96_FN4), PINMUX_DATA(VIO0_D15_PORT96_MARK, PORT96_FN7, MSEL5CR_27_1), PINMUX_DATA(IRQ23_MARK, PORT96_FN0), /* Port97 */ PINMUX_DATA(MEMC_AD4_MARK, PORT97_FN1), PINMUX_DATA(BBIF1_RSCK_MARK, PORT97_FN2), PINMUX_DATA(LCD1_CS_MARK, PORT97_FN6), PINMUX_DATA(LCD1_HSYN_MARK, PORT97_FN7), PINMUX_DATA(IRQ12_PORT97_MARK, PORT97_FN0, MSEL1CR_12_0), /* Port98 */ PINMUX_DATA(MEMC_AD5_MARK, PORT98_FN1), PINMUX_DATA(BBIF1_RSYNC_MARK, PORT98_FN2), PINMUX_DATA(LCD1_VSYN_MARK, PORT98_FN7), PINMUX_DATA(IRQ13_PORT98_MARK, PORT98_FN0, MSEL1CR_13_0), /* Port99 */ PINMUX_DATA(MEMC_AD6_MARK, PORT99_FN1), PINMUX_DATA(BBIF1_FLOW_MARK, PORT99_FN2), PINMUX_DATA(LCD1_WR_MARK, PORT99_FN6), PINMUX_DATA(LCD1_DCK_MARK, PORT99_FN7), PINMUX_DATA(IRQ14_PORT99_MARK, PORT99_FN0, MSEL1CR_14_0), /* Port100 */ PINMUX_DATA(MEMC_AD7_MARK, PORT100_FN1), PINMUX_DATA(BBIF1_RX_FLOW_N_MARK, PORT100_FN2), PINMUX_DATA(LCD1_DON_MARK, PORT100_FN7), PINMUX_DATA(IRQ15_PORT100_MARK, PORT100_FN0, MSEL1CR_15_0), /* Port101 */ PINMUX_DATA(FCE0_MARK, PORT101_FN1), /* Port102 */ PINMUX_DATA(FRB_MARK, PORT102_FN1), PINMUX_DATA(LCD0_LCLK_PORT102_MARK, PORT102_FN4, MSEL5CR_6_0), /* Port103 */ PINMUX_DATA(CS5B_MARK, PORT103_FN1), PINMUX_DATA(FCE1_MARK, PORT103_FN2), PINMUX_DATA(MMC1_CLK_PORT103_MARK, PORT103_FN3, MSEL4CR_15_1), /* Port104 */ PINMUX_DATA(CS6A_MARK, PORT104_FN1), PINMUX_DATA(MMC1_CMD_PORT104_MARK, PORT104_FN3, MSEL4CR_15_1), PINMUX_DATA(IRQ11_MARK, PORT104_FN0), /* Port105 */ PINMUX_DATA(CS5A_PORT105_MARK, PORT105_FN1, MSEL5CR_2_0), PINMUX_DATA(SCIFA3_RTS_PORT105_MARK, PORT105_FN4, MSEL5CR_8_0), /* Port106 */ PINMUX_DATA(IOIS16_MARK, PORT106_FN1), PINMUX_DATA(IDE_EXBUF_ENB_MARK, PORT106_FN6), /* Port107 - Port115 Function */ PINMUX_DATA(WE3_ICIOWR_MARK, PORT107_FN1), PINMUX_DATA(WE2_ICIORD_MARK, PORT108_FN1), PINMUX_DATA(CS0_MARK, PORT109_FN1), PINMUX_DATA(CS2_MARK, PORT110_FN1), PINMUX_DATA(CS4_MARK, PORT111_FN1), PINMUX_DATA(WE1_MARK, PORT112_FN1), PINMUX_DATA(WE0_FWE_MARK, PORT113_FN1), 
PINMUX_DATA(RDWR_MARK, PORT114_FN1), PINMUX_DATA(RD_FSC_MARK, PORT115_FN1), /* Port116 */ PINMUX_DATA(A25_MARK, PORT116_FN1), PINMUX_DATA(MSIOF0_SS2_MARK, PORT116_FN2), PINMUX_DATA(MSIOF1_SS2_PORT116_MARK, PORT116_FN3, MSEL4CR_10_0), PINMUX_DATA(SCIFA3_SCK_PORT116_MARK, PORT116_FN4, MSEL5CR_8_0), PINMUX_DATA(GPO1_MARK, PORT116_FN5), /* Port117 */ PINMUX_DATA(A24_MARK, PORT117_FN1), PINMUX_DATA(MSIOF0_SS1_MARK, PORT117_FN2), PINMUX_DATA(MSIOF1_SS1_PORT117_MARK, PORT117_FN3, MSEL4CR_10_0), PINMUX_DATA(SCIFA3_CTS_PORT117_MARK, PORT117_FN4, MSEL5CR_8_0), PINMUX_DATA(GPO0_MARK, PORT117_FN5), /* Port118 */ PINMUX_DATA(A23_MARK, PORT118_FN1), PINMUX_DATA(MSIOF0_MCK1_MARK, PORT118_FN2), PINMUX_DATA(MSIOF1_RXD_PORT118_MARK, PORT118_FN3, MSEL4CR_10_0), PINMUX_DATA(GPI1_MARK, PORT118_FN5), PINMUX_DATA(IRQ9_PORT118_MARK, PORT118_FN0, MSEL1CR_9_0), /* Port119 */ PINMUX_DATA(A22_MARK, PORT119_FN1), PINMUX_DATA(MSIOF0_MCK0_MARK, PORT119_FN2), PINMUX_DATA(MSIOF1_TXD_PORT119_MARK, PORT119_FN3, MSEL4CR_10_0), PINMUX_DATA(GPI0_MARK, PORT119_FN5), PINMUX_DATA(IRQ8_MARK, PORT119_FN0), /* Port120 */ PINMUX_DATA(A21_MARK, PORT120_FN1), PINMUX_DATA(MSIOF0_RSYNC_MARK, PORT120_FN2), PINMUX_DATA(MSIOF1_TSYNC_PORT120_MARK, PORT120_FN3, MSEL4CR_10_0), PINMUX_DATA(IRQ7_PORT120_MARK, PORT120_FN0, MSEL1CR_7_1), /* Port121 */ PINMUX_DATA(A20_MARK, PORT121_FN1), PINMUX_DATA(MSIOF0_RSCK_MARK, PORT121_FN2), PINMUX_DATA(MSIOF1_TSCK_PORT121_MARK, PORT121_FN3, MSEL4CR_10_0), PINMUX_DATA(IRQ6_PORT121_MARK, PORT121_FN0, MSEL1CR_6_0), /* Port122 */ PINMUX_DATA(A19_MARK, PORT122_FN1), PINMUX_DATA(MSIOF0_RXD_MARK, PORT122_FN2), /* Port123 */ PINMUX_DATA(A18_MARK, PORT123_FN1), PINMUX_DATA(MSIOF0_TSCK_MARK, PORT123_FN2), /* Port124 */ PINMUX_DATA(A17_MARK, PORT124_FN1), PINMUX_DATA(MSIOF0_TSYNC_MARK, PORT124_FN2), /* Port125 - Port141 Function */ PINMUX_DATA(A16_MARK, PORT125_FN1), PINMUX_DATA(A15_MARK, PORT126_FN1), PINMUX_DATA(A14_MARK, PORT127_FN1), PINMUX_DATA(A13_MARK, PORT128_FN1), PINMUX_DATA(A12_MARK, PORT129_FN1), PINMUX_DATA(A11_MARK, PORT130_FN1), PINMUX_DATA(A10_MARK, PORT131_FN1), PINMUX_DATA(A9_MARK, PORT132_FN1), PINMUX_DATA(A8_MARK, PORT133_FN1), PINMUX_DATA(A7_MARK, PORT134_FN1), PINMUX_DATA(A6_MARK, PORT135_FN1), PINMUX_DATA(A5_FCDE_MARK, PORT136_FN1), PINMUX_DATA(A4_FOE_MARK, PORT137_FN1), PINMUX_DATA(A3_MARK, PORT138_FN1), PINMUX_DATA(A2_MARK, PORT139_FN1), PINMUX_DATA(A1_MARK, PORT140_FN1), PINMUX_DATA(CKO_MARK, PORT141_FN1), /* Port142 - Port157 Function1 */ PINMUX_DATA(D15_NAF15_MARK, PORT142_FN1), PINMUX_DATA(D14_NAF14_MARK, PORT143_FN1), PINMUX_DATA(D13_NAF13_MARK, PORT144_FN1), PINMUX_DATA(D12_NAF12_MARK, PORT145_FN1), PINMUX_DATA(D11_NAF11_MARK, PORT146_FN1), PINMUX_DATA(D10_NAF10_MARK, PORT147_FN1), PINMUX_DATA(D9_NAF9_MARK, PORT148_FN1), PINMUX_DATA(D8_NAF8_MARK, PORT149_FN1), PINMUX_DATA(D7_NAF7_MARK, PORT150_FN1), PINMUX_DATA(D6_NAF6_MARK, PORT151_FN1), PINMUX_DATA(D5_NAF5_MARK, PORT152_FN1), PINMUX_DATA(D4_NAF4_MARK, PORT153_FN1), PINMUX_DATA(D3_NAF3_MARK, PORT154_FN1), PINMUX_DATA(D2_NAF2_MARK, PORT155_FN1), PINMUX_DATA(D1_NAF1_MARK, PORT156_FN1), PINMUX_DATA(D0_NAF0_MARK, PORT157_FN1), /* Port142 - Port149 Function3 */ PINMUX_DATA(MMC1_D7_PORT142_MARK, PORT142_FN3, MSEL4CR_15_1), PINMUX_DATA(MMC1_D6_PORT143_MARK, PORT143_FN3, MSEL4CR_15_1), PINMUX_DATA(MMC1_D5_PORT144_MARK, PORT144_FN3, MSEL4CR_15_1), PINMUX_DATA(MMC1_D4_PORT145_MARK, PORT145_FN3, MSEL4CR_15_1), PINMUX_DATA(MMC1_D3_PORT146_MARK, PORT146_FN3, MSEL4CR_15_1), PINMUX_DATA(MMC1_D2_PORT147_MARK, PORT147_FN3, MSEL4CR_15_1), 
PINMUX_DATA(MMC1_D1_PORT148_MARK, PORT148_FN3, MSEL4CR_15_1), PINMUX_DATA(MMC1_D0_PORT149_MARK, PORT149_FN3, MSEL4CR_15_1), /* Port158 */ PINMUX_DATA(D31_MARK, PORT158_FN1), PINMUX_DATA(SCIFA3_SCK_PORT158_MARK, PORT158_FN2, MSEL5CR_8_1), PINMUX_DATA(RMII_REF125CK_MARK, PORT158_FN3), PINMUX_DATA(LCD0_D21_PORT158_MARK, PORT158_FN4, MSEL5CR_6_1), PINMUX_DATA(IRDA_FIRSEL_MARK, PORT158_FN5), PINMUX_DATA(IDE_D15_MARK, PORT158_FN6), /* Port159 */ PINMUX_DATA(D30_MARK, PORT159_FN1), PINMUX_DATA(SCIFA3_RXD_PORT159_MARK, PORT159_FN2, MSEL5CR_8_1), PINMUX_DATA(RMII_REF50CK_MARK, PORT159_FN3), PINMUX_DATA(LCD0_D23_PORT159_MARK, PORT159_FN4, MSEL5CR_6_1), PINMUX_DATA(IDE_D14_MARK, PORT159_FN6), /* Port160 */ PINMUX_DATA(D29_MARK, PORT160_FN1), PINMUX_DATA(SCIFA3_TXD_PORT160_MARK, PORT160_FN2, MSEL5CR_8_1), PINMUX_DATA(LCD0_D22_PORT160_MARK, PORT160_FN4, MSEL5CR_6_1), PINMUX_DATA(VIO1_HD_MARK, PORT160_FN5), PINMUX_DATA(IDE_D13_MARK, PORT160_FN6), /* Port161 */ PINMUX_DATA(D28_MARK, PORT161_FN1), PINMUX_DATA(SCIFA3_RTS_PORT161_MARK, PORT161_FN2, MSEL5CR_8_1), PINMUX_DATA(ET_RX_DV_MARK, PORT161_FN3), PINMUX_DATA(LCD0_D20_PORT161_MARK, PORT161_FN4, MSEL5CR_6_1), PINMUX_DATA(IRDA_IN_MARK, PORT161_FN5), PINMUX_DATA(IDE_D12_MARK, PORT161_FN6), /* Port162 */ PINMUX_DATA(D27_MARK, PORT162_FN1), PINMUX_DATA(SCIFA3_CTS_PORT162_MARK, PORT162_FN2, MSEL5CR_8_1), PINMUX_DATA(LCD0_D19_PORT162_MARK, PORT162_FN4, MSEL5CR_6_1), PINMUX_DATA(IRDA_OUT_MARK, PORT162_FN5), PINMUX_DATA(IDE_D11_MARK, PORT162_FN6), /* Port163 */ PINMUX_DATA(D26_MARK, PORT163_FN1), PINMUX_DATA(MSIOF2_SS2_MARK, PORT163_FN2), PINMUX_DATA(ET_COL_MARK, PORT163_FN3), PINMUX_DATA(LCD0_D18_PORT163_MARK, PORT163_FN4, MSEL5CR_6_1), PINMUX_DATA(IROUT_MARK, PORT163_FN5), PINMUX_DATA(IDE_D10_MARK, PORT163_FN6), /* Port164 */ PINMUX_DATA(D25_MARK, PORT164_FN1), PINMUX_DATA(MSIOF2_TSYNC_MARK, PORT164_FN2), PINMUX_DATA(ET_PHY_INT_MARK, PORT164_FN3), PINMUX_DATA(LCD0_RD_MARK, PORT164_FN4), PINMUX_DATA(IDE_D9_MARK, PORT164_FN6), /* Port165 */ PINMUX_DATA(D24_MARK, PORT165_FN1), PINMUX_DATA(MSIOF2_RXD_MARK, PORT165_FN2), PINMUX_DATA(LCD0_LCLK_PORT165_MARK, PORT165_FN4, MSEL5CR_6_1), PINMUX_DATA(IDE_D8_MARK, PORT165_FN6), /* Port166 - Port171 Function1 */ PINMUX_DATA(D21_MARK, PORT166_FN1), PINMUX_DATA(D20_MARK, PORT167_FN1), PINMUX_DATA(D19_MARK, PORT168_FN1), PINMUX_DATA(D18_MARK, PORT169_FN1), PINMUX_DATA(D17_MARK, PORT170_FN1), PINMUX_DATA(D16_MARK, PORT171_FN1), /* Port166 - Port171 Function3 */ PINMUX_DATA(ET_ETXD5_MARK, PORT166_FN3), PINMUX_DATA(ET_ETXD4_MARK, PORT167_FN3), PINMUX_DATA(ET_ETXD3_MARK, PORT168_FN3), PINMUX_DATA(ET_ETXD2_MARK, PORT169_FN3), PINMUX_DATA(ET_ETXD1_MARK, PORT170_FN3), PINMUX_DATA(ET_ETXD0_MARK, PORT171_FN3), /* Port166 - Port171 Function6 */ PINMUX_DATA(IDE_D5_MARK, PORT166_FN6), PINMUX_DATA(IDE_D4_MARK, PORT167_FN6), PINMUX_DATA(IDE_D3_MARK, PORT168_FN6), PINMUX_DATA(IDE_D2_MARK, PORT169_FN6), PINMUX_DATA(IDE_D1_MARK, PORT170_FN6), PINMUX_DATA(IDE_D0_MARK, PORT171_FN6), /* Port167 - Port171 IRQ */ PINMUX_DATA(IRQ31_PORT167_MARK, PORT167_FN0, MSEL1CR_31_0), PINMUX_DATA(IRQ27_PORT168_MARK, PORT168_FN0, MSEL1CR_27_0), PINMUX_DATA(IRQ28_PORT169_MARK, PORT169_FN0, MSEL1CR_28_0), PINMUX_DATA(IRQ29_PORT170_MARK, PORT170_FN0, MSEL1CR_29_0), PINMUX_DATA(IRQ30_PORT171_MARK, PORT171_FN0, MSEL1CR_30_0), /* Port172 */ PINMUX_DATA(D23_MARK, PORT172_FN1), PINMUX_DATA(SCIFB_RTS_PORT172_MARK, PORT172_FN2, MSEL5CR_17_1), PINMUX_DATA(ET_ETXD7_MARK, PORT172_FN3), PINMUX_DATA(IDE_D7_MARK, PORT172_FN6), PINMUX_DATA(IRQ4_PORT172_MARK, PORT172_FN0, 
MSEL1CR_4_1), /* Port173 */ PINMUX_DATA(D22_MARK, PORT173_FN1), PINMUX_DATA(SCIFB_CTS_PORT173_MARK, PORT173_FN2, MSEL5CR_17_1), PINMUX_DATA(ET_ETXD6_MARK, PORT173_FN3), PINMUX_DATA(IDE_D6_MARK, PORT173_FN6), PINMUX_DATA(IRQ6_PORT173_MARK, PORT173_FN0, MSEL1CR_6_1), /* Port174 */ PINMUX_DATA(A26_MARK, PORT174_FN1), PINMUX_DATA(MSIOF0_TXD_MARK, PORT174_FN2), PINMUX_DATA(ET_RX_CLK_MARK, PORT174_FN3), PINMUX_DATA(SCIFA3_RXD_PORT174_MARK, PORT174_FN4, MSEL5CR_8_0), /* Port175 */ PINMUX_DATA(A0_MARK, PORT175_FN1), PINMUX_DATA(BS_MARK, PORT175_FN2), PINMUX_DATA(ET_WOL_MARK, PORT175_FN3), PINMUX_DATA(SCIFA3_TXD_PORT175_MARK, PORT175_FN4, MSEL5CR_8_0), /* Port176 */ PINMUX_DATA(ET_GTX_CLK_MARK, PORT176_FN3), /* Port177 */ PINMUX_DATA(WAIT_PORT177_MARK, PORT177_FN1, MSEL5CR_2_0), PINMUX_DATA(ET_LINK_MARK, PORT177_FN3), PINMUX_DATA(IDE_IOWR_MARK, PORT177_FN6), PINMUX_DATA(SDHI2_WP_PORT177_MARK, PORT177_FN7, MSEL5CR_19_1), /* Port178 */ PINMUX_DATA(VIO0_D12_MARK, PORT178_FN1), PINMUX_DATA(VIO1_D4_MARK, PORT178_FN5), PINMUX_DATA(IDE_IORD_MARK, PORT178_FN6), /* Port179 */ PINMUX_DATA(VIO0_D11_MARK, PORT179_FN1), PINMUX_DATA(VIO1_D3_MARK, PORT179_FN5), PINMUX_DATA(IDE_IORDY_MARK, PORT179_FN6), /* Port180 */ PINMUX_DATA(VIO0_D10_MARK, PORT180_FN1), PINMUX_DATA(TPU0TO3_MARK, PORT180_FN4), PINMUX_DATA(VIO1_D2_MARK, PORT180_FN5), PINMUX_DATA(IDE_INT_MARK, PORT180_FN6), PINMUX_DATA(IRQ24_MARK, PORT180_FN0), /* Port181 */ PINMUX_DATA(VIO0_D9_MARK, PORT181_FN1), PINMUX_DATA(VIO1_D1_MARK, PORT181_FN5), PINMUX_DATA(IDE_RST_MARK, PORT181_FN6), /* Port182 */ PINMUX_DATA(VIO0_D8_MARK, PORT182_FN1), PINMUX_DATA(VIO1_D0_MARK, PORT182_FN5), PINMUX_DATA(IDE_DIRECTION_MARK, PORT182_FN6), /* Port183 */ PINMUX_DATA(DREQ1_MARK, PORT183_FN1), PINMUX_DATA(BBIF2_TXD2_PORT183_MARK, PORT183_FN2, MSEL5CR_0_1), PINMUX_DATA(ET_TX_EN_MARK, PORT183_FN3), /* Port184 */ PINMUX_DATA(DACK1_MARK, PORT184_FN1), PINMUX_DATA(BBIF2_TSYNC2_PORT184_MARK, PORT184_FN2, MSEL5CR_0_1), PINMUX_DATA(ET_TX_CLK_MARK, PORT184_FN3), /* Port185 - Port192 Function1 */ PINMUX_DATA(SCIFA1_SCK_MARK, PORT185_FN1), PINMUX_DATA(SCIFB_RTS_PORT186_MARK, PORT186_FN1, MSEL5CR_17_0), PINMUX_DATA(SCIFB_CTS_PORT187_MARK, PORT187_FN1, MSEL5CR_17_0), PINMUX_DATA(SCIFA0_SCK_MARK, PORT188_FN1), PINMUX_DATA(SCIFB_SCK_PORT190_MARK, PORT190_FN1, MSEL5CR_17_0), PINMUX_DATA(SCIFB_RXD_PORT191_MARK, PORT191_FN1, MSEL5CR_17_0), PINMUX_DATA(SCIFB_TXD_PORT192_MARK, PORT192_FN1, MSEL5CR_17_0), /* Port185 - Port192 Function3 */ PINMUX_DATA(ET_ERXD0_MARK, PORT185_FN3), PINMUX_DATA(ET_ERXD1_MARK, PORT186_FN3), PINMUX_DATA(ET_ERXD2_MARK, PORT187_FN3), PINMUX_DATA(ET_ERXD3_MARK, PORT188_FN3), PINMUX_DATA(ET_ERXD4_MARK, PORT189_FN3), PINMUX_DATA(ET_ERXD5_MARK, PORT190_FN3), PINMUX_DATA(ET_ERXD6_MARK, PORT191_FN3), PINMUX_DATA(ET_ERXD7_MARK, PORT192_FN3), /* Port185 - Port192 Function6 */ PINMUX_DATA(STP1_IPCLK_MARK, PORT185_FN6), PINMUX_DATA(STP1_IPD0_PORT186_MARK, PORT186_FN6, MSEL5CR_23_0), PINMUX_DATA(STP1_IPEN_PORT187_MARK, PORT187_FN6, MSEL5CR_23_0), PINMUX_DATA(STP1_IPSYNC_MARK, PORT188_FN6), PINMUX_DATA(STP0_IPCLK_MARK, PORT189_FN6), PINMUX_DATA(STP0_IPD0_MARK, PORT190_FN6), PINMUX_DATA(STP0_IPEN_MARK, PORT191_FN6), PINMUX_DATA(STP0_IPSYNC_MARK, PORT192_FN6), /* Port193 */ PINMUX_DATA(SCIFA0_CTS_MARK, PORT193_FN1), PINMUX_DATA(RMII_CRS_DV_MARK, PORT193_FN3), PINMUX_DATA(STP1_IPEN_PORT193_MARK, PORT193_FN6, MSEL5CR_23_1), /* ? 
*/ PINMUX_DATA(LCD1_D17_MARK, PORT193_FN7), /* Port194 */ PINMUX_DATA(SCIFA0_RTS_MARK, PORT194_FN1), PINMUX_DATA(RMII_RX_ER_MARK, PORT194_FN3), PINMUX_DATA(STP1_IPD0_PORT194_MARK, PORT194_FN6, MSEL5CR_23_1), /* ? */ PINMUX_DATA(LCD1_D16_MARK, PORT194_FN7), /* Port195 */ PINMUX_DATA(SCIFA1_RXD_MARK, PORT195_FN1), PINMUX_DATA(RMII_RXD0_MARK, PORT195_FN3), PINMUX_DATA(STP1_IPD3_MARK, PORT195_FN6), PINMUX_DATA(LCD1_D15_MARK, PORT195_FN7), /* Port196 */ PINMUX_DATA(SCIFA1_TXD_MARK, PORT196_FN1), PINMUX_DATA(RMII_RXD1_MARK, PORT196_FN3), PINMUX_DATA(STP1_IPD2_MARK, PORT196_FN6), PINMUX_DATA(LCD1_D14_MARK, PORT196_FN7), /* Port197 */ PINMUX_DATA(SCIFA0_RXD_MARK, PORT197_FN1), PINMUX_DATA(VIO1_CLK_MARK, PORT197_FN5), PINMUX_DATA(STP1_IPD5_MARK, PORT197_FN6), PINMUX_DATA(LCD1_D19_MARK, PORT197_FN7), /* Port198 */ PINMUX_DATA(SCIFA0_TXD_MARK, PORT198_FN1), PINMUX_DATA(VIO1_VD_MARK, PORT198_FN5), PINMUX_DATA(STP1_IPD4_MARK, PORT198_FN6), PINMUX_DATA(LCD1_D18_MARK, PORT198_FN7), /* Port199 */ PINMUX_DATA(MEMC_NWE_MARK, PORT199_FN1), PINMUX_DATA(SCIFA2_SCK_PORT199_MARK, PORT199_FN2, MSEL5CR_7_1), PINMUX_DATA(RMII_TX_EN_MARK, PORT199_FN3), PINMUX_DATA(SIM_D_PORT199_MARK, PORT199_FN4, MSEL5CR_21_1), PINMUX_DATA(STP1_IPD1_MARK, PORT199_FN6), PINMUX_DATA(LCD1_D13_MARK, PORT199_FN7), /* Port200 */ PINMUX_DATA(MEMC_NOE_MARK, PORT200_FN1), PINMUX_DATA(SCIFA2_RXD_MARK, PORT200_FN2), PINMUX_DATA(RMII_TXD0_MARK, PORT200_FN3), PINMUX_DATA(STP0_IPD7_MARK, PORT200_FN6), PINMUX_DATA(LCD1_D12_MARK, PORT200_FN7), /* Port201 */ PINMUX_DATA(MEMC_WAIT_MARK, PORT201_FN1, MSEL4CR_6_0), PINMUX_DATA(MEMC_DREQ1_MARK, PORT201_FN1, MSEL4CR_6_1), PINMUX_DATA(SCIFA2_TXD_MARK, PORT201_FN2), PINMUX_DATA(RMII_TXD1_MARK, PORT201_FN3), PINMUX_DATA(STP0_IPD6_MARK, PORT201_FN6), PINMUX_DATA(LCD1_D11_MARK, PORT201_FN7), /* Port202 */ PINMUX_DATA(MEMC_BUSCLK_MARK, PORT202_FN1, MSEL4CR_6_0), PINMUX_DATA(MEMC_A0_MARK, PORT202_FN1, MSEL4CR_6_1), PINMUX_DATA(MSIOF1_SS2_PORT202_MARK, PORT202_FN2, MSEL4CR_10_1), PINMUX_DATA(RMII_MDC_MARK, PORT202_FN3), PINMUX_DATA(TPU0TO2_PORT202_MARK, PORT202_FN4, MSEL5CR_25_1), PINMUX_DATA(IDE_CS0_MARK, PORT202_FN6), PINMUX_DATA(SDHI2_CD_PORT202_MARK, PORT202_FN7, MSEL5CR_19_1), PINMUX_DATA(IRQ21_MARK, PORT202_FN0), /* Port203 - Port208 Function1 */ PINMUX_DATA(SDHI2_CLK_MARK, PORT203_FN1), PINMUX_DATA(SDHI2_CMD_MARK, PORT204_FN1), PINMUX_DATA(SDHI2_D0_MARK, PORT205_FN1), PINMUX_DATA(SDHI2_D1_MARK, PORT206_FN1), PINMUX_DATA(SDHI2_D2_MARK, PORT207_FN1), PINMUX_DATA(SDHI2_D3_MARK, PORT208_FN1), /* Port203 - Port208 Function3 */ PINMUX_DATA(ET_TX_ER_MARK, PORT203_FN3), PINMUX_DATA(ET_RX_ER_MARK, PORT204_FN3), PINMUX_DATA(ET_CRS_MARK, PORT205_FN3), PINMUX_DATA(ET_MDC_MARK, PORT206_FN3), PINMUX_DATA(ET_MDIO_MARK, PORT207_FN3), PINMUX_DATA(RMII_MDIO_MARK, PORT208_FN3), /* Port203 - Port208 Function6 */ PINMUX_DATA(IDE_A2_MARK, PORT203_FN6), PINMUX_DATA(IDE_A1_MARK, PORT204_FN6), PINMUX_DATA(IDE_A0_MARK, PORT205_FN6), PINMUX_DATA(IDE_IODACK_MARK, PORT206_FN6), PINMUX_DATA(IDE_IODREQ_MARK, PORT207_FN6), PINMUX_DATA(IDE_CS1_MARK, PORT208_FN6), /* Port203 - Port208 Function7 */ PINMUX_DATA(SCIFA4_TXD_PORT203_MARK, PORT203_FN7, MSEL5CR_12_0, MSEL5CR_11_1), PINMUX_DATA(SCIFA4_RXD_PORT204_MARK, PORT204_FN7, MSEL5CR_12_0, MSEL5CR_11_1), PINMUX_DATA(SCIFA4_SCK_PORT205_MARK, PORT205_FN7, MSEL5CR_10_1), PINMUX_DATA(SCIFA5_SCK_PORT206_MARK, PORT206_FN7, MSEL5CR_13_1), PINMUX_DATA(SCIFA5_RXD_PORT207_MARK, PORT207_FN7, MSEL5CR_15_0, MSEL5CR_14_1), PINMUX_DATA(SCIFA5_TXD_PORT208_MARK, PORT208_FN7, MSEL5CR_15_0, MSEL5CR_14_1), /* 
Port209 */ PINMUX_DATA(VBUS_MARK, PORT209_FN1), PINMUX_DATA(IRQ7_PORT209_MARK, PORT209_FN0, MSEL1CR_7_0), /* Port210 */ PINMUX_DATA(IRQ9_PORT210_MARK, PORT210_FN0, MSEL1CR_9_1), PINMUX_DATA(HDMI_HPD_MARK, PORT210_FN1), /* Port211 */ PINMUX_DATA(IRQ16_PORT211_MARK, PORT211_FN0, MSEL1CR_16_1), PINMUX_DATA(HDMI_CEC_MARK, PORT211_FN1), /* LCDC select */ PINMUX_DATA(LCDC0_SELECT_MARK, MSEL3CR_6_0), PINMUX_DATA(LCDC1_SELECT_MARK, MSEL3CR_6_1), /* SDENC */ PINMUX_DATA(SDENC_CPG_MARK, MSEL4CR_19_0), PINMUX_DATA(SDENC_DV_CLKI_MARK, MSEL4CR_19_1), /* SYSC */ PINMUX_DATA(RESETP_PULLUP_MARK, MSEL4CR_4_0), PINMUX_DATA(RESETP_PLAIN_MARK, MSEL4CR_4_1), /* DEBUG */ PINMUX_DATA(EDEBGREQ_PULLDOWN_MARK, MSEL4CR_1_0), PINMUX_DATA(EDEBGREQ_PULLUP_MARK, MSEL4CR_1_1), PINMUX_DATA(TRACEAUD_FROM_VIO_MARK, MSEL5CR_30_0, MSEL5CR_29_0), PINMUX_DATA(TRACEAUD_FROM_LCDC0_MARK, MSEL5CR_30_0, MSEL5CR_29_1), PINMUX_DATA(TRACEAUD_FROM_MEMC_MARK, MSEL5CR_30_1, MSEL5CR_29_0), }; static struct sh_pfc_pin pinmux_pins[] = { GPIO_PORT_ALL(), }; /* - LCD0 ------------------------------------------------------------------- */ static const unsigned int lcd0_data8_pins[] = { /* D[0:7] */ 58, 57, 56, 55, 54, 53, 52, 51, }; static const unsigned int lcd0_data8_mux[] = { LCD0_D0_MARK, LCD0_D1_MARK, LCD0_D2_MARK, LCD0_D3_MARK, LCD0_D4_MARK, LCD0_D5_MARK, LCD0_D6_MARK, LCD0_D7_MARK, }; static const unsigned int lcd0_data9_pins[] = { /* D[0:8] */ 58, 57, 56, 55, 54, 53, 52, 51, 50, }; static const unsigned int lcd0_data9_mux[] = { LCD0_D0_MARK, LCD0_D1_MARK, LCD0_D2_MARK, LCD0_D3_MARK, LCD0_D4_MARK, LCD0_D5_MARK, LCD0_D6_MARK, LCD0_D7_MARK, LCD0_D8_MARK, }; static const unsigned int lcd0_data12_pins[] = { /* D[0:11] */ 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, }; static const unsigned int lcd0_data12_mux[] = { LCD0_D0_MARK, LCD0_D1_MARK, LCD0_D2_MARK, LCD0_D3_MARK, LCD0_D4_MARK, LCD0_D5_MARK, LCD0_D6_MARK, LCD0_D7_MARK, LCD0_D8_MARK, LCD0_D9_MARK, LCD0_D10_MARK, LCD0_D11_MARK, }; static const unsigned int lcd0_data16_pins[] = { /* D[0:15] */ 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, }; static const unsigned int lcd0_data16_mux[] = { LCD0_D0_MARK, LCD0_D1_MARK, LCD0_D2_MARK, LCD0_D3_MARK, LCD0_D4_MARK, LCD0_D5_MARK, LCD0_D6_MARK, LCD0_D7_MARK, LCD0_D8_MARK, LCD0_D9_MARK, LCD0_D10_MARK, LCD0_D11_MARK, LCD0_D12_MARK, LCD0_D13_MARK, LCD0_D14_MARK, LCD0_D15_MARK, }; static const unsigned int lcd0_data18_pins[] = { /* D[0:17] */ 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, }; static const unsigned int lcd0_data18_mux[] = { LCD0_D0_MARK, LCD0_D1_MARK, LCD0_D2_MARK, LCD0_D3_MARK, LCD0_D4_MARK, LCD0_D5_MARK, LCD0_D6_MARK, LCD0_D7_MARK, LCD0_D8_MARK, LCD0_D9_MARK, LCD0_D10_MARK, LCD0_D11_MARK, LCD0_D12_MARK, LCD0_D13_MARK, LCD0_D14_MARK, LCD0_D15_MARK, LCD0_D16_MARK, LCD0_D17_MARK, }; static const unsigned int lcd0_data24_0_pins[] = { /* D[0:23] */ 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 43, 42, 41, 40, 4, 3, 2, 0, 1, }; static const unsigned int lcd0_data24_0_mux[] = { LCD0_D0_MARK, LCD0_D1_MARK, LCD0_D2_MARK, LCD0_D3_MARK, LCD0_D4_MARK, LCD0_D5_MARK, LCD0_D6_MARK, LCD0_D7_MARK, LCD0_D8_MARK, LCD0_D9_MARK, LCD0_D10_MARK, LCD0_D11_MARK, LCD0_D12_MARK, LCD0_D13_MARK, LCD0_D14_MARK, LCD0_D15_MARK, LCD0_D16_MARK, LCD0_D17_MARK, LCD0_D18_PORT40_MARK, LCD0_D19_PORT4_MARK, LCD0_D20_PORT3_MARK, LCD0_D21_PORT2_MARK, LCD0_D22_PORT0_MARK, LCD0_D23_PORT1_MARK, }; static const unsigned int lcd0_data24_1_pins[] = { /* D[0:23] */ 58, 57, 56, 55, 54, 53, 52, 51, 50, 49, 48, 47, 46, 45, 44, 
43, 42, 41, 163, 162, 161, 158, 160, 159, };
static const unsigned int lcd0_data24_1_mux[] = { LCD0_D0_MARK, LCD0_D1_MARK, LCD0_D2_MARK, LCD0_D3_MARK, LCD0_D4_MARK, LCD0_D5_MARK, LCD0_D6_MARK, LCD0_D7_MARK, LCD0_D8_MARK, LCD0_D9_MARK, LCD0_D10_MARK, LCD0_D11_MARK, LCD0_D12_MARK, LCD0_D13_MARK, LCD0_D14_MARK, LCD0_D15_MARK, LCD0_D16_MARK, LCD0_D17_MARK, LCD0_D18_PORT163_MARK, LCD0_D19_PORT162_MARK, LCD0_D20_PORT161_MARK, LCD0_D21_PORT158_MARK, LCD0_D22_PORT160_MARK, LCD0_D23_PORT159_MARK, };
static const unsigned int lcd0_display_pins[] = { /* DON, VCPWC, VEPWC */ 61, 59, 60, };
static const unsigned int lcd0_display_mux[] = { LCD0_DON_MARK, LCD0_VCPWC_MARK, LCD0_VEPWC_MARK, };
static const unsigned int lcd0_lclk_0_pins[] = { /* LCLK */ 102, };
static const unsigned int lcd0_lclk_0_mux[] = { LCD0_LCLK_PORT102_MARK, };
static const unsigned int lcd0_lclk_1_pins[] = { /* LCLK */ 165, };
static const unsigned int lcd0_lclk_1_mux[] = { LCD0_LCLK_PORT165_MARK, };
static const unsigned int lcd0_sync_pins[] = { /* VSYN, HSYN, DCK, DISP */ 63, 64, 62, 65, };
static const unsigned int lcd0_sync_mux[] = { LCD0_VSYN_MARK, LCD0_HSYN_MARK, LCD0_DCK_MARK, LCD0_DISP_MARK, };
static const unsigned int lcd0_sys_pins[] = { /* CS, WR, RD, RS */ 64, 62, 164, 65, };
static const unsigned int lcd0_sys_mux[] = { LCD0_CS_MARK, LCD0_WR_MARK, LCD0_RD_MARK, LCD0_RS_MARK, };
/* - LCD1 ------------------------------------------------------------------- */
static const unsigned int lcd1_data8_pins[] = { /* D[0:7] */ 4, 3, 2, 1, 0, 91, 92, 23, };
static const unsigned int lcd1_data8_mux[] = { LCD1_D0_MARK, LCD1_D1_MARK, LCD1_D2_MARK, LCD1_D3_MARK, LCD1_D4_MARK, LCD1_D5_MARK, LCD1_D6_MARK, LCD1_D7_MARK, };
static const unsigned int lcd1_data9_pins[] = { /* D[0:8] */ 4, 3, 2, 1, 0, 91, 92, 23, 93, };
static const unsigned int lcd1_data9_mux[] = { LCD1_D0_MARK, LCD1_D1_MARK, LCD1_D2_MARK, LCD1_D3_MARK, LCD1_D4_MARK, LCD1_D5_MARK, LCD1_D6_MARK, LCD1_D7_MARK, LCD1_D8_MARK, };
static const unsigned int lcd1_data12_pins[] = { /* D[0:11] */ 4, 3, 2, 1, 0, 91, 92, 23, 93, 94, 21, 201, };
static const unsigned int lcd1_data12_mux[] = { LCD1_D0_MARK, LCD1_D1_MARK, LCD1_D2_MARK, LCD1_D3_MARK, LCD1_D4_MARK, LCD1_D5_MARK, LCD1_D6_MARK, LCD1_D7_MARK, LCD1_D8_MARK, LCD1_D9_MARK, LCD1_D10_MARK, LCD1_D11_MARK, };
static const unsigned int lcd1_data16_pins[] = { /* D[0:15] */ 4, 3, 2, 1, 0, 91, 92, 23, 93, 94, 21, 201, 200, 199, 196, 195, };
static const unsigned int lcd1_data16_mux[] = { LCD1_D0_MARK, LCD1_D1_MARK, LCD1_D2_MARK, LCD1_D3_MARK, LCD1_D4_MARK, LCD1_D5_MARK, LCD1_D6_MARK, LCD1_D7_MARK, LCD1_D8_MARK, LCD1_D9_MARK, LCD1_D10_MARK, LCD1_D11_MARK, LCD1_D12_MARK, LCD1_D13_MARK, LCD1_D14_MARK, LCD1_D15_MARK, };
static const unsigned int lcd1_data18_pins[] = { /* D[0:17] */ 4, 3, 2, 1, 0, 91, 92, 23, 93, 94, 21, 201, 200, 199, 196, 195, 194, 193, };
static const unsigned int lcd1_data18_mux[] = { LCD1_D0_MARK, LCD1_D1_MARK, LCD1_D2_MARK, LCD1_D3_MARK, LCD1_D4_MARK, LCD1_D5_MARK, LCD1_D6_MARK, LCD1_D7_MARK, LCD1_D8_MARK, LCD1_D9_MARK, LCD1_D10_MARK, LCD1_D11_MARK, LCD1_D12_MARK, LCD1_D13_MARK, LCD1_D14_MARK, LCD1_D15_MARK, LCD1_D16_MARK, LCD1_D17_MARK, };
static const unsigned int lcd1_data24_pins[] = { /* D[0:23] */ 4, 3, 2, 1, 0, 91, 92, 23, 93, 94, 21, 201, 200, 199, 196, 195, 194, 193, 198, 197, 75, 74, 15, 14, };
static const unsigned int lcd1_data24_mux[] = { LCD1_D0_MARK, LCD1_D1_MARK, LCD1_D2_MARK, LCD1_D3_MARK, LCD1_D4_MARK, LCD1_D5_MARK, LCD1_D6_MARK, LCD1_D7_MARK, LCD1_D8_MARK, LCD1_D9_MARK, LCD1_D10_MARK, LCD1_D11_MARK, LCD1_D12_MARK, LCD1_D13_MARK, LCD1_D14_MARK, LCD1_D15_MARK,
LCD1_D16_MARK, LCD1_D17_MARK, LCD1_D18_MARK, LCD1_D19_MARK, LCD1_D20_MARK, LCD1_D21_MARK, LCD1_D22_MARK, LCD1_D23_MARK, }; static const unsigned int lcd1_display_pins[] = { /* DON, VCPWC, VEPWC */ 100, 5, 6, }; static const unsigned int lcd1_display_mux[] = { LCD1_DON_MARK, LCD1_VCPWC_MARK, LCD1_VEPWC_MARK, }; static const unsigned int lcd1_lclk_pins[] = { /* LCLK */ 40, }; static const unsigned int lcd1_lclk_mux[] = { LCD1_LCLK_MARK, }; static const unsigned int lcd1_sync_pins[] = { /* VSYN, HSYN, DCK, DISP */ 98, 97, 99, 12, }; static const unsigned int lcd1_sync_mux[] = { LCD1_VSYN_MARK, LCD1_HSYN_MARK, LCD1_DCK_MARK, LCD1_DISP_MARK, }; static const unsigned int lcd1_sys_pins[] = { /* CS, WR, RD, RS */ 97, 99, 13, 12, }; static const unsigned int lcd1_sys_mux[] = { LCD1_CS_MARK, LCD1_WR_MARK, LCD1_RD_MARK, LCD1_RS_MARK, }; /* - MMCIF ------------------------------------------------------------------ */ static const unsigned int mmc0_data1_0_pins[] = { /* D[0] */ 68, }; static const unsigned int mmc0_data1_0_mux[] = { MMC0_D0_PORT68_MARK, }; static const unsigned int mmc0_data4_0_pins[] = { /* D[0:3] */ 68, 69, 70, 71, }; static const unsigned int mmc0_data4_0_mux[] = { MMC0_D0_PORT68_MARK, MMC0_D1_PORT69_MARK, MMC0_D2_PORT70_MARK, MMC0_D3_PORT71_MARK, }; static const unsigned int mmc0_data8_0_pins[] = { /* D[0:7] */ 68, 69, 70, 71, 72, 73, 74, 75, }; static const unsigned int mmc0_data8_0_mux[] = { MMC0_D0_PORT68_MARK, MMC0_D1_PORT69_MARK, MMC0_D2_PORT70_MARK, MMC0_D3_PORT71_MARK, MMC0_D4_PORT72_MARK, MMC0_D5_PORT73_MARK, MMC0_D6_PORT74_MARK, MMC0_D7_PORT75_MARK, }; static const unsigned int mmc0_ctrl_0_pins[] = { /* CMD, CLK */ 67, 66, }; static const unsigned int mmc0_ctrl_0_mux[] = { MMC0_CMD_PORT67_MARK, MMC0_CLK_PORT66_MARK, }; static const unsigned int mmc0_data1_1_pins[] = { /* D[0] */ 149, }; static const unsigned int mmc0_data1_1_mux[] = { MMC1_D0_PORT149_MARK, }; static const unsigned int mmc0_data4_1_pins[] = { /* D[0:3] */ 149, 148, 147, 146, }; static const unsigned int mmc0_data4_1_mux[] = { MMC1_D0_PORT149_MARK, MMC1_D1_PORT148_MARK, MMC1_D2_PORT147_MARK, MMC1_D3_PORT146_MARK, }; static const unsigned int mmc0_data8_1_pins[] = { /* D[0:7] */ 149, 148, 147, 146, 145, 144, 143, 142, }; static const unsigned int mmc0_data8_1_mux[] = { MMC1_D0_PORT149_MARK, MMC1_D1_PORT148_MARK, MMC1_D2_PORT147_MARK, MMC1_D3_PORT146_MARK, MMC1_D4_PORT145_MARK, MMC1_D5_PORT144_MARK, MMC1_D6_PORT143_MARK, MMC1_D7_PORT142_MARK, }; static const unsigned int mmc0_ctrl_1_pins[] = { /* CMD, CLK */ 104, 103, }; static const unsigned int mmc0_ctrl_1_mux[] = { MMC1_CMD_PORT104_MARK, MMC1_CLK_PORT103_MARK, }; /* - SDHI0 ------------------------------------------------------------------ */ static const unsigned int sdhi0_data1_pins[] = { /* D0 */ 77, }; static const unsigned int sdhi0_data1_mux[] = { SDHI0_D0_MARK, }; static const unsigned int sdhi0_data4_pins[] = { /* D[0:3] */ 77, 78, 79, 80, }; static const unsigned int sdhi0_data4_mux[] = { SDHI0_D0_MARK, SDHI0_D1_MARK, SDHI0_D2_MARK, SDHI0_D3_MARK, }; static const unsigned int sdhi0_ctrl_pins[] = { /* CMD, CLK */ 76, 82, }; static const unsigned int sdhi0_ctrl_mux[] = { SDHI0_CMD_MARK, SDHI0_CLK_MARK, }; static const unsigned int sdhi0_cd_pins[] = { /* CD */ 81, }; static const unsigned int sdhi0_cd_mux[] = { SDHI0_CD_MARK, }; static const unsigned int sdhi0_wp_pins[] = { /* WP */ 83, }; static const unsigned int sdhi0_wp_mux[] = { SDHI0_WP_MARK, }; /* - SDHI1 ------------------------------------------------------------------ */ static const 
unsigned int sdhi1_data1_pins[] = { /* D0 */ 68, }; static const unsigned int sdhi1_data1_mux[] = { SDHI1_D0_MARK, }; static const unsigned int sdhi1_data4_pins[] = { /* D[0:3] */ 68, 69, 70, 71, }; static const unsigned int sdhi1_data4_mux[] = { SDHI1_D0_MARK, SDHI1_D1_MARK, SDHI1_D2_MARK, SDHI1_D3_MARK, }; static const unsigned int sdhi1_ctrl_pins[] = { /* CMD, CLK */ 67, 66, }; static const unsigned int sdhi1_ctrl_mux[] = { SDHI1_CMD_MARK, SDHI1_CLK_MARK, }; static const unsigned int sdhi1_cd_pins[] = { /* CD */ 72, }; static const unsigned int sdhi1_cd_mux[] = { SDHI1_CD_MARK, }; static const unsigned int sdhi1_wp_pins[] = { /* WP */ 73, }; static const unsigned int sdhi1_wp_mux[] = { SDHI1_WP_MARK, }; /* - SDHI2 ------------------------------------------------------------------ */ static const unsigned int sdhi2_data1_pins[] = { /* D0 */ 205, }; static const unsigned int sdhi2_data1_mux[] = { SDHI2_D0_MARK, }; static const unsigned int sdhi2_data4_pins[] = { /* D[0:3] */ 205, 206, 207, 208, }; static const unsigned int sdhi2_data4_mux[] = { SDHI2_D0_MARK, SDHI2_D1_MARK, SDHI2_D2_MARK, SDHI2_D3_MARK, }; static const unsigned int sdhi2_ctrl_pins[] = { /* CMD, CLK */ 204, 203, }; static const unsigned int sdhi2_ctrl_mux[] = { SDHI2_CMD_MARK, SDHI2_CLK_MARK, }; static const unsigned int sdhi2_cd_0_pins[] = { /* CD */ 202, }; static const unsigned int sdhi2_cd_0_mux[] = { SDHI2_CD_PORT202_MARK, }; static const unsigned int sdhi2_wp_0_pins[] = { /* WP */ 177, }; static const unsigned int sdhi2_wp_0_mux[] = { SDHI2_WP_PORT177_MARK, }; static const unsigned int sdhi2_cd_1_pins[] = { /* CD */ 24, }; static const unsigned int sdhi2_cd_1_mux[] = { SDHI2_CD_PORT24_MARK, }; static const unsigned int sdhi2_wp_1_pins[] = { /* WP */ 25, }; static const unsigned int sdhi2_wp_1_mux[] = { SDHI2_WP_PORT25_MARK, }; static const struct sh_pfc_pin_group pinmux_groups[] = { SH_PFC_PIN_GROUP(lcd0_data8), SH_PFC_PIN_GROUP(lcd0_data9), SH_PFC_PIN_GROUP(lcd0_data12), SH_PFC_PIN_GROUP(lcd0_data16), SH_PFC_PIN_GROUP(lcd0_data18), SH_PFC_PIN_GROUP(lcd0_data24_0), SH_PFC_PIN_GROUP(lcd0_data24_1), SH_PFC_PIN_GROUP(lcd0_display), SH_PFC_PIN_GROUP(lcd0_lclk_0), SH_PFC_PIN_GROUP(lcd0_lclk_1), SH_PFC_PIN_GROUP(lcd0_sync), SH_PFC_PIN_GROUP(lcd0_sys), SH_PFC_PIN_GROUP(lcd1_data8), SH_PFC_PIN_GROUP(lcd1_data9), SH_PFC_PIN_GROUP(lcd1_data12), SH_PFC_PIN_GROUP(lcd1_data16), SH_PFC_PIN_GROUP(lcd1_data18), SH_PFC_PIN_GROUP(lcd1_data24), SH_PFC_PIN_GROUP(lcd1_display), SH_PFC_PIN_GROUP(lcd1_lclk), SH_PFC_PIN_GROUP(lcd1_sync), SH_PFC_PIN_GROUP(lcd1_sys), SH_PFC_PIN_GROUP(mmc0_data1_0), SH_PFC_PIN_GROUP(mmc0_data4_0), SH_PFC_PIN_GROUP(mmc0_data8_0), SH_PFC_PIN_GROUP(mmc0_ctrl_0), SH_PFC_PIN_GROUP(mmc0_data1_1), SH_PFC_PIN_GROUP(mmc0_data4_1), SH_PFC_PIN_GROUP(mmc0_data8_1), SH_PFC_PIN_GROUP(mmc0_ctrl_1), SH_PFC_PIN_GROUP(sdhi0_data1), SH_PFC_PIN_GROUP(sdhi0_data4), SH_PFC_PIN_GROUP(sdhi0_ctrl), SH_PFC_PIN_GROUP(sdhi0_cd), SH_PFC_PIN_GROUP(sdhi0_wp), SH_PFC_PIN_GROUP(sdhi1_data1), SH_PFC_PIN_GROUP(sdhi1_data4), SH_PFC_PIN_GROUP(sdhi1_ctrl), SH_PFC_PIN_GROUP(sdhi1_cd), SH_PFC_PIN_GROUP(sdhi1_wp), SH_PFC_PIN_GROUP(sdhi2_data1), SH_PFC_PIN_GROUP(sdhi2_data4), SH_PFC_PIN_GROUP(sdhi2_ctrl), SH_PFC_PIN_GROUP(sdhi2_cd_0), SH_PFC_PIN_GROUP(sdhi2_wp_0), SH_PFC_PIN_GROUP(sdhi2_cd_1), SH_PFC_PIN_GROUP(sdhi2_wp_1), }; static const char * const lcd0_groups[] = { "lcd0_data8", "lcd0_data9", "lcd0_data12", "lcd0_data16", "lcd0_data18", "lcd0_data24_0", "lcd0_data24_1", "lcd0_display", "lcd0_lclk_0", "lcd0_lclk_1", "lcd0_sync", "lcd0_sys", }; 
static const char * const lcd1_groups[] = { "lcd1_data8", "lcd1_data9", "lcd1_data12", "lcd1_data16", "lcd1_data18", "lcd1_data24", "lcd1_display", "lcd1_lclk", "lcd1_sync", "lcd1_sys", }; static const char * const mmc0_groups[] = { "mmc0_data1_0", "mmc0_data4_0", "mmc0_data8_0", "mmc0_ctrl_0", "mmc0_data1_1", "mmc0_data4_1", "mmc0_data8_1", "mmc0_ctrl_1", }; static const char * const sdhi0_groups[] = { "sdhi0_data1", "sdhi0_data4", "sdhi0_ctrl", "sdhi0_cd", "sdhi0_wp", }; static const char * const sdhi1_groups[] = { "sdhi1_data1", "sdhi1_data4", "sdhi1_ctrl", "sdhi1_cd", "sdhi1_wp", }; static const char * const sdhi2_groups[] = { "sdhi2_data1", "sdhi2_data4", "sdhi2_ctrl", "sdhi2_cd_0", "sdhi2_wp_0", "sdhi2_cd_1", "sdhi2_wp_1", }; static const struct sh_pfc_function pinmux_functions[] = { SH_PFC_FUNCTION(lcd0), SH_PFC_FUNCTION(lcd1), SH_PFC_FUNCTION(mmc0), SH_PFC_FUNCTION(sdhi0), SH_PFC_FUNCTION(sdhi1), SH_PFC_FUNCTION(sdhi2), }; #define PINMUX_FN_BASE ARRAY_SIZE(pinmux_pins) static const struct pinmux_func pinmux_func_gpios[] = { /* IRQ */ GPIO_FN(IRQ0_PORT2), GPIO_FN(IRQ0_PORT13), GPIO_FN(IRQ1), GPIO_FN(IRQ2_PORT11), GPIO_FN(IRQ2_PORT12), GPIO_FN(IRQ3_PORT10), GPIO_FN(IRQ3_PORT14), GPIO_FN(IRQ4_PORT15), GPIO_FN(IRQ4_PORT172), GPIO_FN(IRQ5_PORT0), GPIO_FN(IRQ5_PORT1), GPIO_FN(IRQ6_PORT121), GPIO_FN(IRQ6_PORT173), GPIO_FN(IRQ7_PORT120), GPIO_FN(IRQ7_PORT209), GPIO_FN(IRQ8), GPIO_FN(IRQ9_PORT118), GPIO_FN(IRQ9_PORT210), GPIO_FN(IRQ10), GPIO_FN(IRQ11), GPIO_FN(IRQ12_PORT42), GPIO_FN(IRQ12_PORT97), GPIO_FN(IRQ13_PORT64), GPIO_FN(IRQ13_PORT98), GPIO_FN(IRQ14_PORT63), GPIO_FN(IRQ14_PORT99), GPIO_FN(IRQ15_PORT62), GPIO_FN(IRQ15_PORT100), GPIO_FN(IRQ16_PORT68), GPIO_FN(IRQ16_PORT211), GPIO_FN(IRQ17), GPIO_FN(IRQ18), GPIO_FN(IRQ19), GPIO_FN(IRQ20), GPIO_FN(IRQ21), GPIO_FN(IRQ22), GPIO_FN(IRQ23), GPIO_FN(IRQ24), GPIO_FN(IRQ25), GPIO_FN(IRQ26_PORT58), GPIO_FN(IRQ26_PORT81), GPIO_FN(IRQ27_PORT57), GPIO_FN(IRQ27_PORT168), GPIO_FN(IRQ28_PORT56), GPIO_FN(IRQ28_PORT169), GPIO_FN(IRQ29_PORT50), GPIO_FN(IRQ29_PORT170), GPIO_FN(IRQ30_PORT49), GPIO_FN(IRQ30_PORT171), GPIO_FN(IRQ31_PORT41), GPIO_FN(IRQ31_PORT167), /* Function */ /* DBGT */ GPIO_FN(DBGMDT2), GPIO_FN(DBGMDT1), GPIO_FN(DBGMDT0), GPIO_FN(DBGMD10), GPIO_FN(DBGMD11), GPIO_FN(DBGMD20), GPIO_FN(DBGMD21), /* FSI-A */ GPIO_FN(FSIAISLD_PORT0), /* FSIAISLD Port 0/5 */ GPIO_FN(FSIAISLD_PORT5), GPIO_FN(FSIASPDIF_PORT9), /* FSIASPDIF Port 9/18 */ GPIO_FN(FSIASPDIF_PORT18), GPIO_FN(FSIAOSLD1), GPIO_FN(FSIAOSLD2), GPIO_FN(FSIAOLR), GPIO_FN(FSIAOBT), GPIO_FN(FSIAOSLD), GPIO_FN(FSIAOMC), GPIO_FN(FSIACK), GPIO_FN(FSIAILR), GPIO_FN(FSIAIBT), /* FSI-B */ GPIO_FN(FSIBCK), /* FMSI */ GPIO_FN(FMSISLD_PORT1), /* FMSISLD Port 1/6 */ GPIO_FN(FMSISLD_PORT6), GPIO_FN(FMSIILR), GPIO_FN(FMSIIBT), GPIO_FN(FMSIOLR), GPIO_FN(FMSIOBT), GPIO_FN(FMSICK), GPIO_FN(FMSOILR), GPIO_FN(FMSOIBT), GPIO_FN(FMSOOLR), GPIO_FN(FMSOOBT), GPIO_FN(FMSOSLD), GPIO_FN(FMSOCK), /* SCIFA0 */ GPIO_FN(SCIFA0_SCK), GPIO_FN(SCIFA0_CTS), GPIO_FN(SCIFA0_RTS), GPIO_FN(SCIFA0_RXD), GPIO_FN(SCIFA0_TXD), /* SCIFA1 */ GPIO_FN(SCIFA1_CTS), GPIO_FN(SCIFA1_SCK), GPIO_FN(SCIFA1_RXD), GPIO_FN(SCIFA1_TXD), GPIO_FN(SCIFA1_RTS), /* SCIFA2 */ GPIO_FN(SCIFA2_SCK_PORT22), /* SCIFA2_SCK Port 22/199 */ GPIO_FN(SCIFA2_SCK_PORT199), GPIO_FN(SCIFA2_RXD), GPIO_FN(SCIFA2_TXD), GPIO_FN(SCIFA2_CTS), GPIO_FN(SCIFA2_RTS), /* SCIFA3 */ GPIO_FN(SCIFA3_RTS_PORT105), /* MSEL5CR_8_0 */ GPIO_FN(SCIFA3_SCK_PORT116), GPIO_FN(SCIFA3_CTS_PORT117), GPIO_FN(SCIFA3_RXD_PORT174), GPIO_FN(SCIFA3_TXD_PORT175), GPIO_FN(SCIFA3_RTS_PORT161), /* 
MSEL5CR_8_1 */ GPIO_FN(SCIFA3_SCK_PORT158), GPIO_FN(SCIFA3_CTS_PORT162), GPIO_FN(SCIFA3_RXD_PORT159), GPIO_FN(SCIFA3_TXD_PORT160), /* SCIFA4 */ GPIO_FN(SCIFA4_RXD_PORT12), /* MSEL5CR[12:11] = 00 */ GPIO_FN(SCIFA4_TXD_PORT13), GPIO_FN(SCIFA4_RXD_PORT204), /* MSEL5CR[12:11] = 01 */ GPIO_FN(SCIFA4_TXD_PORT203), GPIO_FN(SCIFA4_RXD_PORT94), /* MSEL5CR[12:11] = 10 */ GPIO_FN(SCIFA4_TXD_PORT93), GPIO_FN(SCIFA4_SCK_PORT21), /* SCIFA4_SCK Port 21/205 */ GPIO_FN(SCIFA4_SCK_PORT205), /* SCIFA5 */ GPIO_FN(SCIFA5_TXD_PORT20), /* MSEL5CR[15:14] = 00 */ GPIO_FN(SCIFA5_RXD_PORT10), GPIO_FN(SCIFA5_RXD_PORT207), /* MSEL5CR[15:14] = 01 */ GPIO_FN(SCIFA5_TXD_PORT208), GPIO_FN(SCIFA5_TXD_PORT91), /* MSEL5CR[15:14] = 10 */ GPIO_FN(SCIFA5_RXD_PORT92), GPIO_FN(SCIFA5_SCK_PORT23), /* SCIFA5_SCK Port 23/206 */ GPIO_FN(SCIFA5_SCK_PORT206), /* SCIFA6 */ GPIO_FN(SCIFA6_SCK), GPIO_FN(SCIFA6_RXD), GPIO_FN(SCIFA6_TXD), /* SCIFA7 */ GPIO_FN(SCIFA7_TXD), GPIO_FN(SCIFA7_RXD), /* SCIFAB */ GPIO_FN(SCIFB_SCK_PORT190), /* MSEL5CR_17_0 */ GPIO_FN(SCIFB_RXD_PORT191), GPIO_FN(SCIFB_TXD_PORT192), GPIO_FN(SCIFB_RTS_PORT186), GPIO_FN(SCIFB_CTS_PORT187), GPIO_FN(SCIFB_SCK_PORT2), /* MSEL5CR_17_1 */ GPIO_FN(SCIFB_RXD_PORT3), GPIO_FN(SCIFB_TXD_PORT4), GPIO_FN(SCIFB_RTS_PORT172), GPIO_FN(SCIFB_CTS_PORT173), /* RSPI */ GPIO_FN(RSPI_SSL0_A), GPIO_FN(RSPI_SSL1_A), GPIO_FN(RSPI_SSL2_A), GPIO_FN(RSPI_SSL3_A), GPIO_FN(RSPI_CK_A), GPIO_FN(RSPI_MOSI_A), GPIO_FN(RSPI_MISO_A), /* VIO CKO */ GPIO_FN(VIO_CKO1), GPIO_FN(VIO_CKO2), GPIO_FN(VIO_CKO_1), GPIO_FN(VIO_CKO), /* VIO0 */ GPIO_FN(VIO0_D0), GPIO_FN(VIO0_D1), GPIO_FN(VIO0_D2), GPIO_FN(VIO0_D3), GPIO_FN(VIO0_D4), GPIO_FN(VIO0_D5), GPIO_FN(VIO0_D6), GPIO_FN(VIO0_D7), GPIO_FN(VIO0_D8), GPIO_FN(VIO0_D9), GPIO_FN(VIO0_D10), GPIO_FN(VIO0_D11), GPIO_FN(VIO0_D12), GPIO_FN(VIO0_VD), GPIO_FN(VIO0_HD), GPIO_FN(VIO0_CLK), GPIO_FN(VIO0_FIELD), GPIO_FN(VIO0_D13_PORT26), /* MSEL5CR_27_0 */ GPIO_FN(VIO0_D14_PORT25), GPIO_FN(VIO0_D15_PORT24), GPIO_FN(VIO0_D13_PORT22), /* MSEL5CR_27_1 */ GPIO_FN(VIO0_D14_PORT95), GPIO_FN(VIO0_D15_PORT96), /* VIO1 */ GPIO_FN(VIO1_D0), GPIO_FN(VIO1_D1), GPIO_FN(VIO1_D2), GPIO_FN(VIO1_D3), GPIO_FN(VIO1_D4), GPIO_FN(VIO1_D5), GPIO_FN(VIO1_D6), GPIO_FN(VIO1_D7), GPIO_FN(VIO1_VD), GPIO_FN(VIO1_HD), GPIO_FN(VIO1_CLK), GPIO_FN(VIO1_FIELD), /* TPU0 */ GPIO_FN(TPU0TO0), GPIO_FN(TPU0TO1), GPIO_FN(TPU0TO3), GPIO_FN(TPU0TO2_PORT66), /* TPU0TO2 Port 66/202 */ GPIO_FN(TPU0TO2_PORT202), /* SSP1 0 */ GPIO_FN(STP0_IPD0), GPIO_FN(STP0_IPD1), GPIO_FN(STP0_IPD2), GPIO_FN(STP0_IPD3), GPIO_FN(STP0_IPD4), GPIO_FN(STP0_IPD5), GPIO_FN(STP0_IPD6), GPIO_FN(STP0_IPD7), GPIO_FN(STP0_IPEN), GPIO_FN(STP0_IPCLK), GPIO_FN(STP0_IPSYNC), /* SSP1 1 */ GPIO_FN(STP1_IPD1), GPIO_FN(STP1_IPD2), GPIO_FN(STP1_IPD3), GPIO_FN(STP1_IPD4), GPIO_FN(STP1_IPD5), GPIO_FN(STP1_IPD6), GPIO_FN(STP1_IPD7), GPIO_FN(STP1_IPCLK), GPIO_FN(STP1_IPSYNC), GPIO_FN(STP1_IPD0_PORT186), /* MSEL5CR_23_0 */ GPIO_FN(STP1_IPEN_PORT187), GPIO_FN(STP1_IPD0_PORT194), /* MSEL5CR_23_1 */ GPIO_FN(STP1_IPEN_PORT193), /* SIM */ GPIO_FN(SIM_RST), GPIO_FN(SIM_CLK), GPIO_FN(SIM_D_PORT22), /* SIM_D Port 22/199 */ GPIO_FN(SIM_D_PORT199), /* MSIOF2 */ GPIO_FN(MSIOF2_TXD), GPIO_FN(MSIOF2_RXD), GPIO_FN(MSIOF2_TSCK), GPIO_FN(MSIOF2_SS2), GPIO_FN(MSIOF2_TSYNC), GPIO_FN(MSIOF2_SS1), GPIO_FN(MSIOF2_MCK1), GPIO_FN(MSIOF2_MCK0), GPIO_FN(MSIOF2_RSYNC), GPIO_FN(MSIOF2_RSCK), /* KEYSC */ GPIO_FN(KEYIN4), GPIO_FN(KEYIN5), GPIO_FN(KEYIN6), GPIO_FN(KEYIN7), GPIO_FN(KEYOUT0), GPIO_FN(KEYOUT1), GPIO_FN(KEYOUT2), GPIO_FN(KEYOUT3), GPIO_FN(KEYOUT4), GPIO_FN(KEYOUT5), 
GPIO_FN(KEYOUT6), GPIO_FN(KEYOUT7), GPIO_FN(KEYIN0_PORT43), /* MSEL4CR_18_0 */ GPIO_FN(KEYIN1_PORT44), GPIO_FN(KEYIN2_PORT45), GPIO_FN(KEYIN3_PORT46), GPIO_FN(KEYIN0_PORT58), /* MSEL4CR_18_1 */ GPIO_FN(KEYIN1_PORT57), GPIO_FN(KEYIN2_PORT56), GPIO_FN(KEYIN3_PORT55), /* VOU */ GPIO_FN(DV_D0), GPIO_FN(DV_D1), GPIO_FN(DV_D2), GPIO_FN(DV_D3), GPIO_FN(DV_D4), GPIO_FN(DV_D5), GPIO_FN(DV_D6), GPIO_FN(DV_D7), GPIO_FN(DV_D8), GPIO_FN(DV_D9), GPIO_FN(DV_D10), GPIO_FN(DV_D11), GPIO_FN(DV_D12), GPIO_FN(DV_D13), GPIO_FN(DV_D14), GPIO_FN(DV_D15), GPIO_FN(DV_CLK), GPIO_FN(DV_VSYNC), GPIO_FN(DV_HSYNC), /* MEMC */ GPIO_FN(MEMC_AD0), GPIO_FN(MEMC_AD1), GPIO_FN(MEMC_AD2), GPIO_FN(MEMC_AD3), GPIO_FN(MEMC_AD4), GPIO_FN(MEMC_AD5), GPIO_FN(MEMC_AD6), GPIO_FN(MEMC_AD7), GPIO_FN(MEMC_AD8), GPIO_FN(MEMC_AD9), GPIO_FN(MEMC_AD10), GPIO_FN(MEMC_AD11), GPIO_FN(MEMC_AD12), GPIO_FN(MEMC_AD13), GPIO_FN(MEMC_AD14), GPIO_FN(MEMC_AD15), GPIO_FN(MEMC_CS0), GPIO_FN(MEMC_INT), GPIO_FN(MEMC_NWE), GPIO_FN(MEMC_NOE), GPIO_FN(MEMC_CS1), GPIO_FN(MEMC_A1), GPIO_FN(MEMC_ADV), GPIO_FN(MEMC_DREQ0), GPIO_FN(MEMC_WAIT), GPIO_FN(MEMC_DREQ1), GPIO_FN(MEMC_BUSCLK), GPIO_FN(MEMC_A0), /* MSIOF0 */ GPIO_FN(MSIOF0_SS1), GPIO_FN(MSIOF0_SS2), GPIO_FN(MSIOF0_RXD), GPIO_FN(MSIOF0_TXD), GPIO_FN(MSIOF0_MCK0), GPIO_FN(MSIOF0_MCK1), GPIO_FN(MSIOF0_RSYNC), GPIO_FN(MSIOF0_RSCK), GPIO_FN(MSIOF0_TSCK), GPIO_FN(MSIOF0_TSYNC), /* MSIOF1 */ GPIO_FN(MSIOF1_RSCK), GPIO_FN(MSIOF1_RSYNC), GPIO_FN(MSIOF1_MCK0), GPIO_FN(MSIOF1_MCK1), GPIO_FN(MSIOF1_SS2_PORT116), GPIO_FN(MSIOF1_SS1_PORT117), GPIO_FN(MSIOF1_RXD_PORT118), GPIO_FN(MSIOF1_TXD_PORT119), GPIO_FN(MSIOF1_TSYNC_PORT120), GPIO_FN(MSIOF1_TSCK_PORT121), /* MSEL4CR_10_0 */ GPIO_FN(MSIOF1_SS1_PORT67), GPIO_FN(MSIOF1_TSCK_PORT72), GPIO_FN(MSIOF1_TSYNC_PORT73), GPIO_FN(MSIOF1_TXD_PORT74), GPIO_FN(MSIOF1_RXD_PORT75), GPIO_FN(MSIOF1_SS2_PORT202), /* MSEL4CR_10_1 */ /* GPIO */ GPIO_FN(GPO0), GPIO_FN(GPI0), GPIO_FN(GPO1), GPIO_FN(GPI1), /* USB0 */ GPIO_FN(USB0_OCI), GPIO_FN(USB0_PPON), GPIO_FN(VBUS), /* USB1 */ GPIO_FN(USB1_OCI), GPIO_FN(USB1_PPON), /* BBIF1 */ GPIO_FN(BBIF1_RXD), GPIO_FN(BBIF1_TXD), GPIO_FN(BBIF1_TSYNC), GPIO_FN(BBIF1_TSCK), GPIO_FN(BBIF1_RSCK), GPIO_FN(BBIF1_RSYNC), GPIO_FN(BBIF1_FLOW), GPIO_FN(BBIF1_RX_FLOW_N), /* BBIF2 */ GPIO_FN(BBIF2_TXD2_PORT5), /* MSEL5CR_0_0 */ GPIO_FN(BBIF2_RXD2_PORT60), GPIO_FN(BBIF2_TSYNC2_PORT6), GPIO_FN(BBIF2_TSCK2_PORT59), GPIO_FN(BBIF2_RXD2_PORT90), /* MSEL5CR_0_1 */ GPIO_FN(BBIF2_TXD2_PORT183), GPIO_FN(BBIF2_TSCK2_PORT89), GPIO_FN(BBIF2_TSYNC2_PORT184), /* BSC / FLCTL / PCMCIA */ GPIO_FN(CS0), GPIO_FN(CS2), GPIO_FN(CS4), GPIO_FN(CS5B), GPIO_FN(CS6A), GPIO_FN(CS5A_PORT105), /* CS5A PORT 19/105 */ GPIO_FN(CS5A_PORT19), GPIO_FN(IOIS16), /* ? 
*/ GPIO_FN(A0), GPIO_FN(A1), GPIO_FN(A2), GPIO_FN(A3), GPIO_FN(A4_FOE), GPIO_FN(A5_FCDE), /* share with FLCTL */ GPIO_FN(A6), GPIO_FN(A7), GPIO_FN(A8), GPIO_FN(A9), GPIO_FN(A10), GPIO_FN(A11), GPIO_FN(A12), GPIO_FN(A13), GPIO_FN(A14), GPIO_FN(A15), GPIO_FN(A16), GPIO_FN(A17), GPIO_FN(A18), GPIO_FN(A19), GPIO_FN(A20), GPIO_FN(A21), GPIO_FN(A22), GPIO_FN(A23), GPIO_FN(A24), GPIO_FN(A25), GPIO_FN(A26), GPIO_FN(D0_NAF0), GPIO_FN(D1_NAF1), /* share with FLCTL */ GPIO_FN(D2_NAF2), GPIO_FN(D3_NAF3), /* share with FLCTL */ GPIO_FN(D4_NAF4), GPIO_FN(D5_NAF5), /* share with FLCTL */ GPIO_FN(D6_NAF6), GPIO_FN(D7_NAF7), /* share with FLCTL */ GPIO_FN(D8_NAF8), GPIO_FN(D9_NAF9), /* share with FLCTL */ GPIO_FN(D10_NAF10), GPIO_FN(D11_NAF11), /* share with FLCTL */ GPIO_FN(D12_NAF12), GPIO_FN(D13_NAF13), /* share with FLCTL */ GPIO_FN(D14_NAF14), GPIO_FN(D15_NAF15), /* share with FLCTL */ GPIO_FN(D16), GPIO_FN(D17), GPIO_FN(D18), GPIO_FN(D19), GPIO_FN(D20), GPIO_FN(D21), GPIO_FN(D22), GPIO_FN(D23), GPIO_FN(D24), GPIO_FN(D25), GPIO_FN(D26), GPIO_FN(D27), GPIO_FN(D28), GPIO_FN(D29), GPIO_FN(D30), GPIO_FN(D31), GPIO_FN(WE0_FWE), /* share with FLCTL */ GPIO_FN(WE1), GPIO_FN(WE2_ICIORD), /* share with PCMCIA */ GPIO_FN(WE3_ICIOWR), /* share with PCMCIA */ GPIO_FN(CKO), GPIO_FN(BS), GPIO_FN(RDWR), GPIO_FN(RD_FSC), /* share with FLCTL */ GPIO_FN(WAIT_PORT177), /* WAIT Port 90/177 */ GPIO_FN(WAIT_PORT90), GPIO_FN(FCE0), GPIO_FN(FCE1), GPIO_FN(FRB), /* FLCTL */ /* IRDA */ GPIO_FN(IRDA_FIRSEL), GPIO_FN(IRDA_IN), GPIO_FN(IRDA_OUT), /* ATAPI */ GPIO_FN(IDE_D0), GPIO_FN(IDE_D1), GPIO_FN(IDE_D2), GPIO_FN(IDE_D3), GPIO_FN(IDE_D4), GPIO_FN(IDE_D5), GPIO_FN(IDE_D6), GPIO_FN(IDE_D7), GPIO_FN(IDE_D8), GPIO_FN(IDE_D9), GPIO_FN(IDE_D10), GPIO_FN(IDE_D11), GPIO_FN(IDE_D12), GPIO_FN(IDE_D13), GPIO_FN(IDE_D14), GPIO_FN(IDE_D15), GPIO_FN(IDE_A0), GPIO_FN(IDE_A1), GPIO_FN(IDE_A2), GPIO_FN(IDE_CS0), GPIO_FN(IDE_CS1), GPIO_FN(IDE_IOWR), GPIO_FN(IDE_IORD), GPIO_FN(IDE_IORDY), GPIO_FN(IDE_INT), GPIO_FN(IDE_RST), GPIO_FN(IDE_DIRECTION), GPIO_FN(IDE_EXBUF_ENB), GPIO_FN(IDE_IODACK), GPIO_FN(IDE_IODREQ), /* RMII */ GPIO_FN(RMII_CRS_DV), GPIO_FN(RMII_RX_ER), GPIO_FN(RMII_RXD0), GPIO_FN(RMII_RXD1), GPIO_FN(RMII_TX_EN), GPIO_FN(RMII_TXD0), GPIO_FN(RMII_MDC), GPIO_FN(RMII_TXD1), GPIO_FN(RMII_MDIO), GPIO_FN(RMII_REF50CK), GPIO_FN(RMII_REF125CK), /* for GMII */ /* GEther */ GPIO_FN(ET_TX_CLK), GPIO_FN(ET_TX_EN), GPIO_FN(ET_ETXD0), GPIO_FN(ET_ETXD1), GPIO_FN(ET_ETXD2), GPIO_FN(ET_ETXD3), GPIO_FN(ET_ETXD4), GPIO_FN(ET_ETXD5), /* for GEther */ GPIO_FN(ET_ETXD6), GPIO_FN(ET_ETXD7), /* for GEther */ GPIO_FN(ET_COL), GPIO_FN(ET_TX_ER), GPIO_FN(ET_RX_CLK), GPIO_FN(ET_RX_DV), GPIO_FN(ET_ERXD0), GPIO_FN(ET_ERXD1), GPIO_FN(ET_ERXD2), GPIO_FN(ET_ERXD3), GPIO_FN(ET_ERXD4), GPIO_FN(ET_ERXD5), /* for GEther */ GPIO_FN(ET_ERXD6), GPIO_FN(ET_ERXD7), /* for GEther */ GPIO_FN(ET_RX_ER), GPIO_FN(ET_CRS), GPIO_FN(ET_MDC), GPIO_FN(ET_MDIO), GPIO_FN(ET_LINK), GPIO_FN(ET_PHY_INT), GPIO_FN(ET_WOL), GPIO_FN(ET_GTX_CLK), /* DMA0 */ GPIO_FN(DREQ0), GPIO_FN(DACK0), /* DMA1 */ GPIO_FN(DREQ1), GPIO_FN(DACK1), /* SYSC */ GPIO_FN(RESETOUTS), /* IRREM */ GPIO_FN(IROUT), /* LCDC */ GPIO_FN(LCDC0_SELECT), GPIO_FN(LCDC1_SELECT), /* SDENC */ GPIO_FN(SDENC_CPG), GPIO_FN(SDENC_DV_CLKI), /* HDMI */ GPIO_FN(HDMI_HPD), GPIO_FN(HDMI_CEC), /* SYSC */ GPIO_FN(RESETP_PULLUP), GPIO_FN(RESETP_PLAIN), /* DEBUG */ GPIO_FN(EDEBGREQ_PULLDOWN), GPIO_FN(EDEBGREQ_PULLUP), GPIO_FN(TRACEAUD_FROM_VIO), GPIO_FN(TRACEAUD_FROM_LCDC0), GPIO_FN(TRACEAUD_FROM_MEMC), }; static const struct pinmux_cfg_reg 
pinmux_config_regs[] = { PORTCR(0, 0xe6050000), /* PORT0CR */ PORTCR(1, 0xe6050001), /* PORT1CR */ PORTCR(2, 0xe6050002), /* PORT2CR */ PORTCR(3, 0xe6050003), /* PORT3CR */ PORTCR(4, 0xe6050004), /* PORT4CR */ PORTCR(5, 0xe6050005), /* PORT5CR */ PORTCR(6, 0xe6050006), /* PORT6CR */ PORTCR(7, 0xe6050007), /* PORT7CR */ PORTCR(8, 0xe6050008), /* PORT8CR */ PORTCR(9, 0xe6050009), /* PORT9CR */ PORTCR(10, 0xe605000a), /* PORT10CR */ PORTCR(11, 0xe605000b), /* PORT11CR */ PORTCR(12, 0xe605000c), /* PORT12CR */ PORTCR(13, 0xe605000d), /* PORT13CR */ PORTCR(14, 0xe605000e), /* PORT14CR */ PORTCR(15, 0xe605000f), /* PORT15CR */ PORTCR(16, 0xe6050010), /* PORT16CR */ PORTCR(17, 0xe6050011), /* PORT17CR */ PORTCR(18, 0xe6050012), /* PORT18CR */ PORTCR(19, 0xe6050013), /* PORT19CR */ PORTCR(20, 0xe6050014), /* PORT20CR */ PORTCR(21, 0xe6050015), /* PORT21CR */ PORTCR(22, 0xe6050016), /* PORT22CR */ PORTCR(23, 0xe6050017), /* PORT23CR */ PORTCR(24, 0xe6050018), /* PORT24CR */ PORTCR(25, 0xe6050019), /* PORT25CR */ PORTCR(26, 0xe605001a), /* PORT26CR */ PORTCR(27, 0xe605001b), /* PORT27CR */ PORTCR(28, 0xe605001c), /* PORT28CR */ PORTCR(29, 0xe605001d), /* PORT29CR */ PORTCR(30, 0xe605001e), /* PORT30CR */ PORTCR(31, 0xe605001f), /* PORT31CR */ PORTCR(32, 0xe6050020), /* PORT32CR */ PORTCR(33, 0xe6050021), /* PORT33CR */ PORTCR(34, 0xe6050022), /* PORT34CR */ PORTCR(35, 0xe6050023), /* PORT35CR */ PORTCR(36, 0xe6050024), /* PORT36CR */ PORTCR(37, 0xe6050025), /* PORT37CR */ PORTCR(38, 0xe6050026), /* PORT38CR */ PORTCR(39, 0xe6050027), /* PORT39CR */ PORTCR(40, 0xe6050028), /* PORT40CR */ PORTCR(41, 0xe6050029), /* PORT41CR */ PORTCR(42, 0xe605002a), /* PORT42CR */ PORTCR(43, 0xe605002b), /* PORT43CR */ PORTCR(44, 0xe605002c), /* PORT44CR */ PORTCR(45, 0xe605002d), /* PORT45CR */ PORTCR(46, 0xe605002e), /* PORT46CR */ PORTCR(47, 0xe605002f), /* PORT47CR */ PORTCR(48, 0xe6050030), /* PORT48CR */ PORTCR(49, 0xe6050031), /* PORT49CR */ PORTCR(50, 0xe6050032), /* PORT50CR */ PORTCR(51, 0xe6050033), /* PORT51CR */ PORTCR(52, 0xe6050034), /* PORT52CR */ PORTCR(53, 0xe6050035), /* PORT53CR */ PORTCR(54, 0xe6050036), /* PORT54CR */ PORTCR(55, 0xe6050037), /* PORT55CR */ PORTCR(56, 0xe6050038), /* PORT56CR */ PORTCR(57, 0xe6050039), /* PORT57CR */ PORTCR(58, 0xe605003a), /* PORT58CR */ PORTCR(59, 0xe605003b), /* PORT59CR */ PORTCR(60, 0xe605003c), /* PORT60CR */ PORTCR(61, 0xe605003d), /* PORT61CR */ PORTCR(62, 0xe605003e), /* PORT62CR */ PORTCR(63, 0xe605003f), /* PORT63CR */ PORTCR(64, 0xe6050040), /* PORT64CR */ PORTCR(65, 0xe6050041), /* PORT65CR */ PORTCR(66, 0xe6050042), /* PORT66CR */ PORTCR(67, 0xe6050043), /* PORT67CR */ PORTCR(68, 0xe6050044), /* PORT68CR */ PORTCR(69, 0xe6050045), /* PORT69CR */ PORTCR(70, 0xe6050046), /* PORT70CR */ PORTCR(71, 0xe6050047), /* PORT71CR */ PORTCR(72, 0xe6050048), /* PORT72CR */ PORTCR(73, 0xe6050049), /* PORT73CR */ PORTCR(74, 0xe605004a), /* PORT74CR */ PORTCR(75, 0xe605004b), /* PORT75CR */ PORTCR(76, 0xe605004c), /* PORT76CR */ PORTCR(77, 0xe605004d), /* PORT77CR */ PORTCR(78, 0xe605004e), /* PORT78CR */ PORTCR(79, 0xe605004f), /* PORT79CR */ PORTCR(80, 0xe6050050), /* PORT80CR */ PORTCR(81, 0xe6050051), /* PORT81CR */ PORTCR(82, 0xe6050052), /* PORT82CR */ PORTCR(83, 0xe6050053), /* PORT83CR */ PORTCR(84, 0xe6051054), /* PORT84CR */ PORTCR(85, 0xe6051055), /* PORT85CR */ PORTCR(86, 0xe6051056), /* PORT86CR */ PORTCR(87, 0xe6051057), /* PORT87CR */ PORTCR(88, 0xe6051058), /* PORT88CR */ PORTCR(89, 0xe6051059), /* PORT89CR */ PORTCR(90, 0xe605105a), /* PORT90CR */ 
PORTCR(91, 0xe605105b), /* PORT91CR */ PORTCR(92, 0xe605105c), /* PORT92CR */ PORTCR(93, 0xe605105d), /* PORT93CR */ PORTCR(94, 0xe605105e), /* PORT94CR */ PORTCR(95, 0xe605105f), /* PORT95CR */ PORTCR(96, 0xe6051060), /* PORT96CR */ PORTCR(97, 0xe6051061), /* PORT97CR */ PORTCR(98, 0xe6051062), /* PORT98CR */ PORTCR(99, 0xe6051063), /* PORT99CR */ PORTCR(100, 0xe6051064), /* PORT100CR */ PORTCR(101, 0xe6051065), /* PORT101CR */ PORTCR(102, 0xe6051066), /* PORT102CR */ PORTCR(103, 0xe6051067), /* PORT103CR */ PORTCR(104, 0xe6051068), /* PORT104CR */ PORTCR(105, 0xe6051069), /* PORT105CR */ PORTCR(106, 0xe605106a), /* PORT106CR */ PORTCR(107, 0xe605106b), /* PORT107CR */ PORTCR(108, 0xe605106c), /* PORT108CR */ PORTCR(109, 0xe605106d), /* PORT109CR */ PORTCR(110, 0xe605106e), /* PORT110CR */ PORTCR(111, 0xe605106f), /* PORT111CR */ PORTCR(112, 0xe6051070), /* PORT112CR */ PORTCR(113, 0xe6051071), /* PORT113CR */ PORTCR(114, 0xe6051072), /* PORT114CR */ PORTCR(115, 0xe6052073), /* PORT115CR */ PORTCR(116, 0xe6052074), /* PORT116CR */ PORTCR(117, 0xe6052075), /* PORT117CR */ PORTCR(118, 0xe6052076), /* PORT118CR */ PORTCR(119, 0xe6052077), /* PORT119CR */ PORTCR(120, 0xe6052078), /* PORT120CR */ PORTCR(121, 0xe6052079), /* PORT121CR */ PORTCR(122, 0xe605207a), /* PORT122CR */ PORTCR(123, 0xe605207b), /* PORT123CR */ PORTCR(124, 0xe605207c), /* PORT124CR */ PORTCR(125, 0xe605207d), /* PORT125CR */ PORTCR(126, 0xe605207e), /* PORT126CR */ PORTCR(127, 0xe605207f), /* PORT127CR */ PORTCR(128, 0xe6052080), /* PORT128CR */ PORTCR(129, 0xe6052081), /* PORT129CR */ PORTCR(130, 0xe6052082), /* PORT130CR */ PORTCR(131, 0xe6052083), /* PORT131CR */ PORTCR(132, 0xe6052084), /* PORT132CR */ PORTCR(133, 0xe6052085), /* PORT133CR */ PORTCR(134, 0xe6052086), /* PORT134CR */ PORTCR(135, 0xe6052087), /* PORT135CR */ PORTCR(136, 0xe6052088), /* PORT136CR */ PORTCR(137, 0xe6052089), /* PORT137CR */ PORTCR(138, 0xe605208a), /* PORT138CR */ PORTCR(139, 0xe605208b), /* PORT139CR */ PORTCR(140, 0xe605208c), /* PORT140CR */ PORTCR(141, 0xe605208d), /* PORT141CR */ PORTCR(142, 0xe605208e), /* PORT142CR */ PORTCR(143, 0xe605208f), /* PORT143CR */ PORTCR(144, 0xe6052090), /* PORT144CR */ PORTCR(145, 0xe6052091), /* PORT145CR */ PORTCR(146, 0xe6052092), /* PORT146CR */ PORTCR(147, 0xe6052093), /* PORT147CR */ PORTCR(148, 0xe6052094), /* PORT148CR */ PORTCR(149, 0xe6052095), /* PORT149CR */ PORTCR(150, 0xe6052096), /* PORT150CR */ PORTCR(151, 0xe6052097), /* PORT151CR */ PORTCR(152, 0xe6052098), /* PORT152CR */ PORTCR(153, 0xe6052099), /* PORT153CR */ PORTCR(154, 0xe605209a), /* PORT154CR */ PORTCR(155, 0xe605209b), /* PORT155CR */ PORTCR(156, 0xe605209c), /* PORT156CR */ PORTCR(157, 0xe605209d), /* PORT157CR */ PORTCR(158, 0xe605209e), /* PORT158CR */ PORTCR(159, 0xe605209f), /* PORT159CR */ PORTCR(160, 0xe60520a0), /* PORT160CR */ PORTCR(161, 0xe60520a1), /* PORT161CR */ PORTCR(162, 0xe60520a2), /* PORT162CR */ PORTCR(163, 0xe60520a3), /* PORT163CR */ PORTCR(164, 0xe60520a4), /* PORT164CR */ PORTCR(165, 0xe60520a5), /* PORT165CR */ PORTCR(166, 0xe60520a6), /* PORT166CR */ PORTCR(167, 0xe60520a7), /* PORT167CR */ PORTCR(168, 0xe60520a8), /* PORT168CR */ PORTCR(169, 0xe60520a9), /* PORT169CR */ PORTCR(170, 0xe60520aa), /* PORT170CR */ PORTCR(171, 0xe60520ab), /* PORT171CR */ PORTCR(172, 0xe60520ac), /* PORT172CR */ PORTCR(173, 0xe60520ad), /* PORT173CR */ PORTCR(174, 0xe60520ae), /* PORT174CR */ PORTCR(175, 0xe60520af), /* PORT175CR */ PORTCR(176, 0xe60520b0), /* PORT176CR */ PORTCR(177, 0xe60520b1), /* PORT177CR */ 
PORTCR(178, 0xe60520b2), /* PORT178CR */ PORTCR(179, 0xe60520b3), /* PORT179CR */ PORTCR(180, 0xe60520b4), /* PORT180CR */ PORTCR(181, 0xe60520b5), /* PORT181CR */ PORTCR(182, 0xe60520b6), /* PORT182CR */ PORTCR(183, 0xe60520b7), /* PORT183CR */ PORTCR(184, 0xe60520b8), /* PORT184CR */ PORTCR(185, 0xe60520b9), /* PORT185CR */ PORTCR(186, 0xe60520ba), /* PORT186CR */ PORTCR(187, 0xe60520bb), /* PORT187CR */ PORTCR(188, 0xe60520bc), /* PORT188CR */ PORTCR(189, 0xe60520bd), /* PORT189CR */ PORTCR(190, 0xe60520be), /* PORT190CR */ PORTCR(191, 0xe60520bf), /* PORT191CR */ PORTCR(192, 0xe60520c0), /* PORT192CR */ PORTCR(193, 0xe60520c1), /* PORT193CR */ PORTCR(194, 0xe60520c2), /* PORT194CR */ PORTCR(195, 0xe60520c3), /* PORT195CR */ PORTCR(196, 0xe60520c4), /* PORT196CR */ PORTCR(197, 0xe60520c5), /* PORT197CR */ PORTCR(198, 0xe60520c6), /* PORT198CR */ PORTCR(199, 0xe60520c7), /* PORT199CR */ PORTCR(200, 0xe60520c8), /* PORT200CR */ PORTCR(201, 0xe60520c9), /* PORT201CR */ PORTCR(202, 0xe60520ca), /* PORT202CR */ PORTCR(203, 0xe60520cb), /* PORT203CR */ PORTCR(204, 0xe60520cc), /* PORT204CR */ PORTCR(205, 0xe60520cd), /* PORT205CR */ PORTCR(206, 0xe60520ce), /* PORT206CR */ PORTCR(207, 0xe60520cf), /* PORT207CR */ PORTCR(208, 0xe60520d0), /* PORT208CR */ PORTCR(209, 0xe60520d1), /* PORT209CR */ PORTCR(210, 0xe60530d2), /* PORT210CR */ PORTCR(211, 0xe60530d3), /* PORT211CR */ { PINMUX_CFG_REG("MSEL1CR", 0xe605800c, 32, 1) { MSEL1CR_31_0, MSEL1CR_31_1, MSEL1CR_30_0, MSEL1CR_30_1, MSEL1CR_29_0, MSEL1CR_29_1, MSEL1CR_28_0, MSEL1CR_28_1, MSEL1CR_27_0, MSEL1CR_27_1, MSEL1CR_26_0, MSEL1CR_26_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MSEL1CR_16_0, MSEL1CR_16_1, MSEL1CR_15_0, MSEL1CR_15_1, MSEL1CR_14_0, MSEL1CR_14_1, MSEL1CR_13_0, MSEL1CR_13_1, MSEL1CR_12_0, MSEL1CR_12_1, 0, 0, 0, 0, MSEL1CR_9_0, MSEL1CR_9_1, 0, 0, MSEL1CR_7_0, MSEL1CR_7_1, MSEL1CR_6_0, MSEL1CR_6_1, MSEL1CR_5_0, MSEL1CR_5_1, MSEL1CR_4_0, MSEL1CR_4_1, MSEL1CR_3_0, MSEL1CR_3_1, MSEL1CR_2_0, MSEL1CR_2_1, 0, 0, MSEL1CR_0_0, MSEL1CR_0_1, } }, { PINMUX_CFG_REG("MSEL3CR", 0xE6058020, 32, 1) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MSEL3CR_15_0, MSEL3CR_15_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MSEL3CR_6_0, MSEL3CR_6_1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, } }, { PINMUX_CFG_REG("MSEL4CR", 0xE6058024, 32, 1) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, MSEL4CR_19_0, MSEL4CR_19_1, MSEL4CR_18_0, MSEL4CR_18_1, 0, 0, 0, 0, MSEL4CR_15_0, MSEL4CR_15_1, 0, 0, 0, 0, 0, 0, 0, 0, MSEL4CR_10_0, MSEL4CR_10_1, 0, 0, 0, 0, 0, 0, MSEL4CR_6_0, MSEL4CR_6_1, 0, 0, MSEL4CR_4_0, MSEL4CR_4_1, 0, 0, 0, 0, MSEL4CR_1_0, MSEL4CR_1_1, 0, 0, } }, { PINMUX_CFG_REG("MSEL5CR", 0xE6058028, 32, 1) { MSEL5CR_31_0, MSEL5CR_31_1, MSEL5CR_30_0, MSEL5CR_30_1, MSEL5CR_29_0, MSEL5CR_29_1, 0, 0, MSEL5CR_27_0, MSEL5CR_27_1, 0, 0, MSEL5CR_25_0, MSEL5CR_25_1, 0, 0, MSEL5CR_23_0, MSEL5CR_23_1, 0, 0, MSEL5CR_21_0, MSEL5CR_21_1, 0, 0, MSEL5CR_19_0, MSEL5CR_19_1, 0, 0, MSEL5CR_17_0, MSEL5CR_17_1, 0, 0, MSEL5CR_15_0, MSEL5CR_15_1, MSEL5CR_14_0, MSEL5CR_14_1, MSEL5CR_13_0, MSEL5CR_13_1, MSEL5CR_12_0, MSEL5CR_12_1, MSEL5CR_11_0, MSEL5CR_11_1, MSEL5CR_10_0, MSEL5CR_10_1, 0, 0, MSEL5CR_8_0, MSEL5CR_8_1, MSEL5CR_7_0, MSEL5CR_7_1, MSEL5CR_6_0, MSEL5CR_6_1, MSEL5CR_5_0, MSEL5CR_5_1, MSEL5CR_4_0, MSEL5CR_4_1, MSEL5CR_3_0, MSEL5CR_3_1, MSEL5CR_2_0, MSEL5CR_2_1, 0, 0, MSEL5CR_0_0, MSEL5CR_0_1, } }, { }, }; static const struct pinmux_data_reg pinmux_data_regs[] = { { 
PINMUX_DATA_REG("PORTL031_000DR", 0xe6054800, 32) { PORT31_DATA, PORT30_DATA, PORT29_DATA, PORT28_DATA, PORT27_DATA, PORT26_DATA, PORT25_DATA, PORT24_DATA, PORT23_DATA, PORT22_DATA, PORT21_DATA, PORT20_DATA, PORT19_DATA, PORT18_DATA, PORT17_DATA, PORT16_DATA, PORT15_DATA, PORT14_DATA, PORT13_DATA, PORT12_DATA, PORT11_DATA, PORT10_DATA, PORT9_DATA, PORT8_DATA, PORT7_DATA, PORT6_DATA, PORT5_DATA, PORT4_DATA, PORT3_DATA, PORT2_DATA, PORT1_DATA, PORT0_DATA } }, { PINMUX_DATA_REG("PORTL063_032DR", 0xe6054804, 32) { PORT63_DATA, PORT62_DATA, PORT61_DATA, PORT60_DATA, PORT59_DATA, PORT58_DATA, PORT57_DATA, PORT56_DATA, PORT55_DATA, PORT54_DATA, PORT53_DATA, PORT52_DATA, PORT51_DATA, PORT50_DATA, PORT49_DATA, PORT48_DATA, PORT47_DATA, PORT46_DATA, PORT45_DATA, PORT44_DATA, PORT43_DATA, PORT42_DATA, PORT41_DATA, PORT40_DATA, PORT39_DATA, PORT38_DATA, PORT37_DATA, PORT36_DATA, PORT35_DATA, PORT34_DATA, PORT33_DATA, PORT32_DATA } }, { PINMUX_DATA_REG("PORTL095_064DR", 0xe6054808, 32) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PORT83_DATA, PORT82_DATA, PORT81_DATA, PORT80_DATA, PORT79_DATA, PORT78_DATA, PORT77_DATA, PORT76_DATA, PORT75_DATA, PORT74_DATA, PORT73_DATA, PORT72_DATA, PORT71_DATA, PORT70_DATA, PORT69_DATA, PORT68_DATA, PORT67_DATA, PORT66_DATA, PORT65_DATA, PORT64_DATA } }, { PINMUX_DATA_REG("PORTD095_064DR", 0xe6055808, 32) { PORT95_DATA, PORT94_DATA, PORT93_DATA, PORT92_DATA, PORT91_DATA, PORT90_DATA, PORT89_DATA, PORT88_DATA, PORT87_DATA, PORT86_DATA, PORT85_DATA, PORT84_DATA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_DATA_REG("PORTD127_096DR", 0xe605580c, 32) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PORT114_DATA, PORT113_DATA, PORT112_DATA, PORT111_DATA, PORT110_DATA, PORT109_DATA, PORT108_DATA, PORT107_DATA, PORT106_DATA, PORT105_DATA, PORT104_DATA, PORT103_DATA, PORT102_DATA, PORT101_DATA, PORT100_DATA, PORT99_DATA, PORT98_DATA, PORT97_DATA, PORT96_DATA } }, { PINMUX_DATA_REG("PORTR127_096DR", 0xe605680C, 32) { PORT127_DATA, PORT126_DATA, PORT125_DATA, PORT124_DATA, PORT123_DATA, PORT122_DATA, PORT121_DATA, PORT120_DATA, PORT119_DATA, PORT118_DATA, PORT117_DATA, PORT116_DATA, PORT115_DATA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { PINMUX_DATA_REG("PORTR159_128DR", 0xe6056810, 32) { PORT159_DATA, PORT158_DATA, PORT157_DATA, PORT156_DATA, PORT155_DATA, PORT154_DATA, PORT153_DATA, PORT152_DATA, PORT151_DATA, PORT150_DATA, PORT149_DATA, PORT148_DATA, PORT147_DATA, PORT146_DATA, PORT145_DATA, PORT144_DATA, PORT143_DATA, PORT142_DATA, PORT141_DATA, PORT140_DATA, PORT139_DATA, PORT138_DATA, PORT137_DATA, PORT136_DATA, PORT135_DATA, PORT134_DATA, PORT133_DATA, PORT132_DATA, PORT131_DATA, PORT130_DATA, PORT129_DATA, PORT128_DATA } }, { PINMUX_DATA_REG("PORTR191_160DR", 0xe6056814, 32) { PORT191_DATA, PORT190_DATA, PORT189_DATA, PORT188_DATA, PORT187_DATA, PORT186_DATA, PORT185_DATA, PORT184_DATA, PORT183_DATA, PORT182_DATA, PORT181_DATA, PORT180_DATA, PORT179_DATA, PORT178_DATA, PORT177_DATA, PORT176_DATA, PORT175_DATA, PORT174_DATA, PORT173_DATA, PORT172_DATA, PORT171_DATA, PORT170_DATA, PORT169_DATA, PORT168_DATA, PORT167_DATA, PORT166_DATA, PORT165_DATA, PORT164_DATA, PORT163_DATA, PORT162_DATA, PORT161_DATA, PORT160_DATA } }, { PINMUX_DATA_REG("PORTR223_192DR", 0xe6056818, 32) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PORT209_DATA, PORT208_DATA, PORT207_DATA, PORT206_DATA, PORT205_DATA, PORT204_DATA, PORT203_DATA, PORT202_DATA, PORT201_DATA, PORT200_DATA, PORT199_DATA, PORT198_DATA, PORT197_DATA, PORT196_DATA, PORT195_DATA, PORT194_DATA, 
PORT193_DATA, PORT192_DATA } }, { PINMUX_DATA_REG("PORTU223_192DR", 0xe6057818, 32) { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, PORT211_DATA, PORT210_DATA, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } }, { }, }; static const struct pinmux_irq pinmux_irqs[] = { PINMUX_IRQ(irq_pin(0), GPIO_PORT2, GPIO_PORT13), /* IRQ0A */ PINMUX_IRQ(irq_pin(1), GPIO_PORT20), /* IRQ1A */ PINMUX_IRQ(irq_pin(2), GPIO_PORT11, GPIO_PORT12), /* IRQ2A */ PINMUX_IRQ(irq_pin(3), GPIO_PORT10, GPIO_PORT14), /* IRQ3A */ PINMUX_IRQ(irq_pin(4), GPIO_PORT15, GPIO_PORT172),/* IRQ4A */ PINMUX_IRQ(irq_pin(5), GPIO_PORT0, GPIO_PORT1), /* IRQ5A */ PINMUX_IRQ(irq_pin(6), GPIO_PORT121, GPIO_PORT173),/* IRQ6A */ PINMUX_IRQ(irq_pin(7), GPIO_PORT120, GPIO_PORT209),/* IRQ7A */ PINMUX_IRQ(irq_pin(8), GPIO_PORT119), /* IRQ8A */ PINMUX_IRQ(irq_pin(9), GPIO_PORT118, GPIO_PORT210),/* IRQ9A */ PINMUX_IRQ(irq_pin(10), GPIO_PORT19), /* IRQ10A */ PINMUX_IRQ(irq_pin(11), GPIO_PORT104), /* IRQ11A */ PINMUX_IRQ(irq_pin(12), GPIO_PORT42, GPIO_PORT97), /* IRQ12A */ PINMUX_IRQ(irq_pin(13), GPIO_PORT64, GPIO_PORT98), /* IRQ13A */ PINMUX_IRQ(irq_pin(14), GPIO_PORT63, GPIO_PORT99), /* IRQ14A */ PINMUX_IRQ(irq_pin(15), GPIO_PORT62, GPIO_PORT100),/* IRQ15A */ PINMUX_IRQ(irq_pin(16), GPIO_PORT68, GPIO_PORT211),/* IRQ16A */ PINMUX_IRQ(irq_pin(17), GPIO_PORT69), /* IRQ17A */ PINMUX_IRQ(irq_pin(18), GPIO_PORT70), /* IRQ18A */ PINMUX_IRQ(irq_pin(19), GPIO_PORT71), /* IRQ19A */ PINMUX_IRQ(irq_pin(20), GPIO_PORT67), /* IRQ20A */ PINMUX_IRQ(irq_pin(21), GPIO_PORT202), /* IRQ21A */ PINMUX_IRQ(irq_pin(22), GPIO_PORT95), /* IRQ22A */ PINMUX_IRQ(irq_pin(23), GPIO_PORT96), /* IRQ23A */ PINMUX_IRQ(irq_pin(24), GPIO_PORT180), /* IRQ24A */ PINMUX_IRQ(irq_pin(25), GPIO_PORT38), /* IRQ25A */ PINMUX_IRQ(irq_pin(26), GPIO_PORT58, GPIO_PORT81), /* IRQ26A */ PINMUX_IRQ(irq_pin(27), GPIO_PORT57, GPIO_PORT168),/* IRQ27A */ PINMUX_IRQ(irq_pin(28), GPIO_PORT56, GPIO_PORT169),/* IRQ28A */ PINMUX_IRQ(irq_pin(29), GPIO_PORT50, GPIO_PORT170),/* IRQ29A */ PINMUX_IRQ(irq_pin(30), GPIO_PORT49, GPIO_PORT171),/* IRQ30A */ PINMUX_IRQ(irq_pin(31), GPIO_PORT41, GPIO_PORT167),/* IRQ31A */ }; const struct sh_pfc_soc_info r8a7740_pinmux_info = { .name = "r8a7740_pfc", .input = { PINMUX_INPUT_BEGIN, PINMUX_INPUT_END }, .input_pu = { PINMUX_INPUT_PULLUP_BEGIN, PINMUX_INPUT_PULLUP_END }, .input_pd = { PINMUX_INPUT_PULLDOWN_BEGIN, PINMUX_INPUT_PULLDOWN_END }, .output = { PINMUX_OUTPUT_BEGIN, PINMUX_OUTPUT_END }, .function = { PINMUX_FUNCTION_BEGIN, PINMUX_FUNCTION_END }, .pins = pinmux_pins, .nr_pins = ARRAY_SIZE(pinmux_pins), .groups = pinmux_groups, .nr_groups = ARRAY_SIZE(pinmux_groups), .functions = pinmux_functions, .nr_functions = ARRAY_SIZE(pinmux_functions), .func_gpios = pinmux_func_gpios, .nr_func_gpios = ARRAY_SIZE(pinmux_func_gpios), .cfg_regs = pinmux_config_regs, .data_regs = pinmux_data_regs, .gpio_data = pinmux_data, .gpio_data_size = ARRAY_SIZE(pinmux_data), .gpio_irq = pinmux_irqs, .gpio_irq_size = ARRAY_SIZE(pinmux_irqs), };
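/*
 * Usage sketch (illustrative only, not part of the original file): a board
 * file selects between the alternate groups defined above - e.g.
 * "sdhi2_cd_0" (CD on PORT202) vs "sdhi2_cd_1" (CD on PORT24) - with a
 * pinctrl mapping table. The group and function names come from
 * pinmux_groups[] and pinmux_functions[] above; the "sh_mobile_sdhi.2"
 * device name and the "pfc-r8a7740" controller name are assumptions made
 * for the sake of the example.
 */
#include <linux/pinctrl/machine.h>

static const struct pinctrl_map example_sdhi2_map[] __initconst = {
	/* route the 4-bit data bus, CMD/CLK, and the PORT202 card detect */
	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.2", "pfc-r8a7740",
				  "sdhi2_data4", "sdhi2"),
	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.2", "pfc-r8a7740",
				  "sdhi2_ctrl", "sdhi2"),
	PIN_MAP_MUX_GROUP_DEFAULT("sh_mobile_sdhi.2", "pfc-r8a7740",
				  "sdhi2_cd_0", "sdhi2"),
};

static int __init example_sdhi2_pinmux_init(void)
{
	/* the pinctrl core copies the table, so __initconst data is fine */
	return pinctrl_register_mappings(example_sdhi2_map,
					 ARRAY_SIZE(example_sdhi2_map));
}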
gpl-2.0
The-Covenant/android_kernel_samsung_i927
drivers/gpu/drm/radeon/radeon_fb.c
2615
10499
/* * Copyright © 2007 David Airlie * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. * * Authors: * David Airlie */ #include <linux/module.h> #include <linux/slab.h> #include <linux/fb.h> #include "drmP.h" #include "drm.h" #include "drm_crtc.h" #include "drm_crtc_helper.h" #include "radeon_drm.h" #include "radeon.h" #include "drm_fb_helper.h" #include <linux/vga_switcheroo.h> /* object hierarchy - this contains a helper + a radeon fb the helper contains a pointer to radeon framebuffer baseclass. */ struct radeon_fbdev { struct drm_fb_helper helper; struct radeon_framebuffer rfb; struct list_head fbdev_list; struct radeon_device *rdev; }; static struct fb_ops radeonfb_ops = { .owner = THIS_MODULE, .fb_check_var = drm_fb_helper_check_var, .fb_set_par = drm_fb_helper_set_par, .fb_fillrect = cfb_fillrect, .fb_copyarea = cfb_copyarea, .fb_imageblit = cfb_imageblit, .fb_pan_display = drm_fb_helper_pan_display, .fb_blank = drm_fb_helper_blank, .fb_setcmap = drm_fb_helper_setcmap, .fb_debug_enter = drm_fb_helper_debug_enter, .fb_debug_leave = drm_fb_helper_debug_leave, }; int radeon_align_pitch(struct radeon_device *rdev, int width, int bpp, bool tiled) { int aligned = width; int align_large = (ASIC_IS_AVIVO(rdev)) || tiled; int pitch_mask = 0; switch (bpp / 8) { case 1: pitch_mask = align_large ? 255 : 127; break; case 2: pitch_mask = align_large ? 127 : 31; break; case 3: case 4: pitch_mask = align_large ? 
63 : 15; break; } aligned += pitch_mask; aligned &= ~pitch_mask; return aligned; } static void radeonfb_destroy_pinned_object(struct drm_gem_object *gobj) { struct radeon_bo *rbo = gem_to_radeon_bo(gobj); int ret; ret = radeon_bo_reserve(rbo, false); if (likely(ret == 0)) { radeon_bo_kunmap(rbo); radeon_bo_unpin(rbo); radeon_bo_unreserve(rbo); } drm_gem_object_unreference_unlocked(gobj); } static int radeonfb_create_pinned_object(struct radeon_fbdev *rfbdev, struct drm_mode_fb_cmd *mode_cmd, struct drm_gem_object **gobj_p) { struct radeon_device *rdev = rfbdev->rdev; struct drm_gem_object *gobj = NULL; struct radeon_bo *rbo = NULL; bool fb_tiled = false; /* useful for testing */ u32 tiling_flags = 0; int ret; int aligned_size, size; int height = mode_cmd->height; /* need to align pitch with crtc limits */ mode_cmd->pitch = radeon_align_pitch(rdev, mode_cmd->width, mode_cmd->bpp, fb_tiled) * ((mode_cmd->bpp + 1) / 8); if (rdev->family >= CHIP_R600) height = ALIGN(mode_cmd->height, 8); size = mode_cmd->pitch * height; aligned_size = ALIGN(size, PAGE_SIZE); ret = radeon_gem_object_create(rdev, aligned_size, 0, RADEON_GEM_DOMAIN_VRAM, false, true, &gobj); if (ret) { printk(KERN_ERR "failed to allocate framebuffer (%d bytes)\n", aligned_size); return -ENOMEM; } rbo = gem_to_radeon_bo(gobj); if (fb_tiled) tiling_flags = RADEON_TILING_MACRO; #ifdef __BIG_ENDIAN switch (mode_cmd->bpp) { case 32: tiling_flags |= RADEON_TILING_SWAP_32BIT; break; case 16: tiling_flags |= RADEON_TILING_SWAP_16BIT; break; default: break; } #endif if (tiling_flags) { ret = radeon_bo_set_tiling_flags(rbo, tiling_flags | RADEON_TILING_SURFACE, mode_cmd->pitch); if (ret) dev_err(rdev->dev, "FB failed to set tiling flags\n"); } ret = radeon_bo_reserve(rbo, false); if (unlikely(ret != 0)) goto out_unref; ret = radeon_bo_pin(rbo, RADEON_GEM_DOMAIN_VRAM, NULL); if (ret) { radeon_bo_unreserve(rbo); goto out_unref; } if (fb_tiled) radeon_bo_check_tiling(rbo, 0, 0); ret = radeon_bo_kmap(rbo, NULL); radeon_bo_unreserve(rbo); if (ret) { goto out_unref; } *gobj_p = gobj; return 0; out_unref: radeonfb_destroy_pinned_object(gobj); *gobj_p = NULL; return ret; } static int radeonfb_create(struct radeon_fbdev *rfbdev, struct drm_fb_helper_surface_size *sizes) { struct radeon_device *rdev = rfbdev->rdev; struct fb_info *info; struct drm_framebuffer *fb = NULL; struct drm_mode_fb_cmd mode_cmd; struct drm_gem_object *gobj = NULL; struct radeon_bo *rbo = NULL; struct device *device = &rdev->pdev->dev; int ret; unsigned long tmp; mode_cmd.width = sizes->surface_width; mode_cmd.height = sizes->surface_height; /* avivo can't scanout real 24bpp */ if ((sizes->surface_bpp == 24) && ASIC_IS_AVIVO(rdev)) sizes->surface_bpp = 32; mode_cmd.bpp = sizes->surface_bpp; mode_cmd.depth = sizes->surface_depth; ret = radeonfb_create_pinned_object(rfbdev, &mode_cmd, &gobj); if (ret) return ret; /* gobj is NULL on failure, so bail out before dereferencing it */ rbo = gem_to_radeon_bo(gobj); /* okay we have an object now allocate the framebuffer */ info = framebuffer_alloc(0, device); if (info == NULL) { ret = -ENOMEM; goto out_unref; } info->par = rfbdev; radeon_framebuffer_init(rdev->ddev, &rfbdev->rfb, &mode_cmd, gobj); fb = &rfbdev->rfb.base; /* setup helper */ rfbdev->helper.fb = fb; rfbdev->helper.fbdev = info; memset_io(rbo->kptr, 0x0, radeon_bo_size(rbo)); strcpy(info->fix.id, "radeondrmfb"); drm_fb_helper_fill_fix(info, fb->pitch, fb->depth); info->flags = FBINFO_DEFAULT | FBINFO_CAN_FORCE_OUTPUT; info->fbops = &radeonfb_ops; tmp = radeon_bo_gpu_offset(rbo) - rdev->mc.vram_start; info->fix.smem_start = rdev->mc.aper_base + tmp; info->fix.smem_len =
radeon_bo_size(rbo); info->screen_base = rbo->kptr; info->screen_size = radeon_bo_size(rbo); drm_fb_helper_fill_var(info, &rfbdev->helper, sizes->fb_width, sizes->fb_height); /* setup aperture base/size for vesafb takeover */ info->apertures = alloc_apertures(1); if (!info->apertures) { ret = -ENOMEM; goto out_unref; } info->apertures->ranges[0].base = rdev->ddev->mode_config.fb_base; info->apertures->ranges[0].size = rdev->mc.aper_size; info->pixmap.size = 64*1024; info->pixmap.buf_align = 8; info->pixmap.access_align = 32; info->pixmap.flags = FB_PIXMAP_SYSTEM; info->pixmap.scan_align = 1; if (info->screen_base == NULL) { ret = -ENOSPC; goto out_unref; } ret = fb_alloc_cmap(&info->cmap, 256, 0); if (ret) { ret = -ENOMEM; goto out_unref; } DRM_INFO("fb mappable at 0x%lX\n", info->fix.smem_start); DRM_INFO("vram aperture at 0x%lX\n", (unsigned long)rdev->mc.aper_base); DRM_INFO("size %lu\n", (unsigned long)radeon_bo_size(rbo)); DRM_INFO("fb depth is %d\n", fb->depth); DRM_INFO(" pitch is %d\n", fb->pitch); vga_switcheroo_client_fb_set(rdev->ddev->pdev, info); return 0; out_unref: if (fb && ret) { drm_gem_object_unreference(gobj); drm_framebuffer_cleanup(fb); kfree(fb); } return ret; } static int radeon_fb_find_or_create_single(struct drm_fb_helper *helper, struct drm_fb_helper_surface_size *sizes) { struct radeon_fbdev *rfbdev = (struct radeon_fbdev *)helper; int new_fb = 0; int ret; if (!helper->fb) { ret = radeonfb_create(rfbdev, sizes); if (ret) return ret; new_fb = 1; } return new_fb; } static char *mode_option; int radeon_parse_options(char *options) { char *this_opt; if (!options || !*options) return 0; while ((this_opt = strsep(&options, ",")) != NULL) { if (!*this_opt) continue; mode_option = this_opt; } return 0; } void radeon_fb_output_poll_changed(struct radeon_device *rdev) { drm_fb_helper_hotplug_event(&rdev->mode_info.rfbdev->helper); } static int radeon_fbdev_destroy(struct drm_device *dev, struct radeon_fbdev *rfbdev) { struct fb_info *info; struct radeon_framebuffer *rfb = &rfbdev->rfb; if (rfbdev->helper.fbdev) { info = rfbdev->helper.fbdev; unregister_framebuffer(info); if (info->cmap.len) fb_dealloc_cmap(&info->cmap); framebuffer_release(info); } if (rfb->obj) { radeonfb_destroy_pinned_object(rfb->obj); rfb->obj = NULL; } drm_fb_helper_fini(&rfbdev->helper); drm_framebuffer_cleanup(&rfb->base); return 0; } static struct drm_fb_helper_funcs radeon_fb_helper_funcs = { .gamma_set = radeon_crtc_fb_gamma_set, .gamma_get = radeon_crtc_fb_gamma_get, .fb_probe = radeon_fb_find_or_create_single, }; int radeon_fbdev_init(struct radeon_device *rdev) { struct radeon_fbdev *rfbdev; int bpp_sel = 32; int ret; /* select 8 bpp console on RN50 or cards with 32MB or less of VRAM */ if (ASIC_IS_RN50(rdev) || rdev->mc.real_vram_size <= (32*1024*1024)) bpp_sel = 8; rfbdev = kzalloc(sizeof(struct radeon_fbdev), GFP_KERNEL); if (!rfbdev) return -ENOMEM; rfbdev->rdev = rdev; rdev->mode_info.rfbdev = rfbdev; rfbdev->helper.funcs = &radeon_fb_helper_funcs; ret = drm_fb_helper_init(rdev->ddev, &rfbdev->helper, rdev->num_crtc, RADEONFB_CONN_LIMIT); if (ret) { kfree(rfbdev); return ret; } drm_fb_helper_single_add_all_connectors(&rfbdev->helper); drm_fb_helper_initial_config(&rfbdev->helper, bpp_sel); return 0; } void radeon_fbdev_fini(struct radeon_device *rdev) { if (!rdev->mode_info.rfbdev) return; radeon_fbdev_destroy(rdev->ddev, rdev->mode_info.rfbdev); kfree(rdev->mode_info.rfbdev); rdev->mode_info.rfbdev = NULL; } void radeon_fbdev_set_suspend(struct radeon_device *rdev, int state) {
fb_set_suspend(rdev->mode_info.rfbdev->helper.fbdev, state); } int radeon_fbdev_total_size(struct radeon_device *rdev) { struct radeon_bo *robj; int size = 0; robj = gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj); size += radeon_bo_size(robj); return size; } bool radeon_fbdev_robj_is_fb(struct radeon_device *rdev, struct radeon_bo *robj) { if (robj == gem_to_radeon_bo(rdev->mode_info.rfbdev->rfb.obj)) return true; return false; }
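/*
 * Worked example (illustrative only, not part of the original file): for a
 * 1366-pixel-wide, 32 bpp surface on an AVIVO part, radeon_align_pitch()
 * selects pitch_mask = 63, so the pitch in pixels becomes
 * (1366 + 63) & ~63 = 1408; radeonfb_create_pinned_object() then
 * multiplies by (32 + 1) / 8 = 4 bytes per pixel, giving a 5632-byte
 * scanline, and the backing buffer object is sized from that pitch
 * rounded up to a whole page.
 */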
gpl-2.0
zarboz/android_kernel_flounder
drivers/md/persistent-data/dm-bitset.c
2615
3946
/* * Copyright (C) 2012 Red Hat, Inc. * * This file is released under the GPL. */ #include "dm-bitset.h" #include "dm-transaction-manager.h" #include <linux/export.h> #include <linux/device-mapper.h> #define DM_MSG_PREFIX "bitset" #define BITS_PER_ARRAY_ENTRY 64 /*----------------------------------------------------------------*/ static struct dm_btree_value_type bitset_bvt = { .context = NULL, .size = sizeof(__le64), .inc = NULL, .dec = NULL, .equal = NULL, }; /*----------------------------------------------------------------*/ void dm_disk_bitset_init(struct dm_transaction_manager *tm, struct dm_disk_bitset *info) { dm_array_info_init(&info->array_info, tm, &bitset_bvt); info->current_index_set = false; } EXPORT_SYMBOL_GPL(dm_disk_bitset_init); int dm_bitset_empty(struct dm_disk_bitset *info, dm_block_t *root) { return dm_array_empty(&info->array_info, root); } EXPORT_SYMBOL_GPL(dm_bitset_empty); int dm_bitset_resize(struct dm_disk_bitset *info, dm_block_t root, uint32_t old_nr_entries, uint32_t new_nr_entries, bool default_value, dm_block_t *new_root) { uint32_t old_blocks = dm_div_up(old_nr_entries, BITS_PER_ARRAY_ENTRY); uint32_t new_blocks = dm_div_up(new_nr_entries, BITS_PER_ARRAY_ENTRY); __le64 value = default_value ? cpu_to_le64(~0) : cpu_to_le64(0); __dm_bless_for_disk(&value); return dm_array_resize(&info->array_info, root, old_blocks, new_blocks, &value, new_root); } EXPORT_SYMBOL_GPL(dm_bitset_resize); int dm_bitset_del(struct dm_disk_bitset *info, dm_block_t root) { return dm_array_del(&info->array_info, root); } EXPORT_SYMBOL_GPL(dm_bitset_del); int dm_bitset_flush(struct dm_disk_bitset *info, dm_block_t root, dm_block_t *new_root) { int r; __le64 value; if (!info->current_index_set) return 0; value = cpu_to_le64(info->current_bits); __dm_bless_for_disk(&value); r = dm_array_set_value(&info->array_info, root, info->current_index, &value, new_root); if (r) return r; info->current_index_set = false; return 0; } EXPORT_SYMBOL_GPL(dm_bitset_flush); static int read_bits(struct dm_disk_bitset *info, dm_block_t root, uint32_t array_index) { int r; __le64 value; r = dm_array_get_value(&info->array_info, root, array_index, &value); if (r) return r; info->current_bits = le64_to_cpu(value); info->current_index_set = true; info->current_index = array_index; return 0; } static int get_array_entry(struct dm_disk_bitset *info, dm_block_t root, uint32_t index, dm_block_t *new_root) { int r; unsigned array_index = index / BITS_PER_ARRAY_ENTRY; if (info->current_index_set) { if (info->current_index == array_index) return 0; r = dm_bitset_flush(info, root, new_root); if (r) return r; } return read_bits(info, root, array_index); } int dm_bitset_set_bit(struct dm_disk_bitset *info, dm_block_t root, uint32_t index, dm_block_t *new_root) { int r; unsigned b = index % BITS_PER_ARRAY_ENTRY; r = get_array_entry(info, root, index, new_root); if (r) return r; set_bit(b, (unsigned long *) &info->current_bits); return 0; } EXPORT_SYMBOL_GPL(dm_bitset_set_bit); int dm_bitset_clear_bit(struct dm_disk_bitset *info, dm_block_t root, uint32_t index, dm_block_t *new_root) { int r; unsigned b = index % BITS_PER_ARRAY_ENTRY; r = get_array_entry(info, root, index, new_root); if (r) return r; clear_bit(b, (unsigned long *) &info->current_bits); return 0; } EXPORT_SYMBOL_GPL(dm_bitset_clear_bit); int dm_bitset_test_bit(struct dm_disk_bitset *info, dm_block_t root, uint32_t index, dm_block_t *new_root, bool *result) { int r; unsigned b = index % BITS_PER_ARRAY_ENTRY; r = get_array_entry(info, root, index, new_root); 
if (r) return r; *result = test_bit(b, (unsigned long *) &info->current_bits); return 0; } EXPORT_SYMBOL_GPL(dm_bitset_test_bit); /*----------------------------------------------------------------*/
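/*
 * Usage sketch (illustrative only, not part of the original file): the bit
 * operations above only modify the 64-bit word cached in
 * info->current_bits; nothing reaches the on-disk array until
 * dm_bitset_flush() writes the cached word back. A caller can therefore
 * batch updates and flush once at the end:
 */
static int __maybe_unused example_mark_blocks(struct dm_disk_bitset *info,
					      dm_block_t *root,
					      uint32_t begin, uint32_t end)
{
	int r;
	uint32_t i;

	for (i = begin; i < end; i++) {
		/* may flush internally when crossing a 64-bit word boundary */
		r = dm_bitset_set_bit(info, *root, i, root);
		if (r)
			return r;
	}

	/* persist the last cached word through dm_array_set_value() */
	return dm_bitset_flush(info, *root, root);
}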
gpl-2.0
esgie/viennalte_p905_kernel_source
drivers/net/ethernet/sfc/falcon.c
3383
52043
/**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2005-2006 Fen Systems Ltd. * Copyright 2006-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ #include <linux/bitops.h> #include <linux/delay.h> #include <linux/pci.h> #include <linux/module.h> #include <linux/seq_file.h> #include <linux/i2c.h> #include <linux/mii.h> #include <linux/slab.h> #include "net_driver.h" #include "bitfield.h" #include "efx.h" #include "spi.h" #include "nic.h" #include "regs.h" #include "io.h" #include "phy.h" #include "workarounds.h" /* Hardware control for SFC4000 (aka Falcon). */ static const unsigned int /* "Large" EEPROM device: Atmel AT25640 or similar * 8 KB, 16-bit address, 32 B write block */ large_eeprom_type = ((13 << SPI_DEV_TYPE_SIZE_LBN) | (2 << SPI_DEV_TYPE_ADDR_LEN_LBN) | (5 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)), /* Default flash device: Atmel AT25F1024 * 128 KB, 24-bit address, 32 KB erase block, 256 B write block */ default_flash_type = ((17 << SPI_DEV_TYPE_SIZE_LBN) | (3 << SPI_DEV_TYPE_ADDR_LEN_LBN) | (0x52 << SPI_DEV_TYPE_ERASE_CMD_LBN) | (15 << SPI_DEV_TYPE_ERASE_SIZE_LBN) | (8 << SPI_DEV_TYPE_BLOCK_SIZE_LBN)); /************************************************************************** * * I2C bus - this is a bit-bashing interface using GPIO pins * Note that it uses the output enables to tristate the outputs * SDA is the data pin and SCL is the clock * ************************************************************************** */ static void falcon_setsda(void *data, int state) { struct efx_nic *efx = (struct efx_nic *)data; efx_oword_t reg; efx_reado(efx, &reg, FR_AB_GPIO_CTL); EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state); efx_writeo(efx, &reg, FR_AB_GPIO_CTL); } static void falcon_setscl(void *data, int state) { struct efx_nic *efx = (struct efx_nic *)data; efx_oword_t reg; efx_reado(efx, &reg, FR_AB_GPIO_CTL); EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state); efx_writeo(efx, &reg, FR_AB_GPIO_CTL); } static int falcon_getsda(void *data) { struct efx_nic *efx = (struct efx_nic *)data; efx_oword_t reg; efx_reado(efx, &reg, FR_AB_GPIO_CTL); return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN); } static int falcon_getscl(void *data) { struct efx_nic *efx = (struct efx_nic *)data; efx_oword_t reg; efx_reado(efx, &reg, FR_AB_GPIO_CTL); return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN); } static const struct i2c_algo_bit_data falcon_i2c_bit_operations = { .setsda = falcon_setsda, .setscl = falcon_setscl, .getsda = falcon_getsda, .getscl = falcon_getscl, .udelay = 5, /* Wait up to 50 ms for slave to let us pull SCL high */ .timeout = DIV_ROUND_UP(HZ, 20), }; static void falcon_push_irq_moderation(struct efx_channel *channel) { efx_dword_t timer_cmd; struct efx_nic *efx = channel->efx; /* Set timer register */ if (channel->irq_moderation) { EFX_POPULATE_DWORD_2(timer_cmd, FRF_AB_TC_TIMER_MODE, FFE_BB_TIMER_MODE_INT_HLDOFF, FRF_AB_TC_TIMER_VAL, channel->irq_moderation - 1); } else { EFX_POPULATE_DWORD_2(timer_cmd, FRF_AB_TC_TIMER_MODE, FFE_BB_TIMER_MODE_DIS, FRF_AB_TC_TIMER_VAL, 0); } BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0); efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0, channel->channel); } static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx); static 
void falcon_prepare_flush(struct efx_nic *efx) { falcon_deconfigure_mac_wrapper(efx); /* Wait for the tx and rx fifo's to get to the next packet boundary * (~1ms without back-pressure), then to drain the remainder of the * fifo's at data path speeds (negligible), with a healthy margin. */ msleep(10); } /* Acknowledge a legacy interrupt from Falcon * * This acknowledges a legacy (not MSI) interrupt via INT_ACK_KER_REG. * * Due to SFC bug 3706 (silicon revision <=A1) reads can be duplicated in the * BIU. Interrupt acknowledge is read sensitive so must write instead * (then read to ensure the BIU collector is flushed) * * NB most hardware supports MSI interrupts */ inline void falcon_irq_ack_a1(struct efx_nic *efx) { efx_dword_t reg; EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e); efx_writed(efx, &reg, FR_AA_INT_ACK_KER); efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS); } irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id) { struct efx_nic *efx = dev_id; efx_oword_t *int_ker = efx->irq_status.addr; int syserr; int queues; /* Check to see if this is our interrupt. If it isn't, we * exit without having touched the hardware. */ if (unlikely(EFX_OWORD_IS_ZERO(*int_ker))) { netif_vdbg(efx, intr, efx->net_dev, "IRQ %d on CPU %d not for me\n", irq, raw_smp_processor_id()); return IRQ_NONE; } efx->last_irq_cpu = raw_smp_processor_id(); netif_vdbg(efx, intr, efx->net_dev, "IRQ %d on CPU %d status " EFX_OWORD_FMT "\n", irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); /* Check to see if we have a serious error condition */ syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT); if (unlikely(syserr)) return efx_nic_fatal_interrupt(efx); /* Determine interrupting queues, clear interrupt status * register and acknowledge the device interrupt. */ BUILD_BUG_ON(FSF_AZ_NET_IVEC_INT_Q_WIDTH > EFX_MAX_CHANNELS); queues = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_INT_Q); EFX_ZERO_OWORD(*int_ker); wmb(); /* Ensure the vector is cleared before interrupt ack */ falcon_irq_ack_a1(efx); if (queues & 1) efx_schedule_channel_irq(efx_get_channel(efx, 0)); if (queues & 2) efx_schedule_channel_irq(efx_get_channel(efx, 1)); return IRQ_HANDLED; } /************************************************************************** * * EEPROM/flash * ************************************************************************** */ #define FALCON_SPI_MAX_LEN sizeof(efx_oword_t) static int falcon_spi_poll(struct efx_nic *efx) { efx_oword_t reg; efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD); return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0; } /* Wait for SPI command completion */ static int falcon_spi_wait(struct efx_nic *efx) { /* Most commands will finish quickly, so we start polling at * very short intervals. Sometimes the command may have to * wait for VPD or expansion ROM access outside of our * control, so we allow up to 100 ms. 
*/ unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 10); int i; for (i = 0; i < 10; i++) { if (!falcon_spi_poll(efx)) return 0; udelay(10); } for (;;) { if (!falcon_spi_poll(efx)) return 0; if (time_after_eq(jiffies, timeout)) { netif_err(efx, hw, efx->net_dev, "timed out waiting for SPI\n"); return -ETIMEDOUT; } schedule_timeout_uninterruptible(1); } } int falcon_spi_cmd(struct efx_nic *efx, const struct efx_spi_device *spi, unsigned int command, int address, const void *in, void *out, size_t len) { bool addressed = (address >= 0); bool reading = (out != NULL); efx_oword_t reg; int rc; /* Input validation */ if (len > FALCON_SPI_MAX_LEN) return -EINVAL; /* Check that previous command is not still running */ rc = falcon_spi_poll(efx); if (rc) return rc; /* Program address register, if we have an address */ if (addressed) { EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address); efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR); } /* Program data register, if we have data */ if (in != NULL) { memcpy(&reg, in, len); efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA); } /* Issue read/write command */ EFX_POPULATE_OWORD_7(reg, FRF_AB_EE_SPI_HCMD_CMD_EN, 1, FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id, FRF_AB_EE_SPI_HCMD_DABCNT, len, FRF_AB_EE_SPI_HCMD_READ, reading, FRF_AB_EE_SPI_HCMD_DUBCNT, 0, FRF_AB_EE_SPI_HCMD_ADBCNT, (addressed ? spi->addr_len : 0), FRF_AB_EE_SPI_HCMD_ENC, command); efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD); /* Wait for read/write to complete */ rc = falcon_spi_wait(efx); if (rc) return rc; /* Read data */ if (out != NULL) { efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA); memcpy(out, &reg, len); } return 0; } static size_t falcon_spi_write_limit(const struct efx_spi_device *spi, size_t start) { return min(FALCON_SPI_MAX_LEN, (spi->block_size - (start & (spi->block_size - 1)))); } static inline u8 efx_spi_munge_command(const struct efx_spi_device *spi, const u8 command, const unsigned int address) { return command | (((address >> 8) & spi->munge_address) << 3); } /* Wait up to 10 ms for buffered write completion */ int falcon_spi_wait_write(struct efx_nic *efx, const struct efx_spi_device *spi) { unsigned long timeout = jiffies + 1 + DIV_ROUND_UP(HZ, 100); u8 status; int rc; for (;;) { rc = falcon_spi_cmd(efx, spi, SPI_RDSR, -1, NULL, &status, sizeof(status)); if (rc) return rc; if (!(status & SPI_STATUS_NRDY)) return 0; if (time_after_eq(jiffies, timeout)) { netif_err(efx, hw, efx->net_dev, "SPI write timeout on device %d" " last status=0x%02x\n", spi->device_id, status); return -ETIMEDOUT; } schedule_timeout_uninterruptible(1); } } int falcon_spi_read(struct efx_nic *efx, const struct efx_spi_device *spi, loff_t start, size_t len, size_t *retlen, u8 *buffer) { size_t block_len, pos = 0; unsigned int command; int rc = 0; while (pos < len) { block_len = min(len - pos, FALCON_SPI_MAX_LEN); command = efx_spi_munge_command(spi, SPI_READ, start + pos); rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL, buffer + pos, block_len); if (rc) break; pos += block_len; /* Avoid locking up the system */ cond_resched(); if (signal_pending(current)) { rc = -EINTR; break; } } if (retlen) *retlen = pos; return rc; } int falcon_spi_write(struct efx_nic *efx, const struct efx_spi_device *spi, loff_t start, size_t len, size_t *retlen, const u8 *buffer) { u8 verify_buffer[FALCON_SPI_MAX_LEN]; size_t block_len, pos = 0; unsigned int command; int rc = 0; while (pos < len) { rc = falcon_spi_cmd(efx, spi, SPI_WREN, -1, NULL, NULL, 0); if (rc) break; block_len = min(len - pos, falcon_spi_write_limit(spi, start 
+ pos)); command = efx_spi_munge_command(spi, SPI_WRITE, start + pos); rc = falcon_spi_cmd(efx, spi, command, start + pos, buffer + pos, NULL, block_len); if (rc) break; rc = falcon_spi_wait_write(efx, spi); if (rc) break; command = efx_spi_munge_command(spi, SPI_READ, start + pos); rc = falcon_spi_cmd(efx, spi, command, start + pos, NULL, verify_buffer, block_len); if (memcmp(verify_buffer, buffer + pos, block_len)) { rc = -EIO; break; } pos += block_len; /* Avoid locking up the system */ cond_resched(); if (signal_pending(current)) { rc = -EINTR; break; } } if (retlen) *retlen = pos; return rc; } /************************************************************************** * * MAC wrapper * ************************************************************************** */ static void falcon_push_multicast_hash(struct efx_nic *efx) { union efx_multicast_hash *mc_hash = &efx->multicast_hash; WARN_ON(!mutex_is_locked(&efx->mac_lock)); efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0); efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1); } static void falcon_reset_macs(struct efx_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; efx_oword_t reg, mac_ctrl; int count; if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) { /* It's not safe to use GLB_CTL_REG to reset the * macs, so instead use the internal MAC resets */ EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1); efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG); for (count = 0; count < 10000; count++) { efx_reado(efx, &reg, FR_AB_XM_GLB_CFG); if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) == 0) return; udelay(10); } netif_err(efx, hw, efx->net_dev, "timed out waiting for XMAC core reset\n"); } /* MAC stats will fail whilst the TX FIFO is draining */ WARN_ON(nic_data->stats_disable_count == 0); efx_reado(efx, &mac_ctrl, FR_AB_MAC_CTRL); EFX_SET_OWORD_FIELD(mac_ctrl, FRF_BB_TXFIFO_DRAIN_EN, 1); efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL); efx_reado(efx, &reg, FR_AB_GLB_CTL); EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1); EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1); EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1); efx_writeo(efx, &reg, FR_AB_GLB_CTL); count = 0; while (1) { efx_reado(efx, &reg, FR_AB_GLB_CTL); if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) && !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) && !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) { netif_dbg(efx, hw, efx->net_dev, "Completed MAC reset after %d loops\n", count); break; } if (count > 20) { netif_err(efx, hw, efx->net_dev, "MAC reset failed\n"); break; } count++; udelay(10); } /* Ensure the correct MAC is selected before statistics * are re-enabled by the caller */ efx_writeo(efx, &mac_ctrl, FR_AB_MAC_CTRL); falcon_setup_xaui(efx); } void falcon_drain_tx_fifo(struct efx_nic *efx) { efx_oword_t reg; if ((efx_nic_rev(efx) < EFX_REV_FALCON_B0) || (efx->loopback_mode != LOOPBACK_NONE)) return; efx_reado(efx, &reg, FR_AB_MAC_CTRL); /* There is no point in draining more than once */ if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN)) return; falcon_reset_macs(efx); } static void falcon_deconfigure_mac_wrapper(struct efx_nic *efx) { efx_oword_t reg; if (efx_nic_rev(efx) < EFX_REV_FALCON_B0) return; /* Isolate the MAC -> RX */ efx_reado(efx, &reg, FR_AZ_RX_CFG); EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0); efx_writeo(efx, &reg, FR_AZ_RX_CFG); /* Isolate TX -> MAC */ falcon_drain_tx_fifo(efx); } void falcon_reconfigure_mac_wrapper(struct efx_nic *efx) { struct efx_link_state *link_state = &efx->link_state; efx_oword_t reg; int link_speed, isolate; isolate = !!ACCESS_ONCE(efx->reset_pending); switch
(link_state->speed) { case 10000: link_speed = 3; break; case 1000: link_speed = 2; break; case 100: link_speed = 1; break; default: link_speed = 0; break; } /* MAC_LINK_STATUS controls MAC backpressure but doesn't work * as advertised. Disable to ensure packets are not * indefinitely held and TX queue can be flushed at any point * while the link is down. */ EFX_POPULATE_OWORD_5(reg, FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */, FRF_AB_MAC_BCAD_ACPT, 1, FRF_AB_MAC_UC_PROM, efx->promiscuous, FRF_AB_MAC_LINK_STATUS, 1, /* always set */ FRF_AB_MAC_SPEED, link_speed); /* On B0, MAC backpressure can be disabled and packets get * discarded. */ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN, !link_state->up || isolate); } efx_writeo(efx, &reg, FR_AB_MAC_CTRL); /* Restore the multicast hash registers. */ falcon_push_multicast_hash(efx); efx_reado(efx, &reg, FR_AZ_RX_CFG); /* Enable XOFF signal from RX FIFO (we enabled it during NIC * initialisation but it may read back as 0) */ EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1); /* Unisolate the MAC -> RX */ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, !isolate); efx_writeo(efx, &reg, FR_AZ_RX_CFG); } static void falcon_stats_request(struct efx_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; efx_oword_t reg; WARN_ON(nic_data->stats_pending); WARN_ON(nic_data->stats_disable_count); if (nic_data->stats_dma_done == NULL) return; /* no mac selected */ *nic_data->stats_dma_done = FALCON_STATS_NOT_DONE; nic_data->stats_pending = true; wmb(); /* ensure done flag is clear */ /* Initiate DMA transfer of stats */ EFX_POPULATE_OWORD_2(reg, FRF_AB_MAC_STAT_DMA_CMD, 1, FRF_AB_MAC_STAT_DMA_ADR, efx->stats_buffer.dma_addr); efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA); mod_timer(&nic_data->stats_timer, round_jiffies_up(jiffies + HZ / 2)); } static void falcon_stats_complete(struct efx_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; if (!nic_data->stats_pending) return; nic_data->stats_pending = false; if (*nic_data->stats_dma_done == FALCON_STATS_DONE) { rmb(); /* read the done flag before the stats */ falcon_update_stats_xmac(efx); } else { netif_err(efx, hw, efx->net_dev, "timed out waiting for statistics\n"); } } static void falcon_stats_timer_func(unsigned long context) { struct efx_nic *efx = (struct efx_nic *)context; struct falcon_nic_data *nic_data = efx->nic_data; spin_lock(&efx->stats_lock); falcon_stats_complete(efx); if (nic_data->stats_disable_count == 0) falcon_stats_request(efx); spin_unlock(&efx->stats_lock); } static bool falcon_loopback_link_poll(struct efx_nic *efx) { struct efx_link_state old_state = efx->link_state; WARN_ON(!mutex_is_locked(&efx->mac_lock)); WARN_ON(!LOOPBACK_INTERNAL(efx)); efx->link_state.fd = true; efx->link_state.fc = efx->wanted_fc; efx->link_state.up = true; efx->link_state.speed = 10000; return !efx_link_state_equal(&efx->link_state, &old_state); } static int falcon_reconfigure_port(struct efx_nic *efx) { int rc; WARN_ON(efx_nic_rev(efx) > EFX_REV_FALCON_B0); /* Poll the PHY link state *before* reconfiguring it. This means we * will pick up the correct speed (in loopback) to select the correct * MAC. 
*/ if (LOOPBACK_INTERNAL(efx)) falcon_loopback_link_poll(efx); else efx->phy_op->poll(efx); falcon_stop_nic_stats(efx); falcon_deconfigure_mac_wrapper(efx); falcon_reset_macs(efx); efx->phy_op->reconfigure(efx); rc = falcon_reconfigure_xmac(efx); BUG_ON(rc); falcon_start_nic_stats(efx); /* Synchronise efx->link_state with the kernel */ efx_link_status_changed(efx); return 0; } /************************************************************************** * * PHY access via GMII * ************************************************************************** */ /* Wait for GMII access to complete */ static int falcon_gmii_wait(struct efx_nic *efx) { efx_oword_t md_stat; int count; /* wait up to 50ms - taken max from datasheet */ for (count = 0; count < 5000; count++) { efx_reado(efx, &md_stat, FR_AB_MD_STAT); if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) { if (EFX_OWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 || EFX_OWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) { netif_err(efx, hw, efx->net_dev, "error from GMII access " EFX_OWORD_FMT"\n", EFX_OWORD_VAL(md_stat)); return -EIO; } return 0; } udelay(10); } netif_err(efx, hw, efx->net_dev, "timed out waiting for GMII\n"); return -ETIMEDOUT; } /* Write an MDIO register of a PHY connected to Falcon. */ static int falcon_mdio_write(struct net_device *net_dev, int prtad, int devad, u16 addr, u16 value) { struct efx_nic *efx = netdev_priv(net_dev); struct falcon_nic_data *nic_data = efx->nic_data; efx_oword_t reg; int rc; netif_vdbg(efx, hw, efx->net_dev, "writing MDIO %d register %d.%d with 0x%04x\n", prtad, devad, addr, value); mutex_lock(&nic_data->mdio_lock); /* Check MDIO not currently being accessed */ rc = falcon_gmii_wait(efx); if (rc) goto out; /* Write the address/ID register */ EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr); efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR); EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad, FRF_AB_MD_DEV_ADR, devad); efx_writeo(efx, &reg, FR_AB_MD_ID); /* Write data */ EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value); efx_writeo(efx, &reg, FR_AB_MD_TXD); EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_WRC, 1, FRF_AB_MD_GC, 0); efx_writeo(efx, &reg, FR_AB_MD_CS); /* Wait for data to be written */ rc = falcon_gmii_wait(efx); if (rc) { /* Abort the write operation */ EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_WRC, 0, FRF_AB_MD_GC, 1); efx_writeo(efx, &reg, FR_AB_MD_CS); udelay(10); } out: mutex_unlock(&nic_data->mdio_lock); return rc; } /* Read an MDIO register of a PHY connected to Falcon. 
*/ static int falcon_mdio_read(struct net_device *net_dev, int prtad, int devad, u16 addr) { struct efx_nic *efx = netdev_priv(net_dev); struct falcon_nic_data *nic_data = efx->nic_data; efx_oword_t reg; int rc; mutex_lock(&nic_data->mdio_lock); /* Check MDIO not currently being accessed */ rc = falcon_gmii_wait(efx); if (rc) goto out; EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr); efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR); EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad, FRF_AB_MD_DEV_ADR, devad); efx_writeo(efx, &reg, FR_AB_MD_ID); /* Request data to be read */ EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0); efx_writeo(efx, &reg, FR_AB_MD_CS); /* Wait for data to become available */ rc = falcon_gmii_wait(efx); if (rc == 0) { efx_reado(efx, &reg, FR_AB_MD_RXD); rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD); netif_vdbg(efx, hw, efx->net_dev, "read from MDIO %d register %d.%d, got %04x\n", prtad, devad, addr, rc); } else { /* Abort the read operation */ EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RIC, 0, FRF_AB_MD_GC, 1); efx_writeo(efx, &reg, FR_AB_MD_CS); netif_dbg(efx, hw, efx->net_dev, "read from MDIO %d register %d.%d, got error %d\n", prtad, devad, addr, rc); } out: mutex_unlock(&nic_data->mdio_lock); return rc; } /* This call is responsible for hooking in the MAC and PHY operations */ static int falcon_probe_port(struct efx_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; int rc; switch (efx->phy_type) { case PHY_TYPE_SFX7101: efx->phy_op = &falcon_sfx7101_phy_ops; break; case PHY_TYPE_QT2022C2: case PHY_TYPE_QT2025C: efx->phy_op = &falcon_qt202x_phy_ops; break; case PHY_TYPE_TXC43128: efx->phy_op = &falcon_txc_phy_ops; break; default: netif_err(efx, probe, efx->net_dev, "Unknown PHY type %d\n", efx->phy_type); return -ENODEV; } /* Fill out MDIO structure and loopback modes */ mutex_init(&nic_data->mdio_lock); efx->mdio.mdio_read = falcon_mdio_read; efx->mdio.mdio_write = falcon_mdio_write; rc = efx->phy_op->probe(efx); if (rc != 0) return rc; /* Initial assumption */ efx->link_state.speed = 10000; efx->link_state.fd = true; /* Hardware flow ctrl. FalconA RX FIFO too small for pause generation */ if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) efx->wanted_fc = EFX_FC_RX | EFX_FC_TX; else efx->wanted_fc = EFX_FC_RX; if (efx->mdio.mmds & MDIO_DEVS_AN) efx->wanted_fc |= EFX_FC_AUTO; /* Allocate buffer for stats */ rc = efx_nic_alloc_buffer(efx, &efx->stats_buffer, FALCON_MAC_STATS_SIZE); if (rc) return rc; netif_dbg(efx, probe, efx->net_dev, "stats buffer at %llx (virt %p phys %llx)\n", (u64)efx->stats_buffer.dma_addr, efx->stats_buffer.addr, (u64)virt_to_phys(efx->stats_buffer.addr)); nic_data->stats_dma_done = efx->stats_buffer.addr + XgDmaDone_offset; return 0; } static void falcon_remove_port(struct efx_nic *efx) { efx->phy_op->remove(efx); efx_nic_free_buffer(efx, &efx->stats_buffer); } /* Global events are basically PHY events */ static bool falcon_handle_global_event(struct efx_channel *channel, efx_qword_t *event) { struct efx_nic *efx = channel->efx; struct falcon_nic_data *nic_data = efx->nic_data; if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) || EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) || EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) /* Ignored */ return true; if ((efx_nic_rev(efx) == EFX_REV_FALCON_B0) && EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) { nic_data->xmac_poll_required = true; return true; } if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1 ? 
EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) : EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) { netif_err(efx, rx_err, efx->net_dev, "channel %d seen global RX_RESET event. Resetting.\n", channel->channel); atomic_inc(&efx->rx_reset); efx_schedule_reset(efx, EFX_WORKAROUND_6555(efx) ? RESET_TYPE_RX_RECOVERY : RESET_TYPE_DISABLE); return true; } return false; } /************************************************************************** * * Falcon test code * **************************************************************************/ static int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out) { struct falcon_nic_data *nic_data = efx->nic_data; struct falcon_nvconfig *nvconfig; struct efx_spi_device *spi; void *region; int rc, magic_num, struct_ver; __le16 *word, *limit; u32 csum; if (efx_spi_present(&nic_data->spi_flash)) spi = &nic_data->spi_flash; else if (efx_spi_present(&nic_data->spi_eeprom)) spi = &nic_data->spi_eeprom; else return -EINVAL; region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL); if (!region) return -ENOMEM; nvconfig = region + FALCON_NVCONFIG_OFFSET; mutex_lock(&nic_data->spi_lock); rc = falcon_spi_read(efx, spi, 0, FALCON_NVCONFIG_END, NULL, region); mutex_unlock(&nic_data->spi_lock); if (rc) { netif_err(efx, hw, efx->net_dev, "Failed to read %s\n", efx_spi_present(&nic_data->spi_flash) ? "flash" : "EEPROM"); rc = -EIO; goto out; } magic_num = le16_to_cpu(nvconfig->board_magic_num); struct_ver = le16_to_cpu(nvconfig->board_struct_ver); rc = -EINVAL; if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) { netif_err(efx, hw, efx->net_dev, "NVRAM bad magic 0x%x\n", magic_num); goto out; } if (struct_ver < 2) { netif_err(efx, hw, efx->net_dev, "NVRAM has ancient version 0x%x\n", struct_ver); goto out; } else if (struct_ver < 4) { word = &nvconfig->board_magic_num; limit = (__le16 *) (nvconfig + 1); } else { word = region; limit = region + FALCON_NVCONFIG_END; } for (csum = 0; word < limit; ++word) csum += le16_to_cpu(*word); if (~csum & 0xffff) { netif_err(efx, hw, efx->net_dev, "NVRAM has incorrect checksum\n"); goto out; } rc = 0; if (nvconfig_out) memcpy(nvconfig_out, nvconfig, sizeof(*nvconfig)); out: kfree(region); return rc; } static int falcon_test_nvram(struct efx_nic *efx) { return falcon_read_nvram(efx, NULL); } static const struct efx_nic_register_test falcon_b0_register_tests[] = { { FR_AZ_ADR_REGION, EFX_OWORD32(0x0003FFFF, 0x0003FFFF, 0x0003FFFF, 0x0003FFFF) }, { FR_AZ_RX_CFG, EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) }, { FR_AZ_TX_CFG, EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) }, { FR_AZ_TX_RESERVED, EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) }, { FR_AB_MAC_CTRL, EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) }, { FR_AZ_SRM_TX_DC_CFG, EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) }, { FR_AZ_RX_DC_CFG, EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) }, { FR_AZ_RX_DC_PF_WM, EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) }, { FR_BZ_DP_CTRL, EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) }, { FR_AB_GM_CFG2, EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) }, { FR_AB_GMF_CFG0, EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) }, { FR_AB_XM_GLB_CFG, EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) }, { FR_AB_XM_TX_CFG, EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) }, { FR_AB_XM_RX_CFG, EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) }, { FR_AB_XM_RX_PARAM, 
EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) }, { FR_AB_XM_FC, EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) }, { FR_AB_XM_ADR_LO, EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) }, { FR_AB_XX_SD_CTL, EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) }, }; static int falcon_b0_test_registers(struct efx_nic *efx) { return efx_nic_test_registers(efx, falcon_b0_register_tests, ARRAY_SIZE(falcon_b0_register_tests)); } /************************************************************************** * * Device reset * ************************************************************************** */ static enum reset_type falcon_map_reset_reason(enum reset_type reason) { switch (reason) { case RESET_TYPE_RX_RECOVERY: case RESET_TYPE_RX_DESC_FETCH: case RESET_TYPE_TX_DESC_FETCH: case RESET_TYPE_TX_SKIP: /* These can occasionally occur due to hardware bugs. * We try to reset without disrupting the link. */ return RESET_TYPE_INVISIBLE; default: return RESET_TYPE_ALL; } } static int falcon_map_reset_flags(u32 *flags) { enum { FALCON_RESET_INVISIBLE = (ETH_RESET_DMA | ETH_RESET_FILTER | ETH_RESET_OFFLOAD | ETH_RESET_MAC), FALCON_RESET_ALL = FALCON_RESET_INVISIBLE | ETH_RESET_PHY, FALCON_RESET_WORLD = FALCON_RESET_ALL | ETH_RESET_IRQ, }; if ((*flags & FALCON_RESET_WORLD) == FALCON_RESET_WORLD) { *flags &= ~FALCON_RESET_WORLD; return RESET_TYPE_WORLD; } if ((*flags & FALCON_RESET_ALL) == FALCON_RESET_ALL) { *flags &= ~FALCON_RESET_ALL; return RESET_TYPE_ALL; } if ((*flags & FALCON_RESET_INVISIBLE) == FALCON_RESET_INVISIBLE) { *flags &= ~FALCON_RESET_INVISIBLE; return RESET_TYPE_INVISIBLE; } return -EINVAL; } /* Resets NIC to known state. This routine must be called in process * context and is allowed to sleep. */ static int __falcon_reset_hw(struct efx_nic *efx, enum reset_type method) { struct falcon_nic_data *nic_data = efx->nic_data; efx_oword_t glb_ctl_reg_ker; int rc; netif_dbg(efx, hw, efx->net_dev, "performing %s hardware reset\n", RESET_TYPE(method)); /* Initiate device reset */ if (method == RESET_TYPE_WORLD) { rc = pci_save_state(efx->pci_dev); if (rc) { netif_err(efx, drv, efx->net_dev, "failed to backup PCI state of primary " "function prior to hardware reset\n"); goto fail1; } if (efx_nic_is_dual_func(efx)) { rc = pci_save_state(nic_data->pci_dev2); if (rc) { netif_err(efx, drv, efx->net_dev, "failed to backup PCI state of " "secondary function prior to " "hardware reset\n"); goto fail2; } } EFX_POPULATE_OWORD_2(glb_ctl_reg_ker, FRF_AB_EXT_PHY_RST_DUR, FFE_AB_EXT_PHY_RST_DUR_10240US, FRF_AB_SWRST, 1); } else { EFX_POPULATE_OWORD_7(glb_ctl_reg_ker, /* exclude PHY from "invisible" reset */ FRF_AB_EXT_PHY_RST_CTL, method == RESET_TYPE_INVISIBLE, /* exclude EEPROM/flash and PCIe */ FRF_AB_PCIE_CORE_RST_CTL, 1, FRF_AB_PCIE_NSTKY_RST_CTL, 1, FRF_AB_PCIE_SD_RST_CTL, 1, FRF_AB_EE_RST_CTL, 1, FRF_AB_EXT_PHY_RST_DUR, FFE_AB_EXT_PHY_RST_DUR_10240US, FRF_AB_SWRST, 1); } efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL); netif_dbg(efx, hw, efx->net_dev, "waiting for hardware reset\n"); schedule_timeout_uninterruptible(HZ / 20); /* Restore PCI configuration if needed */ if (method == RESET_TYPE_WORLD) { if (efx_nic_is_dual_func(efx)) pci_restore_state(nic_data->pci_dev2); pci_restore_state(efx->pci_dev); netif_dbg(efx, drv, efx->net_dev, "successfully restored PCI config\n"); } /* Assert that reset complete */ efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL); if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) { rc = -ETIMEDOUT; netif_err(efx, hw, 
efx->net_dev, "timed out waiting for hardware reset\n"); goto fail3; } netif_dbg(efx, hw, efx->net_dev, "hardware reset complete\n"); return 0; /* pci_save_state() and pci_restore_state() MUST be called in pairs */ fail2: pci_restore_state(efx->pci_dev); fail1: fail3: return rc; } static int falcon_reset_hw(struct efx_nic *efx, enum reset_type method) { struct falcon_nic_data *nic_data = efx->nic_data; int rc; mutex_lock(&nic_data->spi_lock); rc = __falcon_reset_hw(efx, method); mutex_unlock(&nic_data->spi_lock); return rc; } static void falcon_monitor(struct efx_nic *efx) { bool link_changed; int rc; BUG_ON(!mutex_is_locked(&efx->mac_lock)); rc = falcon_board(efx)->type->monitor(efx); if (rc) { netif_err(efx, hw, efx->net_dev, "Board sensor %s; shutting down PHY\n", (rc == -ERANGE) ? "reported fault" : "failed"); efx->phy_mode |= PHY_MODE_LOW_POWER; rc = __efx_reconfigure_port(efx); WARN_ON(rc); } if (LOOPBACK_INTERNAL(efx)) link_changed = falcon_loopback_link_poll(efx); else link_changed = efx->phy_op->poll(efx); if (link_changed) { falcon_stop_nic_stats(efx); falcon_deconfigure_mac_wrapper(efx); falcon_reset_macs(efx); rc = falcon_reconfigure_xmac(efx); BUG_ON(rc); falcon_start_nic_stats(efx); efx_link_status_changed(efx); } falcon_poll_xmac(efx); } /* Zeroes out the SRAM contents. This routine must be called in * process context and is allowed to sleep. */ static int falcon_reset_sram(struct efx_nic *efx) { efx_oword_t srm_cfg_reg_ker, gpio_cfg_reg_ker; int count; /* Set the SRAM wake/sleep GPIO appropriately. */ efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL); EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1); EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1); efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL); /* Initiate SRAM reset */ EFX_POPULATE_OWORD_2(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN, 1, FRF_AZ_SRM_NB_SZ, 0); efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG); /* Wait for SRAM reset to complete */ count = 0; do { netif_dbg(efx, hw, efx->net_dev, "waiting for SRAM reset (attempt %d)...\n", count); /* SRAM reset is slow; expect around 16ms */ schedule_timeout_uninterruptible(HZ / 50); /* Check for reset complete */ efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG); if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) { netif_dbg(efx, hw, efx->net_dev, "SRAM reset complete\n"); return 0; } } while (++count < 20); /* wait up to 0.4 sec */ netif_err(efx, hw, efx->net_dev, "timed out waiting for SRAM reset\n"); return -ETIMEDOUT; } static void falcon_spi_device_init(struct efx_nic *efx, struct efx_spi_device *spi_device, unsigned int device_id, u32 device_type) { if (device_type != 0) { spi_device->device_id = device_id; spi_device->size = 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_SIZE); spi_device->addr_len = SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ADDR_LEN); spi_device->munge_address = (spi_device->size == 1 << 9 && spi_device->addr_len == 1); spi_device->erase_command = SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_CMD); spi_device->erase_size = 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_ERASE_SIZE); spi_device->block_size = 1 << SPI_DEV_TYPE_FIELD(device_type, SPI_DEV_TYPE_BLOCK_SIZE); } else { spi_device->size = 0; } } /* Extract non-volatile configuration */ static int falcon_probe_nvconfig(struct efx_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; struct falcon_nvconfig *nvconfig; int rc; nvconfig = kmalloc(sizeof(*nvconfig), GFP_KERNEL); if (!nvconfig) return -ENOMEM; rc = falcon_read_nvram(efx, nvconfig); if (rc) 
goto out; efx->phy_type = nvconfig->board_v2.port0_phy_type; efx->mdio.prtad = nvconfig->board_v2.port0_phy_addr; if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) { falcon_spi_device_init( efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH, le32_to_cpu(nvconfig->board_v3 .spi_device_type[FFE_AB_SPI_DEVICE_FLASH])); falcon_spi_device_init( efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM, le32_to_cpu(nvconfig->board_v3 .spi_device_type[FFE_AB_SPI_DEVICE_EEPROM])); } /* Read the MAC addresses */ memcpy(efx->net_dev->perm_addr, nvconfig->mac_address[0], ETH_ALEN); netif_dbg(efx, probe, efx->net_dev, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad); rc = falcon_probe_board(efx, le16_to_cpu(nvconfig->board_v2.board_revision)); out: kfree(nvconfig); return rc; } static void falcon_dimension_resources(struct efx_nic *efx) { efx->rx_dc_base = 0x20000; efx->tx_dc_base = 0x26000; } /* Probe all SPI devices on the NIC */ static void falcon_probe_spi_devices(struct efx_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg; int boot_dev; efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL); efx_reado(efx, &nic_stat, FR_AB_NIC_STAT); efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0); if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) { boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ? FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM); netif_dbg(efx, probe, efx->net_dev, "Booted from %s\n", boot_dev == FFE_AB_SPI_DEVICE_FLASH ? "flash" : "EEPROM"); } else { /* Disable VPD and set clock dividers to safe * values for initial programming. */ boot_dev = -1; netif_dbg(efx, probe, efx->net_dev, "Booted from internal ASIC settings;" " setting SPI config\n"); EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0, /* 125 MHz / 7 ~= 20 MHz */ FRF_AB_EE_SF_CLOCK_DIV, 7, /* 125 MHz / 63 ~= 2 MHz */ FRF_AB_EE_EE_CLOCK_DIV, 63); efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0); } mutex_init(&nic_data->spi_lock); if (boot_dev == FFE_AB_SPI_DEVICE_FLASH) falcon_spi_device_init(efx, &nic_data->spi_flash, FFE_AB_SPI_DEVICE_FLASH, default_flash_type); if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM) falcon_spi_device_init(efx, &nic_data->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM, large_eeprom_type); } static int falcon_probe_nic(struct efx_nic *efx) { struct falcon_nic_data *nic_data; struct falcon_board *board; int rc; /* Allocate storage for hardware specific data */ nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL); if (!nic_data) return -ENOMEM; efx->nic_data = nic_data; rc = -ENODEV; if (efx_nic_fpga_ver(efx) != 0) { netif_err(efx, probe, efx->net_dev, "Falcon FPGA not supported\n"); goto fail1; } if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) { efx_oword_t nic_stat; struct pci_dev *dev; u8 pci_rev = efx->pci_dev->revision; if ((pci_rev == 0xff) || (pci_rev == 0)) { netif_err(efx, probe, efx->net_dev, "Falcon rev A0 not supported\n"); goto fail1; } efx_reado(efx, &nic_stat, FR_AB_NIC_STAT); if (EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) == 0) { netif_err(efx, probe, efx->net_dev, "Falcon rev A1 1G not supported\n"); goto fail1; } if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) { netif_err(efx, probe, efx->net_dev, "Falcon rev A1 PCI-X not supported\n"); goto fail1; } dev = pci_dev_get(efx->pci_dev); while ((dev = pci_get_device(PCI_VENDOR_ID_SOLARFLARE, PCI_DEVICE_ID_SOLARFLARE_SFC4000A_1, dev))) { if (dev->bus == efx->pci_dev->bus && dev->devfn == efx->pci_dev->devfn + 1) { nic_data->pci_dev2 = dev; break; } } if (!nic_data->pci_dev2) { netif_err(efx, probe, 
efx->net_dev, "failed to find secondary function\n"); rc = -ENODEV; goto fail2; } } /* Now we can reset the NIC */ rc = __falcon_reset_hw(efx, RESET_TYPE_ALL); if (rc) { netif_err(efx, probe, efx->net_dev, "failed to reset NIC\n"); goto fail3; } /* Allocate memory for INT_KER */ rc = efx_nic_alloc_buffer(efx, &efx->irq_status, sizeof(efx_oword_t)); if (rc) goto fail4; BUG_ON(efx->irq_status.dma_addr & 0x0f); netif_dbg(efx, probe, efx->net_dev, "INT_KER at %llx (virt %p phys %llx)\n", (u64)efx->irq_status.dma_addr, efx->irq_status.addr, (u64)virt_to_phys(efx->irq_status.addr)); falcon_probe_spi_devices(efx); /* Read in the non-volatile configuration */ rc = falcon_probe_nvconfig(efx); if (rc) { if (rc == -EINVAL) netif_err(efx, probe, efx->net_dev, "NVRAM is invalid\n"); goto fail5; } efx->timer_quantum_ns = 4968; /* 621 cycles */ /* Initialise I2C adapter */ board = falcon_board(efx); board->i2c_adap.owner = THIS_MODULE; board->i2c_data = falcon_i2c_bit_operations; board->i2c_data.data = efx; board->i2c_adap.algo_data = &board->i2c_data; board->i2c_adap.dev.parent = &efx->pci_dev->dev; strlcpy(board->i2c_adap.name, "SFC4000 GPIO", sizeof(board->i2c_adap.name)); rc = i2c_bit_add_bus(&board->i2c_adap); if (rc) goto fail5; rc = falcon_board(efx)->type->init(efx); if (rc) { netif_err(efx, probe, efx->net_dev, "failed to initialise board\n"); goto fail6; } nic_data->stats_disable_count = 1; setup_timer(&nic_data->stats_timer, &falcon_stats_timer_func, (unsigned long)efx); return 0; fail6: BUG_ON(i2c_del_adapter(&board->i2c_adap)); memset(&board->i2c_adap, 0, sizeof(board->i2c_adap)); fail5: efx_nic_free_buffer(efx, &efx->irq_status); fail4: fail3: if (nic_data->pci_dev2) { pci_dev_put(nic_data->pci_dev2); nic_data->pci_dev2 = NULL; } fail2: fail1: kfree(efx->nic_data); return rc; } static void falcon_init_rx_cfg(struct efx_nic *efx) { /* Prior to Siena the RX DMA engine will split each frame at * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to * be so large that that never happens. */ const unsigned huge_buf_size = (3 * 4096) >> 5; /* RX control FIFO thresholds (32 entries) */ const unsigned ctrl_xon_thr = 20; const unsigned ctrl_xoff_thr = 25; efx_oword_t reg; efx_reado(efx, &reg, FR_AZ_RX_CFG); if (efx_nic_rev(efx) <= EFX_REV_FALCON_A1) { /* Data FIFO size is 5.5K */ EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE, huge_buf_size); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, 512 >> 8); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, 2048 >> 8); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr); EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr); } else { /* Data FIFO size is 80K; register fields moved */ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0); EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE, huge_buf_size); /* Send XON and XOFF at ~3 * max MTU away from empty/full */ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, 27648 >> 8); EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, 54272 >> 8); EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr); EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr); EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1); /* Enable hash insertion. This is broken for the * 'Falcon' hash so also select Toeplitz TCP/IPv4 and * IPv4 hashes. */ EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_INSRT_HDR, 1); EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_HASH_ALG, 1); EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_IP_HASH, 1); } /* Always enable XOFF signal from RX FIFO. 
We enable * or disable transmission of pause frames at the MAC. */ EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, 1); efx_writeo(efx, &reg, FR_AZ_RX_CFG); } /* This call performs hardware-specific global initialisation, such as * defining the descriptor cache sizes and number of RSS channels. * It does not set up any buffers, descriptor rings or event queues. */ static int falcon_init_nic(struct efx_nic *efx) { efx_oword_t temp; int rc; /* Use on-chip SRAM */ efx_reado(efx, &temp, FR_AB_NIC_STAT); EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1); efx_writeo(efx, &temp, FR_AB_NIC_STAT); rc = falcon_reset_sram(efx); if (rc) return rc; /* Clear the parity enables on the TX data fifos as * they produce false parity errors because of timing issues */ if (EFX_WORKAROUND_5129(efx)) { efx_reado(efx, &temp, FR_AZ_CSR_SPARE); EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0); efx_writeo(efx, &temp, FR_AZ_CSR_SPARE); } if (EFX_WORKAROUND_7244(efx)) { efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL); EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8); EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8); EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8); EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8); efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL); } /* XXX This is documented only for Falcon A0/A1 */ /* Setup RX. Wait for descriptor is broken and must * be disabled. RXDP recovery shouldn't be needed, but is. */ efx_reado(efx, &temp, FR_AA_RX_SELF_RST); EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1); EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1); if (EFX_WORKAROUND_5583(efx)) EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1); efx_writeo(efx, &temp, FR_AA_RX_SELF_RST); /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 * descriptors (which is bad). 
*/ efx_reado(efx, &temp, FR_AZ_TX_CFG); EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0); efx_writeo(efx, &temp, FR_AZ_TX_CFG); falcon_init_rx_cfg(efx); if (efx_nic_rev(efx) >= EFX_REV_FALCON_B0) { /* Set hash key for IPv4 */ memcpy(&temp, efx->rx_hash_key, sizeof(temp)); efx_writeo(efx, &temp, FR_BZ_RX_RSS_TKEY); /* Set destination of both TX and RX Flush events */ EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0); efx_writeo(efx, &temp, FR_BZ_DP_CTRL); } efx_nic_init_common(efx); return 0; } static void falcon_remove_nic(struct efx_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; struct falcon_board *board = falcon_board(efx); int rc; board->type->fini(efx); /* Remove I2C adapter and clear it in preparation for a retry */ rc = i2c_del_adapter(&board->i2c_adap); BUG_ON(rc); memset(&board->i2c_adap, 0, sizeof(board->i2c_adap)); efx_nic_free_buffer(efx, &efx->irq_status); __falcon_reset_hw(efx, RESET_TYPE_ALL); /* Release the second function after the reset */ if (nic_data->pci_dev2) { pci_dev_put(nic_data->pci_dev2); nic_data->pci_dev2 = NULL; } /* Tear down the private nic state */ kfree(efx->nic_data); efx->nic_data = NULL; } static void falcon_update_nic_stats(struct efx_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; efx_oword_t cnt; if (nic_data->stats_disable_count) return; efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP); efx->n_rx_nodesc_drop_cnt += EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT); if (nic_data->stats_pending && *nic_data->stats_dma_done == FALCON_STATS_DONE) { nic_data->stats_pending = false; rmb(); /* read the done flag before the stats */ falcon_update_stats_xmac(efx); } } void falcon_start_nic_stats(struct efx_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; spin_lock_bh(&efx->stats_lock); if (--nic_data->stats_disable_count == 0) falcon_stats_request(efx); spin_unlock_bh(&efx->stats_lock); } void falcon_stop_nic_stats(struct efx_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; int i; might_sleep(); spin_lock_bh(&efx->stats_lock); ++nic_data->stats_disable_count; spin_unlock_bh(&efx->stats_lock); del_timer_sync(&nic_data->stats_timer); /* Wait enough time for the most recent transfer to * complete. 
*/ for (i = 0; i < 4 && nic_data->stats_pending; i++) { if (*nic_data->stats_dma_done == FALCON_STATS_DONE) break; msleep(1); } spin_lock_bh(&efx->stats_lock); falcon_stats_complete(efx); spin_unlock_bh(&efx->stats_lock); } static void falcon_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) { falcon_board(efx)->type->set_id_led(efx, mode); } /************************************************************************** * * Wake on LAN * ************************************************************************** */ static void falcon_get_wol(struct efx_nic *efx, struct ethtool_wolinfo *wol) { wol->supported = 0; wol->wolopts = 0; memset(&wol->sopass, 0, sizeof(wol->sopass)); } static int falcon_set_wol(struct efx_nic *efx, u32 type) { if (type != 0) return -EINVAL; return 0; } /************************************************************************** * * Revision-dependent attributes used by efx.c and nic.c * ************************************************************************** */ const struct efx_nic_type falcon_a1_nic_type = { .probe = falcon_probe_nic, .remove = falcon_remove_nic, .init = falcon_init_nic, .dimension_resources = falcon_dimension_resources, .fini = efx_port_dummy_op_void, .monitor = falcon_monitor, .map_reset_reason = falcon_map_reset_reason, .map_reset_flags = falcon_map_reset_flags, .reset = falcon_reset_hw, .probe_port = falcon_probe_port, .remove_port = falcon_remove_port, .handle_global_event = falcon_handle_global_event, .prepare_flush = falcon_prepare_flush, .update_stats = falcon_update_nic_stats, .start_stats = falcon_start_nic_stats, .stop_stats = falcon_stop_nic_stats, .set_id_led = falcon_set_id_led, .push_irq_moderation = falcon_push_irq_moderation, .reconfigure_port = falcon_reconfigure_port, .reconfigure_mac = falcon_reconfigure_xmac, .check_mac_fault = falcon_xmac_check_fault, .get_wol = falcon_get_wol, .set_wol = falcon_set_wol, .resume_wol = efx_port_dummy_op_void, .test_nvram = falcon_test_nvram, .revision = EFX_REV_FALCON_A1, .mem_map_size = 0x20000, .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER, .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER, .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER, .evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER, .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER, .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), .rx_buffer_padding = 0x24, .max_interrupt_mode = EFX_INT_MODE_MSI, .phys_addr_channels = 4, .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH, .offload_features = NETIF_F_IP_CSUM, }; const struct efx_nic_type falcon_b0_nic_type = { .probe = falcon_probe_nic, .remove = falcon_remove_nic, .init = falcon_init_nic, .dimension_resources = falcon_dimension_resources, .fini = efx_port_dummy_op_void, .monitor = falcon_monitor, .map_reset_reason = falcon_map_reset_reason, .map_reset_flags = falcon_map_reset_flags, .reset = falcon_reset_hw, .probe_port = falcon_probe_port, .remove_port = falcon_remove_port, .handle_global_event = falcon_handle_global_event, .prepare_flush = falcon_prepare_flush, .update_stats = falcon_update_nic_stats, .start_stats = falcon_start_nic_stats, .stop_stats = falcon_stop_nic_stats, .set_id_led = falcon_set_id_led, .push_irq_moderation = falcon_push_irq_moderation, .reconfigure_port = falcon_reconfigure_port, .reconfigure_mac = falcon_reconfigure_xmac, .check_mac_fault = falcon_xmac_check_fault, .get_wol = falcon_get_wol, .set_wol = falcon_set_wol, .resume_wol = efx_port_dummy_op_void, .test_registers = falcon_b0_test_registers, .test_nvram = falcon_test_nvram, .revision = EFX_REV_FALCON_B0, /* Map 
everything up to and including the RSS indirection * table. Don't map MSI-X table, MSI-X PBA since Linux * requires that they not be mapped. */ .mem_map_size = (FR_BZ_RX_INDIRECTION_TBL + FR_BZ_RX_INDIRECTION_TBL_STEP * FR_BZ_RX_INDIRECTION_TBL_ROWS), .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL, .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL, .buf_tbl_base = FR_BZ_BUF_FULL_TBL, .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL, .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR, .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH), .rx_buffer_hash_size = 0x10, .rx_buffer_padding = 0, .max_interrupt_mode = EFX_INT_MODE_MSIX, .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy * interrupt handler only supports 32 * channels */ .timer_period_max = 1 << FRF_AB_TC_TIMER_VAL_WIDTH, .offload_features = NETIF_F_IP_CSUM | NETIF_F_RXHASH | NETIF_F_NTUPLE, };
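/* The flash write path in falcon_spi_write() above works in block-sized
 * steps: write a block, wait for the device to go idle, read the block
 * back and memcmp() it against the source before advancing. A minimal
 * user-space sketch of that write-and-verify loop, assuming hypothetical
 * write()/read()/wait_idle() callbacks in place of falcon_spi_cmd() and
 * falcon_spi_wait_write():
 */
#include <stddef.h>
#include <string.h>

struct flash_ops {
	int (*write)(size_t addr, const void *buf, size_t len);
	int (*read)(size_t addr, void *buf, size_t len);
	int (*wait_idle)(void);
	size_t block_size;
};

/* Returns 0 on success; *retlen holds the bytes verified so far */
static int write_verified(const struct flash_ops *ops, size_t start,
			  const unsigned char *buf, size_t len, size_t *retlen)
{
	unsigned char verify[256];	/* assumes block_size <= 256 */
	size_t pos = 0;
	int rc = 0;

	while (pos < len) {
		size_t n = len - pos;

		if (n > ops->block_size)
			n = ops->block_size;
		rc = ops->write(start + pos, buf + pos, n);
		if (rc)
			break;
		rc = ops->wait_idle();	/* device is busy until the write completes */
		if (rc)
			break;
		rc = ops->read(start + pos, verify, n);
		if (rc)
			break;
		if (memcmp(verify, buf + pos, n)) {
			rc = -1;	/* read-back mismatch: treat as I/O error */
			break;
		}
		pos += n;
	}
	if (retlen)
		*retlen = pos;
	return rc;
}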
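/* falcon_gmii_wait() above is the classic bounded busy-wait: poll a
 * status register at a fixed interval and give up after the datasheet
 * maximum (5000 iterations of udelay(10) = 50ms). A generic sketch of
 * the pattern, assuming a hypothetical read_status() callback and
 * usleep() from <unistd.h>:
 */
#include <unistd.h>
#include <errno.h>

#define POLL_INTERVAL_US 10
#define POLL_ATTEMPTS    5000	/* 5000 * 10us = 50ms total */

static int wait_for_idle(int (*read_status)(unsigned int *status),
			 unsigned int busy_mask, unsigned int error_mask)
{
	unsigned int status;
	int count, rc;

	for (count = 0; count < POLL_ATTEMPTS; count++) {
		rc = read_status(&status);
		if (rc)
			return rc;
		if (!(status & busy_mask))	/* transaction finished */
			return (status & error_mask) ? -EIO : 0;
		usleep(POLL_INTERVAL_US);
	}
	return -ETIMEDOUT;	/* device never went idle */
}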
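/* The NVRAM validation in falcon_read_nvram() above sums every
 * little-endian 16-bit word of the config region into a u32 and requires
 * the low 16 bits of the result to be all-ones (~csum & 0xffff must be
 * zero); carries above bit 15 are simply discarded. A user-space sketch
 * of the same test:
 */
#include <stdint.h>
#include <stddef.h>

static int nvconfig_csum_ok(const uint8_t *region, size_t len)
{
	uint32_t csum = 0;
	size_t i;

	/* accumulate little-endian 16-bit words; len assumed even */
	for (i = 0; i + 1 < len; i += 2)
		csum += (uint32_t)region[i] | ((uint32_t)region[i + 1] << 8);

	return !(~csum & 0xffff);	/* low 16 bits must be 0xffff */
}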
gpl-2.0
DarkStarSword/linux
net/netfilter/xt_NFLOG.c
3895
1914
/*
 * Copyright (c) 2006 Patrick McHardy <kaber@trash.net>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter/xt_NFLOG.h>
#include <net/netfilter/nf_log.h>
#include <net/netfilter/nfnetlink_log.h>

MODULE_AUTHOR("Patrick McHardy <kaber@trash.net>");
MODULE_DESCRIPTION("Xtables: packet logging to netlink using NFLOG");
MODULE_LICENSE("GPL");
MODULE_ALIAS("ipt_NFLOG");
MODULE_ALIAS("ip6t_NFLOG");

static unsigned int
nflog_tg(struct sk_buff *skb, const struct xt_action_param *par)
{
	const struct xt_nflog_info *info = par->targinfo;
	struct nf_loginfo li;
	struct net *net = dev_net(par->in ? par->in : par->out);

	li.type		     = NF_LOG_TYPE_ULOG;
	li.u.ulog.copy_len   = info->len;
	li.u.ulog.group	     = info->group;
	li.u.ulog.qthreshold = info->threshold;

	nfulnl_log_packet(net, par->family, par->hooknum, skb, par->in,
			  par->out, &li, info->prefix);
	return XT_CONTINUE;
}

static int nflog_tg_check(const struct xt_tgchk_param *par)
{
	const struct xt_nflog_info *info = par->targinfo;

	if (info->flags & ~XT_NFLOG_MASK)
		return -EINVAL;
	if (info->prefix[sizeof(info->prefix) - 1] != '\0')
		return -EINVAL;
	return 0;
}

static struct xt_target nflog_tg_reg __read_mostly = {
	.name       = "NFLOG",
	.revision   = 0,
	.family     = NFPROTO_UNSPEC,
	.checkentry = nflog_tg_check,
	.target     = nflog_tg,
	.targetsize = sizeof(struct xt_nflog_info),
	.me         = THIS_MODULE,
};

static int __init nflog_tg_init(void)
{
	return xt_register_target(&nflog_tg_reg);
}

static void __exit nflog_tg_exit(void)
{
	xt_unregister_target(&nflog_tg_reg);
}

module_init(nflog_tg_init);
module_exit(nflog_tg_exit);
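/* nflog_tg_check() above rejects unknown flag bits and any prefix that is
 * not NUL-terminated within its fixed-size buffer -- the standard
 * defensive checks for a structure copied in from user space. A
 * stand-alone sketch of the same validation, with hypothetical
 * MY_FLAG_MASK/MY_PREFIX_SIZE constants standing in for XT_NFLOG_MASK and
 * the xt_nflog_info prefix size:
 */
#include <stdint.h>

#define MY_FLAG_MASK   0x3	/* all currently defined flag bits */
#define MY_PREFIX_SIZE 64

struct my_info {
	uint16_t flags;
	char prefix[MY_PREFIX_SIZE];
};

static int my_info_check(const struct my_info *info)
{
	if (info->flags & ~MY_FLAG_MASK)		/* unknown flags set */
		return -1;
	if (info->prefix[MY_PREFIX_SIZE - 1] != '\0')	/* unterminated string */
		return -1;
	return 0;
}

/* From user space the target is typically attached with something like
 *   iptables -A INPUT -j NFLOG --nflog-group 1 --nflog-prefix "in: "
 * which fills in xt_nflog_info before this check runs.
 */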
gpl-2.0
0x00evil/linux
arch/arm/mach-sa1100/hackkit.c
4663
5420
/* * linux/arch/arm/mach-sa1100/hackkit.c * * Copyright (C) 2002 Stefan Eletzhofer <stefan.eletzhofer@eletztrick.de> * * This file contains all HackKit tweaks. Based on original work from * Nicolas Pitre's assabet fixes * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/sched.h> #include <linux/tty.h> #include <linux/module.h> #include <linux/errno.h> #include <linux/cpufreq.h> #include <linux/platform_data/sa11x0-serial.h> #include <linux/serial_core.h> #include <linux/mtd/mtd.h> #include <linux/mtd/partitions.h> #include <linux/tty.h> #include <linux/gpio.h> #include <linux/leds.h> #include <linux/platform_device.h> #include <asm/mach-types.h> #include <asm/setup.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/mach/arch.h> #include <asm/mach/flash.h> #include <asm/mach/map.h> #include <asm/mach/irq.h> #include <mach/hardware.h> #include <mach/irqs.h> #include "generic.h" /********************************************************************** * prototypes */ /* init funcs */ static void __init hackkit_map_io(void); static u_int hackkit_get_mctrl(struct uart_port *port); static void hackkit_set_mctrl(struct uart_port *port, u_int mctrl); static void hackkit_uart_pm(struct uart_port *port, u_int state, u_int oldstate); /********************************************************************** * global data */ /********************************************************************** * static data */ static struct map_desc hackkit_io_desc[] __initdata = { { /* Flash bank 0 */ .virtual = 0xe8000000, .pfn = __phys_to_pfn(0x00000000), .length = 0x01000000, .type = MT_DEVICE }, }; static struct sa1100_port_fns hackkit_port_fns __initdata = { .set_mctrl = hackkit_set_mctrl, .get_mctrl = hackkit_get_mctrl, .pm = hackkit_uart_pm, }; /********************************************************************** * Static functions */ static void __init hackkit_map_io(void) { sa1100_map_io(); iotable_init(hackkit_io_desc, ARRAY_SIZE(hackkit_io_desc)); sa1100_register_uart_fns(&hackkit_port_fns); sa1100_register_uart(0, 1); /* com port */ sa1100_register_uart(1, 2); sa1100_register_uart(2, 3); /* radio module */ Ser1SDCR0 |= SDCR0_SUS; } /** * hackkit_uart_pm - powermgmt callback function for system 3 UART * @port: uart port structure * @state: pm state * @oldstate: old pm state * */ static void hackkit_uart_pm(struct uart_port *port, u_int state, u_int oldstate) { /* TODO: switch on/off uart in powersave mode */ } /* * Note! this can be called from IRQ context. * FIXME: No modem ctrl lines yet. */ static void hackkit_set_mctrl(struct uart_port *port, u_int mctrl) { #if 0 if (port->mapbase == _Ser1UTCR0) { u_int set = 0, clear = 0; if (mctrl & TIOCM_RTS) set |= PT_CTRL2_RS1_RTS; else clear |= PT_CTRL2_RS1_RTS; if (mctrl & TIOCM_DTR) set |= PT_CTRL2_RS1_DTR; else clear |= PT_CTRL2_RS1_DTR; PTCTRL2_clear(clear); PTCTRL2_set(set); } #endif } static u_int hackkit_get_mctrl(struct uart_port *port) { u_int ret = 0; #if 0 u_int irqsr = PT_IRQSR; /* need 2 reads to read current value */ irqsr = PT_IRQSR; /* TODO: check IRQ source register for modem/com status lines and set them correctly. 
*/ #endif ret = TIOCM_CD | TIOCM_CTS | TIOCM_DSR; return ret; } static struct mtd_partition hackkit_partitions[] = { { .name = "BLOB", .size = 0x00040000, .offset = 0x00000000, .mask_flags = MTD_WRITEABLE, /* force read-only */ }, { .name = "config", .size = 0x00040000, .offset = MTDPART_OFS_APPEND, }, { .name = "kernel", .size = 0x00100000, .offset = MTDPART_OFS_APPEND, }, { .name = "initrd", .size = 0x00180000, .offset = MTDPART_OFS_APPEND, }, { .name = "rootfs", .size = 0x700000, .offset = MTDPART_OFS_APPEND, }, { .name = "data", .size = MTDPART_SIZ_FULL, .offset = MTDPART_OFS_APPEND, } }; static struct flash_platform_data hackkit_flash_data = { .map_name = "cfi_probe", .parts = hackkit_partitions, .nr_parts = ARRAY_SIZE(hackkit_partitions), }; static struct resource hackkit_flash_resource = DEFINE_RES_MEM(SA1100_CS0_PHYS, SZ_32M); /* LEDs */ struct gpio_led hackkit_gpio_leds[] = { { .name = "hackkit:red", .default_trigger = "cpu0", .gpio = 22, }, { .name = "hackkit:green", .default_trigger = "heartbeat", .gpio = 23, }, }; static struct gpio_led_platform_data hackkit_gpio_led_info = { .leds = hackkit_gpio_leds, .num_leds = ARRAY_SIZE(hackkit_gpio_leds), }; static struct platform_device hackkit_leds = { .name = "leds-gpio", .id = -1, .dev = { .platform_data = &hackkit_gpio_led_info, } }; static void __init hackkit_init(void) { sa11x0_register_mtd(&hackkit_flash_data, &hackkit_flash_resource, 1); platform_device_register(&hackkit_leds); } /********************************************************************** * Exported Functions */ MACHINE_START(HACKKIT, "HackKit Cpu Board") .atag_offset = 0x100, .map_io = hackkit_map_io, .nr_irqs = SA1100_NR_IRQS, .init_irq = sa1100_init_irq, .init_time = sa1100_timer_init, .init_machine = hackkit_init, .init_late = sa11x0_init_late, .restart = sa11x0_restart, MACHINE_END
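/* With MTDPART_OFS_APPEND each partition in hackkit_partitions starts
 * where the previous one ends, so the absolute flash layout follows from
 * the sizes alone. A small sketch that resolves the offsets the way the
 * MTD core would, using the sizes above and the SZ_32M flash resource as
 * the chip size:
 */
#include <stdio.h>

int main(void)
{
	static const struct { const char *name; unsigned long size; } parts[] = {
		{ "BLOB",   0x00040000 },
		{ "config", 0x00040000 },
		{ "kernel", 0x00100000 },
		{ "initrd", 0x00180000 },
		{ "rootfs", 0x00700000 },
		{ "data",   0 },	/* 0: MTDPART_SIZ_FULL, rest of the chip */
	};
	unsigned long offset = 0, total = 32UL << 20;	/* SZ_32M */
	unsigned int i;

	for (i = 0; i < sizeof(parts) / sizeof(parts[0]); i++) {
		unsigned long size = parts[i].size ? parts[i].size
						   : total - offset;
		printf("%-8s 0x%08lx - 0x%08lx\n", parts[i].name,
		       offset, offset + size);
		offset += size;
	}
	return 0;
}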
gpl-2.0
omnirom/android_kernel_oppo_msm8974
drivers/acpi/acpica/rslist.c
4919
8356
/******************************************************************************* * * Module Name: rslist - Linked list utilities * ******************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acresrc.h" #define _COMPONENT ACPI_RESOURCES ACPI_MODULE_NAME("rslist") /******************************************************************************* * * FUNCTION: acpi_rs_convert_aml_to_resources * * PARAMETERS: acpi_walk_aml_callback * resource_ptr - Pointer to the buffer that will * contain the output structures * * RETURN: Status * * DESCRIPTION: Convert an AML resource to an internal representation of the * resource that is aligned and easier to access. * ******************************************************************************/ acpi_status acpi_rs_convert_aml_to_resources(u8 * aml, u32 length, u32 offset, u8 resource_index, void **context) { struct acpi_resource **resource_ptr = ACPI_CAST_INDIRECT_PTR(struct acpi_resource, context); struct acpi_resource *resource; union aml_resource *aml_resource; struct acpi_rsconvert_info *conversion_table; acpi_status status; ACPI_FUNCTION_TRACE(rs_convert_aml_to_resources); /* * Check that the input buffer and all subsequent pointers into it * are aligned on a native word boundary. 
Most important on IA64 */ resource = *resource_ptr; if (ACPI_IS_MISALIGNED(resource)) { ACPI_WARNING((AE_INFO, "Misaligned resource pointer %p", resource)); } /* Get the appropriate conversion info table */ aml_resource = ACPI_CAST_PTR(union aml_resource, aml); if (acpi_ut_get_resource_type(aml) == ACPI_RESOURCE_NAME_SERIAL_BUS) { if (aml_resource->common_serial_bus.type > AML_RESOURCE_MAX_SERIALBUSTYPE) { conversion_table = NULL; } else { /* This is an I2C, SPI, or UART serial_bus descriptor */ conversion_table = acpi_gbl_convert_resource_serial_bus_dispatch [aml_resource->common_serial_bus.type]; } } else { conversion_table = acpi_gbl_get_resource_dispatch[resource_index]; } if (!conversion_table) { ACPI_ERROR((AE_INFO, "Invalid/unsupported resource descriptor: Type 0x%2.2X", resource_index)); return (AE_AML_INVALID_RESOURCE_TYPE); } /* Convert the AML byte stream resource to a local resource struct */ status = acpi_rs_convert_aml_to_resource(resource, aml_resource, conversion_table); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Could not convert AML resource (Type 0x%X)", *aml)); return_ACPI_STATUS(status); } ACPI_DEBUG_PRINT((ACPI_DB_RESOURCES, "Type %.2X, AmlLength %.2X InternalLength %.2X\n", acpi_ut_get_resource_type(aml), length, resource->length)); /* Point to the next structure in the output buffer */ *resource_ptr = ACPI_NEXT_RESOURCE(resource); return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_rs_convert_resources_to_aml * * PARAMETERS: Resource - Pointer to the resource linked list * aml_size_needed - Calculated size of the byte stream * needed from calling acpi_rs_get_aml_length() * The size of the output_buffer is * guaranteed to be >= aml_size_needed * output_buffer - Pointer to the buffer that will * contain the byte stream * * RETURN: Status * * DESCRIPTION: Takes the resource linked list and parses it, creating a * byte stream of resources in the caller's output buffer * ******************************************************************************/ acpi_status acpi_rs_convert_resources_to_aml(struct acpi_resource *resource, acpi_size aml_size_needed, u8 * output_buffer) { u8 *aml = output_buffer; u8 *end_aml = output_buffer + aml_size_needed; struct acpi_rsconvert_info *conversion_table; acpi_status status; ACPI_FUNCTION_TRACE(rs_convert_resources_to_aml); /* Walk the resource descriptor list, convert each descriptor */ while (aml < end_aml) { /* Validate the (internal) Resource Type */ if (resource->type > ACPI_RESOURCE_TYPE_MAX) { ACPI_ERROR((AE_INFO, "Invalid descriptor type (0x%X) in resource list", resource->type)); return_ACPI_STATUS(AE_BAD_DATA); } /* Perform the conversion */ if (resource->type == ACPI_RESOURCE_TYPE_SERIAL_BUS) { if (resource->data.common_serial_bus.type > AML_RESOURCE_MAX_SERIALBUSTYPE) { conversion_table = NULL; } else { /* This is an I2C, SPI, or UART serial_bus descriptor */ conversion_table = acpi_gbl_convert_resource_serial_bus_dispatch [resource->data.common_serial_bus.type]; } } else { conversion_table = acpi_gbl_set_resource_dispatch[resource->type]; } if (!conversion_table) { ACPI_ERROR((AE_INFO, "Invalid/unsupported resource descriptor: Type 0x%2.2X", resource->type)); return (AE_AML_INVALID_RESOURCE_TYPE); } status = acpi_rs_convert_resource_to_aml(resource, ACPI_CAST_PTR(union aml_resource, aml), conversion_table); if (ACPI_FAILURE(status)) { ACPI_EXCEPTION((AE_INFO, status, "Could not convert resource (type 0x%X) to AML", 
resource->type)); return_ACPI_STATUS(status); } /* Perform final sanity check on the new AML resource descriptor */ status = acpi_ut_validate_resource(ACPI_CAST_PTR (union aml_resource, aml), NULL); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Check for end-of-list, normal exit */ if (resource->type == ACPI_RESOURCE_TYPE_END_TAG) { /* An End Tag indicates the end of the input Resource Template */ return_ACPI_STATUS(AE_OK); } /* * Extract the total length of the new descriptor and set the * Aml to point to the next (output) resource descriptor */ aml += acpi_ut_get_descriptor_length(aml); /* Point to the next input resource descriptor */ resource = ACPI_NEXT_RESOURCE(resource); } /* Completed buffer, but did not find an end_tag resource descriptor */ return_ACPI_STATUS(AE_AML_NO_RESOURCE_END_TAG); }
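/* acpi_rs_convert_resources_to_aml() above walks a list of variable-
 * length descriptors: convert one, advance by the descriptor's own
 * encoded length, and stop at the end-tag entry (or fail if the buffer
 * runs out first). A generic sketch of that walk, assuming a
 * hypothetical two-byte {type, length} header and a placeholder
 * MY_END_TAG type value:
 */
#include <stdint.h>
#include <stddef.h>

enum { MY_END_TAG = 0x79 };	/* placeholder end-of-template type */

struct my_desc {
	uint8_t type;
	uint8_t length;		/* bytes that follow this header */
	uint8_t body[];
};

/* Visit each descriptor; returns 0 if an end tag was found, -1 if the
 * buffer ran out first (mirrors AE_AML_NO_RESOURCE_END_TAG). */
static int walk_descriptors(const uint8_t *buf, size_t len,
			    void (*visit)(const struct my_desc *))
{
	const uint8_t *p = buf, *end = buf + len;

	while (p + sizeof(struct my_desc) <= end) {
		const struct my_desc *d = (const struct my_desc *)p;

		if (d->type == MY_END_TAG)
			return 0;	/* normal exit */
		visit(d);
		p += sizeof(struct my_desc) + d->length;	/* next descriptor */
	}
	return -1;	/* no end tag before the buffer ended */
}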
gpl-2.0
SlimForce/kernel_lge_hammerhead
drivers/acpi/acpica/dsutils.c
4919
25450
/******************************************************************************* * * Module Name: dsutils - Dispatcher utilities * ******************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <acpi/acpi.h> #include "accommon.h" #include "acparser.h" #include "amlcode.h" #include "acdispat.h" #include "acinterp.h" #include "acnamesp.h" #include "acdebug.h" #define _COMPONENT ACPI_DISPATCHER ACPI_MODULE_NAME("dsutils") /******************************************************************************* * * FUNCTION: acpi_ds_clear_implicit_return * * PARAMETERS: walk_state - Current State * * RETURN: None. * * DESCRIPTION: Clear and remove a reference on an implicit return value. Used * to delete "stale" return values (if enabled, the return value * from every operator is saved at least momentarily, in case the * parent method exits.) * ******************************************************************************/ void acpi_ds_clear_implicit_return(struct acpi_walk_state *walk_state) { ACPI_FUNCTION_NAME(ds_clear_implicit_return); /* * Slack must be enabled for this feature */ if (!acpi_gbl_enable_interpreter_slack) { return; } if (walk_state->implicit_return_obj) { /* * Delete any "stale" implicit return. However, in * complex statements, the implicit return value can be * bubbled up several levels. 
*/ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Removing reference on stale implicit return obj %p\n", walk_state->implicit_return_obj)); acpi_ut_remove_reference(walk_state->implicit_return_obj); walk_state->implicit_return_obj = NULL; } } #ifndef ACPI_NO_METHOD_EXECUTION /******************************************************************************* * * FUNCTION: acpi_ds_do_implicit_return * * PARAMETERS: return_desc - The return value * walk_state - Current State * add_reference - True if a reference should be added to the * return object * * RETURN: TRUE if implicit return enabled, FALSE otherwise * * DESCRIPTION: Implements the optional "implicit return". We save the result * of every ASL operator and control method invocation in case the * parent method exit. Before storing a new return value, we * delete the previous return value. * ******************************************************************************/ u8 acpi_ds_do_implicit_return(union acpi_operand_object *return_desc, struct acpi_walk_state *walk_state, u8 add_reference) { ACPI_FUNCTION_NAME(ds_do_implicit_return); /* * Slack must be enabled for this feature, and we must * have a valid return object */ if ((!acpi_gbl_enable_interpreter_slack) || (!return_desc)) { return (FALSE); } ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Result %p will be implicitly returned; Prev=%p\n", return_desc, walk_state->implicit_return_obj)); /* * Delete any "stale" implicit return value first. However, in * complex statements, the implicit return value can be * bubbled up several levels, so we don't clear the value if it * is the same as the return_desc. */ if (walk_state->implicit_return_obj) { if (walk_state->implicit_return_obj == return_desc) { return (TRUE); } acpi_ds_clear_implicit_return(walk_state); } /* Save the implicit return value, add a reference if requested */ walk_state->implicit_return_obj = return_desc; if (add_reference) { acpi_ut_add_reference(return_desc); } return (TRUE); } /******************************************************************************* * * FUNCTION: acpi_ds_is_result_used * * PARAMETERS: Op - Current Op * walk_state - Current State * * RETURN: TRUE if result is used, FALSE otherwise * * DESCRIPTION: Check if a result object will be used by the parent * ******************************************************************************/ u8 acpi_ds_is_result_used(union acpi_parse_object * op, struct acpi_walk_state * walk_state) { const struct acpi_opcode_info *parent_info; ACPI_FUNCTION_TRACE_PTR(ds_is_result_used, op); /* Must have both an Op and a Result Object */ if (!op) { ACPI_ERROR((AE_INFO, "Null Op")); return_UINT8(TRUE); } /* * We know that this operator is not a * Return() operator (would not come here.) The following code is the * optional support for a so-called "implicit return". Some AML code * assumes that the last value of the method is "implicitly" returned * to the caller. Just save the last result as the return value. * NOTE: this is optional because the ASL language does not actually * support this behavior. */ (void)acpi_ds_do_implicit_return(walk_state->result_obj, walk_state, TRUE); /* * Now determine if the parent will use the result * * If there is no parent, or the parent is a scope_op, we are executing * at the method level. An executing method typically has no parent, * since each method is parsed separately. A method invoked externally * via execute_control_method has a scope_op as the parent. 
*/ if ((!op->common.parent) || (op->common.parent->common.aml_opcode == AML_SCOPE_OP)) { /* No parent, the return value cannot possibly be used */ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "At Method level, result of [%s] not used\n", acpi_ps_get_opcode_name(op->common. aml_opcode))); return_UINT8(FALSE); } /* Get info on the parent. The root_op is AML_SCOPE */ parent_info = acpi_ps_get_opcode_info(op->common.parent->common.aml_opcode); if (parent_info->class == AML_CLASS_UNKNOWN) { ACPI_ERROR((AE_INFO, "Unknown parent opcode Op=%p", op)); return_UINT8(FALSE); } /* * Decide what to do with the result based on the parent. If * the parent opcode will not use the result, delete the object. * Otherwise leave it as is, it will be deleted when it is used * as an operand later. */ switch (parent_info->class) { case AML_CLASS_CONTROL: switch (op->common.parent->common.aml_opcode) { case AML_RETURN_OP: /* Never delete the return value associated with a return opcode */ goto result_used; case AML_IF_OP: case AML_WHILE_OP: /* * If we are executing the predicate AND this is the predicate op, * we will use the return value */ if ((walk_state->control_state->common.state == ACPI_CONTROL_PREDICATE_EXECUTING) && (walk_state->control_state->control. predicate_op == op)) { goto result_used; } break; default: /* Ignore other control opcodes */ break; } /* The general control opcode returns no result */ goto result_not_used; case AML_CLASS_CREATE: /* * These opcodes allow term_arg(s) as operands and therefore * the operands can be method calls. The result is used. */ goto result_used; case AML_CLASS_NAMED_OBJECT: if ((op->common.parent->common.aml_opcode == AML_REGION_OP) || (op->common.parent->common.aml_opcode == AML_DATA_REGION_OP) || (op->common.parent->common.aml_opcode == AML_PACKAGE_OP) || (op->common.parent->common.aml_opcode == AML_VAR_PACKAGE_OP) || (op->common.parent->common.aml_opcode == AML_BUFFER_OP) || (op->common.parent->common.aml_opcode == AML_INT_EVAL_SUBTREE_OP) || (op->common.parent->common.aml_opcode == AML_BANK_FIELD_OP)) { /* * These opcodes allow term_arg(s) as operands and therefore * the operands can be method calls. The result is used. */ goto result_used; } goto result_not_used; default: /* * In all other cases. the parent will actually use the return * object, so keep it. */ goto result_used; } result_used: ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Result of [%s] used by Parent [%s] Op=%p\n", acpi_ps_get_opcode_name(op->common.aml_opcode), acpi_ps_get_opcode_name(op->common.parent->common. aml_opcode), op)); return_UINT8(TRUE); result_not_used: ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Result of [%s] not used by Parent [%s] Op=%p\n", acpi_ps_get_opcode_name(op->common.aml_opcode), acpi_ps_get_opcode_name(op->common.parent->common. aml_opcode), op)); return_UINT8(FALSE); } /******************************************************************************* * * FUNCTION: acpi_ds_delete_result_if_not_used * * PARAMETERS: Op - Current parse Op * result_obj - Result of the operation * walk_state - Current state * * RETURN: Status * * DESCRIPTION: Used after interpretation of an opcode. If there is an internal * result descriptor, check if the parent opcode will actually use * this result. If not, delete the result now so that it will * not become orphaned. 
* ******************************************************************************/ void acpi_ds_delete_result_if_not_used(union acpi_parse_object *op, union acpi_operand_object *result_obj, struct acpi_walk_state *walk_state) { union acpi_operand_object *obj_desc; acpi_status status; ACPI_FUNCTION_TRACE_PTR(ds_delete_result_if_not_used, result_obj); if (!op) { ACPI_ERROR((AE_INFO, "Null Op")); return_VOID; } if (!result_obj) { return_VOID; } if (!acpi_ds_is_result_used(op, walk_state)) { /* Must pop the result stack (obj_desc should be equal to result_obj) */ status = acpi_ds_result_pop(&obj_desc, walk_state); if (ACPI_SUCCESS(status)) { acpi_ut_remove_reference(result_obj); } } return_VOID; } /******************************************************************************* * * FUNCTION: acpi_ds_resolve_operands * * PARAMETERS: walk_state - Current walk state with operands on stack * * RETURN: Status * * DESCRIPTION: Resolve all operands to their values. Used to prepare * arguments to a control method invocation (a call from one * method to another.) * ******************************************************************************/ acpi_status acpi_ds_resolve_operands(struct acpi_walk_state *walk_state) { u32 i; acpi_status status = AE_OK; ACPI_FUNCTION_TRACE_PTR(ds_resolve_operands, walk_state); /* * Attempt to resolve each of the valid operands * Method arguments are passed by reference, not by value. This means * that the actual objects are passed, not copies of the objects. */ for (i = 0; i < walk_state->num_operands; i++) { status = acpi_ex_resolve_to_value(&walk_state->operands[i], walk_state); if (ACPI_FAILURE(status)) { break; } } return_ACPI_STATUS(status); } /******************************************************************************* * * FUNCTION: acpi_ds_clear_operands * * PARAMETERS: walk_state - Current walk state with operands on stack * * RETURN: None * * DESCRIPTION: Clear all operands on the current walk state operand stack. * ******************************************************************************/ void acpi_ds_clear_operands(struct acpi_walk_state *walk_state) { u32 i; ACPI_FUNCTION_TRACE_PTR(ds_clear_operands, walk_state); /* Remove a reference on each operand on the stack */ for (i = 0; i < walk_state->num_operands; i++) { /* * Remove a reference to all operands, including both * "Arguments" and "Targets". */ acpi_ut_remove_reference(walk_state->operands[i]); walk_state->operands[i] = NULL; } walk_state->num_operands = 0; return_VOID; } #endif /******************************************************************************* * * FUNCTION: acpi_ds_create_operand * * PARAMETERS: walk_state - Current walk state * Arg - Parse object for the argument * arg_index - Which argument (zero based) * * RETURN: Status * * DESCRIPTION: Translate a parse tree object that is an argument to an AML * opcode to the equivalent interpreter object. This may include * looking up a name or entering a new name into the internal * namespace. 
* ******************************************************************************/ acpi_status acpi_ds_create_operand(struct acpi_walk_state *walk_state, union acpi_parse_object *arg, u32 arg_index) { acpi_status status = AE_OK; char *name_string; u32 name_length; union acpi_operand_object *obj_desc; union acpi_parse_object *parent_op; u16 opcode; acpi_interpreter_mode interpreter_mode; const struct acpi_opcode_info *op_info; ACPI_FUNCTION_TRACE_PTR(ds_create_operand, arg); /* A valid name must be looked up in the namespace */ if ((arg->common.aml_opcode == AML_INT_NAMEPATH_OP) && (arg->common.value.string) && !(arg->common.flags & ACPI_PARSEOP_IN_STACK)) { ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Getting a name: Arg=%p\n", arg)); /* Get the entire name string from the AML stream */ status = acpi_ex_get_name_string(ACPI_TYPE_ANY, arg->common.value.buffer, &name_string, &name_length); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* All prefixes have been handled, and the name is in name_string */ /* * Special handling for buffer_field declarations. This is a deferred * opcode that unfortunately defines the field name as the last * parameter instead of the first. We get here when we are performing * the deferred execution, so the actual name of the field is already * in the namespace. We don't want to attempt to look it up again * because we may be executing in a different scope than where the * actual opcode exists. */ if ((walk_state->deferred_node) && (walk_state->deferred_node->type == ACPI_TYPE_BUFFER_FIELD) && (arg_index == (u32) ((walk_state->opcode == AML_CREATE_FIELD_OP) ? 3 : 2))) { obj_desc = ACPI_CAST_PTR(union acpi_operand_object, walk_state->deferred_node); status = AE_OK; } else { /* All other opcodes */ /* * Differentiate between a namespace "create" operation * and a "lookup" operation (IMODE_LOAD_PASS2 vs. * IMODE_EXECUTE) in order to support the creation of * namespace objects during the execution of control methods. */ parent_op = arg->common.parent; op_info = acpi_ps_get_opcode_info(parent_op->common. aml_opcode); if ((op_info->flags & AML_NSNODE) && (parent_op->common.aml_opcode != AML_INT_METHODCALL_OP) && (parent_op->common.aml_opcode != AML_REGION_OP) && (parent_op->common.aml_opcode != AML_INT_NAMEPATH_OP)) { /* Enter name into namespace if not found */ interpreter_mode = ACPI_IMODE_LOAD_PASS2; } else { /* Return a failure if name not found */ interpreter_mode = ACPI_IMODE_EXECUTE; } status = acpi_ns_lookup(walk_state->scope_info, name_string, ACPI_TYPE_ANY, interpreter_mode, ACPI_NS_SEARCH_PARENT | ACPI_NS_DONT_OPEN_SCOPE, walk_state, ACPI_CAST_INDIRECT_PTR(struct acpi_namespace_node, &obj_desc)); /* * The only case where we pass through (ignore) a NOT_FOUND * error is for the cond_ref_of opcode.
*/ if (status == AE_NOT_FOUND) { if (parent_op->common.aml_opcode == AML_COND_REF_OF_OP) { /* * For the Conditional Reference op, it's OK if * the name is not found; we just need a way to * indicate this to the interpreter, so set the * object to the root */ obj_desc = ACPI_CAST_PTR(union acpi_operand_object, acpi_gbl_root_node); status = AE_OK; } else { /* * We just plain didn't find it -- which is a * very serious error at this point */ status = AE_AML_NAME_NOT_FOUND; } } if (ACPI_FAILURE(status)) { ACPI_ERROR_NAMESPACE(name_string, status); } } /* Free the namestring created above */ ACPI_FREE(name_string); /* Check status from the lookup */ if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Put the resulting object onto the current object stack */ status = acpi_ds_obj_stack_push(obj_desc, walk_state); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } ACPI_DEBUGGER_EXEC(acpi_db_display_argument_object (obj_desc, walk_state)); } else { /* Check for null name case */ if ((arg->common.aml_opcode == AML_INT_NAMEPATH_OP) && !(arg->common.flags & ACPI_PARSEOP_IN_STACK)) { /* * If the name is null, this means that this is an * optional result parameter that was not specified * in the original ASL. Create a Zero Constant for a * placeholder. (Store to a constant is a Noop.) */ opcode = AML_ZERO_OP; /* Has no arguments! */ ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Null namepath: Arg=%p\n", arg)); } else { opcode = arg->common.aml_opcode; } /* Get the object type of the argument */ op_info = acpi_ps_get_opcode_info(opcode); if (op_info->object_type == ACPI_TYPE_INVALID) { return_ACPI_STATUS(AE_NOT_IMPLEMENTED); } if ((op_info->flags & AML_HAS_RETVAL) || (arg->common.flags & ACPI_PARSEOP_IN_STACK)) { ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Argument previously created, already stacked\n")); ACPI_DEBUGGER_EXEC(acpi_db_display_argument_object (walk_state-> operands[walk_state->num_operands - 1], walk_state)); /* * Use the value previously returned by the * evaluation of this argument */ status = acpi_ds_result_pop(&obj_desc, walk_state); if (ACPI_FAILURE(status)) { /* * The only error is underflow, and this indicates * a missing or null operand! */ ACPI_EXCEPTION((AE_INFO, status, "Missing or null operand")); return_ACPI_STATUS(status); } } else { /* Create an ACPI_INTERNAL_OBJECT for the argument */ obj_desc = acpi_ut_create_internal_object(op_info-> object_type); if (!obj_desc) { return_ACPI_STATUS(AE_NO_MEMORY); } /* Initialize the new object */ status = acpi_ds_init_object_from_op(walk_state, arg, opcode, &obj_desc); if (ACPI_FAILURE(status)) { acpi_ut_delete_object_desc(obj_desc); return_ACPI_STATUS(status); } } /* Put the operand object on the object stack */ status = acpi_ds_obj_stack_push(obj_desc, walk_state); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } ACPI_DEBUGGER_EXEC(acpi_db_display_argument_object (obj_desc, walk_state)); } return_ACPI_STATUS(AE_OK); } /******************************************************************************* * * FUNCTION: acpi_ds_create_operands * * PARAMETERS: walk_state - Current state * first_arg - First argument of a parser argument tree * * RETURN: Status * * DESCRIPTION: Convert an operator's arguments from a parse tree format to * namespace objects and place those argument objects on the object * stack in preparation for evaluation by the interpreter.
* ******************************************************************************/ acpi_status acpi_ds_create_operands(struct acpi_walk_state *walk_state, union acpi_parse_object *first_arg) { acpi_status status = AE_OK; union acpi_parse_object *arg; union acpi_parse_object *arguments[ACPI_OBJ_NUM_OPERANDS]; u32 arg_count = 0; u32 index = walk_state->num_operands; u32 i; ACPI_FUNCTION_TRACE_PTR(ds_create_operands, first_arg); /* Get all arguments in the list */ arg = first_arg; while (arg) { if (index >= ACPI_OBJ_NUM_OPERANDS) { return_ACPI_STATUS(AE_BAD_DATA); } arguments[index] = arg; walk_state->operands[index] = NULL; /* Move on to next argument, if any */ arg = arg->common.next; arg_count++; index++; } index--; /* Objects must be fetched from the Result stack in reverse order */ for (i = 0; i < arg_count; i++) { arg = arguments[index]; /* Force the filling of the operand stack in inverse order */ walk_state->operand_index = (u8) index; status = acpi_ds_create_operand(walk_state, arg, index); if (ACPI_FAILURE(status)) { goto cleanup; } index--; ACPI_DEBUG_PRINT((ACPI_DB_DISPATCH, "Arg #%u (%p) done, Arg1=%p\n", index, arg, first_arg)); } return_ACPI_STATUS(status); cleanup: /* * We must undo everything done above; meaning that we must * pop everything off of the operand stack and delete those * objects */ acpi_ds_obj_stack_pop_and_delete(arg_count, walk_state); ACPI_EXCEPTION((AE_INFO, status, "While creating Arg %u", index)); return_ACPI_STATUS(status); } /***************************************************************************** * * FUNCTION: acpi_ds_evaluate_name_path * * PARAMETERS: walk_state - Current state of the parse tree walk; * the opcode of the current operation should be * AML_INT_NAMEPATH_OP * * RETURN: Status * * DESCRIPTION: Translate the -name_path- parse tree object to the equivalent * interpreter object, convert it to a value, if needed, duplicate * it, if needed, and push it onto the current result stack. * ****************************************************************************/ acpi_status acpi_ds_evaluate_name_path(struct acpi_walk_state *walk_state) { acpi_status status = AE_OK; union acpi_parse_object *op = walk_state->op; union acpi_operand_object **operand = &walk_state->operands[0]; union acpi_operand_object *new_obj_desc; u8 type; ACPI_FUNCTION_TRACE_PTR(ds_evaluate_name_path, walk_state); if (!op->common.parent) { /* This happens after certain exception processing */ goto exit; } if ((op->common.parent->common.aml_opcode == AML_PACKAGE_OP) || (op->common.parent->common.aml_opcode == AML_VAR_PACKAGE_OP) || (op->common.parent->common.aml_opcode == AML_REF_OF_OP)) { /* TBD: Should we specify this feature as a bit of op_info->Flags of these opcodes? */ goto exit; } status = acpi_ds_create_operand(walk_state, op, 0); if (ACPI_FAILURE(status)) { goto exit; } if (op->common.flags & ACPI_PARSEOP_TARGET) { new_obj_desc = *operand; goto push_result; } type = (*operand)->common.type; status = acpi_ex_resolve_to_value(operand, walk_state); if (ACPI_FAILURE(status)) { goto exit; } if (type == ACPI_TYPE_INTEGER) { /* It was incremented by acpi_ex_resolve_to_value */ acpi_ut_remove_reference(*operand); status = acpi_ut_copy_iobject_to_iobject(*operand, &new_obj_desc, walk_state); if (ACPI_FAILURE(status)) { goto exit; } } else { /* * The object either was newly created or is * a Namespace node - don't decrement it.
*/ new_obj_desc = *operand; } /* Cleanup for name-path operand */ status = acpi_ds_obj_stack_pop(1, walk_state); if (ACPI_FAILURE(status)) { walk_state->result_obj = new_obj_desc; goto exit; } push_result: walk_state->result_obj = new_obj_desc; status = acpi_ds_result_push(walk_state->result_obj, walk_state); if (ACPI_SUCCESS(status)) { /* Force to take it from stack */ op->common.flags |= ACPI_PARSEOP_IN_STACK; } exit: return_ACPI_STATUS(status); }
gpl-2.0
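Editor's note: the dsutils.c record above pops an opcode's result off a result stack and releases it when the parent opcode will never consume it, so the object does not become orphaned. A minimal user-space sketch of that pop-and-release pattern follows, assuming nothing from ACPICA; every name here (object, result_stack, object_put, delete_result_if_not_used) is illustrative, not kernel API.

#include <stdio.h>
#include <stdlib.h>

struct object {
	int refcount;
	const char *name;
};

struct result_stack {
	struct object *items[8];
	int top;
};

/* Drop one reference; free the object once nobody holds it */
static void object_put(struct object *obj)
{
	if (--obj->refcount == 0) {
		printf("deleting orphaned result '%s'\n", obj->name);
		free(obj);
	}
}

static struct object *result_pop(struct result_stack *s)
{
	return s->top > 0 ? s->items[--s->top] : NULL;
}

/* Analogue of acpi_ds_delete_result_if_not_used above: pop the result
 * and release it only when the parent op will not consume it. */
static void delete_result_if_not_used(struct result_stack *s, int result_used)
{
	struct object *obj = result_pop(s);

	if (obj && !result_used)
		object_put(obj);
}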
srsdanitest/swingacera9
drivers/acpi/acpica/evxfregn.c
4919
9312
/****************************************************************************** * * Module Name: evxfregn - External Interfaces, ACPI Operation Regions and * Address Spaces. * *****************************************************************************/ /* * Copyright (C) 2000 - 2012, Intel Corp. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions, and the following disclaimer, * without modification. * 2. Redistributions in binary form must reproduce at minimum a disclaimer * substantially similar to the "NO WARRANTY" disclaimer below * ("Disclaimer") and any redistribution must be conditioned upon * including a substantially similar Disclaimer requirement for further * binary redistribution. * 3. Neither the names of the above-listed copyright holders nor the names * of any contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * * Alternatively, this software may be distributed under the terms of the * GNU General Public License ("GPL") version 2 as published by the Free * Software Foundation. * * NO WARRANTY * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE * POSSIBILITY OF SUCH DAMAGES. */ #include <linux/export.h> #include <acpi/acpi.h> #include "accommon.h" #include "acnamesp.h" #include "acevents.h" #define _COMPONENT ACPI_EVENTS ACPI_MODULE_NAME("evxfregn") /******************************************************************************* * * FUNCTION: acpi_install_address_space_handler * * PARAMETERS: Device - Handle for the device * space_id - The address space ID * Handler - Address of the handler * Setup - Address of the setup function * Context - Value passed to the handler on each access * * RETURN: Status * * DESCRIPTION: Install a handler for all op_regions of a given space_id. * * NOTE: This function should only be called after acpi_enable_subsystem has * been called. This is because any _REG methods associated with the Space ID * are executed here, and these methods can only be safely executed after * the default handlers have been installed and the hardware has been * initialized (via acpi_enable_subsystem.) 
* ******************************************************************************/ acpi_status acpi_install_address_space_handler(acpi_handle device, acpi_adr_space_type space_id, acpi_adr_space_handler handler, acpi_adr_space_setup setup, void *context) { struct acpi_namespace_node *node; acpi_status status; ACPI_FUNCTION_TRACE(acpi_install_address_space_handler); /* Parameter validation */ if (!device) { return_ACPI_STATUS(AE_BAD_PARAMETER); } status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Convert and validate the device handle */ node = acpi_ns_validate_handle(device); if (!node) { status = AE_BAD_PARAMETER; goto unlock_and_exit; } /* Install the handler for all Regions for this Space ID */ status = acpi_ev_install_space_handler(node, space_id, handler, setup, context); if (ACPI_FAILURE(status)) { goto unlock_and_exit; } /* * For the default space_iDs (the IDs for which there are default region handlers * installed), only execute the _REG methods if the global initialization _REG * methods have already been run (via acpi_initialize_objects). In other words, * we will defer the execution of the _REG methods for these space_iDs until * execution of acpi_initialize_objects. This is done because we need the handlers * for the default spaces (mem/io/pci/table) to be installed before we can run * any control methods (or _REG methods). There is known BIOS code that depends * on this. * * For all other space_iDs, we can safely execute the _REG methods immediately. * This means that for IDs like embedded_controller, this function should be called * only after acpi_enable_subsystem has been called. */ switch (space_id) { case ACPI_ADR_SPACE_SYSTEM_MEMORY: case ACPI_ADR_SPACE_SYSTEM_IO: case ACPI_ADR_SPACE_PCI_CONFIG: case ACPI_ADR_SPACE_DATA_TABLE: if (!acpi_gbl_reg_methods_executed) { /* We will defer execution of the _REG methods for this space */ goto unlock_and_exit; } break; default: break; } /* Run all _REG methods for this address space */ status = acpi_ev_execute_reg_methods(node, space_id); unlock_and_exit: (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_install_address_space_handler) /******************************************************************************* * * FUNCTION: acpi_remove_address_space_handler * * PARAMETERS: Device - Handle for the device * space_id - The address space ID * Handler - Address of the handler * * RETURN: Status * * DESCRIPTION: Remove a previously installed handler.
* ******************************************************************************/ acpi_status acpi_remove_address_space_handler(acpi_handle device, acpi_adr_space_type space_id, acpi_adr_space_handler handler) { union acpi_operand_object *obj_desc; union acpi_operand_object *handler_obj; union acpi_operand_object *region_obj; union acpi_operand_object **last_obj_ptr; struct acpi_namespace_node *node; acpi_status status; ACPI_FUNCTION_TRACE(acpi_remove_address_space_handler); /* Parameter validation */ if (!device) { return_ACPI_STATUS(AE_BAD_PARAMETER); } status = acpi_ut_acquire_mutex(ACPI_MTX_NAMESPACE); if (ACPI_FAILURE(status)) { return_ACPI_STATUS(status); } /* Convert and validate the device handle */ node = acpi_ns_validate_handle(device); if (!node || ((node->type != ACPI_TYPE_DEVICE) && (node->type != ACPI_TYPE_PROCESSOR) && (node->type != ACPI_TYPE_THERMAL) && (node != acpi_gbl_root_node))) { status = AE_BAD_PARAMETER; goto unlock_and_exit; } /* Make sure the internal object exists */ obj_desc = acpi_ns_get_attached_object(node); if (!obj_desc) { status = AE_NOT_EXIST; goto unlock_and_exit; } /* Find the address handler the user requested */ handler_obj = obj_desc->device.handler; last_obj_ptr = &obj_desc->device.handler; while (handler_obj) { /* We have a handler, see if user requested this one */ if (handler_obj->address_space.space_id == space_id) { /* Handler must be the same as the installed handler */ if (handler_obj->address_space.handler != handler) { status = AE_BAD_PARAMETER; goto unlock_and_exit; } /* Matched space_id, first dereference this in the Regions */ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, "Removing address handler %p(%p) for region %s " "on Device %p(%p)\n", handler_obj, handler, acpi_ut_get_region_name(space_id), node, obj_desc)); region_obj = handler_obj->address_space.region_list; /* Walk the handler's region list */ while (region_obj) { /* * First disassociate the handler from the region. * * NOTE: this doesn't mean that the region goes away. * The region is just inaccessible, as indicated to * the _REG method */ acpi_ev_detach_region(region_obj, TRUE); /* * Walk the list: just grab the head because the * detach_region removed the previous head. */ region_obj = handler_obj->address_space.region_list; } /* Remove this Handler object from the list */ *last_obj_ptr = handler_obj->address_space.next; /* Now we can delete the handler object */ acpi_ut_remove_reference(handler_obj); goto unlock_and_exit; } /* Walk the linked list of handlers */ last_obj_ptr = &handler_obj->address_space.next; handler_obj = handler_obj->address_space.next; } /* The handler does not exist */ ACPI_DEBUG_PRINT((ACPI_DB_OPREGION, "Unable to remove address handler %p for %s(%X), DevNode %p, obj %p\n", handler, acpi_ut_get_region_name(space_id), space_id, node, obj_desc)); status = AE_NOT_EXIST; unlock_and_exit: (void)acpi_ut_release_mutex(ACPI_MTX_NAMESPACE); return_ACPI_STATUS(status); } ACPI_EXPORT_SYMBOL(acpi_remove_address_space_handler)
gpl-2.0
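Editor's note: acpi_remove_address_space_handler above walks the handler list with last_obj_ptr, a pointer-to-pointer, so the matched node can be spliced out without tracking a separate "previous" node. A stand-alone sketch of that unlink pattern, with hypothetical names (struct handler, unlink_handler) and no ACPICA dependencies:

#include <stddef.h>

struct handler {
	int space_id;
	struct handler *next;
};

/* Walk with a pointer to the "next" slot; when a node matches,
 * overwrite that slot to splice the node out of the list. */
static struct handler *unlink_handler(struct handler **head, int space_id)
{
	struct handler **last = head;
	struct handler *cur = *head;

	while (cur) {
		if (cur->space_id == space_id) {
			*last = cur->next;	/* splice the node out */
			return cur;		/* caller drops the reference */
		}
		last = &cur->next;
		cur = cur->next;
	}
	return NULL;				/* AE_NOT_EXIST analogue */
}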
atilag/flatfish-kernel
arch/arm/mach-sa1100/leds.c
5175
1171
/* * linux/arch/arm/mach-sa1100/leds.c * * SA1100 LEDs dispatcher * * Copyright (C) 2001 Nicolas Pitre */ #include <linux/compiler.h> #include <linux/init.h> #include <asm/leds.h> #include <asm/mach-types.h> #include "leds.h" static int __init sa1100_leds_init(void) { if (machine_is_assabet()) leds_event = assabet_leds_event; if (machine_is_consus()) leds_event = consus_leds_event; if (machine_is_badge4()) leds_event = badge4_leds_event; if (machine_is_brutus()) leds_event = brutus_leds_event; if (machine_is_cerf()) leds_event = cerf_leds_event; if (machine_is_flexanet()) leds_event = flexanet_leds_event; if (machine_is_graphicsclient()) leds_event = graphicsclient_leds_event; if (machine_is_hackkit()) leds_event = hackkit_leds_event; if (machine_is_lart()) leds_event = lart_leds_event; if (machine_is_pfs168()) leds_event = pfs168_leds_event; if (machine_is_graphicsmaster()) leds_event = graphicsmaster_leds_event; if (machine_is_adsbitsy()) leds_event = adsbitsy_leds_event; if (machine_is_pt_system3()) leds_event = system3_leds_event; leds_event(led_start); return 0; } core_initcall(sa1100_leds_init);
gpl-2.0
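Editor's note: sa1100_leds_init above probes the machine type once at startup and binds a board-specific handler through the single leds_event function pointer, so later callers stay board-agnostic. A minimal user-space sketch of that dispatch pattern; the board names and is_board_a() probe are illustrative stand-ins for the machine_is_*() helpers, not kernel API.

#include <stdio.h>

enum led_event { LED_START, LED_STOP };

static void board_a_leds(enum led_event evt) { printf("board A: %d\n", evt); }
static void board_b_leds(enum led_event evt) { printf("board B: %d\n", evt); }

/* Single dispatch point, bound once at init */
static void (*leds_event)(enum led_event);

static int is_board_a(void) { return 1; }	/* stand-in for machine_is_*() */

int main(void)
{
	leds_event = is_board_a() ? board_a_leds : board_b_leds;
	leds_event(LED_START);			/* callers never test the board again */
	return 0;
}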
mathkid95/linux_lg_jb
fs/9p/fid.c
5175
7287
/* * V9FS FID Management * * Copyright (C) 2007 by Latchesar Ionkov <lucho@ionkov.net> * Copyright (C) 2005, 2006 by Eric Van Hensbergen <ericvh@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to: * Free Software Foundation * 51 Franklin Street, Fifth Floor * Boston, MA 02111-1301 USA * */ #include <linux/module.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/slab.h> #include <linux/sched.h> #include <linux/idr.h> #include <net/9p/9p.h> #include <net/9p/client.h> #include "v9fs.h" #include "v9fs_vfs.h" #include "fid.h" /** * v9fs_fid_add - add a fid to a dentry * @dentry: dentry that the fid is being added to * @fid: fid to add * */ int v9fs_fid_add(struct dentry *dentry, struct p9_fid *fid) { struct v9fs_dentry *dent; p9_debug(P9_DEBUG_VFS, "fid %d dentry %s\n", fid->fid, dentry->d_name.name); dent = dentry->d_fsdata; if (!dent) { dent = kmalloc(sizeof(struct v9fs_dentry), GFP_KERNEL); if (!dent) return -ENOMEM; spin_lock_init(&dent->lock); INIT_LIST_HEAD(&dent->fidlist); dentry->d_fsdata = dent; } spin_lock(&dent->lock); list_add(&fid->dlist, &dent->fidlist); spin_unlock(&dent->lock); return 0; } /** * v9fs_fid_find - retrieve a fid that belongs to the specified uid * @dentry: dentry to look for fid in * @uid: return fid that belongs to the specified user * @any: if non-zero, return any fid associated with the dentry * */ static struct p9_fid *v9fs_fid_find(struct dentry *dentry, u32 uid, int any) { struct v9fs_dentry *dent; struct p9_fid *fid, *ret; p9_debug(P9_DEBUG_VFS, " dentry: %s (%p) uid %d any %d\n", dentry->d_name.name, dentry, uid, any); dent = (struct v9fs_dentry *) dentry->d_fsdata; ret = NULL; if (dent) { spin_lock(&dent->lock); list_for_each_entry(fid, &dent->fidlist, dlist) { if (any || fid->uid == uid) { ret = fid; break; } } spin_unlock(&dent->lock); } return ret; } /* * We need to hold v9ses->rename_sem as long as we hold references * to the returned path array. Array elements contain pointers to * dentry names. */ static int build_path_from_dentry(struct v9fs_session_info *v9ses, struct dentry *dentry, char ***names) { int n = 0, i; char **wnames; struct dentry *ds; for (ds = dentry; !IS_ROOT(ds); ds = ds->d_parent) n++; wnames = kmalloc(sizeof(char *) * n, GFP_KERNEL); if (!wnames) goto err_out; for (ds = dentry, i = (n-1); i >= 0; i--, ds = ds->d_parent) wnames[i] = (char *)ds->d_name.name; *names = wnames; return n; err_out: return -ENOMEM; } static struct p9_fid *v9fs_fid_lookup_with_uid(struct dentry *dentry, uid_t uid, int any) { struct dentry *ds; char **wnames, *uname; int i, n, l, clone, access; struct v9fs_session_info *v9ses; struct p9_fid *fid, *old_fid = NULL; v9ses = v9fs_dentry2v9ses(dentry); access = v9ses->flags & V9FS_ACCESS_MASK; fid = v9fs_fid_find(dentry, uid, any); if (fid) return fid; /* * We don't have a matching fid. To do a TWALK we need the * parent fid. We need to prevent rename when we want to * look at the parent.
*/ down_read(&v9ses->rename_sem); ds = dentry->d_parent; fid = v9fs_fid_find(ds, uid, any); if (fid) { /* Found the parent fid; do a lookup with that */ fid = p9_client_walk(fid, 1, (char **)&dentry->d_name.name, 1); goto fid_out; } up_read(&v9ses->rename_sem); /* start from the root and try to do a lookup */ fid = v9fs_fid_find(dentry->d_sb->s_root, uid, any); if (!fid) { /* the user is not attached to the fs yet */ if (access == V9FS_ACCESS_SINGLE) return ERR_PTR(-EPERM); if (v9fs_proto_dotu(v9ses) || v9fs_proto_dotl(v9ses)) uname = NULL; else uname = v9ses->uname; fid = p9_client_attach(v9ses->clnt, NULL, uname, uid, v9ses->aname); if (IS_ERR(fid)) return fid; v9fs_fid_add(dentry->d_sb->s_root, fid); } /* If we are root ourself just return that */ if (dentry->d_sb->s_root == dentry) return fid; /* * Do a multipath walk with attached root. * When walking the parent we need to make sure we * don't have a parallel rename happening */ down_read(&v9ses->rename_sem); n = build_path_from_dentry(v9ses, dentry, &wnames); if (n < 0) { fid = ERR_PTR(n); goto err_out; } clone = 1; i = 0; while (i < n) { l = min(n - i, P9_MAXWELEM); /* * We need to hold the rename lock when doing a multipath * walk to ensure none of the path components change */ fid = p9_client_walk(fid, l, &wnames[i], clone); if (IS_ERR(fid)) { if (old_fid) { /* * If we fail, clunk the fids that map to * intermediate path components, not the last * component of the path. */ p9_client_clunk(old_fid); } kfree(wnames); goto err_out; } old_fid = fid; i += l; clone = 0; } kfree(wnames); fid_out: if (!IS_ERR(fid)) v9fs_fid_add(dentry, fid); err_out: up_read(&v9ses->rename_sem); return fid; } /** * v9fs_fid_lookup - lookup for a fid, try to walk if not found * @dentry: dentry to look for fid in * * Look for a fid in the specified dentry for the current user. * If no fid is found, try to create one walking from a fid from the parent * dentry (if it has one), or the root dentry. If the user hasn't accessed * the fs yet, attach now and walk from the root. */ struct p9_fid *v9fs_fid_lookup(struct dentry *dentry) { uid_t uid; int any, access; struct v9fs_session_info *v9ses; v9ses = v9fs_dentry2v9ses(dentry); access = v9ses->flags & V9FS_ACCESS_MASK; switch (access) { case V9FS_ACCESS_SINGLE: case V9FS_ACCESS_USER: case V9FS_ACCESS_CLIENT: uid = current_fsuid(); any = 0; break; case V9FS_ACCESS_ANY: uid = v9ses->uid; any = 1; break; default: uid = ~0; any = 0; break; } return v9fs_fid_lookup_with_uid(dentry, uid, any); } struct p9_fid *v9fs_fid_clone(struct dentry *dentry) { struct p9_fid *fid, *ret; fid = v9fs_fid_lookup(dentry); if (IS_ERR(fid)) return fid; ret = p9_client_walk(fid, 0, NULL, 1); return ret; } static struct p9_fid *v9fs_fid_clone_with_uid(struct dentry *dentry, uid_t uid) { struct p9_fid *fid, *ret; fid = v9fs_fid_lookup_with_uid(dentry, uid, 0); if (IS_ERR(fid)) return fid; ret = p9_client_walk(fid, 0, NULL, 1); return ret; } struct p9_fid *v9fs_writeback_fid(struct dentry *dentry) { int err; struct p9_fid *fid; fid = v9fs_fid_clone_with_uid(dentry, 0); if (IS_ERR(fid)) goto error_out; /* * The writeback fid will only be used to write back dirty pages. * We always request the open fid in read-write mode so that a * partial page write, which results in a page read, can work. */ err = p9_client_open(fid, O_RDWR); if (err < 0) { p9_client_clunk(fid); fid = ERR_PTR(err); goto error_out; } error_out: return fid; }
gpl-2.0
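Editor's note: the multipath walk in v9fs_fid_lookup_with_uid above sends at most P9_MAXWELEM path components per 9P Twalk request, chunking the path with min(n - i, P9_MAXWELEM). A stand-alone sketch of that chunking loop; walk_chunk() is a hypothetical stand-in for p9_client_walk(), and the MAXWELEM value here is only illustrative.

#include <stdio.h>

#define MAXWELEM 16	/* per-request limit on name elements, like P9_MAXWELEM */

static int walk_chunk(char **names, int count)
{
	for (int j = 0; j < count; j++)
		printf("walk: %s\n", names[j]);
	return 0;
}

/* Walk an n-component path at most MAXWELEM components per request */
static int walk_path(char **names, int n)
{
	int i = 0;

	while (i < n) {
		int l = n - i < MAXWELEM ? n - i : MAXWELEM;
		int err = walk_chunk(&names[i], l);

		if (err)
			return err;	/* caller clunks intermediate fids */
		i += l;
	}
	return 0;
}

int main(void)
{
	char *path[] = { "usr", "share", "doc" };

	return walk_path(path, 3);
}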
ShinySide/G530P_Permissive
lib/syscall.c
7735
2475
#include <linux/ptrace.h> #include <linux/sched.h> #include <linux/export.h> #include <asm/syscall.h> static int collect_syscall(struct task_struct *target, long *callno, unsigned long args[6], unsigned int maxargs, unsigned long *sp, unsigned long *pc) { struct pt_regs *regs = task_pt_regs(target); if (unlikely(!regs)) return -EAGAIN; *sp = user_stack_pointer(regs); *pc = instruction_pointer(regs); *callno = syscall_get_nr(target, regs); if (*callno != -1L && maxargs > 0) syscall_get_arguments(target, regs, 0, maxargs, args); return 0; } /** * task_current_syscall - Discover what a blocked task is doing. * @target: thread to examine * @callno: filled with system call number or -1 * @args: filled with @maxargs system call arguments * @maxargs: number of elements in @args to fill * @sp: filled with user stack pointer * @pc: filled with user PC * * If @target is blocked in a system call, returns zero with *@callno * set to the call's number and @args filled in with its arguments. * Registers not used for system call arguments may not be available and * it is not kosher to use &struct user_regset calls while the system * call is still in progress. Note we may get this result if @target * has finished its system call but not yet returned to user mode, such * as when it's stopped for signal handling or syscall exit tracing. * * If @target is blocked in the kernel during a fault or exception, * returns zero with *@callno set to -1 and does not fill in @args. * If so, it's now safe to examine @target using &struct user_regset * get() calls as long as we're sure @target won't return to user mode. * * Returns -%EAGAIN if @target does not remain blocked. * * Returns -%EINVAL if @maxargs is too large (maximum is six). */ int task_current_syscall(struct task_struct *target, long *callno, unsigned long args[6], unsigned int maxargs, unsigned long *sp, unsigned long *pc) { long state; unsigned long ncsw; if (unlikely(maxargs > 6)) return -EINVAL; if (target == current) return collect_syscall(target, callno, args, maxargs, sp, pc); state = target->state; if (unlikely(!state)) return -EAGAIN; ncsw = wait_task_inactive(target, state); if (unlikely(!ncsw) || unlikely(collect_syscall(target, callno, args, maxargs, sp, pc)) || unlikely(wait_task_inactive(target, state) != ncsw)) return -EAGAIN; return 0; } EXPORT_SYMBOL_GPL(task_current_syscall);
gpl-2.0
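Editor's note: task_current_syscall above proves the target stayed blocked by sampling wait_task_inactive's context-switch count, reading the registers, and then re-checking that the count is unchanged; if it moved, the read is discarded with -EAGAIN. A simplified user-space sketch of that seqlock-style double-sample idea, assuming C11 atomics; nr_switches and target_callno are illustrative stand-ins, not kernel state.

#include <stdatomic.h>
#include <stdio.h>

static _Atomic unsigned long nr_switches;	/* bumped whenever the "task" runs */
static long target_callno = 42;			/* state we want a stable read of */

/* Read shared state between two samples of the switch count; the read
 * is only trusted if the count did not change in between. */
static int read_stable(long *callno)
{
	unsigned long before = atomic_load(&nr_switches);

	*callno = target_callno;		/* the racy read */

	if (atomic_load(&nr_switches) != before)
		return -1;			/* -EAGAIN analogue: task ran */
	return 0;
}

int main(void)
{
	long callno;

	if (read_stable(&callno) == 0)
		printf("syscall nr: %ld\n", callno);
	return 0;
}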
chris41g/android_kernel_samsung_epic4gtouch
drivers/staging/tidspbridge/core/ue_deh.c
7991
6802
/* * ue_deh.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * Implements upper edge DSP exception handling (DEH) functions. * * Copyright (C) 2005-2006 Texas Instruments, Inc. * Copyright (C) 2010 Felipe Contreras * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #include <linux/kernel.h> #include <linux/interrupt.h> #include <plat/dmtimer.h> #include <dspbridge/dbdefs.h> #include <dspbridge/dspdeh.h> #include <dspbridge/dev.h> #include "_tiomap.h" #include "_deh.h" #include <dspbridge/io_sm.h> #include <dspbridge/drv.h> #include <dspbridge/wdt.h> static u32 fault_addr; static void mmu_fault_dpc(unsigned long data) { struct deh_mgr *deh = (void *)data; if (!deh) return; bridge_deh_notify(deh, DSP_MMUFAULT, 0); } static irqreturn_t mmu_fault_isr(int irq, void *data) { struct deh_mgr *deh = data; struct cfg_hostres *resources; u32 event; if (!deh) return IRQ_HANDLED; resources = deh->bridge_context->resources; if (!resources) { dev_dbg(bridge, "%s: Failed to get Host Resources\n", __func__); return IRQ_HANDLED; } hw_mmu_event_status(resources->dmmu_base, &event); if (event == HW_MMU_TRANSLATION_FAULT) { hw_mmu_fault_addr_read(resources->dmmu_base, &fault_addr); dev_dbg(bridge, "%s: event=0x%x, fault_addr=0x%x\n", __func__, event, fault_addr); /* * Schedule a DPC directly. In the future, it may be * necessary to check if DSP MMU fault is intended for * Bridge. */ tasklet_schedule(&deh->dpc_tasklet); /* Disable the MMU events, else once we clear it will * start to raise INTs again */ hw_mmu_event_disable(resources->dmmu_base, HW_MMU_TRANSLATION_FAULT); } else { hw_mmu_event_disable(resources->dmmu_base, HW_MMU_ALL_INTERRUPTS); } return IRQ_HANDLED; } int bridge_deh_create(struct deh_mgr **ret_deh, struct dev_object *hdev_obj) { int status; struct deh_mgr *deh; struct bridge_dev_context *hbridge_context = NULL; /* Message manager will be created when a file is loaded, since * size of message buffer in shared memory is configurable in * the base image. */ /* Get Bridge context info. 
*/ dev_get_bridge_context(hdev_obj, &hbridge_context); /* Allocate IO manager object: */ deh = kzalloc(sizeof(*deh), GFP_KERNEL); if (!deh) { status = -ENOMEM; goto err; } /* Create an NTFY object to manage notifications */ deh->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL); if (!deh->ntfy_obj) { status = -ENOMEM; goto err; } ntfy_init(deh->ntfy_obj); /* Create a MMUfault DPC */ tasklet_init(&deh->dpc_tasklet, mmu_fault_dpc, (u32) deh); /* Fill in context structure */ deh->bridge_context = hbridge_context; /* Install ISR function for DSP MMU fault */ status = request_irq(INT_DSP_MMU_IRQ, mmu_fault_isr, 0, "DspBridge\tiommu fault", deh); if (status < 0) goto err; *ret_deh = deh; return 0; err: bridge_deh_destroy(deh); *ret_deh = NULL; return status; } int bridge_deh_destroy(struct deh_mgr *deh) { if (!deh) return -EFAULT; /* If notification object exists, delete it */ if (deh->ntfy_obj) { ntfy_delete(deh->ntfy_obj); kfree(deh->ntfy_obj); } /* Disable DSP MMU fault */ free_irq(INT_DSP_MMU_IRQ, deh); /* Free DPC object */ tasklet_kill(&deh->dpc_tasklet); /* Deallocate the DEH manager object */ kfree(deh); return 0; } int bridge_deh_register_notify(struct deh_mgr *deh, u32 event_mask, u32 notify_type, struct dsp_notification *hnotification) { if (!deh) return -EFAULT; if (event_mask) return ntfy_register(deh->ntfy_obj, hnotification, event_mask, notify_type); else return ntfy_unregister(deh->ntfy_obj, hnotification); } #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE static void mmu_fault_print_stack(struct bridge_dev_context *dev_context) { struct cfg_hostres *resources; struct hw_mmu_map_attrs_t map_attrs = { .endianism = HW_LITTLE_ENDIAN, .element_size = HW_ELEM_SIZE16BIT, .mixed_size = HW_MMU_CPUES, }; void *dummy_va_addr; resources = dev_context->resources; dummy_va_addr = (void*)__get_free_page(GFP_ATOMIC); /* * Before acking the MMU fault, let's make sure MMU can only * access entry #0. Then add a new entry so that the DSP OS * can continue in order to dump the stack. 
*/ hw_mmu_twl_disable(resources->dmmu_base); hw_mmu_tlb_flush_all(resources->dmmu_base); hw_mmu_tlb_add(resources->dmmu_base, virt_to_phys(dummy_va_addr), fault_addr, HW_PAGE_SIZE4KB, 1, &map_attrs, HW_SET, HW_SET); dsp_clk_enable(DSP_CLK_GPT8); dsp_gpt_wait_overflow(DSP_CLK_GPT8, 0xfffffffe); /* Clear MMU interrupt */ hw_mmu_event_ack(resources->dmmu_base, HW_MMU_TRANSLATION_FAULT); dump_dsp_stack(dev_context); dsp_clk_disable(DSP_CLK_GPT8); hw_mmu_disable(resources->dmmu_base); free_page((unsigned long)dummy_va_addr); } #endif static inline const char *event_to_string(int event) { switch (event) { case DSP_SYSERROR: return "DSP_SYSERROR"; case DSP_MMUFAULT: return "DSP_MMUFAULT"; case DSP_PWRERROR: return "DSP_PWRERROR"; case DSP_WDTOVERFLOW: return "DSP_WDTOVERFLOW"; default: return "unknown event"; } } void bridge_deh_notify(struct deh_mgr *deh, int event, int info) { struct bridge_dev_context *dev_context; const char *str = event_to_string(event); if (!deh) return; dev_dbg(bridge, "%s: device exception", __func__); dev_context = deh->bridge_context; switch (event) { case DSP_SYSERROR: dev_err(bridge, "%s: %s, info=0x%x", __func__, str, info); #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE dump_dl_modules(dev_context); dump_dsp_stack(dev_context); #endif break; case DSP_MMUFAULT: dev_err(bridge, "%s: %s, addr=0x%x", __func__, str, fault_addr); #ifdef CONFIG_TIDSPBRIDGE_BACKTRACE print_dsp_trace_buffer(dev_context); dump_dl_modules(dev_context); mmu_fault_print_stack(dev_context); #endif break; default: dev_err(bridge, "%s: %s", __func__, str); break; } /* Filter subsequent notifications when an error occurs */ if (dev_context->brd_state != BRD_ERROR) { ntfy_notify(deh->ntfy_obj, event); #ifdef CONFIG_TIDSPBRIDGE_RECOVERY bridge_recover_schedule(); #endif } /* Set the Board state as ERROR */ dev_context->brd_state = BRD_ERROR; /* Disable all the clocks that were enabled by DSP */ dsp_clock_disable_all(dev_context->dsp_per_clks); /* * Avoid the subsequent WDT if it happens once, * also if a fatal error occurs. */ dsp_wdt_enable(false); }
gpl-2.0
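Editor's note: ue_deh.c above splits fault handling into a fast ISR (mmu_fault_isr acknowledges and masks the hardware, then schedules a tasklet) and a deferred bottom half (mmu_fault_dpc does the heavy notification). A minimal kernel-style sketch of that split, assuming the classic tasklet API of this era; the IRQ number, module names, and demo_dev structure are hypothetical, and a real driver would obtain its IRQ from the platform.

#include <linux/interrupt.h>
#include <linux/module.h>

struct demo_dev {
	struct tasklet_struct dpc;
};

/* Bottom half: runs later, outside hard-interrupt context */
static void demo_dpc(unsigned long data)
{
	struct demo_dev *dev = (struct demo_dev *)data;

	/* slow path: notify listeners, dump state, etc. */
	(void)dev;
}

/* Top half: keep it short, defer the real work */
static irqreturn_t demo_isr(int irq, void *data)
{
	struct demo_dev *dev = data;

	/* ack/mask the hardware source here, then defer */
	tasklet_schedule(&dev->dpc);
	return IRQ_HANDLED;
}

static struct demo_dev demo;

static int __init demo_init(void)
{
	tasklet_init(&demo.dpc, demo_dpc, (unsigned long)&demo);
	return request_irq(42 /* hypothetical IRQ */, demo_isr, 0, "demo", &demo);
}

static void __exit demo_exit(void)
{
	free_irq(42, &demo);
	tasklet_kill(&demo.dpc);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");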
Klaus-schwarzkopf/linux-davinci-sensortherm
arch/arm/mach-imx/mach-imx27lite.c
56
2223
/* * Copyright 2007 Robert Schwebel <r.schwebel@pengutronix.de>, Pengutronix * Copyright (C) 2008 Juergen Beisert (kernel@pengutronix.de) * Copyright 2009 Daniel Schaeffer (daniel.schaeffer@timesys.com) * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/platform_device.h> #include <linux/gpio.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include <asm/mach/map.h> #include <mach/hardware.h> #include <mach/common.h> #include <mach/iomux-mx27.h> #include "devices-imx27.h" #include "devices.h" static const int mx27lite_pins[] __initconst = { /* UART1 */ PE12_PF_UART1_TXD, PE13_PF_UART1_RXD, PE14_PF_UART1_CTS, PE15_PF_UART1_RTS, /* FEC */ PD0_AIN_FEC_TXD0, PD1_AIN_FEC_TXD1, PD2_AIN_FEC_TXD2, PD3_AIN_FEC_TXD3, PD4_AOUT_FEC_RX_ER, PD5_AOUT_FEC_RXD1, PD6_AOUT_FEC_RXD2, PD7_AOUT_FEC_RXD3, PD8_AF_FEC_MDIO, PD9_AIN_FEC_MDC, PD10_AOUT_FEC_CRS, PD11_AOUT_FEC_TX_CLK, PD12_AOUT_FEC_RXD0, PD13_AOUT_FEC_RX_DV, PD14_AOUT_FEC_RX_CLK, PD15_AOUT_FEC_COL, PD16_AIN_FEC_TX_ER, PF23_AIN_FEC_TX_EN, }; static const struct imxuart_platform_data uart_pdata __initconst = { .flags = IMXUART_HAVE_RTSCTS, }; static void __init mx27lite_init(void) { mxc_gpio_setup_multiple_pins(mx27lite_pins, ARRAY_SIZE(mx27lite_pins), "imx27lite"); imx27_add_imx_uart0(&uart_pdata); imx27_add_fec(NULL); } static void __init mx27lite_timer_init(void) { mx27_clocks_init(26000000); } static struct sys_timer mx27lite_timer = { .init = mx27lite_timer_init, }; MACHINE_START(IMX27LITE, "LogicPD i.MX27LITE") .boot_params = MX27_PHYS_OFFSET + 0x100, .map_io = mx27_map_io, .init_irq = mx27_init_irq, .init_machine = mx27lite_init, .timer = &mx27lite_timer, MACHINE_END
gpl-2.0
janrinze/loox7xxport.loox2-6-22
arch/mips/sgi-ip32/ip32-irq.c
56
12872
/* * Code to handle IP32 IRQs * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2000 Harald Koerfgen * Copyright (C) 2001 Keith M Wesolowski */ #include <linux/init.h> #include <linux/kernel_stat.h> #include <linux/types.h> #include <linux/interrupt.h> #include <linux/irq.h> #include <linux/bitops.h> #include <linux/kernel.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/random.h> #include <linux/sched.h> #include <asm/mipsregs.h> #include <asm/signal.h> #include <asm/system.h> #include <asm/time.h> #include <asm/ip32/crime.h> #include <asm/ip32/mace.h> #include <asm/ip32/ip32_ints.h> /* issue a PIO read to make sure no PIO writes are pending */ static void inline flush_crime_bus(void) { crime->control; } static void inline flush_mace_bus(void) { mace->perif.ctrl.misc; } #undef DEBUG_IRQ #ifdef DEBUG_IRQ #define DBG(x...) printk(x) #else #define DBG(x...) #endif /* O2 irq map * * IP0 -> software (ignored) * IP1 -> software (ignored) * IP2 -> (irq0) C crime 1.1 all interrupts; crime 1.5 ??? * IP3 -> (irq1) X unknown * IP4 -> (irq2) X unknown * IP5 -> (irq3) X unknown * IP6 -> (irq4) X unknown * IP7 -> (irq5) 0 CPU count/compare timer (system timer) * * crime: (C) * * CRIME_INT_STAT 31:0: * * 0 -> 1 Video in 1 * 1 -> 2 Video in 2 * 2 -> 3 Video out * 3 -> 4 Mace ethernet * 4 -> S SuperIO sub-interrupt * 5 -> M Miscellaneous sub-interrupt * 6 -> A Audio sub-interrupt * 7 -> 8 PCI bridge errors * 8 -> 9 PCI SCSI aic7xxx 0 * 9 -> 10 PCI SCSI aic7xxx 1 * 10 -> 11 PCI slot 0 * 11 -> 12 unused (PCI slot 1) * 12 -> 13 unused (PCI slot 2) * 13 -> 14 unused (PCI shared 0) * 14 -> 15 unused (PCI shared 1) * 15 -> 16 unused (PCI shared 2) * 16 -> 17 GBE0 (E) * 17 -> 18 GBE1 (E) * 18 -> 19 GBE2 (E) * 19 -> 20 GBE3 (E) * 20 -> 21 CPU errors * 21 -> 22 Memory errors * 22 -> 23 RE empty edge (E) * 23 -> 24 RE full edge (E) * 24 -> 25 RE idle edge (E) * 25 -> 26 RE empty level * 26 -> 27 RE full level * 27 -> 28 RE idle level * 28 -> 29 unused (software 0) (E) * 29 -> 30 unused (software 1) (E) * 30 -> 31 unused (software 2) - crime 1.5 CPU SysCorError (E) * 31 -> 32 VICE * * S, M, A: Use the MACE ISA interrupt register * MACE_ISA_INT_STAT 31:0 * * 0-7 -> 33-40 Audio * 8 -> 41 RTC * 9 -> 42 Keyboard * 10 -> X Keyboard polled * 11 -> 44 Mouse * 12 -> X Mouse polled * 13-15 -> 46-48 Count/compare timers * 16-19 -> 49-52 Parallel (16 E) * 20-25 -> 53-58 Serial 1 (22 E) * 26-31 -> 59-64 Serial 2 (28 E) * * Note that this means IRQs 5-7, 43, and 45 do not exist. This is a * different IRQ map than IRIX uses, but that's OK as Linux irq handling * is quite different anyway. */ /* Some initial interrupts to set up */ extern irqreturn_t crime_memerr_intr(int irq, void *dev_id); extern irqreturn_t crime_cpuerr_intr(int irq, void *dev_id); struct irqaction memerr_irq = { crime_memerr_intr, IRQF_DISABLED, CPU_MASK_NONE, "CRIME memory error", NULL, NULL }; struct irqaction cpuerr_irq = { crime_cpuerr_intr, IRQF_DISABLED, CPU_MASK_NONE, "CRIME CPU error", NULL, NULL }; /* * For interrupts wired from a single device to the CPU. Only the clock * uses this it seems, which is IRQ 0 and IP7. 
*/ static void enable_cpu_irq(unsigned int irq) { set_c0_status(STATUSF_IP7); } static void disable_cpu_irq(unsigned int irq) { clear_c0_status(STATUSF_IP7); } static void end_cpu_irq(unsigned int irq) { if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) enable_cpu_irq (irq); } static struct irq_chip ip32_cpu_interrupt = { .name = "IP32 CPU", .ack = disable_cpu_irq, .mask = disable_cpu_irq, .mask_ack = disable_cpu_irq, .unmask = enable_cpu_irq, .end = end_cpu_irq, }; /* * This is for pure CRIME interrupts - ie not MACE. The advantage? * We get to split the register in half and do faster lookups. */ static uint64_t crime_mask; static void enable_crime_irq(unsigned int irq) { crime_mask |= 1 << (irq - 1); crime->imask = crime_mask; } static void disable_crime_irq(unsigned int irq) { crime_mask &= ~(1 << (irq - 1)); crime->imask = crime_mask; flush_crime_bus(); } static void mask_and_ack_crime_irq(unsigned int irq) { /* Edge triggered interrupts must be cleared. */ if ((irq >= CRIME_GBE0_IRQ && irq <= CRIME_GBE3_IRQ) || (irq >= CRIME_RE_EMPTY_E_IRQ && irq <= CRIME_RE_IDLE_E_IRQ) || (irq >= CRIME_SOFT0_IRQ && irq <= CRIME_SOFT2_IRQ)) { uint64_t crime_int; crime_int = crime->hard_int; crime_int &= ~(1 << (irq - 1)); crime->hard_int = crime_int; } disable_crime_irq(irq); } static void end_crime_irq(unsigned int irq) { if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) enable_crime_irq(irq); } static struct irq_chip ip32_crime_interrupt = { .name = "IP32 CRIME", .ack = mask_and_ack_crime_irq, .mask = disable_crime_irq, .mask_ack = mask_and_ack_crime_irq, .unmask = enable_crime_irq, .end = end_crime_irq, }; /* * This is for MACE PCI interrupts. We can decrease bus traffic by masking * as close to the source as possible. This also means we can take the * next chunk of the CRIME register in one piece. */ static unsigned long macepci_mask; static void enable_macepci_irq(unsigned int irq) { macepci_mask |= MACEPCI_CONTROL_INT(irq - 9); mace->pci.control = macepci_mask; crime_mask |= 1 << (irq - 1); crime->imask = crime_mask; } static void disable_macepci_irq(unsigned int irq) { crime_mask &= ~(1 << (irq - 1)); crime->imask = crime_mask; flush_crime_bus(); macepci_mask &= ~MACEPCI_CONTROL_INT(irq - 9); mace->pci.control = macepci_mask; flush_mace_bus(); } static void end_macepci_irq(unsigned int irq) { if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) enable_macepci_irq(irq); } static struct irq_chip ip32_macepci_interrupt = { .name = "IP32 MACE PCI", .ack = disable_macepci_irq, .mask = disable_macepci_irq, .mask_ack = disable_macepci_irq, .unmask = enable_macepci_irq, .end = end_macepci_irq, }; /* This is used for MACE ISA interrupts. That means bits 4-6 in the * CRIME register. 
*/ #define MACEISA_AUDIO_INT (MACEISA_AUDIO_SW_INT | \ MACEISA_AUDIO_SC_INT | \ MACEISA_AUDIO1_DMAT_INT | \ MACEISA_AUDIO1_OF_INT | \ MACEISA_AUDIO2_DMAT_INT | \ MACEISA_AUDIO2_MERR_INT | \ MACEISA_AUDIO3_DMAT_INT | \ MACEISA_AUDIO3_MERR_INT) #define MACEISA_MISC_INT (MACEISA_RTC_INT | \ MACEISA_KEYB_INT | \ MACEISA_KEYB_POLL_INT | \ MACEISA_MOUSE_INT | \ MACEISA_MOUSE_POLL_INT | \ MACEISA_TIMER0_INT | \ MACEISA_TIMER1_INT | \ MACEISA_TIMER2_INT) #define MACEISA_SUPERIO_INT (MACEISA_PARALLEL_INT | \ MACEISA_PAR_CTXA_INT | \ MACEISA_PAR_CTXB_INT | \ MACEISA_PAR_MERR_INT | \ MACEISA_SERIAL1_INT | \ MACEISA_SERIAL1_TDMAT_INT | \ MACEISA_SERIAL1_TDMAPR_INT | \ MACEISA_SERIAL1_TDMAME_INT | \ MACEISA_SERIAL1_RDMAT_INT | \ MACEISA_SERIAL1_RDMAOR_INT | \ MACEISA_SERIAL2_INT | \ MACEISA_SERIAL2_TDMAT_INT | \ MACEISA_SERIAL2_TDMAPR_INT | \ MACEISA_SERIAL2_TDMAME_INT | \ MACEISA_SERIAL2_RDMAT_INT | \ MACEISA_SERIAL2_RDMAOR_INT) static unsigned long maceisa_mask; static void enable_maceisa_irq (unsigned int irq) { unsigned int crime_int = 0; DBG ("maceisa enable: %u\n", irq); switch (irq) { case MACEISA_AUDIO_SW_IRQ ... MACEISA_AUDIO3_MERR_IRQ: crime_int = MACE_AUDIO_INT; break; case MACEISA_RTC_IRQ ... MACEISA_TIMER2_IRQ: crime_int = MACE_MISC_INT; break; case MACEISA_PARALLEL_IRQ ... MACEISA_SERIAL2_RDMAOR_IRQ: crime_int = MACE_SUPERIO_INT; break; } DBG ("crime_int %08x enabled\n", crime_int); crime_mask |= crime_int; crime->imask = crime_mask; maceisa_mask |= 1 << (irq - 33); mace->perif.ctrl.imask = maceisa_mask; } static void disable_maceisa_irq(unsigned int irq) { unsigned int crime_int = 0; maceisa_mask &= ~(1 << (irq - 33)); if(!(maceisa_mask & MACEISA_AUDIO_INT)) crime_int |= MACE_AUDIO_INT; if(!(maceisa_mask & MACEISA_MISC_INT)) crime_int |= MACE_MISC_INT; if(!(maceisa_mask & MACEISA_SUPERIO_INT)) crime_int |= MACE_SUPERIO_INT; crime_mask &= ~crime_int; crime->imask = crime_mask; flush_crime_bus(); mace->perif.ctrl.imask = maceisa_mask; flush_mace_bus(); } static void mask_and_ack_maceisa_irq(unsigned int irq) { unsigned long mace_int; switch (irq) { case MACEISA_PARALLEL_IRQ: case MACEISA_SERIAL1_TDMAPR_IRQ: case MACEISA_SERIAL2_TDMAPR_IRQ: /* edge triggered */ mace_int = mace->perif.ctrl.istat; mace_int &= ~(1 << (irq - 33)); mace->perif.ctrl.istat = mace_int; break; } disable_maceisa_irq(irq); } static void end_maceisa_irq(unsigned irq) { if (!(irq_desc[irq].status & (IRQ_DISABLED | IRQ_INPROGRESS))) enable_maceisa_irq(irq); } static struct irq_chip ip32_maceisa_interrupt = { .name = "IP32 MACE ISA", .ack = mask_and_ack_maceisa_irq, .mask = disable_maceisa_irq, .mask_ack = mask_and_ack_maceisa_irq, .unmask = enable_maceisa_irq, .end = end_maceisa_irq, }; /* This is used for regular non-ISA, non-PCI MACE interrupts. That means * bits 0-3 and 7 in the CRIME register. 
*/ static void enable_mace_irq(unsigned int irq) { crime_mask |= 1 << (irq - 1); crime->imask = crime_mask; } static void disable_mace_irq(unsigned int irq) { crime_mask &= ~(1 << (irq - 1)); crime->imask = crime_mask; flush_crime_bus(); } static void end_mace_irq(unsigned int irq) { if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS))) enable_mace_irq(irq); } static struct irq_chip ip32_mace_interrupt = { .name = "IP32 MACE", .ack = disable_mace_irq, .mask = disable_mace_irq, .mask_ack = disable_mace_irq, .unmask = enable_mace_irq, .end = end_mace_irq, }; static void ip32_unknown_interrupt(void) { printk ("Unknown interrupt occurred!\n"); printk ("cp0_status: %08x\n", read_c0_status()); printk ("cp0_cause: %08x\n", read_c0_cause()); printk ("CRIME intr mask: %016lx\n", crime->imask); printk ("CRIME intr status: %016lx\n", crime->istat); printk ("CRIME hardware intr register: %016lx\n", crime->hard_int); printk ("MACE ISA intr mask: %08lx\n", mace->perif.ctrl.imask); printk ("MACE ISA intr status: %08lx\n", mace->perif.ctrl.istat); printk ("MACE PCI control register: %08x\n", mace->pci.control); printk("Register dump:\n"); show_regs(get_irq_regs()); printk("Please mail this report to linux-mips@linux-mips.org\n"); printk("Spinning..."); while(1) ; } /* CRIME 1.1 appears to deliver all interrupts to this one pin. */ /* change this to loop over all edge-triggered irqs, except masked-out ones */ static void ip32_irq0(void) { uint64_t crime_int; int irq = 0; crime_int = crime->istat & crime_mask; irq = __ffs(crime_int); crime_int = 1 << irq; if (crime_int & CRIME_MACEISA_INT_MASK) { unsigned long mace_int = mace->perif.ctrl.istat; irq = __ffs(mace_int & maceisa_mask) + 32; } irq++; DBG("*irq %u*\n", irq); do_IRQ(irq); } static void ip32_irq1(void) { ip32_unknown_interrupt(); } static void ip32_irq2(void) { ip32_unknown_interrupt(); } static void ip32_irq3(void) { ip32_unknown_interrupt(); } static void ip32_irq4(void) { ip32_unknown_interrupt(); } static void ip32_irq5(void) { ll_timer_interrupt(IP32_R4K_TIMER_IRQ); } asmlinkage void plat_irq_dispatch(void) { unsigned int pending = read_c0_status() & read_c0_cause(); if (likely(pending & IE_IRQ0)) ip32_irq0(); else if (unlikely(pending & IE_IRQ1)) ip32_irq1(); else if (unlikely(pending & IE_IRQ2)) ip32_irq2(); else if (unlikely(pending & IE_IRQ3)) ip32_irq3(); else if (unlikely(pending & IE_IRQ4)) ip32_irq4(); else if (likely(pending & IE_IRQ5)) ip32_irq5(); } void __init arch_init_irq(void) { unsigned int irq; /* Install our interrupt handler, then clear and disable all * CRIME and MACE interrupts. */ crime->imask = 0; crime->hard_int = 0; crime->soft_int = 0; mace->perif.ctrl.istat = 0; mace->perif.ctrl.imask = 0; for (irq = 0; irq <= IP32_IRQ_MAX; irq++) { struct irq_chip *controller; if (irq == IP32_R4K_TIMER_IRQ) controller = &ip32_cpu_interrupt; else if (irq <= MACE_PCI_BRIDGE_IRQ && irq >= MACE_VID_IN1_IRQ) controller = &ip32_mace_interrupt; else if (irq <= MACEPCI_SHARED2_IRQ && irq >= MACEPCI_SCSI0_IRQ) controller = &ip32_macepci_interrupt; else if (irq <= CRIME_VICE_IRQ && irq >= CRIME_GBE0_IRQ) controller = &ip32_crime_interrupt; else controller = &ip32_maceisa_interrupt; set_irq_chip(irq, controller); } setup_irq(CRIME_MEMERR_IRQ, &memerr_irq); setup_irq(CRIME_CPUERR_IRQ, &cpuerr_irq); #define ALLINTS (IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | IE_IRQ3 | IE_IRQ4 | IE_IRQ5) change_c0_status(ST0_IM, ALLINTS); }
gpl-2.0
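Editor's note: ip32_irq0 above masks the CRIME status register against the enabled-interrupt mask and dispatches the lowest set bit via __ffs, giving low-numbered sources priority. A stand-alone sketch of that dispatch idea; __builtin_ctzll plays the role of the kernel's __ffs, and do_irq/dispatch_pending are illustrative names.

#include <stdint.h>
#include <stdio.h>

static void do_irq(int irq) { printf("dispatch irq %d\n", irq); }

static void dispatch_pending(uint64_t istat, uint64_t imask)
{
	uint64_t pending = istat & imask;

	if (!pending)
		return;				/* spurious interrupt */
	/* lowest set bit wins, matching the __ffs() priority above */
	do_irq(__builtin_ctzll(pending) + 1);	/* +1: irq numbers are 1-based */
}

int main(void)
{
	dispatch_pending(0x28, 0xff);		/* bits 3 and 5 set: services irq 4 */
	return 0;
}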
advx9600/kernel-rp4412
drivers/media/video/exynos/fimc-is-mc/fimc-is-helper.c
56
115433
/* * Samsung Exynos5 SoC series FIMC-IS driver * * exynos5 fimc-is helper functions * * Copyright (c) 2011 Samsung Electronics Co., Ltd * Contact: Jiyoung Shin<idon.shin@samsung.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/clk.h> #include <linux/irq.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/memory.h> #include <linux/regulator/consumer.h> #include <linux/pm_runtime.h> #include <linux/videodev2.h> #include <media/v4l2-subdev.h> #include <linux/videodev2.h> #include <linux/videodev2_samsung.h> #include <linux/gpio.h> #include <linux/gpio_event.h> #include <plat/gpio-cfg.h> #include "fimc-is-core.h" #include "fimc-is-regs.h" #include "fimc-is-cmd.h" #include "fimc-is-param.h" #include "fimc-is-err.h" #include "fimc-is-helper.h" #include "fimc-is-misc.h" /* Default setting values */ static const struct sensor_param init_val_sensor_preview_still = { .frame_rate = { .frame_rate = DEFAULT_PREVIEW_STILL_FRAMERATE, }, }; static const struct isp_param init_val_isp_preview_still = { .control = { .cmd = CONTROL_COMMAND_START, .bypass = CONTROL_BYPASS_DISABLE, .err = CONTROL_ERROR_NO, }, .otf_input = { .cmd = OTF_INPUT_COMMAND_ENABLE, .width = DEFAULT_PREVIEW_STILL_WIDTH, .height = DEFAULT_PREVIEW_STILL_HEIGHT, #ifndef ISP_STRGEN .format = OTF_INPUT_FORMAT_BAYER, #else .format = OTF_INPUT_FORMAT_STRGEN_COLORBAR_BAYER, #endif .bitwidth = OTF_INPUT_BIT_WIDTH_10BIT, .order = OTF_INPUT_ORDER_BAYER_GR_BG, .reserved[3] = 0, .reserved[4] = 66666, .err = OTF_INPUT_ERROR_NO, }, .dma1_input = { .cmd = DMA_INPUT_COMMAND_DISABLE, .width = 0, .height = 0, .format = 0, .bitwidth = 0, .plane = 0, .order = 0, .buffer_number = 0, .buffer_address = 0, .err = 0, }, .dma2_input = { .cmd = DMA_INPUT_COMMAND_DISABLE, .width = 0, .height = 0, .format = 0, .bitwidth = 0, .plane = 0, .order = 0, .buffer_number = 0, .buffer_address = 0, .err = 0, }, .aa = { .cmd = ISP_AA_COMMAND_START, .target = ISP_AA_TARGET_AF | ISP_AA_TARGET_AE | ISP_AA_TARGET_AWB, .mode = 0, .face = 0, .win_pos_x = 0, .win_pos_y = 0, .err = ISP_AF_ERROR_NO, }, .flash = { .cmd = ISP_FLASH_COMMAND_DISABLE, .redeye = ISP_FLASH_REDEYE_DISABLE, .err = ISP_FLASH_ERROR_NO, }, .awb = { .cmd = ISP_AWB_COMMAND_AUTO, .illumination = 0, .err = ISP_AWB_ERROR_NO, }, .effect = { .cmd = ISP_IMAGE_EFFECT_DISABLE, .err = ISP_IMAGE_EFFECT_ERROR_NO, }, .iso = { .cmd = ISP_ISO_COMMAND_AUTO, .value = 0, .err = ISP_ISO_ERROR_NO, }, .adjust = { .cmd = ISP_ADJUST_COMMAND_AUTO, .contrast = 0, .saturation = 0, .sharpness = 0, .exposure = 0, .brightness = 0, .hue = 0, .shutter_time_min = 0, .shutter_time_max = 66666, .err = ISP_ADJUST_ERROR_NO, }, .metering = { .cmd = ISP_METERING_COMMAND_MATRIX, .win_pos_x = 0, .win_pos_y = 0, .win_width = DEFAULT_PREVIEW_STILL_WIDTH, .win_height = DEFAULT_PREVIEW_STILL_HEIGHT, .err = ISP_METERING_ERROR_NO, }, .afc = { .cmd = ISP_AFC_COMMAND_AUTO, .manual = 0, .err = ISP_AFC_ERROR_NO, }, .otf_output = { .cmd = OTF_OUTPUT_COMMAND_ENABLE, .width = DEFAULT_PREVIEW_STILL_WIDTH, .height = DEFAULT_PREVIEW_STILL_HEIGHT, .format = OTF_OUTPUT_FORMAT_YUV444, .bitwidth = OTF_OUTPUT_BIT_WIDTH_12BIT, .order = OTF_OUTPUT_ORDER_BAYER_GR_BG, .err = OTF_OUTPUT_ERROR_NO, }, .dma1_output = { 
#ifndef ISP_DMA .cmd = DMA_OUTPUT_COMMAND_DISABLE, #else .cmd = DMA_OUTPUT_COMMAND_ENABLE, #endif .width = DEFAULT_PREVIEW_STILL_WIDTH, .height = DEFAULT_PREVIEW_STILL_HEIGHT, .format = DMA_OUTPUT_FORMAT_YUV422, .bitwidth = DMA_OUTPUT_BIT_WIDTH_8BIT, .plane = DMA_OUTPUT_PLANE_3, .order = DMA_INPUT_ORDER_NO, .buffer_number = 1, .buffer_address = 0x50060400, .err = DMA_OUTPUT_ERROR_NO, }, .dma2_output = { #ifndef ISP_DMA .cmd = DMA_OUTPUT_COMMAND_DISABLE, #else .cmd = DMA_OUTPUT_COMMAND_ENABLE, #endif .width = DEFAULT_PREVIEW_STILL_WIDTH, .height = DEFAULT_PREVIEW_STILL_HEIGHT, .format = DMA_OUTPUT_FORMAT_BAYER, .bitwidth = DMA_OUTPUT_BIT_WIDTH_10BIT, .plane = DMA_OUTPUT_PLANE_1, .order = DMA_OUTPUT_ORDER_GB_BG, .buffer_number = 1, .buffer_address = 0x501D0000, .err = DMA_OUTPUT_ERROR_NO, }, }; static const struct drc_param init_val_drc_preview_still = { .control = { .cmd = CONTROL_COMMAND_START, .bypass = CONTROL_BYPASS_ENABLE, .err = CONTROL_ERROR_NO, }, .otf_input = { .cmd = OTF_INPUT_COMMAND_ENABLE, .width = DEFAULT_PREVIEW_STILL_WIDTH, .height = DEFAULT_PREVIEW_STILL_HEIGHT, .format = OTF_INPUT_FORMAT_YUV444, .bitwidth = OTF_INPUT_BIT_WIDTH_12BIT, .order = OTF_INPUT_ORDER_BAYER_GR_BG, .err = OTF_INPUT_ERROR_NO, }, .dma_input = { .cmd = DMA_INPUT_COMMAND_DISABLE, .width = 0, .height = 0, .format = 0, .bitwidth = 0, .plane = 0, .order = 0, .buffer_number = 0, .buffer_address = 0, .err = 0, }, .otf_output = { .cmd = OTF_OUTPUT_COMMAND_ENABLE, .width = DEFAULT_PREVIEW_STILL_WIDTH, .height = DEFAULT_PREVIEW_STILL_HEIGHT, .format = OTF_OUTPUT_FORMAT_YUV444, .bitwidth = OTF_INPUT_BIT_WIDTH_8BIT, .order = OTF_OUTPUT_ORDER_BAYER_GR_BG, .err = OTF_OUTPUT_ERROR_NO, }, }; static const struct scalerc_param init_val_scalerc_preview_still = { .control = { .cmd = CONTROL_COMMAND_START, .bypass = CONTROL_BYPASS_ENABLE, .err = CONTROL_ERROR_NO, }, .otf_input = { .cmd = OTF_INPUT_COMMAND_ENABLE, .width = DEFAULT_PREVIEW_STILL_WIDTH, .height = DEFAULT_PREVIEW_STILL_HEIGHT, .format = OTF_INPUT_FORMAT_YUV444, .bitwidth = OTF_INPUT_BIT_WIDTH_12BIT, .order = OTF_INPUT_ORDER_BAYER_GR_BG, .crop_offset_x = 0, .crop_offset_y = 0, .crop_width = 0, .crop_height = 0, .err = OTF_INPUT_ERROR_NO, }, .effect = { .cmd = 0, .err = 0, }, .crop = { .cmd = OTF_INPUT_COMMAND_DISABLE, .pos_x = 0, .pos_y = 0, .crop_width = DEFAULT_PREVIEW_STILL_WIDTH, .crop_height = DEFAULT_PREVIEW_STILL_HEIGHT, .err = 0, }, .scale = { .cmd = OTF_INPUT_COMMAND_DISABLE, .pre_h_ratio = 0, .pre_v_ratio = 0, .sh_factor = 0, .h_ratio = 0, .v_ratio = 0, .err = 0, }, .otf_output = { .cmd = OTF_OUTPUT_COMMAND_ENABLE, .width = DEFAULT_PREVIEW_STILL_WIDTH, .height = DEFAULT_PREVIEW_STILL_HEIGHT, .format = OTF_OUTPUT_FORMAT_YUV444, .bitwidth = OTF_OUTPUT_BIT_WIDTH_8BIT, .order = OTF_OUTPUT_ORDER_BAYER_GR_BG, .err = OTF_OUTPUT_ERROR_NO, }, .dma_output = { .cmd = DMA_OUTPUT_COMMAND_DISABLE, .width = DEFAULT_PREVIEW_STILL_WIDTH, .height = DEFAULT_PREVIEW_STILL_HEIGHT, .format = DMA_OUTPUT_FORMAT_YUV422, .bitwidth = DMA_OUTPUT_BIT_WIDTH_8BIT, .plane = DMA_OUTPUT_PLANE_1, .order = DMA_OUTPUT_ORDER_CrYCbY, .buffer_number = 0, .buffer_address = 0, .reserved[0] = 2, .err = DMA_OUTPUT_ERROR_NO, }, }; static const struct odc_param init_val_odc_preview_still = { .control = { .cmd = CONTROL_COMMAND_START, .bypass = CONTROL_BYPASS_ENABLE, .err = CONTROL_ERROR_NO, }, .otf_input = { .cmd = OTF_INPUT_COMMAND_ENABLE, .width = DEFAULT_PREVIEW_STILL_WIDTH, .height = DEFAULT_PREVIEW_STILL_HEIGHT, .format = OTF_INPUT_FORMAT_YUV422, .bitwidth = OTF_INPUT_BIT_WIDTH_8BIT, .order 
= OTF_INPUT_ORDER_BAYER_GR_BG, .crop_offset_x = 0, .crop_offset_y = 0, .crop_width = 0, .crop_height = 0, .err = OTF_INPUT_ERROR_NO, }, .otf_output = { .cmd = OTF_OUTPUT_COMMAND_ENABLE, .width = DEFAULT_PREVIEW_STILL_WIDTH, .height = DEFAULT_PREVIEW_STILL_HEIGHT, .format = OTF_OUTPUT_FORMAT_YUV422, .bitwidth = OTF_OUTPUT_BIT_WIDTH_8BIT, .order = OTF_OUTPUT_ORDER_BAYER_GR_BG, .err = OTF_OUTPUT_ERROR_NO, }, }; static const struct dis_param init_val_dis_preview_still = { .control = { .cmd = CONTROL_COMMAND_START, .bypass = CONTROL_BYPASS_ENABLE, .err = CONTROL_ERROR_NO, }, .otf_input = { .cmd = OTF_INPUT_COMMAND_ENABLE, .width = DEFAULT_PREVIEW_STILL_WIDTH, .height = DEFAULT_PREVIEW_STILL_HEIGHT, .format = OTF_INPUT_FORMAT_YUV422, .bitwidth = OTF_INPUT_BIT_WIDTH_8BIT, .order = OTF_INPUT_ORDER_BAYER_GR_BG, .crop_offset_x = 0, .crop_offset_y = 0, .crop_width = 0, .crop_height = 0, .err = OTF_INPUT_ERROR_NO, }, .otf_output = { .cmd = OTF_OUTPUT_COMMAND_ENABLE, .width = DEFAULT_PREVIEW_STILL_WIDTH, .height = DEFAULT_PREVIEW_STILL_HEIGHT, .format = OTF_OUTPUT_FORMAT_YUV422, .bitwidth = OTF_OUTPUT_BIT_WIDTH_8BIT, .order = OTF_OUTPUT_ORDER_BAYER_GR_BG, .err = OTF_OUTPUT_ERROR_NO, }, }; static const struct tdnr_param init_val_tdnr_preview_still = { .control = { .cmd = CONTROL_COMMAND_START, .bypass = CONTROL_BYPASS_ENABLE, .err = CONTROL_ERROR_NO, }, .otf_input = { .cmd = OTF_INPUT_COMMAND_ENABLE, .width = DEFAULT_PREVIEW_STILL_WIDTH, .height = DEFAULT_PREVIEW_STILL_HEIGHT, .format = OTF_INPUT_FORMAT_YUV422, .bitwidth = OTF_INPUT_BIT_WIDTH_8BIT, .order = OTF_INPUT_ORDER_BAYER_GR_BG, .err = OTF_INPUT_ERROR_NO, }, .frame = { .cmd = 0, .err = 0, }, .otf_output = { .cmd = OTF_OUTPUT_COMMAND_ENABLE, .width = DEFAULT_PREVIEW_STILL_WIDTH, .height = DEFAULT_PREVIEW_STILL_HEIGHT, .format = OTF_OUTPUT_FORMAT_YUV422, .bitwidth = OTF_INPUT_BIT_WIDTH_8BIT, .order = OTF_OUTPUT_ORDER_BAYER_GR_BG, .err = OTF_OUTPUT_ERROR_NO, }, .dma_output = { .cmd = DMA_OUTPUT_COMMAND_DISABLE, .width = DEFAULT_PREVIEW_STILL_WIDTH, .height = DEFAULT_PREVIEW_STILL_HEIGHT, .format = DMA_OUTPUT_FORMAT_YUV420, .bitwidth = DMA_OUTPUT_BIT_WIDTH_8BIT, .plane = DMA_OUTPUT_PLANE_2, .order = DMA_OUTPUT_ORDER_YCbYCr, /*FW error, need to change*/ .buffer_number = 0, .buffer_address = 0, .err = DMA_OUTPUT_ERROR_NO, }, }; static const struct scalerp_param init_val_scalerp_preview_still = { .control = { .cmd = CONTROL_COMMAND_START, .bypass = CONTROL_BYPASS_ENABLE, .err = CONTROL_ERROR_NO, }, .otf_input = { .cmd = OTF_INPUT_COMMAND_ENABLE, .width = DEFAULT_PREVIEW_STILL_WIDTH, .height = DEFAULT_PREVIEW_STILL_HEIGHT, .format = OTF_INPUT_FORMAT_YUV444, .bitwidth = OTF_INPUT_BIT_WIDTH_8BIT, .order = OTF_INPUT_ORDER_BAYER_GR_BG, .crop_offset_x = 0, .crop_offset_y = 0, .crop_width = 0, .crop_height = 0, .err = OTF_INPUT_ERROR_NO, }, .effect = { .cmd = 0, .err = 0, }, .crop = { .cmd = OTF_INPUT_COMMAND_DISABLE, .pos_x = 0, .pos_y = 0, .crop_width = DEFAULT_PREVIEW_STILL_WIDTH, .crop_height = DEFAULT_PREVIEW_STILL_HEIGHT, .err = 0, }, .scale = { .cmd = OTF_INPUT_COMMAND_DISABLE, .pre_h_ratio = 0, .pre_v_ratio = 0, .sh_factor = 0, .h_ratio = 0, .v_ratio = 0, .err = 0, }, .rotation = { .cmd = 0, .err = 0, }, .flip = { .cmd = 0, .err = 0, }, .otf_output = { .cmd = OTF_OUTPUT_COMMAND_ENABLE, .width = DEFAULT_PREVIEW_STILL_WIDTH, .height = DEFAULT_PREVIEW_STILL_HEIGHT, .format = OTF_INPUT_FORMAT_YUV444, .bitwidth = OTF_INPUT_BIT_WIDTH_8BIT, .order = OTF_OUTPUT_ORDER_BAYER_GR_BG, .err = OTF_OUTPUT_ERROR_NO, }, .dma_output = { .cmd = DMA_OUTPUT_COMMAND_DISABLE, 
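/*
 * DMA output from the post-scaler is disabled by default: in this preview
 * scenario the frame continues over the on-the-fly (OTF) path instead
 * (otf_output.cmd above is ENABLE). The geometry below is still programmed,
 * presumably so the DMA path can be switched on later without a full
 * reconfiguration.
 */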
.width = DEFAULT_PREVIEW_STILL_WIDTH, .height = DEFAULT_PREVIEW_STILL_HEIGHT, .format = OTF_OUTPUT_FORMAT_YUV420, .bitwidth = DMA_OUTPUT_BIT_WIDTH_8BIT, .plane = DMA_OUTPUT_PLANE_3, .order = DMA_OUTPUT_ORDER_NO, .buffer_number = 0, .buffer_address = 0, .err = DMA_OUTPUT_ERROR_NO, }, }; static const struct fd_param init_val_fd_preview_still = { .control = { .cmd = CONTROL_COMMAND_STOP, .bypass = CONTROL_BYPASS_DISABLE, .err = CONTROL_ERROR_NO, }, .otf_input = { .cmd = OTF_INPUT_COMMAND_ENABLE, .width = DEFAULT_PREVIEW_STILL_WIDTH, .height = DEFAULT_PREVIEW_STILL_HEIGHT, .format = OTF_INPUT_FORMAT_YUV444, .bitwidth = OTF_INPUT_BIT_WIDTH_8BIT, .order = OTF_INPUT_ORDER_BAYER_GR_BG, .err = OTF_INPUT_ERROR_NO, }, .dma_input = { .cmd = DMA_INPUT_COMMAND_DISABLE, .width = 0, .height = 0, .format = 0, .bitwidth = 0, .plane = 0, .order = 0, .buffer_number = 0, .buffer_address = 0, .err = 0, }, .config = { .cmd = FD_CONFIG_COMMAND_MAXIMUM_NUMBER | FD_CONFIG_COMMAND_ROLL_ANGLE | FD_CONFIG_COMMAND_YAW_ANGLE | FD_CONFIG_COMMAND_SMILE_MODE | FD_CONFIG_COMMAND_BLINK_MODE | FD_CONFIG_COMMAND_EYES_DETECT | FD_CONFIG_COMMAND_MOUTH_DETECT | FD_CONFIG_COMMAND_ORIENTATION | FD_CONFIG_COMMAND_ORIENTATION_VALUE, .max_number = 5, .roll_angle = FD_CONFIG_ROLL_ANGLE_FULL, .yaw_angle = FD_CONFIG_YAW_ANGLE_45, .smile_mode = FD_CONFIG_SMILE_MODE_DISABLE, .blink_mode = FD_CONFIG_BLINK_MODE_DISABLE, .eye_detect = FD_CONFIG_EYES_DETECT_ENABLE, .mouth_detect = FD_CONFIG_MOUTH_DETECT_DISABLE, .orientation = FD_CONFIG_ORIENTATION_DISABLE, .orientation_value = 0, .err = ERROR_FD_NO, }, }; static const struct sensor_param init_val_sensor_capture = { .frame_rate = { .frame_rate = DEFAULT_CAPTURE_STILL_FRAMERATE, }, }; static const struct isp_param init_val_isp_capture = { .control = { .cmd = CONTROL_COMMAND_START, .bypass = CONTROL_BYPASS_DISABLE, .err = CONTROL_ERROR_NO, }, .otf_input = { .cmd = OTF_INPUT_COMMAND_ENABLE, .width = DEFAULT_CAPTURE_STILL_WIDTH, .height = DEFAULT_CAPTURE_STILL_HEIGHT, #ifndef ISP_STRGEN .format = OTF_INPUT_FORMAT_BAYER, #else .format = OTF_INPUT_FORMAT_STRGEN_COLORBAR_BAYER, #endif .bitwidth = OTF_INPUT_BIT_WIDTH_10BIT, .order = OTF_INPUT_ORDER_BAYER_GR_BG, .reserved[3] = 0, .reserved[4] = 66666, .err = OTF_INPUT_ERROR_NO, }, .dma1_input = { .cmd = DMA_INPUT_COMMAND_DISABLE, .width = 0, .height = 0, .format = 0, .bitwidth = 0, .plane = 0, .order = 0, .buffer_number = 0, .buffer_address = 0, .err = 0, }, .dma2_input = { .cmd = DMA_INPUT_COMMAND_DISABLE, .width = 0, .height = 0, .format = 0, .bitwidth = 0, .plane = 0, .order = 0, .buffer_number = 0, .buffer_address = 0, .err = 0, }, .aa = { .cmd = ISP_AA_COMMAND_START, .target = ISP_AA_TARGET_AF | ISP_AA_TARGET_AE | ISP_AA_TARGET_AWB, .mode = 0, .face = 0, .win_pos_x = 0, .win_pos_y = 0, .err = ISP_AF_ERROR_NO, }, .flash = { .cmd = ISP_FLASH_COMMAND_DISABLE, .redeye = ISP_FLASH_REDEYE_DISABLE, .err = ISP_FLASH_ERROR_NO, }, .awb = { .cmd = ISP_AWB_COMMAND_AUTO, .illumination = 0, .err = ISP_AWB_ERROR_NO, }, .effect = { .cmd = ISP_IMAGE_EFFECT_DISABLE, .err = ISP_IMAGE_EFFECT_ERROR_NO, }, .iso = { .cmd = ISP_ISO_COMMAND_AUTO, .value = 0, .err = ISP_ISO_ERROR_NO, }, .adjust = { .cmd = ISP_ADJUST_COMMAND_AUTO, .contrast = 0, .saturation = 0, .sharpness = 0, .exposure = 0, .brightness = 0, .hue = 0, .shutter_time_min = 0, .shutter_time_max = 66666, .err = ISP_ADJUST_ERROR_NO, }, .metering = { .cmd = ISP_METERING_COMMAND_MATRIX, .win_pos_x = 0, .win_pos_y = 0, .win_width = DEFAULT_CAPTURE_STILL_WIDTH, .win_height = DEFAULT_CAPTURE_STILL_HEIGHT, .err = 
ISP_METERING_ERROR_NO, }, .afc = { .cmd = ISP_AFC_COMMAND_AUTO, .manual = 0, .err = ISP_AFC_ERROR_NO, }, .otf_output = { .cmd = OTF_OUTPUT_COMMAND_ENABLE, .width = DEFAULT_CAPTURE_STILL_WIDTH, .height = DEFAULT_CAPTURE_STILL_HEIGHT, .format = OTF_OUTPUT_FORMAT_YUV444, .bitwidth = OTF_OUTPUT_BIT_WIDTH_12BIT, .order = OTF_OUTPUT_ORDER_BAYER_GR_BG, .err = OTF_OUTPUT_ERROR_NO, }, .dma1_output = { .cmd = DMA_OUTPUT_COMMAND_DISABLE, .width = 0, .height = 0, .format = 0, .bitwidth = 0, .plane = 0, .order = 0, .buffer_number = 0, .buffer_address = 0, .err = DMA_OUTPUT_ERROR_NO, }, .dma2_output = { .cmd = DMA_OUTPUT_COMMAND_DISABLE, .width = 0, .height = 0, .format = 0, .bitwidth = 0, .plane = 0, .order = 0, .buffer_number = 0, .buffer_address = 0, .err = DMA_OUTPUT_ERROR_NO, }, }; static const struct drc_param init_val_drc_capture = { .control = { .cmd = CONTROL_COMMAND_START, .bypass = CONTROL_BYPASS_ENABLE, .err = CONTROL_ERROR_NO, }, .otf_input = { .cmd = OTF_INPUT_COMMAND_ENABLE, .width = DEFAULT_CAPTURE_STILL_WIDTH, .height = DEFAULT_CAPTURE_STILL_HEIGHT, .format = OTF_INPUT_FORMAT_YUV444, .bitwidth = OTF_INPUT_BIT_WIDTH_12BIT, .order = OTF_INPUT_ORDER_BAYER_GR_BG, .err = OTF_INPUT_ERROR_NO, }, .dma_input = { .cmd = DMA_INPUT_COMMAND_DISABLE, .width = 0, .height = 0, .format = 0, .bitwidth = 0, .plane = 0, .order = 0, .buffer_number = 0, .buffer_address = 0, .err = 0, }, .otf_output = { .cmd = OTF_OUTPUT_COMMAND_ENABLE, .width = DEFAULT_CAPTURE_STILL_WIDTH, .height = DEFAULT_CAPTURE_STILL_HEIGHT, .format = OTF_OUTPUT_FORMAT_YUV444, .bitwidth = OTF_OUTPUT_BIT_WIDTH_8BIT, .order = OTF_OUTPUT_ORDER_BAYER_GR_BG, .err = OTF_OUTPUT_ERROR_NO, }, }; static const struct fd_param init_val_fd_capture = { .control = { .cmd = CONTROL_COMMAND_STOP, /* in FD case , bypass is not available */ .bypass = CONTROL_BYPASS_DISABLE, .err = CONTROL_ERROR_NO, }, .otf_input = { .cmd = OTF_INPUT_COMMAND_ENABLE, .width = DEFAULT_CAPTURE_STILL_WIDTH, .height = DEFAULT_CAPTURE_STILL_HEIGHT, .format = OTF_INPUT_FORMAT_YUV444, .bitwidth = OTF_OUTPUT_BIT_WIDTH_8BIT, .order = OTF_INPUT_ORDER_BAYER_GR_BG, .err = OTF_INPUT_ERROR_NO, }, .dma_input = { .cmd = DMA_INPUT_COMMAND_DISABLE, .width = 0, .height = 0, .format = 0, .bitwidth = 0, .plane = 0, .order = 0, .buffer_number = 0, .buffer_address = 0, .err = 0, }, .config = { .cmd = FD_CONFIG_COMMAND_MAXIMUM_NUMBER | FD_CONFIG_COMMAND_ROLL_ANGLE | FD_CONFIG_COMMAND_YAW_ANGLE | FD_CONFIG_COMMAND_SMILE_MODE | FD_CONFIG_COMMAND_BLINK_MODE | FD_CONFIG_COMMAND_EYES_DETECT | FD_CONFIG_COMMAND_MOUTH_DETECT | FD_CONFIG_COMMAND_ORIENTATION | FD_CONFIG_COMMAND_ORIENTATION_VALUE, .max_number = 5, .roll_angle = FD_CONFIG_ROLL_ANGLE_FULL, .yaw_angle = FD_CONFIG_YAW_ANGLE_45, .smile_mode = FD_CONFIG_SMILE_MODE_DISABLE, .blink_mode = FD_CONFIG_BLINK_MODE_DISABLE, .eye_detect = FD_CONFIG_EYES_DETECT_ENABLE, .mouth_detect = FD_CONFIG_MOUTH_DETECT_DISABLE, .orientation = FD_CONFIG_ORIENTATION_DISABLE, .orientation_value = 0, .err = ERROR_FD_NO, }, }; static const struct sensor_param init_val_sensor_preview_video = { .frame_rate = { .frame_rate = DEFAULT_PREVIEW_VIDEO_FRAMERATE, }, }; static const struct isp_param init_val_isp_preview_video = { .control = { .cmd = CONTROL_COMMAND_START, .bypass = CONTROL_BYPASS_DISABLE, .err = CONTROL_ERROR_NO, }, .otf_input = { .cmd = OTF_INPUT_COMMAND_ENABLE, .width = DEFAULT_PREVIEW_VIDEO_WIDTH, .height = DEFAULT_PREVIEW_VIDEO_HEIGHT, #ifndef ISP_STRGEN .format = OTF_INPUT_FORMAT_BAYER, #else .format = OTF_INPUT_FORMAT_STRGEN_COLORBAR_BAYER, #endif .bitwidth = 
OTF_INPUT_BIT_WIDTH_10BIT, .order = OTF_INPUT_ORDER_BAYER_GR_BG, .reserved[3] = 0, .reserved[4] = 66666, .err = OTF_INPUT_ERROR_NO, }, .dma1_input = { .cmd = DMA_INPUT_COMMAND_DISABLE, .width = 0, .height = 0, .format = 0, .bitwidth = 0, .plane = 0, .order = 0, .buffer_number = 0, .buffer_address = 0, .err = 0, }, .dma2_input = { .cmd = DMA_INPUT_COMMAND_DISABLE, .width = 0, .height = 0, .format = 0, .bitwidth = 0, .plane = 0, .order = 0, .buffer_number = 0, .buffer_address = 0, .err = 0, }, .aa = { .cmd = ISP_AA_COMMAND_START, .target = ISP_AA_TARGET_AF | ISP_AA_TARGET_AE | ISP_AA_TARGET_AWB, .mode = 0, .face = 0, .win_pos_x = 0, .win_pos_y = 0, .err = ISP_AF_ERROR_NO, }, .flash = { .cmd = ISP_FLASH_COMMAND_DISABLE, .redeye = ISP_FLASH_REDEYE_DISABLE, .err = ISP_FLASH_ERROR_NO, }, .awb = { .cmd = ISP_AWB_COMMAND_AUTO, .illumination = 0, .err = ISP_AWB_ERROR_NO, }, .effect = { .cmd = ISP_IMAGE_EFFECT_DISABLE, .err = ISP_IMAGE_EFFECT_ERROR_NO, }, .iso = { .cmd = ISP_ISO_COMMAND_AUTO, .value = 0, .err = ISP_ISO_ERROR_NO, }, .adjust = { .cmd = ISP_ADJUST_COMMAND_AUTO, .contrast = 0, .saturation = 0, .sharpness = 0, .exposure = 0, .brightness = 0, .hue = 0, .shutter_time_min = 0, .shutter_time_max = 33333, .err = ISP_ADJUST_ERROR_NO, }, .metering = { .cmd = ISP_METERING_COMMAND_MATRIX, .win_pos_x = 0, .win_pos_y = 0, .win_width = DEFAULT_PREVIEW_VIDEO_WIDTH, .win_height = DEFAULT_PREVIEW_VIDEO_HEIGHT, .err = ISP_METERING_ERROR_NO, }, .afc = { .cmd = ISP_AFC_COMMAND_AUTO, .manual = 0, .err = ISP_AFC_ERROR_NO, }, .otf_output = { .cmd = OTF_OUTPUT_COMMAND_ENABLE, .width = DEFAULT_PREVIEW_VIDEO_WIDTH, .height = DEFAULT_PREVIEW_VIDEO_HEIGHT, .format = OTF_OUTPUT_FORMAT_YUV444, .bitwidth = OTF_OUTPUT_BIT_WIDTH_12BIT, .order = OTF_OUTPUT_ORDER_BAYER_GR_BG, .err = OTF_OUTPUT_ERROR_NO, }, .dma1_output = { #ifndef ISP_DMA .cmd = DMA_OUTPUT_COMMAND_DISABLE, #else .cmd = DMA_OUTPUT_COMMAND_ENABLE, #endif .width = DEFAULT_PREVIEW_VIDEO_WIDTH, .height = DEFAULT_PREVIEW_VIDEO_HEIGHT, .format = DMA_OUTPUT_FORMAT_YUV422, .bitwidth = DMA_OUTPUT_BIT_WIDTH_8BIT, .plane = DMA_OUTPUT_PLANE_3, .order = DMA_INPUT_ORDER_NO, .buffer_number = 1, .buffer_address = 0x50060400, .err = DMA_OUTPUT_ERROR_NO, }, .dma2_output = { #ifndef ISP_DMA .cmd = DMA_OUTPUT_COMMAND_DISABLE, #else .cmd = DMA_OUTPUT_COMMAND_ENABLE, #endif .width = DEFAULT_PREVIEW_VIDEO_WIDTH, .height = DEFAULT_PREVIEW_VIDEO_HEIGHT, .format = DMA_OUTPUT_FORMAT_BAYER, .bitwidth = DMA_OUTPUT_BIT_WIDTH_10BIT, .plane = DMA_OUTPUT_PLANE_1, .order = DMA_OUTPUT_ORDER_GB_BG, .buffer_number = 1, .buffer_address = 0x501D0000, .err = DMA_OUTPUT_ERROR_NO, }, }; static const struct drc_param init_val_drc_preview_video = { .control = { .cmd = CONTROL_COMMAND_START, .bypass = CONTROL_BYPASS_ENABLE, .err = CONTROL_ERROR_NO, }, .otf_input = { .cmd = OTF_INPUT_COMMAND_ENABLE, .width = DEFAULT_PREVIEW_VIDEO_WIDTH, .height = DEFAULT_PREVIEW_VIDEO_HEIGHT, .format = OTF_INPUT_FORMAT_YUV444, .bitwidth = OTF_INPUT_BIT_WIDTH_12BIT, .order = OTF_INPUT_ORDER_BAYER_GR_BG, .err = OTF_INPUT_ERROR_NO, }, .dma_input = { .cmd = DMA_INPUT_COMMAND_DISABLE, .width = 0, .height = 0, .format = 0, .bitwidth = 0, .plane = 0, .order = 0, .buffer_number = 0, .buffer_address = 0, .err = 0, }, .otf_output = { .cmd = OTF_OUTPUT_COMMAND_ENABLE, .width = DEFAULT_PREVIEW_VIDEO_WIDTH, .height = DEFAULT_PREVIEW_VIDEO_HEIGHT, .format = OTF_OUTPUT_FORMAT_YUV444, .bitwidth = OTF_OUTPUT_BIT_WIDTH_8BIT, .order = OTF_OUTPUT_ORDER_BAYER_GR_BG, .err = OTF_OUTPUT_ERROR_NO, }, }; static const struct fd_param 
init_val_fd_preview_video = { .control = { .cmd = CONTROL_COMMAND_STOP, .bypass = CONTROL_BYPASS_DISABLE, .err = CONTROL_ERROR_NO, }, .otf_input = { .cmd = OTF_INPUT_COMMAND_ENABLE, .width = DEFAULT_PREVIEW_VIDEO_WIDTH, .height = DEFAULT_PREVIEW_VIDEO_HEIGHT, .format = OTF_INPUT_FORMAT_YUV444, .bitwidth = OTF_INPUT_BIT_WIDTH_8BIT, .order = OTF_INPUT_ORDER_BAYER_GR_BG, .err = OTF_INPUT_ERROR_NO, }, .dma_input = { .cmd = DMA_INPUT_COMMAND_DISABLE, .width = 0, .height = 0, .format = 0, .bitwidth = 0, .plane = 0, .order = 0, .buffer_number = 0, .buffer_address = 0, .err = 0, }, .config = { .cmd = FD_CONFIG_COMMAND_MAXIMUM_NUMBER | FD_CONFIG_COMMAND_ROLL_ANGLE | FD_CONFIG_COMMAND_YAW_ANGLE | FD_CONFIG_COMMAND_SMILE_MODE | FD_CONFIG_COMMAND_BLINK_MODE | FD_CONFIG_COMMAND_EYES_DETECT | FD_CONFIG_COMMAND_MOUTH_DETECT | FD_CONFIG_COMMAND_ORIENTATION | FD_CONFIG_COMMAND_ORIENTATION_VALUE, .max_number = 5, .roll_angle = FD_CONFIG_ROLL_ANGLE_FULL, .yaw_angle = FD_CONFIG_YAW_ANGLE_45, .smile_mode = FD_CONFIG_SMILE_MODE_DISABLE, .blink_mode = FD_CONFIG_BLINK_MODE_DISABLE, .eye_detect = FD_CONFIG_EYES_DETECT_ENABLE, .mouth_detect = FD_CONFIG_MOUTH_DETECT_DISABLE, .orientation = FD_CONFIG_ORIENTATION_DISABLE, .orientation_value = 0, .err = ERROR_FD_NO, }, }; static const struct sensor_param init_val_sensor_camcording = { .frame_rate = { .frame_rate = DEFAULT_CAPTURE_VIDEO_FRAMERATE, }, }; static const struct isp_param init_val_isp_camcording = { .control = { .cmd = CONTROL_COMMAND_START, .bypass = CONTROL_BYPASS_DISABLE, .err = CONTROL_ERROR_NO, }, .otf_input = { .cmd = OTF_INPUT_COMMAND_ENABLE, .width = DEFAULT_CAPTURE_VIDEO_WIDTH, .height = DEFAULT_CAPTURE_VIDEO_HEIGHT, #ifndef ISP_STRGEN .format = OTF_INPUT_FORMAT_BAYER, #else .format = OTF_INPUT_FORMAT_STRGEN_COLORBAR_BAYER, #endif .bitwidth = OTF_INPUT_BIT_WIDTH_10BIT, .order = OTF_INPUT_ORDER_BAYER_GR_BG, .err = OTF_INPUT_ERROR_NO, }, .dma1_input = { .cmd = DMA_INPUT_COMMAND_DISABLE, .width = 0, .height = 0, .format = 0, .bitwidth = 0, .plane = 0, .order = 0, .buffer_number = 0, .buffer_address = 0, .err = 0, }, .dma2_input = { .cmd = DMA_INPUT_COMMAND_DISABLE, .width = 0, .height = 0, .format = 0, .bitwidth = 0, .plane = 0, .order = 0, .buffer_number = 0, .buffer_address = 0, .err = 0, }, .aa = { .cmd = ISP_AA_COMMAND_START, .target = ISP_AA_TARGET_AF | ISP_AA_TARGET_AE | ISP_AA_TARGET_AWB, .mode = 0, .face = 0, .win_pos_x = 0, .win_pos_y = 0, .err = ISP_AF_ERROR_NO, }, .flash = { .cmd = ISP_FLASH_COMMAND_DISABLE, .redeye = ISP_FLASH_REDEYE_DISABLE, .err = ISP_FLASH_ERROR_NO, }, .awb = { .cmd = ISP_AWB_COMMAND_AUTO, .illumination = 0, .err = ISP_AWB_ERROR_NO, }, .effect = { .cmd = ISP_IMAGE_EFFECT_DISABLE, .err = ISP_IMAGE_EFFECT_ERROR_NO, }, .iso = { .cmd = ISP_ISO_COMMAND_AUTO, .value = 0, .err = ISP_ISO_ERROR_NO, }, .adjust = { .cmd = ISP_ADJUST_COMMAND_AUTO, .contrast = 0, .saturation = 0, .sharpness = 0, .exposure = 0, .brightness = 0, .hue = 0, .shutter_time_min = 0, .shutter_time_max = 33333, .err = ISP_ADJUST_ERROR_NO, }, .metering = { .cmd = ISP_METERING_COMMAND_MATRIX, .win_pos_x = 0, .win_pos_y = 0, .win_width = DEFAULT_CAPTURE_VIDEO_WIDTH, .win_height = DEFAULT_CAPTURE_VIDEO_HEIGHT, .err = ISP_METERING_ERROR_NO, }, .afc = { .cmd = ISP_AFC_COMMAND_AUTO, .manual = 0, .err = ISP_AFC_ERROR_NO, }, .otf_output = { .cmd = OTF_OUTPUT_COMMAND_ENABLE, .width = DEFAULT_CAPTURE_VIDEO_WIDTH, .height = DEFAULT_CAPTURE_VIDEO_HEIGHT, .format = OTF_OUTPUT_FORMAT_YUV444, .bitwidth = OTF_OUTPUT_BIT_WIDTH_12BIT, .order = OTF_OUTPUT_ORDER_BAYER_GR_BG, .err 
= OTF_OUTPUT_ERROR_NO, }, .dma1_output = { #ifndef ISP_DMA .cmd = DMA_OUTPUT_COMMAND_DISABLE, #else .cmd = DMA_OUTPUT_COMMAND_ENABLE, #endif .width = DEFAULT_CAPTURE_VIDEO_WIDTH, .height = DEFAULT_CAPTURE_VIDEO_HEIGHT, .format = DMA_OUTPUT_FORMAT_YUV422, .bitwidth = DMA_OUTPUT_BIT_WIDTH_8BIT, .plane = DMA_OUTPUT_PLANE_3, .order = DMA_INPUT_ORDER_NO, .buffer_number = 1, .buffer_address = 0x50060400, .err = DMA_OUTPUT_ERROR_NO, }, .dma2_output = { #ifndef ISP_DMA .cmd = DMA_OUTPUT_COMMAND_DISABLE, #else .cmd = DMA_OUTPUT_COMMAND_ENABLE, #endif .width = DEFAULT_CAPTURE_VIDEO_WIDTH, .height = DEFAULT_CAPTURE_VIDEO_HEIGHT, .format = DMA_OUTPUT_FORMAT_BAYER, .bitwidth = DMA_OUTPUT_BIT_WIDTH_10BIT, .plane = DMA_OUTPUT_PLANE_1, .order = DMA_OUTPUT_ORDER_GB_BG, .buffer_number = 1, .buffer_address = 0x501D0000, .err = DMA_OUTPUT_ERROR_NO, }, }; static const struct drc_param init_val_drc_camcording = { .control = { .cmd = CONTROL_COMMAND_START, .bypass = CONTROL_BYPASS_ENABLE, .err = CONTROL_ERROR_NO, }, .otf_input = { .cmd = OTF_INPUT_COMMAND_ENABLE, .width = DEFAULT_CAPTURE_VIDEO_WIDTH, .height = DEFAULT_CAPTURE_VIDEO_HEIGHT, .format = OTF_INPUT_FORMAT_YUV444, .bitwidth = OTF_INPUT_BIT_WIDTH_12BIT, .order = OTF_INPUT_ORDER_BAYER_GR_BG, .err = OTF_INPUT_ERROR_NO, }, .dma_input = { .cmd = DMA_INPUT_COMMAND_DISABLE, .width = 0, .height = 0, .format = 0, .bitwidth = 0, .plane = 0, .order = 0, .buffer_number = 0, .buffer_address = 0, .err = 0, }, .otf_output = { .cmd = OTF_OUTPUT_COMMAND_ENABLE, .width = DEFAULT_CAPTURE_VIDEO_WIDTH, .height = DEFAULT_CAPTURE_VIDEO_HEIGHT, .format = OTF_OUTPUT_FORMAT_YUV444, .bitwidth = OTF_OUTPUT_BIT_WIDTH_8BIT, .order = OTF_OUTPUT_ORDER_BAYER_GR_BG, .err = OTF_OUTPUT_ERROR_NO, }, }; static const struct fd_param init_val_fd_camcording = { .control = { .cmd = CONTROL_COMMAND_STOP, .bypass = CONTROL_BYPASS_DISABLE, .err = CONTROL_ERROR_NO, }, .otf_input = { .cmd = OTF_INPUT_COMMAND_ENABLE, .width = DEFAULT_CAPTURE_VIDEO_WIDTH, .height = DEFAULT_CAPTURE_VIDEO_HEIGHT, .format = OTF_INPUT_FORMAT_YUV444, .bitwidth = OTF_OUTPUT_BIT_WIDTH_8BIT, .order = OTF_INPUT_ORDER_BAYER_GR_BG, .err = OTF_INPUT_ERROR_NO, }, .dma_input = { .cmd = DMA_INPUT_COMMAND_DISABLE, .width = 0, .height = 0, .format = 0, .bitwidth = 0, .plane = 0, .order = 0, .buffer_number = 0, .buffer_address = 0, .err = 0, }, .config = { .cmd = FD_CONFIG_COMMAND_MAXIMUM_NUMBER | FD_CONFIG_COMMAND_ROLL_ANGLE | FD_CONFIG_COMMAND_YAW_ANGLE | FD_CONFIG_COMMAND_SMILE_MODE | FD_CONFIG_COMMAND_BLINK_MODE | FD_CONFIG_COMMAND_EYES_DETECT | FD_CONFIG_COMMAND_MOUTH_DETECT | FD_CONFIG_COMMAND_ORIENTATION | FD_CONFIG_COMMAND_ORIENTATION_VALUE, .max_number = 5, .roll_angle = FD_CONFIG_ROLL_ANGLE_FULL, .yaw_angle = FD_CONFIG_YAW_ANGLE_45, .smile_mode = FD_CONFIG_SMILE_MODE_DISABLE, .blink_mode = FD_CONFIG_BLINK_MODE_DISABLE, .eye_detect = FD_CONFIG_EYES_DETECT_ENABLE, .mouth_detect = FD_CONFIG_MOUTH_DETECT_DISABLE, .orientation = FD_CONFIG_ORIENTATION_DISABLE, .orientation_value = 0, .err = ERROR_FD_NO, }, }; /* Group 1. 
Interrupt */
void fimc_is_hw_set_intgr0_gd0(struct fimc_is_dev *dev)
{
	writel(INTGR0_INTGD0, dev->regs + INTGR0);
}

int fimc_is_hw_wait_intsr0_intsd0(struct fimc_is_dev *dev)
{
	u32 cfg = readl(dev->regs + INTSR0);
	u32 status = INTSR0_GET_INTSD0(cfg);

	/* busy-wait until the firmware has consumed the previous command */
	while (status) {
		cfg = readl(dev->regs + INTSR0);
		status = INTSR0_GET_INTSD0(cfg);
	}
	return 0;
}

int fimc_is_hw_wait_intmsr0_intmsd0(struct fimc_is_dev *dev)
{
	u32 cfg = readl(dev->regs + INTMSR0);
	u32 status = INTMSR0_GET_INTMSD0(cfg);

	while (status) {
		cfg = readl(dev->regs + INTMSR0);
		status = INTMSR0_GET_INTMSD0(cfg);
	}
	return 0;
}

int fimc_is_fw_clear_irq1(struct fimc_is_dev *dev, unsigned int intr_pos)
{
	writel((1 << intr_pos), dev->regs + INTCR1);
	return 0;
}

int fimc_is_fw_clear_irq2(struct fimc_is_dev *dev)
{
	u32 cfg = readl(dev->regs + INTSR2);

	writel(cfg, dev->regs + INTCR2);
	return 0;
}

int fimc_is_fw_clear_insr1(struct fimc_is_dev *dev)
{
	writel(0, dev->regs + INTGR1);
	return 0;
}

/* Group 2. Common */
int fimc_is_hw_get_sensor_max_framerate(struct fimc_is_dev *dev)
{
	int max_framerate = 0;

	switch (dev->sensor.sensor_type) {
	case SENSOR_S5K3H2_CSI_A:
	case SENSOR_S5K3H2_CSI_B:
		max_framerate = 15;
		break;
	case SENSOR_S5K3H7_CSI_A:
	case SENSOR_S5K3H7_CSI_B:
		max_framerate = 30;
		break;
	case SENSOR_S5K6A3_CSI_A:
	case SENSOR_S5K6A3_CSI_B:
		max_framerate = 30;
		break;
	case SENSOR_S5K4E5_CSI_A:
	case SENSOR_S5K4E5_CSI_B:
		max_framerate = 30;
		break;
	default:
		max_framerate = 15;
	}
	return max_framerate;
}

void fimc_is_hw_open_sensor(struct fimc_is_dev *dev, u32 id, u32 sensor_index)
{
	fimc_is_hw_wait_intmsr0_intmsd0(dev);
	writel(HIC_OPEN_SENSOR, dev->regs + ISSR0);
	writel(id, dev->regs + ISSR1);
	switch (sensor_index) {
	case SENSOR_S5K3H2_CSI_A:
		dev->af.use_af = 1;
		dev->sensor.sensor_type = SENSOR_S5K3H2_CSI_A;
		writel(SENSOR_NAME_S5K3H2, dev->regs + ISSR2);
		writel(SENSOR_CONTROL_I2C0, dev->regs + ISSR3);
		break;
	case SENSOR_S5K3H2_CSI_B:
		dev->af.use_af = 1;
		dev->sensor.sensor_type = SENSOR_S5K3H2_CSI_B;
		writel(SENSOR_NAME_S5K3H2, dev->regs + ISSR2);
		writel(SENSOR_CONTROL_I2C1, dev->regs + ISSR3);
		break;
	case SENSOR_S5K6A3_CSI_A:
		dev->af.use_af = 0;
		dev->sensor.sensor_type = SENSOR_S5K6A3_CSI_A;
		writel(SENSOR_NAME_S5K6A3, dev->regs + ISSR2);
		writel(SENSOR_CONTROL_I2C0, dev->regs + ISSR3);
		break;
	case SENSOR_S5K6A3_CSI_B:
		dev->af.use_af = 0;
		dev->sensor.sensor_type = SENSOR_S5K6A3_CSI_B;
		writel(SENSOR_NAME_S5K6A3, dev->regs + ISSR2);
		writel(SENSOR_CONTROL_I2C1, dev->regs + ISSR3);
		break;
	case SENSOR_S5K4E5_CSI_A:
		dev->af.use_af = 0;
		dev->sensor.sensor_type = SENSOR_S5K4E5_CSI_A;
		writel(SENSOR_NAME_S5K4E5, dev->regs + ISSR2);
		writel(SENSOR_CONTROL_I2C0, dev->regs + ISSR3);
		break;
	case SENSOR_S5K4E5_CSI_B:
		dev->af.use_af = 0;
		dev->sensor.sensor_type = SENSOR_S5K4E5_CSI_B;
		writel(SENSOR_NAME_S5K4E5, dev->regs + ISSR2);
		writel(SENSOR_CONTROL_I2C1, dev->regs + ISSR3);
		break;
	}
	/* Parameter3: Scenario ID (initial scenario) */
	writel(ISS_PREVIEW_STILL, dev->regs + ISSR4);
	fimc_is_hw_set_intgr0_gd0(dev);
}

void fimc_is_hw_close_sensor(struct fimc_is_dev *dev, u32 id)
{
	if (dev->sensor.id == id) {
		fimc_is_hw_wait_intmsr0_intmsd0(dev);
		writel(HIC_CLOSE_SENSOR, dev->regs + ISSR0);
		writel(dev->sensor.id, dev->regs + ISSR1);
		writel(dev->sensor.id, dev->regs + ISSR2);
		fimc_is_hw_set_intgr0_gd0(dev);
	}
}

void fimc_is_hw_disable_wdt(struct fimc_is_dev *dev)
{
	writel(0x0, dev->regs + WDT);
}

void fimc_is_hw_subip_poweroff(struct fimc_is_dev *dev)
{
	/* 1. Make FIMC-IS power-off state */
	fimc_is_hw_wait_intmsr0_intmsd0(dev);
	writel(HIC_POWER_DOWN, dev->regs + ISSR0);
	writel(dev->sensor.id, dev->regs + ISSR1);
	fimc_is_hw_set_intgr0_gd0(dev);
}

void fimc_is_hw_a5_power(struct fimc_is_dev *isp, int on)
{
	int ret = 0;
	struct device *dev = &isp->pdev->dev;

	printk(KERN_INFO "%s(%d)\n", __func__, on);

	if (on) {
		/* 2. enable ISP */
		clear_bit(FIMC_IS_PWR_ST_POWEROFF, &isp->power);
		set_bit(FIMC_IS_PWR_ST_POWERED, &isp->power);
		ret = pm_runtime_get_sync(dev);
	} else {
		clear_bit(FIMC_IS_PWR_ST_POWERED, &isp->power);
		/* stop FIMC-Lite & MIPI-CSI before cutting the clocks */
		stop_fimc_lite();
		mdelay(10);
		stop_mipi_csi();
#if defined(CONFIG_VIDEOBUF2_ION)
		if (isp->alloc_ctx)
			fimc_is_mem_suspend(isp->alloc_ctx);
#endif
		if (isp->pdata->clk_off) {
			isp->pdata->clk_off(isp->pdev);
		} else {
			dev_err(&isp->pdev->dev, "failed to clock off\n");
			return;
		}
		ret = pm_runtime_put_sync(dev);
	}
}

void fimc_is_hw_set_sensor_num(struct fimc_is_dev *dev)
{
	u32 cfg;

	writel(ISR_DONE, dev->regs + ISSR0);
	cfg = dev->sensor.id;
	writel(cfg, dev->regs + ISSR1);
	/* param 1 */
	writel(IHC_GET_SENSOR_NUMBER, dev->regs + ISSR2);
	/* param 2 */
	cfg = dev->sensor_num;
	writel(cfg, dev->regs + ISSR3);
}

void fimc_is_hw_set_load_setfile(struct fimc_is_dev *dev)
{
	u32 cfg;

	writel(ISR_DONE, dev->regs + ISSR0);
	cfg = dev->sensor.id;
	writel(cfg, dev->regs + ISSR1);
	/* param 1 */
	writel(IHC_LOAD_SET_FILE, dev->regs + ISSR2);
	/* param 2 */
	cfg = dev->sensor_num;
	writel(cfg, dev->regs + ISSR3);
}

int fimc_is_hw_get_sensor_num(struct fimc_is_dev *dev)
{
	u32 cfg = readl(dev->regs + ISSR11);

	if (dev->sensor_num == cfg)
		return 0;
	else
		return cfg;
}

int fimc_is_hw_set_param(struct fimc_is_dev *dev)
{
	fimc_is_hw_wait_intmsr0_intmsd0(dev);
	writel(HIC_SET_PARAMETER, dev->regs + ISSR0);
	writel(dev->sensor.id, dev->regs + ISSR1);
	writel(dev->scenario_id, dev->regs + ISSR2);
	writel(atomic_read(&dev->p_region_num), dev->regs + ISSR3);
	writel(dev->p_region_index1, dev->regs + ISSR4);
	writel(dev->p_region_index2, dev->regs + ISSR5);
	dbg("### set param\n");
	dbg("cmd :0x%08x\n", HIC_SET_PARAMETER);
	dbg("sensorID :0x%08x\n", dev->sensor.id);
	dbg("param1 :0x%08x\n", dev->scenario_id);
	dbg("param2 :0x%08x\n", atomic_read(&dev->p_region_num));
	dbg("param3 :0x%08x\n", (unsigned int)dev->p_region_index1);
	dbg("param4 :0x%08x\n", (unsigned int)dev->p_region_index2);
	fimc_is_hw_set_intgr0_gd0(dev);
	return 0;
}

int fimc_is_hw_get_param(struct fimc_is_dev *dev, u16 offset)
{
	dev->i2h_cmd.num_valid_args = offset;
	switch (offset) {
	case 1:
		dev->i2h_cmd.arg[0] = readl(dev->regs + ISSR12);
		dev->i2h_cmd.arg[1] = 0;
		dev->i2h_cmd.arg[2] = 0;
		dev->i2h_cmd.arg[3] = 0;
		break;
	case 2:
		dev->i2h_cmd.arg[0] = readl(dev->regs + ISSR12);
		dev->i2h_cmd.arg[1] = readl(dev->regs + ISSR13);
		dev->i2h_cmd.arg[2] = 0;
		dev->i2h_cmd.arg[3] = 0;
		break;
	case 3:
		dev->i2h_cmd.arg[0] = readl(dev->regs + ISSR12);
		dev->i2h_cmd.arg[1] = readl(dev->regs + ISSR13);
		dev->i2h_cmd.arg[2] = readl(dev->regs + ISSR14);
		dev->i2h_cmd.arg[3] = 0;
		break;
	case 4:
		dev->i2h_cmd.arg[0] = readl(dev->regs + ISSR12);
		dev->i2h_cmd.arg[1] = readl(dev->regs + ISSR13);
		dev->i2h_cmd.arg[2] = readl(dev->regs + ISSR14);
		dev->i2h_cmd.arg[3] = readl(dev->regs + ISSR15);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

void fimc_is_hw_set_stream(struct fimc_is_dev *dev, int on)
{
	if (on) {
		fimc_is_hw_wait_intmsr0_intmsd0(dev);
		writel(HIC_STREAM_ON, dev->regs + ISSR0);
		writel(dev->sensor.id, dev->regs + ISSR1);
		fimc_is_hw_set_intgr0_gd0(dev);
	} else {
		fimc_is_hw_wait_intmsr0_intmsd0(dev);
		writel(HIC_STREAM_OFF, dev->regs +
ISSR0); writel(dev->sensor.id, dev->regs + ISSR1); fimc_is_hw_set_intgr0_gd0(dev); } } void fimc_is_hw_change_mode(struct fimc_is_dev *dev, int val) { switch (val) { case IS_MODE_PREVIEW_STILL: dev->scenario_id = ISS_PREVIEW_STILL; fimc_is_hw_wait_intmsr0_intmsd0(dev); clear_bit(IS_ST_RUN, &dev->state); set_bit(IS_ST_CHANGE_MODE, &dev->state); writel(HIC_PREVIEW_STILL, dev->regs + ISSR0); writel(dev->sensor.id, dev->regs + ISSR1); fimc_is_hw_set_intgr0_gd0(dev); break; case IS_MODE_PREVIEW_VIDEO: dev->scenario_id = ISS_PREVIEW_VIDEO; fimc_is_hw_wait_intmsr0_intmsd0(dev); clear_bit(IS_ST_RUN, &dev->state); set_bit(IS_ST_CHANGE_MODE, &dev->state); writel(HIC_PREVIEW_VIDEO, dev->regs + ISSR0); writel(dev->sensor.id, dev->regs + ISSR1); fimc_is_hw_set_intgr0_gd0(dev); break; case IS_MODE_CAPTURE_STILL: dev->scenario_id = ISS_CAPTURE_STILL; fimc_is_hw_wait_intmsr0_intmsd0(dev); clear_bit(IS_ST_RUN, &dev->state); set_bit(IS_ST_CHANGE_MODE, &dev->state); writel(HIC_CAPTURE_STILL, dev->regs + ISSR0); writel(dev->sensor.id, dev->regs + ISSR1); fimc_is_hw_set_intgr0_gd0(dev); break; case IS_MODE_CAPTURE_VIDEO: dev->scenario_id = ISS_CAPTURE_VIDEO; fimc_is_hw_wait_intmsr0_intmsd0(dev); clear_bit(IS_ST_RUN, &dev->state); set_bit(IS_ST_CHANGE_MODE, &dev->state); writel(HIC_CAPTURE_VIDEO, dev->regs + ISSR0); writel(dev->sensor.id, dev->regs + ISSR1); fimc_is_hw_set_intgr0_gd0(dev); break; } } /* Group 3. Initial setting */ void fimc_is_hw_set_init(struct fimc_is_dev *dev) { u32 length; switch (dev->scenario_id) { case ISS_PREVIEW_STILL: IS_SET_PARAM_GLOBAL_SHOTMODE_CMD(dev, 0); IS_SET_PARAM_BIT(dev, PARAM_GLOBAL_SHOTMODE); IS_INC_PARAM_NUM(dev); IS_SENSOR_SET_FRAME_RATE(dev, DEFAULT_PREVIEW_STILL_FRAMERATE); IS_SET_PARAM_BIT(dev, PARAM_SENSOR_FRAME_RATE); IS_INC_PARAM_NUM(dev); /* ISP */ IS_ISP_SET_PARAM_CONTROL_CMD(dev, init_val_isp_preview_still.control.cmd); IS_ISP_SET_PARAM_CONTROL_BYPASS(dev, init_val_isp_preview_still.control.bypass); IS_ISP_SET_PARAM_CONTROL_ERR(dev, init_val_isp_preview_still.control.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_CONTROL); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_OTF_INPUT_CMD(dev, init_val_isp_preview_still.otf_input.cmd); IS_ISP_SET_PARAM_OTF_INPUT_WIDTH(dev, init_val_isp_preview_still.otf_input.width); IS_ISP_SET_PARAM_OTF_INPUT_HEIGHT(dev, init_val_isp_preview_still.otf_input.height); dev->sensor.width_prev = init_val_isp_preview_still.otf_input.width; dev->sensor.height_prev = init_val_isp_preview_still.otf_input.height; IS_ISP_SET_PARAM_OTF_INPUT_FORMAT(dev, init_val_isp_preview_still.otf_input.format); IS_ISP_SET_PARAM_OTF_INPUT_BITWIDTH(dev, init_val_isp_preview_still.otf_input.bitwidth); IS_ISP_SET_PARAM_OTF_INPUT_ORDER(dev, init_val_isp_preview_still.otf_input.order); IS_ISP_SET_PARAM_OTF_INPUT_ERR(dev, init_val_isp_preview_still.otf_input.err); IS_ISP_SET_PARAM_OTF_INPUT_RESERVED3(dev, init_val_isp_preview_still.otf_input.reserved[3]); IS_ISP_SET_PARAM_OTF_INPUT_RESERVED4(dev, init_val_isp_preview_still.otf_input.reserved[4]); IS_SET_PARAM_BIT(dev, PARAM_ISP_OTF_INPUT); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_DMA_INPUT1_CMD(dev, init_val_isp_preview_still.dma1_input.cmd); IS_ISP_SET_PARAM_DMA_INPUT1_WIDTH(dev, init_val_isp_preview_still.dma1_input.width); IS_ISP_SET_PARAM_DMA_INPUT1_HEIGHT(dev, init_val_isp_preview_still.dma1_input.height); IS_ISP_SET_PARAM_DMA_INPUT1_FORMAT(dev, init_val_isp_preview_still.dma1_input.format); IS_ISP_SET_PARAM_DMA_INPUT1_BITWIDTH(dev, init_val_isp_preview_still.dma1_input.bitwidth); IS_ISP_SET_PARAM_DMA_INPUT1_PLANE(dev, 
init_val_isp_preview_still.dma1_input.plane); IS_ISP_SET_PARAM_DMA_INPUT1_ORDER(dev, init_val_isp_preview_still.dma1_input.order); IS_ISP_SET_PARAM_DMA_INPUT1_BUFFERNUM(dev, init_val_isp_preview_still.dma1_input.buffer_number); IS_ISP_SET_PARAM_DMA_INPUT1_BUFFERADDR(dev, init_val_isp_preview_still.dma1_input.buffer_address); IS_ISP_SET_PARAM_DMA_INPUT1_ERR(dev, init_val_isp_preview_still.dma1_input.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_DMA1_INPUT); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_DMA_INPUT2_CMD(dev, init_val_isp_preview_still.dma2_input.cmd); IS_ISP_SET_PARAM_DMA_INPUT2_WIDTH(dev, init_val_isp_preview_still.dma2_input.width); IS_ISP_SET_PARAM_DMA_INPUT2_HEIGHT(dev, init_val_isp_preview_still.dma2_input.height); IS_ISP_SET_PARAM_DMA_INPUT2_FORMAT(dev, init_val_isp_preview_still.dma2_input.format); IS_ISP_SET_PARAM_DMA_INPUT2_BITWIDTH(dev, init_val_isp_preview_still.dma2_input.bitwidth); IS_ISP_SET_PARAM_DMA_INPUT2_PLANE(dev, init_val_isp_preview_still.dma2_input.plane); IS_ISP_SET_PARAM_DMA_INPUT2_ORDER(dev, init_val_isp_preview_still.dma2_input.order); IS_ISP_SET_PARAM_DMA_INPUT2_BUFFERNUM(dev, init_val_isp_preview_still.dma2_input.buffer_number); IS_ISP_SET_PARAM_DMA_INPUT2_BUFFERADDR(dev, init_val_isp_preview_still.dma2_input.buffer_address); IS_ISP_SET_PARAM_DMA_INPUT2_ERR(dev, init_val_isp_preview_still.dma2_input.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_DMA2_INPUT); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_AA_CMD(dev, init_val_isp_preview_still.aa.cmd); IS_ISP_SET_PARAM_AA_TARGET(dev, init_val_isp_preview_still.aa.target); IS_ISP_SET_PARAM_AA_MODE(dev, init_val_isp_preview_still.aa.mode); IS_ISP_SET_PARAM_AA_FACE(dev, init_val_isp_preview_still.aa.face); IS_ISP_SET_PARAM_AA_WIN_POS_X(dev, init_val_isp_preview_still.aa.win_pos_x); IS_ISP_SET_PARAM_AA_WIN_POS_Y(dev, init_val_isp_preview_still.aa.win_pos_y); IS_ISP_SET_PARAM_AA_ERR(dev, init_val_isp_preview_still.aa.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_AA); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_FLASH_CMD(dev, init_val_isp_preview_still.flash.cmd); IS_ISP_SET_PARAM_FLASH_REDEYE(dev, init_val_isp_preview_still.flash.redeye); IS_ISP_SET_PARAM_FLASH_ERR(dev, init_val_isp_preview_still.flash.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_FLASH); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_AWB_CMD(dev, init_val_isp_preview_still.awb.cmd); IS_ISP_SET_PARAM_AWB_ILLUMINATION(dev, init_val_isp_preview_still.awb.illumination); IS_ISP_SET_PARAM_AWB_ERR(dev, init_val_isp_preview_still.awb.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_AWB); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_EFFECT_CMD(dev, init_val_isp_preview_still.effect.cmd); IS_ISP_SET_PARAM_EFFECT_ERR(dev, init_val_isp_preview_still.effect.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_IMAGE_EFFECT); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_ISO_CMD(dev, init_val_isp_preview_still.iso.cmd); IS_ISP_SET_PARAM_ISO_VALUE(dev, init_val_isp_preview_still.iso.value); IS_ISP_SET_PARAM_ISO_ERR(dev, init_val_isp_preview_still.iso.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_ISO); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_ADJUST_CMD(dev, init_val_isp_preview_still.adjust.cmd); IS_ISP_SET_PARAM_ADJUST_CONTRAST(dev, init_val_isp_preview_still.adjust.contrast); IS_ISP_SET_PARAM_ADJUST_SATURATION(dev, init_val_isp_preview_still.adjust.saturation); IS_ISP_SET_PARAM_ADJUST_SHARPNESS(dev, init_val_isp_preview_still.adjust.sharpness); IS_ISP_SET_PARAM_ADJUST_EXPOSURE(dev, init_val_isp_preview_still.adjust.exposure); IS_ISP_SET_PARAM_ADJUST_BRIGHTNESS(dev, init_val_isp_preview_still.adjust.brightness); IS_ISP_SET_PARAM_ADJUST_HUE(dev, 
init_val_isp_preview_still.adjust.hue); IS_ISP_SET_PARAM_ADJUST_SHUTTER_TIME_MIN(dev, init_val_isp_preview_still.adjust.shutter_time_min); IS_ISP_SET_PARAM_ADJUST_SHUTTER_TIME_MAX(dev, init_val_isp_preview_still.adjust.shutter_time_max); IS_ISP_SET_PARAM_ADJUST_ERR(dev, init_val_isp_preview_still.adjust.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_METERING_CMD(dev, init_val_isp_preview_still.metering.cmd); IS_ISP_SET_PARAM_METERING_WIN_POS_X(dev, init_val_isp_preview_still.metering.win_pos_x); IS_ISP_SET_PARAM_METERING_WIN_POS_Y(dev, init_val_isp_preview_still.metering.win_pos_y); IS_ISP_SET_PARAM_METERING_WIN_WIDTH(dev, init_val_isp_preview_still.metering.win_width); IS_ISP_SET_PARAM_METERING_WIN_HEIGHT(dev, init_val_isp_preview_still.metering.win_height); IS_ISP_SET_PARAM_METERING_ERR(dev, init_val_isp_preview_still.metering.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_METERING); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_AFC_CMD(dev, init_val_isp_preview_still.afc.cmd); IS_ISP_SET_PARAM_AFC_MANUAL(dev, init_val_isp_preview_still.afc.manual); IS_ISP_SET_PARAM_AFC_ERR(dev, init_val_isp_preview_still.afc.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_AFC); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_OTF_OUTPUT_CMD(dev, init_val_isp_preview_still.otf_output.cmd); IS_ISP_SET_PARAM_OTF_OUTPUT_WIDTH(dev, init_val_isp_preview_still.otf_output.width); IS_ISP_SET_PARAM_OTF_OUTPUT_HEIGHT(dev, init_val_isp_preview_still.otf_output.height); IS_ISP_SET_PARAM_OTF_OUTPUT_FORMAT(dev, init_val_isp_preview_still.otf_output.format); IS_ISP_SET_PARAM_OTF_OUTPUT_BITWIDTH(dev, init_val_isp_preview_still.otf_output.bitwidth); IS_ISP_SET_PARAM_OTF_OUTPUT_ORDER(dev, init_val_isp_preview_still.otf_output.order); IS_ISP_SET_PARAM_OTF_OUTPUT_ERR(dev, init_val_isp_preview_still.otf_output.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_OTF_OUTPUT); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_DMA_OUTPUT1_CMD(dev, init_val_isp_preview_still.dma1_output.cmd); IS_ISP_SET_PARAM_DMA_OUTPUT1_WIDTH(dev, init_val_isp_preview_still.dma1_output.width); IS_ISP_SET_PARAM_DMA_OUTPUT1_HEIGHT(dev, init_val_isp_preview_still.dma1_output.height); IS_ISP_SET_PARAM_DMA_OUTPUT1_FORMAT(dev, init_val_isp_preview_still.dma1_output.format); IS_ISP_SET_PARAM_DMA_OUTPUT1_BITWIDTH(dev, init_val_isp_preview_still.dma1_output.bitwidth); IS_ISP_SET_PARAM_DMA_OUTPUT1_PLANE(dev, init_val_isp_preview_still.dma1_output.plane); IS_ISP_SET_PARAM_DMA_OUTPUT1_ORDER(dev, init_val_isp_preview_still.dma1_output.order); IS_ISP_SET_PARAM_DMA_OUTPUT1_BUFFER_NUMBER(dev, init_val_isp_preview_still.dma1_output.buffer_number); IS_ISP_SET_PARAM_DMA_OUTPUT1_BUFFER_ADDRESS(dev, init_val_isp_preview_still.dma1_output.buffer_address); IS_ISP_SET_PARAM_DMA_OUTPUT1_ERR(dev, init_val_isp_preview_still.dma1_output.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_DMA1_OUTPUT); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_DMA_OUTPUT2_CMD(dev, init_val_isp_preview_still.dma2_output.cmd); IS_ISP_SET_PARAM_DMA_OUTPUT2_WIDTH(dev, init_val_isp_preview_still.dma2_output.width); IS_ISP_SET_PARAM_DMA_OUTPUT2_HEIGHT(dev, init_val_isp_preview_still.dma2_output.height); IS_ISP_SET_PARAM_DMA_OUTPUT2_FORMAT(dev, init_val_isp_preview_still.dma2_output.format); IS_ISP_SET_PARAM_DMA_OUTPUT2_BITWIDTH(dev, init_val_isp_preview_still.dma2_output.bitwidth); IS_ISP_SET_PARAM_DMA_OUTPUT2_PLANE(dev, init_val_isp_preview_still.dma2_output.plane); IS_ISP_SET_PARAM_DMA_OUTPUT2_ORDER(dev, init_val_isp_preview_still.dma2_output.order); IS_ISP_SET_PARAM_DMA_OUTPUT2_BUFFER_NUMBER(dev, 
init_val_isp_preview_still.dma2_output.buffer_number); IS_ISP_SET_PARAM_DMA_OUTPUT2_BUFFER_ADDRESS(dev, init_val_isp_preview_still.dma2_output.buffer_address); IS_ISP_SET_PARAM_DMA_OUTPUT2_ERR(dev, init_val_isp_preview_still.dma2_output.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_DMA2_OUTPUT); IS_INC_PARAM_NUM(dev); /* DRC */ IS_DRC_SET_PARAM_CONTROL_CMD(dev, init_val_drc_preview_still.control.cmd); IS_DRC_SET_PARAM_CONTROL_BYPASS(dev, init_val_drc_preview_still.control.bypass); IS_DRC_SET_PARAM_CONTROL_ERR(dev, init_val_drc_preview_still.control.err); IS_SET_PARAM_BIT(dev, PARAM_DRC_CONTROL); IS_INC_PARAM_NUM(dev); IS_DRC_SET_PARAM_OTF_INPUT_CMD(dev, init_val_drc_preview_still.otf_input.cmd); IS_DRC_SET_PARAM_OTF_INPUT_WIDTH(dev, init_val_drc_preview_still.otf_input.width); IS_DRC_SET_PARAM_OTF_INPUT_HEIGHT(dev, init_val_drc_preview_still.otf_input.height); IS_DRC_SET_PARAM_OTF_INPUT_FORMAT(dev, init_val_drc_preview_still.otf_input.format); IS_DRC_SET_PARAM_OTF_INPUT_BITWIDTH(dev, init_val_drc_preview_still.otf_input.bitwidth); IS_DRC_SET_PARAM_OTF_INPUT_ORDER(dev, init_val_drc_preview_still.otf_input.order); IS_DRC_SET_PARAM_OTF_INPUT_ERR(dev, init_val_drc_preview_still.otf_input.err); IS_SET_PARAM_BIT(dev, PARAM_DRC_OTF_INPUT); IS_INC_PARAM_NUM(dev); IS_DRC_SET_PARAM_DMA_INPUT_CMD(dev, init_val_drc_preview_still.dma_input.cmd); IS_DRC_SET_PARAM_DMA_INPUT_WIDTH(dev, init_val_drc_preview_still.dma_input.width); IS_DRC_SET_PARAM_DMA_INPUT_HEIGHT(dev, init_val_drc_preview_still.dma_input.height); IS_DRC_SET_PARAM_DMA_INPUT_FORMAT(dev, init_val_drc_preview_still.dma_input.format); IS_DRC_SET_PARAM_DMA_INPUT_BITWIDTH(dev, init_val_drc_preview_still.dma_input.bitwidth); IS_DRC_SET_PARAM_DMA_INPUT_PLANE(dev, init_val_drc_preview_still.dma_input.plane); IS_DRC_SET_PARAM_DMA_INPUT_ORDER(dev, init_val_drc_preview_still.dma_input.order); IS_DRC_SET_PARAM_DMA_INPUT_BUFFERNUM(dev, init_val_drc_preview_still.dma_input.buffer_number); IS_DRC_SET_PARAM_DMA_INPUT_BUFFERADDR(dev, init_val_drc_preview_still.dma_input.buffer_address); IS_DRC_SET_PARAM_DMA_INPUT_ERR(dev, init_val_drc_preview_still.dma_input.err); IS_SET_PARAM_BIT(dev, PARAM_DRC_DMA_INPUT); IS_INC_PARAM_NUM(dev); IS_DRC_SET_PARAM_OTF_OUTPUT_CMD(dev, init_val_drc_preview_still.otf_output.cmd); IS_DRC_SET_PARAM_OTF_OUTPUT_WIDTH(dev, init_val_drc_preview_still.otf_output.width); IS_DRC_SET_PARAM_OTF_OUTPUT_HEIGHT(dev, init_val_drc_preview_still.otf_output.height); IS_DRC_SET_PARAM_OTF_OUTPUT_FORMAT(dev, init_val_drc_preview_still.otf_output.format); IS_DRC_SET_PARAM_OTF_OUTPUT_BITWIDTH(dev, init_val_drc_preview_still.otf_output.bitwidth); IS_DRC_SET_PARAM_OTF_OUTPUT_ORDER(dev, init_val_drc_preview_still.otf_output.order); IS_DRC_SET_PARAM_OTF_OUTPUT_ERR(dev, init_val_drc_preview_still.otf_output.err); length = init_val_drc_preview_still.otf_output.width*init_val_drc_preview_still.otf_output.height; IS_SET_PARAM_BIT(dev, PARAM_DRC_OTF_OUTPUT); IS_INC_PARAM_NUM(dev); /* SCALER-C Macros */ IS_SCALERC_SET_PARAM_CONTROL_CMD(dev, init_val_scalerc_preview_still.control.cmd); IS_SCALERC_SET_PARAM_CONTROL_BYPASS(dev, init_val_scalerc_preview_still.control.bypass); IS_SCALERC_SET_PARAM_CONTROL_ERR(dev, init_val_scalerc_preview_still.control.err); IS_SET_PARAM_BIT(dev, PARAM_SCALERC_CONTROL); IS_INC_PARAM_NUM(dev); IS_SCALERC_SET_PARAM_OTF_INPUT_CMD(dev, init_val_scalerc_preview_still.otf_input.cmd); IS_SCALERC_SET_PARAM_OTF_INPUT_WIDTH(dev, init_val_scalerc_preview_still.otf_input.width); IS_SCALERC_SET_PARAM_OTF_INPUT_HEIGHT(dev, 
init_val_scalerc_preview_still.otf_input.height); IS_SCALERC_SET_PARAM_OTF_INPUT_FORMAT(dev, init_val_scalerc_preview_still.otf_input.format); IS_SCALERC_SET_PARAM_OTF_INPUT_BITWIDTH(dev, init_val_scalerc_preview_still.otf_input.bitwidth); IS_SCALERC_SET_PARAM_OTF_INPUT_ORDER(dev, init_val_scalerc_preview_still.otf_input.order); IS_SCALERC_SET_PARAM_OTF_INPUT_ERR(dev, init_val_scalerc_preview_still.otf_input.err); IS_SET_PARAM_BIT(dev, PARAM_SCALERC_OTF_INPUT); IS_INC_PARAM_NUM(dev); IS_SCALERC_SET_PARAM_EFFECT_CMD(dev, init_val_scalerc_preview_still.effect.cmd); IS_SCALERC_SET_PARAM_EFFECT_ERR(dev, init_val_scalerc_preview_still.effect.err); IS_SET_PARAM_BIT(dev, PARAM_SCALERC_IMAGE_EFFECT); IS_INC_PARAM_NUM(dev); IS_SCALERC_SET_PARAM_CROP_CMD(dev, init_val_scalerc_preview_still.crop.cmd); IS_SCALERC_SET_PARAM_CROP_POS_X(dev, init_val_scalerc_preview_still.crop.pos_x); IS_SCALERC_SET_PARAM_CROP_POS_Y(dev, init_val_scalerc_preview_still.crop.pos_y); IS_SCALERC_SET_PARAM_CROP_WIDTH(dev, init_val_scalerc_preview_still.crop.crop_width); IS_SCALERC_SET_PARAM_CROP_HEIGHT(dev, init_val_scalerc_preview_still.crop.crop_height); IS_SCALERC_SET_PARAM_CROP_ERR(dev, init_val_scalerc_preview_still.crop.err); IS_SET_PARAM_BIT(dev, PARAM_SCALERC_CROP); IS_INC_PARAM_NUM(dev); IS_SCALERC_SET_PARAM_SCALING_CMD(dev, init_val_scalerc_preview_still.scale.cmd); IS_SCALERC_SET_PARAM_SCALING_PRE_H_RATIO(dev, init_val_scalerc_preview_still.scale.pre_h_ratio); IS_SCALERC_SET_PARAM_SCALING_PRE_V_RATIO(dev, init_val_scalerc_preview_still.scale.pre_v_ratio); IS_SCALERC_SET_PARAM_SCALING_SH_FACTOR(dev, init_val_scalerc_preview_still.scale.sh_factor); IS_SCALERC_SET_PARAM_SCALING_H_RATIO(dev, init_val_scalerc_preview_still.scale.h_ratio); IS_SCALERC_SET_PARAM_SCALING_V_RATIO(dev, init_val_scalerc_preview_still.scale.v_ratio); IS_SCALERC_SET_PARAM_SCALING_ERR(dev, init_val_scalerc_preview_still.scale.err); IS_SET_PARAM_BIT(dev, PARAM_SCALERC_SCALING); IS_INC_PARAM_NUM(dev); IS_SCALERC_SET_PARAM_OTF_OUTPUT_CMD(dev, init_val_scalerc_preview_still.otf_output.cmd); IS_SCALERC_SET_PARAM_OTF_OUTPUT_WIDTH(dev, init_val_scalerc_preview_still.otf_output.width); IS_SCALERC_SET_PARAM_OTF_OUTPUT_HEIGHT(dev, init_val_scalerc_preview_still.otf_output.height); IS_SCALERC_SET_PARAM_OTF_OUTPUT_FORMAT(dev, init_val_scalerc_preview_still.otf_output.format); IS_SCALERC_SET_PARAM_OTF_OUTPUT_BITWIDTH(dev, init_val_scalerc_preview_still.otf_output.bitwidth); IS_SCALERC_SET_PARAM_OTF_OUTPUT_ORDER(dev, init_val_scalerc_preview_still.otf_output.order); IS_SCALERC_SET_PARAM_OTF_OUTPUT_ERR(dev, init_val_scalerc_preview_still.otf_output.err); IS_SET_PARAM_BIT(dev, PARAM_SCALERC_OTF_OUTPUT); IS_INC_PARAM_NUM(dev); IS_SCALERC_SET_PARAM_DMA_OUTPUT_CMD(dev, init_val_scalerc_preview_still.dma_output.cmd); IS_SCALERC_SET_PARAM_DMA_OUTPUT_WIDTH(dev, init_val_scalerc_preview_still.dma_output.width); IS_SCALERC_SET_PARAM_DMA_OUTPUT_HEIGHT(dev, init_val_scalerc_preview_still.dma_output.height); IS_SCALERC_SET_PARAM_DMA_OUTPUT_FORMAT(dev, init_val_scalerc_preview_still.dma_output.format); IS_SCALERC_SET_PARAM_DMA_OUTPUT_BITWIDTH(dev, init_val_scalerc_preview_still.dma_output.bitwidth); IS_SCALERC_SET_PARAM_DMA_OUTPUT_PLANE(dev, init_val_scalerc_preview_still.dma_output.plane); IS_SCALERC_SET_PARAM_DMA_OUTPUT_ORDER(dev, init_val_scalerc_preview_still.dma_output.order); IS_SCALERC_SET_PARAM_DMA_OUTPUT_BUFFERNUM(dev, init_val_scalerc_preview_still.dma_output.buffer_number); IS_SCALERC_SET_PARAM_DMA_OUTPUT_BUFFERADDR(dev, 
init_val_scalerc_preview_still.dma_output.buffer_address); IS_SCALERC_SET_PARAM_DMA_OUTPUT_OUTPATH(dev, init_val_scalerc_preview_still.dma_output.reserved[0]); IS_SCALERC_SET_PARAM_DMA_OUTPUT_ERR(dev, init_val_scalerc_preview_still.dma_output.err); IS_SET_PARAM_BIT(dev, PARAM_SCALERC_DMA_OUTPUT); IS_INC_PARAM_NUM(dev); /* ODC Macros */ IS_ODC_SET_PARAM_CONTROL_CMD(dev, init_val_odc_preview_still.control.cmd); IS_ODC_SET_PARAM_CONTROL_BYPASS(dev, init_val_odc_preview_still.control.bypass); IS_ODC_SET_PARAM_CONTROL_ERR(dev, init_val_odc_preview_still.control.err); IS_SET_PARAM_BIT(dev, PARAM_ODC_CONTROL); IS_INC_PARAM_NUM(dev); IS_ODC_SET_PARAM_OTF_INPUT_CMD(dev, init_val_odc_preview_still.otf_input.cmd); IS_ODC_SET_PARAM_OTF_INPUT_WIDTH(dev, init_val_odc_preview_still.otf_input.width); IS_ODC_SET_PARAM_OTF_INPUT_HEIGHT(dev, init_val_odc_preview_still.otf_input.height); IS_ODC_SET_PARAM_OTF_INPUT_FORMAT(dev, init_val_odc_preview_still.otf_input.format); IS_ODC_SET_PARAM_OTF_INPUT_BITWIDTH(dev, init_val_odc_preview_still.otf_input.bitwidth); IS_ODC_SET_PARAM_OTF_INPUT_ORDER(dev, init_val_odc_preview_still.otf_input.order); IS_ODC_SET_PARAM_OTF_INPUT_ERR(dev, init_val_odc_preview_still.otf_input.err); IS_SET_PARAM_BIT(dev, PARAM_ODC_OTF_INPUT); IS_INC_PARAM_NUM(dev); IS_ODC_SET_PARAM_OTF_OUTPUT_CMD(dev, init_val_odc_preview_still.otf_output.cmd); IS_ODC_SET_PARAM_OTF_OUTPUT_WIDTH(dev, init_val_odc_preview_still.otf_output.width); IS_ODC_SET_PARAM_OTF_OUTPUT_HEIGHT(dev, init_val_odc_preview_still.otf_output.height); IS_ODC_SET_PARAM_OTF_OUTPUT_FORMAT(dev, init_val_odc_preview_still.otf_output.format); IS_ODC_SET_PARAM_OTF_OUTPUT_BITWIDTH(dev, init_val_odc_preview_still.otf_output.bitwidth); IS_ODC_SET_PARAM_OTF_OUTPUT_ORDER(dev, init_val_odc_preview_still.otf_output.order); IS_ODC_SET_PARAM_OTF_OUTPUT_ERR(dev, init_val_odc_preview_still.otf_output.err); IS_SET_PARAM_BIT(dev, PARAM_ODC_OTF_OUTPUT); IS_INC_PARAM_NUM(dev); /* DIS Macros */ IS_DIS_SET_PARAM_CONTROL_CMD(dev, init_val_dis_preview_still.control.cmd); IS_DIS_SET_PARAM_CONTROL_BYPASS(dev, init_val_dis_preview_still.control.bypass); IS_DIS_SET_PARAM_CONTROL_ERR(dev, init_val_dis_preview_still.control.err); IS_SET_PARAM_BIT(dev, PARAM_DIS_CONTROL); IS_INC_PARAM_NUM(dev); IS_DIS_SET_PARAM_OTF_INPUT_CMD(dev, init_val_dis_preview_still.otf_input.cmd); IS_DIS_SET_PARAM_OTF_INPUT_WIDTH(dev, init_val_dis_preview_still.otf_input.width); IS_DIS_SET_PARAM_OTF_INPUT_HEIGHT(dev, init_val_dis_preview_still.otf_input.height); IS_DIS_SET_PARAM_OTF_INPUT_FORMAT(dev, init_val_dis_preview_still.otf_input.format); IS_DIS_SET_PARAM_OTF_INPUT_BITWIDTH(dev, init_val_dis_preview_still.otf_input.bitwidth); IS_DIS_SET_PARAM_OTF_INPUT_ORDER(dev, init_val_dis_preview_still.otf_input.order); IS_DIS_SET_PARAM_OTF_INPUT_ERR(dev, init_val_dis_preview_still.otf_input.err); IS_SET_PARAM_BIT(dev, PARAM_DIS_OTF_INPUT); IS_INC_PARAM_NUM(dev); IS_DIS_SET_PARAM_OTF_OUTPUT_CMD(dev, init_val_dis_preview_still.otf_output.cmd); IS_DIS_SET_PARAM_OTF_OUTPUT_WIDTH(dev, init_val_dis_preview_still.otf_output.width); IS_DIS_SET_PARAM_OTF_OUTPUT_HEIGHT(dev, init_val_dis_preview_still.otf_output.height); IS_DIS_SET_PARAM_OTF_OUTPUT_FORMAT(dev, init_val_dis_preview_still.otf_output.format); IS_DIS_SET_PARAM_OTF_OUTPUT_BITWIDTH(dev, init_val_dis_preview_still.otf_output.bitwidth); IS_DIS_SET_PARAM_OTF_OUTPUT_ORDER(dev, init_val_dis_preview_still.otf_output.order); IS_DIS_SET_PARAM_OTF_OUTPUT_ERR(dev, init_val_dis_preview_still.otf_output.err); IS_SET_PARAM_BIT(dev, PARAM_DIS_OTF_OUTPUT); 
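/*
 * Note: every hardware block above and below is programmed with the same
 * three-step idiom that fimc_is_hw_set_param() later consumes:
 *
 *	IS_xxx_SET_PARAM_yyy(dev, val);	  write the value into the shared
 *					  parameter region
 *	IS_SET_PARAM_BIT(dev, PARAM_xxx); flag the block as updated
 *					  (p_region_index1/2, which
 *					  fimc_is_hw_set_param() passes to the
 *					  firmware in ISSR4/ISSR5)
 *	IS_INC_PARAM_NUM(dev);		  bump p_region_num, passed in ISSR3
 */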
IS_INC_PARAM_NUM(dev); /* TDNR Macros */ IS_TDNR_SET_PARAM_CONTROL_CMD(dev, init_val_tdnr_preview_still.control.cmd); IS_TDNR_SET_PARAM_CONTROL_BYPASS(dev, init_val_tdnr_preview_still.control.bypass); IS_TDNR_SET_PARAM_CONTROL_ERR(dev, init_val_tdnr_preview_still.control.err); IS_SET_PARAM_BIT(dev, PARAM_TDNR_CONTROL); IS_INC_PARAM_NUM(dev); IS_TDNR_SET_PARAM_OTF_INPUT_CMD(dev, init_val_tdnr_preview_still.otf_input.cmd); IS_TDNR_SET_PARAM_OTF_INPUT_WIDTH(dev, init_val_tdnr_preview_still.otf_input.width); IS_TDNR_SET_PARAM_OTF_INPUT_HEIGHT(dev, init_val_tdnr_preview_still.otf_input.height); IS_TDNR_SET_PARAM_OTF_INPUT_FORMAT(dev, init_val_tdnr_preview_still.otf_input.format); IS_TDNR_SET_PARAM_OTF_INPUT_BITWIDTH(dev, init_val_tdnr_preview_still.otf_input.bitwidth); IS_TDNR_SET_PARAM_OTF_INPUT_ORDER(dev, init_val_tdnr_preview_still.otf_input.order); IS_TDNR_SET_PARAM_OTF_INPUT_ERR(dev, init_val_tdnr_preview_still.otf_input.err); IS_SET_PARAM_BIT(dev, PARAM_TDNR_OTF_INPUT); IS_INC_PARAM_NUM(dev); IS_TDNR_SET_PARAM_FRAME_CMD(dev, init_val_tdnr_preview_still.frame.cmd); IS_TDNR_SET_PARAM_FRAME_ERR(dev, init_val_tdnr_preview_still.frame.err); IS_SET_PARAM_BIT(dev, PARAM_TDNR_1ST_FRAME); IS_INC_PARAM_NUM(dev); IS_TDNR_SET_PARAM_OTF_OUTPUT_CMD(dev, init_val_tdnr_preview_still.otf_output.cmd); IS_TDNR_SET_PARAM_OTF_OUTPUT_WIDTH(dev, init_val_tdnr_preview_still.otf_output.width); IS_TDNR_SET_PARAM_OTF_OUTPUT_HEIGHT(dev, init_val_tdnr_preview_still.otf_output.height); IS_TDNR_SET_PARAM_OTF_OUTPUT_FORMAT(dev, init_val_tdnr_preview_still.otf_output.format); IS_TDNR_SET_PARAM_OTF_OUTPUT_BITWIDTH(dev, init_val_tdnr_preview_still.otf_output.bitwidth); IS_TDNR_SET_PARAM_OTF_OUTPUT_ORDER(dev, init_val_tdnr_preview_still.otf_output.order); IS_TDNR_SET_PARAM_OTF_OUTPUT_ERR(dev, init_val_tdnr_preview_still.otf_output.err); IS_SET_PARAM_BIT(dev, PARAM_TDNR_OTF_OUTPUT); IS_INC_PARAM_NUM(dev); IS_TDNR_SET_PARAM_DMA_OUTPUT_CMD(dev, init_val_tdnr_preview_still.dma_output.cmd); IS_TDNR_SET_PARAM_DMA_OUTPUT_WIDTH(dev, init_val_tdnr_preview_still.dma_output.width); IS_TDNR_SET_PARAM_DMA_OUTPUT_HEIGHT(dev, init_val_tdnr_preview_still.dma_output.height); IS_TDNR_SET_PARAM_DMA_OUTPUT_FORMAT(dev, init_val_tdnr_preview_still.dma_output.format); IS_TDNR_SET_PARAM_DMA_OUTPUT_BITWIDTH(dev, init_val_tdnr_preview_still.dma_output.bitwidth); IS_TDNR_SET_PARAM_DMA_OUTPUT_PLANE(dev, init_val_tdnr_preview_still.dma_output.plane); IS_TDNR_SET_PARAM_DMA_OUTPUT_ORDER(dev, init_val_tdnr_preview_still.dma_output.order); IS_TDNR_SET_PARAM_DMA_OUTPUT_BUFFERNUM(dev, init_val_tdnr_preview_still.dma_output.buffer_number); IS_TDNR_SET_PARAM_DMA_OUTPUT_BUFFERADDR(dev, init_val_tdnr_preview_still.dma_output.buffer_address); IS_TDNR_SET_PARAM_DMA_OUTPUT_ERR(dev, init_val_tdnr_preview_still.dma_output.err); IS_SET_PARAM_BIT(dev, PARAM_TDNR_DMA_OUTPUT); IS_INC_PARAM_NUM(dev); /* SCALER-P Macros */ IS_SCALERP_SET_PARAM_CONTROL_CMD(dev, init_val_scalerp_preview_still.control.cmd); IS_SCALERP_SET_PARAM_CONTROL_BYPASS(dev, init_val_scalerp_preview_still.control.bypass); IS_SCALERP_SET_PARAM_CONTROL_ERR(dev, init_val_scalerp_preview_still.control.err); IS_SET_PARAM_BIT(dev, PARAM_SCALERP_CONTROL); IS_INC_PARAM_NUM(dev); IS_SCALERP_SET_PARAM_OTF_INPUT_CMD(dev, init_val_scalerp_preview_still.otf_input.cmd); IS_SCALERP_SET_PARAM_OTF_INPUT_WIDTH(dev, init_val_scalerp_preview_still.otf_input.width); IS_SCALERP_SET_PARAM_OTF_INPUT_HEIGHT(dev, init_val_scalerp_preview_still.otf_input.height); IS_SCALERP_SET_PARAM_OTF_INPUT_FORMAT(dev, 
init_val_scalerp_preview_still.otf_input.format); IS_SCALERP_SET_PARAM_OTF_INPUT_BITWIDTH(dev, init_val_scalerp_preview_still.otf_input.bitwidth); IS_SCALERP_SET_PARAM_OTF_INPUT_ORDER(dev, init_val_scalerp_preview_still.otf_input.order); IS_SCALERP_SET_PARAM_OTF_INPUT_ERR(dev, init_val_scalerp_preview_still.otf_input.err); IS_SET_PARAM_BIT(dev, PARAM_SCALERP_OTF_INPUT); IS_INC_PARAM_NUM(dev); IS_SCALERP_SET_PARAM_EFFECT_CMD(dev, init_val_scalerp_preview_still.effect.cmd); IS_SCALERP_SET_PARAM_EFFECT_ERR(dev, init_val_scalerp_preview_still.effect.err); IS_SET_PARAM_BIT(dev, PARAM_SCALERP_IMAGE_EFFECT); IS_INC_PARAM_NUM(dev); IS_SCALERP_SET_PARAM_CROP_CMD(dev, init_val_scalerp_preview_still.crop.cmd); IS_SCALERP_SET_PARAM_CROP_POS_X(dev, init_val_scalerp_preview_still.crop.pos_x); IS_SCALERP_SET_PARAM_CROP_POS_Y(dev, init_val_scalerp_preview_still.crop.pos_y); IS_SCALERP_SET_PARAM_CROP_WIDTH(dev, init_val_scalerp_preview_still.crop.crop_width); IS_SCALERP_SET_PARAM_CROP_HEIGHT(dev, init_val_scalerp_preview_still.crop.crop_height); IS_SCALERP_SET_PARAM_CROP_ERR(dev, init_val_scalerp_preview_still.crop.err); IS_SET_PARAM_BIT(dev, PARAM_SCALERP_CROP); IS_INC_PARAM_NUM(dev); IS_SCALERP_SET_PARAM_SCALING_CMD(dev, init_val_scalerp_preview_still.scale.cmd); IS_SCALERP_SET_PARAM_SCALING_PRE_H_RATIO(dev, init_val_scalerp_preview_still.scale.pre_h_ratio); IS_SCALERP_SET_PARAM_SCALING_PRE_V_RATIO(dev, init_val_scalerp_preview_still.scale.pre_v_ratio); IS_SCALERP_SET_PARAM_SCALING_SH_FACTOR(dev, init_val_scalerp_preview_still.scale.sh_factor); IS_SCALERP_SET_PARAM_SCALING_H_RATIO(dev, init_val_scalerp_preview_still.scale.h_ratio); IS_SCALERP_SET_PARAM_SCALING_V_RATIO(dev, init_val_scalerp_preview_still.scale.v_ratio); IS_SCALERP_SET_PARAM_SCALING_ERR(dev, init_val_scalerp_preview_still.scale.err); IS_SET_PARAM_BIT(dev, PARAM_SCALERP_SCALING); IS_INC_PARAM_NUM(dev); IS_SCALERP_SET_PARAM_ROTATION_CMD(dev, init_val_scalerp_preview_still.rotation.cmd); IS_SCALERP_SET_PARAM_ROTATION_ERR(dev, init_val_scalerp_preview_still.rotation.err); IS_SET_PARAM_BIT(dev, PARAM_SCALERP_ROTATION); IS_INC_PARAM_NUM(dev); IS_SCALERP_SET_PARAM_FLIP_CMD(dev, init_val_scalerp_preview_still.flip.cmd); IS_SCALERP_SET_PARAM_FLIP_ERR(dev, init_val_scalerp_preview_still.flip.err); IS_SET_PARAM_BIT(dev, PARAM_SCALERP_FLIP); IS_INC_PARAM_NUM(dev); IS_SCALERP_SET_PARAM_OTF_OUTPUT_CMD(dev, init_val_scalerp_preview_still.otf_output.cmd); IS_SCALERP_SET_PARAM_OTF_OUTPUT_WIDTH(dev, init_val_scalerp_preview_still.otf_output.width); IS_SCALERP_SET_PARAM_OTF_OUTPUT_HEIGHT(dev, init_val_scalerp_preview_still.otf_output.height); IS_SCALERP_SET_PARAM_OTF_OUTPUT_FORMAT(dev, init_val_scalerp_preview_still.otf_output.format); IS_SCALERP_SET_PARAM_OTF_OUTPUT_BITWIDTH(dev, init_val_scalerp_preview_still.otf_output.bitwidth); IS_SCALERP_SET_PARAM_OTF_OUTPUT_ORDER(dev, init_val_scalerp_preview_still.otf_output.order); IS_SCALERP_SET_PARAM_OTF_OUTPUT_ERR(dev, init_val_scalerp_preview_still.otf_output.err); IS_SET_PARAM_BIT(dev, PARAM_SCALERP_OTF_OUTPUT); IS_INC_PARAM_NUM(dev); IS_SCALERP_SET_PARAM_DMA_OUTPUT_CMD(dev, init_val_scalerp_preview_still.dma_output.cmd); IS_SCALERP_SET_PARAM_DMA_OUTPUT_WIDTH(dev, init_val_scalerp_preview_still.dma_output.width); IS_SCALERP_SET_PARAM_DMA_OUTPUT_HEIGHT(dev, init_val_scalerp_preview_still.dma_output.height); IS_SCALERP_SET_PARAM_DMA_OUTPUT_FORMAT(dev, init_val_scalerp_preview_still.dma_output.format); IS_SCALERP_SET_PARAM_DMA_OUTPUT_BITWIDTH(dev, init_val_scalerp_preview_still.dma_output.bitwidth); 
IS_SCALERP_SET_PARAM_DMA_OUTPUT_PLANE(dev, init_val_scalerp_preview_still.dma_output.plane); IS_SCALERP_SET_PARAM_DMA_OUTPUT_ORDER(dev, init_val_scalerp_preview_still.dma_output.order); IS_SCALERP_SET_PARAM_DMA_OUTPUT_BUFFERNUM(dev, init_val_scalerp_preview_still.dma_output.buffer_number); IS_SCALERP_SET_PARAM_DMA_OUTPUT_BUFFERADDR(dev, init_val_scalerp_preview_still.dma_output.buffer_address); IS_SCALERP_SET_PARAM_DMA_OUTPUT_ERR(dev, init_val_scalerp_preview_still.dma_output.err); IS_SET_PARAM_BIT(dev, PARAM_SCALERP_DMA_OUTPUT); IS_INC_PARAM_NUM(dev); /* FD */ IS_FD_SET_PARAM_CONTROL_CMD(dev, init_val_fd_preview_still.control.cmd); IS_FD_SET_PARAM_CONTROL_BYPASS(dev, init_val_fd_preview_still.control.bypass); IS_FD_SET_PARAM_CONTROL_ERR(dev, init_val_fd_preview_still.control.err); IS_SET_PARAM_BIT(dev, PARAM_FD_CONTROL); IS_INC_PARAM_NUM(dev); IS_FD_SET_PARAM_OTF_INPUT_CMD(dev, init_val_fd_preview_still.otf_input.cmd); IS_FD_SET_PARAM_OTF_INPUT_WIDTH(dev, init_val_fd_preview_still.otf_input.width); IS_FD_SET_PARAM_OTF_INPUT_HEIGHT(dev, init_val_fd_preview_still.otf_input.height); IS_FD_SET_PARAM_OTF_INPUT_FORMAT(dev, init_val_fd_preview_still.otf_input.format); IS_FD_SET_PARAM_OTF_INPUT_BITWIDTH(dev, init_val_fd_preview_still.otf_input.bitwidth); IS_FD_SET_PARAM_OTF_INPUT_ORDER(dev, init_val_fd_preview_still.otf_input.order); IS_FD_SET_PARAM_OTF_INPUT_ERR(dev, init_val_fd_preview_still.otf_input.err); IS_SET_PARAM_BIT(dev, PARAM_FD_OTF_INPUT); IS_INC_PARAM_NUM(dev); IS_FD_SET_PARAM_DMA_INPUT_CMD(dev, init_val_fd_preview_still.dma_input.cmd); IS_FD_SET_PARAM_DMA_INPUT_WIDTH(dev, init_val_fd_preview_still.dma_input.width); IS_FD_SET_PARAM_DMA_INPUT_HEIGHT(dev, init_val_fd_preview_still.dma_input.height); IS_FD_SET_PARAM_DMA_INPUT_FORMAT(dev, init_val_fd_preview_still.dma_input.format); IS_FD_SET_PARAM_DMA_INPUT_BITWIDTH(dev, init_val_fd_preview_still.dma_input.bitwidth); IS_FD_SET_PARAM_DMA_INPUT_PLANE(dev, init_val_fd_preview_still.dma_input.plane); IS_FD_SET_PARAM_DMA_INPUT_ORDER(dev, init_val_fd_preview_still.dma_input.order); IS_FD_SET_PARAM_DMA_INPUT_BUFFERNUM(dev, init_val_fd_preview_still.dma_input.buffer_number); IS_FD_SET_PARAM_DMA_INPUT_BUFFERADDR(dev, init_val_fd_preview_still.dma_input.buffer_address); IS_FD_SET_PARAM_DMA_INPUT_ERR(dev, init_val_fd_preview_still.dma_input.err); IS_SET_PARAM_BIT(dev, PARAM_FD_DMA_INPUT); IS_INC_PARAM_NUM(dev); IS_FD_SET_PARAM_FD_CONFIG_CMD(dev, init_val_fd_preview_still.config.cmd); IS_FD_SET_PARAM_FD_CONFIG_MAX_NUMBER(dev, init_val_fd_preview_still.config.max_number); IS_FD_SET_PARAM_FD_CONFIG_ROLL_ANGLE(dev, init_val_fd_preview_still.config.roll_angle); IS_FD_SET_PARAM_FD_CONFIG_YAW_ANGLE(dev, init_val_fd_preview_still.config.yaw_angle); IS_FD_SET_PARAM_FD_CONFIG_SMILE_MODE(dev, init_val_fd_preview_still.config.smile_mode); IS_FD_SET_PARAM_FD_CONFIG_BLINK_MODE(dev, init_val_fd_preview_still.config.blink_mode); IS_FD_SET_PARAM_FD_CONFIG_EYE_DETECT(dev, init_val_fd_preview_still.config.eye_detect); IS_FD_SET_PARAM_FD_CONFIG_MOUTH_DETECT(dev, init_val_fd_preview_still.config.mouth_detect); IS_FD_SET_PARAM_FD_CONFIG_ORIENTATION(dev, init_val_fd_preview_still.config.orientation); IS_FD_SET_PARAM_FD_CONFIG_ORIENTATION_VALUE(dev, init_val_fd_preview_still.config.orientation_value); IS_FD_SET_PARAM_FD_CONFIG_ERR(dev, init_val_fd_preview_still.config.err); IS_SET_PARAM_BIT(dev, PARAM_FD_CONFIG); IS_INC_PARAM_NUM(dev); break; case ISS_PREVIEW_VIDEO: IS_SET_PARAM_GLOBAL_SHOTMODE_CMD(dev, 1); IS_SET_PARAM_BIT(dev, PARAM_GLOBAL_SHOTMODE); 
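/*
 * The ISS_PREVIEW_VIDEO case mirrors ISS_PREVIEW_STILL above: shot mode 1
 * instead of 0, DEFAULT_PREVIEW_VIDEO_* geometry and frame rate, and a
 * 33333 us shutter-time ceiling (one 30 fps frame period).
 */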
IS_INC_PARAM_NUM(dev); IS_SENSOR_SET_FRAME_RATE(dev, DEFAULT_PREVIEW_VIDEO_FRAMERATE); IS_SET_PARAM_BIT(dev, PARAM_SENSOR_FRAME_RATE); IS_INC_PARAM_NUM(dev); /* ISP */ IS_ISP_SET_PARAM_CONTROL_CMD(dev, init_val_isp_preview_video.control.cmd); IS_ISP_SET_PARAM_CONTROL_BYPASS(dev, init_val_isp_preview_video.control.bypass); IS_ISP_SET_PARAM_CONTROL_ERR(dev, init_val_isp_preview_video.control.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_CONTROL); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_OTF_INPUT_CMD(dev, init_val_isp_preview_video.otf_input.cmd); IS_ISP_SET_PARAM_OTF_INPUT_WIDTH(dev, init_val_isp_preview_video.otf_input.width); IS_ISP_SET_PARAM_OTF_INPUT_HEIGHT(dev, init_val_isp_preview_video.otf_input.height); dev->sensor.width_prev_cam = init_val_isp_preview_video.otf_input.width; dev->sensor.height_prev_cam = init_val_isp_preview_video.otf_input.height; IS_ISP_SET_PARAM_OTF_INPUT_FORMAT(dev, init_val_isp_preview_video.otf_input.format); IS_ISP_SET_PARAM_OTF_INPUT_BITWIDTH(dev, init_val_isp_preview_video.otf_input.bitwidth); IS_ISP_SET_PARAM_OTF_INPUT_ORDER(dev, init_val_isp_preview_video.otf_input.order); IS_ISP_SET_PARAM_OTF_INPUT_ERR(dev, init_val_isp_preview_video.otf_input.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_OTF_INPUT); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_DMA_INPUT1_CMD(dev, init_val_isp_preview_video.dma1_input.cmd); IS_ISP_SET_PARAM_DMA_INPUT1_WIDTH(dev, init_val_isp_preview_video.dma1_input.width); IS_ISP_SET_PARAM_DMA_INPUT1_HEIGHT(dev, init_val_isp_preview_video.dma1_input.height); IS_ISP_SET_PARAM_DMA_INPUT1_FORMAT(dev, init_val_isp_preview_video.dma1_input.format); IS_ISP_SET_PARAM_DMA_INPUT1_BITWIDTH(dev, init_val_isp_preview_video.dma1_input.bitwidth); IS_ISP_SET_PARAM_DMA_INPUT1_PLANE(dev, init_val_isp_preview_video.dma1_input.plane); IS_ISP_SET_PARAM_DMA_INPUT1_ORDER(dev, init_val_isp_preview_video.dma1_input.order); IS_ISP_SET_PARAM_DMA_INPUT1_BUFFERNUM(dev, init_val_isp_preview_video.dma1_input.buffer_number); IS_ISP_SET_PARAM_DMA_INPUT1_BUFFERADDR(dev, init_val_isp_preview_video.dma1_input.buffer_address); IS_ISP_SET_PARAM_DMA_INPUT1_ERR(dev, init_val_isp_preview_video.dma1_input.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_DMA1_INPUT); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_DMA_INPUT2_CMD(dev, init_val_isp_preview_video.dma2_input.cmd); IS_ISP_SET_PARAM_DMA_INPUT2_WIDTH(dev, init_val_isp_preview_video.dma2_input.width); IS_ISP_SET_PARAM_DMA_INPUT2_HEIGHT(dev, init_val_isp_preview_video.dma2_input.height); IS_ISP_SET_PARAM_DMA_INPUT2_FORMAT(dev, init_val_isp_preview_video.dma2_input.format); IS_ISP_SET_PARAM_DMA_INPUT2_BITWIDTH(dev, init_val_isp_preview_video.dma2_input.bitwidth); IS_ISP_SET_PARAM_DMA_INPUT2_PLANE(dev, init_val_isp_preview_video.dma2_input.plane); IS_ISP_SET_PARAM_DMA_INPUT2_ORDER(dev, init_val_isp_preview_video.dma2_input.order); IS_ISP_SET_PARAM_DMA_INPUT2_BUFFERNUM(dev, init_val_isp_preview_video.dma2_input.buffer_number); IS_ISP_SET_PARAM_DMA_INPUT2_BUFFERADDR(dev, init_val_isp_preview_video.dma2_input.buffer_address); IS_ISP_SET_PARAM_DMA_INPUT2_ERR(dev, init_val_isp_preview_video.dma2_input.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_DMA2_INPUT); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_AA_CMD(dev, init_val_isp_preview_video.aa.cmd); IS_ISP_SET_PARAM_AA_TARGET(dev, init_val_isp_preview_video.aa.target); IS_ISP_SET_PARAM_AA_MODE(dev, init_val_isp_preview_video.aa.mode); IS_ISP_SET_PARAM_AA_FACE(dev, init_val_isp_preview_video.aa.face); IS_ISP_SET_PARAM_AA_WIN_POS_X(dev, init_val_isp_preview_video.aa.win_pos_x); IS_ISP_SET_PARAM_AA_WIN_POS_Y(dev, 
init_val_isp_preview_video.aa.win_pos_y); IS_ISP_SET_PARAM_AA_ERR(dev, init_val_isp_preview_video.aa.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_AA); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_FLASH_CMD(dev, init_val_isp_preview_video.flash.cmd); IS_ISP_SET_PARAM_FLASH_REDEYE(dev, init_val_isp_preview_video.flash.redeye); IS_ISP_SET_PARAM_FLASH_ERR(dev, init_val_isp_preview_video.flash.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_FLASH); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_AWB_CMD(dev, init_val_isp_preview_video.awb.cmd); IS_ISP_SET_PARAM_AWB_ILLUMINATION(dev, init_val_isp_preview_video.awb.illumination); IS_ISP_SET_PARAM_AWB_ERR(dev, init_val_isp_preview_video.awb.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_AWB); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_EFFECT_CMD(dev, init_val_isp_preview_video.effect.cmd); IS_ISP_SET_PARAM_EFFECT_ERR(dev, init_val_isp_preview_video.effect.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_IMAGE_EFFECT); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_ISO_CMD(dev, init_val_isp_preview_video.iso.cmd); IS_ISP_SET_PARAM_ISO_VALUE(dev, init_val_isp_preview_video.iso.value); IS_ISP_SET_PARAM_ISO_ERR(dev, init_val_isp_preview_video.iso.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_ISO); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_ADJUST_CMD(dev, init_val_isp_preview_video.adjust.cmd); IS_ISP_SET_PARAM_ADJUST_CONTRAST(dev, init_val_isp_preview_video.adjust.contrast); IS_ISP_SET_PARAM_ADJUST_SATURATION(dev, init_val_isp_preview_video.adjust.saturation); IS_ISP_SET_PARAM_ADJUST_SHARPNESS(dev, init_val_isp_preview_video.adjust.sharpness); IS_ISP_SET_PARAM_ADJUST_EXPOSURE(dev, init_val_isp_preview_video.adjust.exposure); IS_ISP_SET_PARAM_ADJUST_BRIGHTNESS(dev, init_val_isp_preview_video.adjust.brightness); IS_ISP_SET_PARAM_ADJUST_HUE(dev, init_val_isp_preview_video.adjust.hue); IS_ISP_SET_PARAM_ADJUST_SHUTTER_TIME_MIN(dev, init_val_isp_preview_video.adjust.shutter_time_min); IS_ISP_SET_PARAM_ADJUST_SHUTTER_TIME_MAX(dev, init_val_isp_preview_video.adjust.shutter_time_max); IS_ISP_SET_PARAM_ADJUST_ERR(dev, init_val_isp_preview_video.adjust.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_METERING_CMD(dev, init_val_isp_preview_video.metering.cmd); IS_ISP_SET_PARAM_METERING_WIN_POS_X(dev, init_val_isp_preview_video.metering.win_pos_x); IS_ISP_SET_PARAM_METERING_WIN_POS_Y(dev, init_val_isp_preview_video.metering.win_pos_y); IS_ISP_SET_PARAM_METERING_WIN_WIDTH(dev, init_val_isp_preview_video.metering.win_width); IS_ISP_SET_PARAM_METERING_WIN_HEIGHT(dev, init_val_isp_preview_video.metering.win_height); IS_ISP_SET_PARAM_METERING_ERR(dev, init_val_isp_preview_video.metering.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_METERING); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_AFC_CMD(dev, init_val_isp_preview_video.afc.cmd); IS_ISP_SET_PARAM_AFC_MANUAL(dev, init_val_isp_preview_video.afc.manual); IS_ISP_SET_PARAM_AFC_ERR(dev, init_val_isp_preview_video.afc.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_AFC); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_OTF_OUTPUT_CMD(dev, init_val_isp_preview_video.otf_output.cmd); IS_ISP_SET_PARAM_OTF_OUTPUT_WIDTH(dev, init_val_isp_preview_video.otf_output.width); IS_ISP_SET_PARAM_OTF_OUTPUT_HEIGHT(dev, init_val_isp_preview_video.otf_output.height); IS_ISP_SET_PARAM_OTF_OUTPUT_FORMAT(dev, init_val_isp_preview_video.otf_output.format); IS_ISP_SET_PARAM_OTF_OUTPUT_BITWIDTH(dev, init_val_isp_preview_video.otf_output.bitwidth); IS_ISP_SET_PARAM_OTF_OUTPUT_ORDER(dev, init_val_isp_preview_video.otf_output.order); IS_ISP_SET_PARAM_OTF_OUTPUT_ERR(dev, 
init_val_isp_preview_video.otf_output.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_OTF_OUTPUT); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_DMA_OUTPUT1_CMD(dev, init_val_isp_preview_video.dma1_output.cmd); IS_ISP_SET_PARAM_DMA_OUTPUT1_WIDTH(dev, init_val_isp_preview_video.dma1_output.width); IS_ISP_SET_PARAM_DMA_OUTPUT1_HEIGHT(dev, init_val_isp_preview_video.dma1_output.height); IS_ISP_SET_PARAM_DMA_OUTPUT1_FORMAT(dev, init_val_isp_preview_video.dma1_output.format); IS_ISP_SET_PARAM_DMA_OUTPUT1_BITWIDTH(dev, init_val_isp_preview_video.dma1_output.bitwidth); IS_ISP_SET_PARAM_DMA_OUTPUT1_PLANE(dev, init_val_isp_preview_video.dma1_output.plane); IS_ISP_SET_PARAM_DMA_OUTPUT1_ORDER(dev, init_val_isp_preview_video.dma1_output.order); IS_ISP_SET_PARAM_DMA_OUTPUT1_BUFFER_NUMBER(dev, init_val_isp_preview_video.dma1_output.buffer_number); IS_ISP_SET_PARAM_DMA_OUTPUT1_BUFFER_ADDRESS(dev, init_val_isp_preview_video.dma1_output.buffer_address); IS_ISP_SET_PARAM_DMA_OUTPUT1_ERR(dev, init_val_isp_preview_video.dma1_output.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_DMA1_OUTPUT); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_DMA_OUTPUT2_CMD(dev, init_val_isp_preview_video.dma2_output.cmd); IS_ISP_SET_PARAM_DMA_OUTPUT2_WIDTH(dev, init_val_isp_preview_video.dma2_output.width); IS_ISP_SET_PARAM_DMA_OUTPUT2_HEIGHT(dev, init_val_isp_preview_video.dma2_output.height); IS_ISP_SET_PARAM_DMA_OUTPUT2_FORMAT(dev, init_val_isp_preview_video.dma2_output.format); IS_ISP_SET_PARAM_DMA_OUTPUT2_BITWIDTH(dev, init_val_isp_preview_video.dma2_output.bitwidth); IS_ISP_SET_PARAM_DMA_OUTPUT2_PLANE(dev, init_val_isp_preview_video.dma2_output.plane); IS_ISP_SET_PARAM_DMA_OUTPUT2_ORDER(dev, init_val_isp_preview_video.dma2_output.order); IS_ISP_SET_PARAM_DMA_OUTPUT2_BUFFER_NUMBER(dev, init_val_isp_preview_video.dma2_output.buffer_number); IS_ISP_SET_PARAM_DMA_OUTPUT2_BUFFER_ADDRESS(dev, init_val_isp_preview_video.dma2_output.buffer_address); IS_ISP_SET_PARAM_DMA_OUTPUT2_ERR(dev, init_val_isp_preview_video.dma2_output.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_DMA2_OUTPUT); IS_INC_PARAM_NUM(dev); /* DRC */ IS_DRC_SET_PARAM_CONTROL_CMD(dev, init_val_drc_preview_video.control.cmd); IS_DRC_SET_PARAM_CONTROL_BYPASS(dev, init_val_drc_preview_video.control.bypass); IS_DRC_SET_PARAM_CONTROL_ERR(dev, init_val_drc_preview_video.control.err); IS_SET_PARAM_BIT(dev, PARAM_DRC_CONTROL); IS_INC_PARAM_NUM(dev); IS_DRC_SET_PARAM_OTF_INPUT_CMD(dev, init_val_drc_preview_video.otf_input.cmd); IS_DRC_SET_PARAM_OTF_INPUT_WIDTH(dev, init_val_drc_preview_video.otf_input.width); IS_DRC_SET_PARAM_OTF_INPUT_HEIGHT(dev, init_val_drc_preview_video.otf_input.height); IS_DRC_SET_PARAM_OTF_INPUT_FORMAT(dev, init_val_drc_preview_video.otf_input.format); IS_DRC_SET_PARAM_OTF_INPUT_BITWIDTH(dev, init_val_drc_preview_video.otf_input.bitwidth); IS_DRC_SET_PARAM_OTF_INPUT_ORDER(dev, init_val_drc_preview_video.otf_input.order); IS_DRC_SET_PARAM_OTF_INPUT_ERR(dev, init_val_drc_preview_video.otf_input.err); IS_SET_PARAM_BIT(dev, PARAM_DRC_OTF_INPUT); IS_INC_PARAM_NUM(dev); IS_DRC_SET_PARAM_DMA_INPUT_CMD(dev, init_val_drc_preview_video.dma_input.cmd); IS_DRC_SET_PARAM_DMA_INPUT_WIDTH(dev, init_val_drc_preview_video.dma_input.width); IS_DRC_SET_PARAM_DMA_INPUT_HEIGHT(dev, init_val_drc_preview_video.dma_input.height); IS_DRC_SET_PARAM_DMA_INPUT_FORMAT(dev, init_val_drc_preview_video.dma_input.format); IS_DRC_SET_PARAM_DMA_INPUT_BITWIDTH(dev, init_val_drc_preview_video.dma_input.bitwidth); IS_DRC_SET_PARAM_DMA_INPUT_PLANE(dev, init_val_drc_preview_video.dma_input.plane); 
IS_DRC_SET_PARAM_DMA_INPUT_ORDER(dev, init_val_drc_preview_video.dma_input.order); IS_DRC_SET_PARAM_DMA_INPUT_BUFFERNUM(dev, init_val_drc_preview_video.dma_input.buffer_number); IS_DRC_SET_PARAM_DMA_INPUT_BUFFERADDR(dev, init_val_drc_preview_video.dma_input.buffer_address); IS_DRC_SET_PARAM_DMA_INPUT_ERR(dev, init_val_drc_preview_video.dma_input.err); IS_SET_PARAM_BIT(dev, PARAM_DRC_DMA_INPUT); IS_INC_PARAM_NUM(dev); IS_DRC_SET_PARAM_OTF_OUTPUT_CMD(dev, init_val_drc_preview_video.otf_output.cmd); IS_DRC_SET_PARAM_OTF_OUTPUT_WIDTH(dev, init_val_drc_preview_video.otf_output.width); IS_DRC_SET_PARAM_OTF_OUTPUT_HEIGHT(dev, init_val_drc_preview_video.otf_output.height); IS_DRC_SET_PARAM_OTF_OUTPUT_FORMAT(dev, init_val_drc_preview_video.otf_output.format); IS_DRC_SET_PARAM_OTF_OUTPUT_BITWIDTH(dev, init_val_drc_preview_video.otf_output.bitwidth); IS_DRC_SET_PARAM_OTF_OUTPUT_ORDER(dev, init_val_drc_preview_video.otf_output.order); IS_DRC_SET_PARAM_OTF_OUTPUT_ERR(dev, init_val_drc_preview_video.otf_output.err); IS_SET_PARAM_BIT(dev, PARAM_DRC_OTF_OUTPUT); IS_INC_PARAM_NUM(dev); /* FD */ IS_FD_SET_PARAM_CONTROL_CMD(dev, init_val_fd_preview_video.control.cmd); IS_FD_SET_PARAM_CONTROL_BYPASS(dev, init_val_fd_preview_video.control.bypass); IS_FD_SET_PARAM_CONTROL_ERR(dev, init_val_fd_preview_video.control.err); IS_SET_PARAM_BIT(dev, PARAM_FD_CONTROL); IS_INC_PARAM_NUM(dev); IS_FD_SET_PARAM_OTF_INPUT_CMD(dev, init_val_fd_preview_video.otf_input.cmd); IS_FD_SET_PARAM_OTF_INPUT_WIDTH(dev, init_val_fd_preview_video.otf_input.width); IS_FD_SET_PARAM_OTF_INPUT_HEIGHT(dev, init_val_fd_preview_video.otf_input.height); IS_FD_SET_PARAM_OTF_INPUT_FORMAT(dev, init_val_fd_preview_video.otf_input.format); IS_FD_SET_PARAM_OTF_INPUT_BITWIDTH(dev, init_val_fd_preview_video.otf_input.bitwidth); IS_FD_SET_PARAM_OTF_INPUT_ORDER(dev, init_val_fd_preview_video.otf_input.order); IS_FD_SET_PARAM_OTF_INPUT_ERR(dev, init_val_fd_preview_video.otf_input.err); IS_SET_PARAM_BIT(dev, PARAM_FD_OTF_INPUT); IS_INC_PARAM_NUM(dev); IS_FD_SET_PARAM_DMA_INPUT_CMD(dev, init_val_fd_preview_video.dma_input.cmd); IS_FD_SET_PARAM_DMA_INPUT_WIDTH(dev, init_val_fd_preview_video.dma_input.width); IS_FD_SET_PARAM_DMA_INPUT_HEIGHT(dev, init_val_fd_preview_video.dma_input.height); IS_FD_SET_PARAM_DMA_INPUT_FORMAT(dev, init_val_fd_preview_video.dma_input.format); IS_FD_SET_PARAM_DMA_INPUT_BITWIDTH(dev, init_val_fd_preview_video.dma_input.bitwidth); IS_FD_SET_PARAM_DMA_INPUT_PLANE(dev, init_val_fd_preview_video.dma_input.plane); IS_FD_SET_PARAM_DMA_INPUT_ORDER(dev, init_val_fd_preview_video.dma_input.order); IS_FD_SET_PARAM_DMA_INPUT_BUFFERNUM(dev, init_val_fd_preview_video.dma_input.buffer_number); IS_FD_SET_PARAM_DMA_INPUT_BUFFERADDR(dev, init_val_fd_preview_video.dma_input.buffer_address); IS_FD_SET_PARAM_DMA_INPUT_ERR(dev, init_val_fd_preview_video.dma_input.err); IS_SET_PARAM_BIT(dev, PARAM_FD_DMA_INPUT); IS_INC_PARAM_NUM(dev); IS_FD_SET_PARAM_FD_CONFIG_CMD(dev, init_val_fd_preview_video.config.cmd); IS_FD_SET_PARAM_FD_CONFIG_MAX_NUMBER(dev, init_val_fd_preview_video.config.max_number); IS_FD_SET_PARAM_FD_CONFIG_ROLL_ANGLE(dev, init_val_fd_preview_video.config.roll_angle); IS_FD_SET_PARAM_FD_CONFIG_YAW_ANGLE(dev, init_val_fd_preview_video.config.yaw_angle); IS_FD_SET_PARAM_FD_CONFIG_SMILE_MODE(dev, init_val_fd_preview_video.config.smile_mode); IS_FD_SET_PARAM_FD_CONFIG_BLINK_MODE(dev, init_val_fd_preview_video.config.blink_mode); IS_FD_SET_PARAM_FD_CONFIG_EYE_DETECT(dev, init_val_fd_preview_video.config.eye_detect); 
IS_FD_SET_PARAM_FD_CONFIG_MOUTH_DETECT(dev, init_val_fd_preview_video.config.mouth_detect); IS_FD_SET_PARAM_FD_CONFIG_ORIENTATION(dev, init_val_fd_preview_video.config.orientation); IS_FD_SET_PARAM_FD_CONFIG_ORIENTATION_VALUE(dev, init_val_fd_preview_video.config.orientation_value); IS_FD_SET_PARAM_FD_CONFIG_ERR(dev, init_val_fd_preview_video.config.err); IS_SET_PARAM_BIT(dev, PARAM_FD_CONFIG); IS_INC_PARAM_NUM(dev); break; case ISS_CAPTURE_STILL: IS_SET_PARAM_GLOBAL_SHOTMODE_CMD(dev, 1); IS_SET_PARAM_BIT(dev, PARAM_GLOBAL_SHOTMODE); IS_INC_PARAM_NUM(dev); IS_SENSOR_SET_FRAME_RATE(dev, DEFAULT_CAPTURE_STILL_FRAMERATE); IS_SET_PARAM_BIT(dev, PARAM_SENSOR_FRAME_RATE); IS_INC_PARAM_NUM(dev); /* ISP */ IS_ISP_SET_PARAM_CONTROL_CMD(dev, init_val_isp_capture.control.cmd); IS_ISP_SET_PARAM_CONTROL_BYPASS(dev, init_val_isp_capture.control.bypass); IS_ISP_SET_PARAM_CONTROL_ERR(dev, init_val_isp_capture.control.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_CONTROL); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_OTF_INPUT_CMD(dev, init_val_isp_capture.otf_input.cmd); IS_ISP_SET_PARAM_OTF_INPUT_WIDTH(dev, init_val_isp_capture.otf_input.width); IS_ISP_SET_PARAM_OTF_INPUT_HEIGHT(dev, init_val_isp_capture.otf_input.height); IS_ISP_SET_PARAM_OTF_INPUT_FORMAT(dev, init_val_isp_capture.otf_input.format); IS_ISP_SET_PARAM_OTF_INPUT_BITWIDTH(dev, init_val_isp_capture.otf_input.bitwidth); IS_ISP_SET_PARAM_OTF_INPUT_ORDER(dev, init_val_isp_capture.otf_input.order); IS_ISP_SET_PARAM_OTF_INPUT_ERR(dev, init_val_isp_capture.otf_input.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_OTF_INPUT); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_DMA_INPUT1_CMD(dev, init_val_isp_capture.dma1_input.cmd); IS_ISP_SET_PARAM_DMA_INPUT1_WIDTH(dev, init_val_isp_capture.dma1_input.width); IS_ISP_SET_PARAM_DMA_INPUT1_HEIGHT(dev, init_val_isp_capture.dma1_input.height); IS_ISP_SET_PARAM_DMA_INPUT1_FORMAT(dev, init_val_isp_capture.dma1_input.format); IS_ISP_SET_PARAM_DMA_INPUT1_BITWIDTH(dev, init_val_isp_capture.dma1_input.bitwidth); IS_ISP_SET_PARAM_DMA_INPUT1_PLANE(dev, init_val_isp_capture.dma1_input.plane); IS_ISP_SET_PARAM_DMA_INPUT1_ORDER(dev, init_val_isp_capture.dma1_input.order); IS_ISP_SET_PARAM_DMA_INPUT1_BUFFERNUM(dev, init_val_isp_capture.dma1_input.buffer_number); IS_ISP_SET_PARAM_DMA_INPUT1_BUFFERADDR(dev, init_val_isp_capture.dma1_input.buffer_address); IS_ISP_SET_PARAM_DMA_INPUT1_ERR(dev, init_val_isp_capture.dma1_input.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_DMA1_INPUT); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_DMA_INPUT2_CMD(dev, init_val_isp_capture.dma2_input.cmd); IS_ISP_SET_PARAM_DMA_INPUT2_WIDTH(dev, init_val_isp_capture.dma2_input.width); IS_ISP_SET_PARAM_DMA_INPUT2_HEIGHT(dev, init_val_isp_capture.dma2_input.height); IS_ISP_SET_PARAM_DMA_INPUT2_FORMAT(dev, init_val_isp_capture.dma2_input.format); IS_ISP_SET_PARAM_DMA_INPUT2_BITWIDTH(dev, init_val_isp_capture.dma2_input.bitwidth); IS_ISP_SET_PARAM_DMA_INPUT2_PLANE(dev, init_val_isp_capture.dma2_input.plane); IS_ISP_SET_PARAM_DMA_INPUT2_ORDER(dev, init_val_isp_capture.dma2_input.order); IS_ISP_SET_PARAM_DMA_INPUT2_BUFFERNUM(dev, init_val_isp_capture.dma2_input.buffer_number); IS_ISP_SET_PARAM_DMA_INPUT2_BUFFERADDR(dev, init_val_isp_capture.dma2_input.buffer_address); IS_ISP_SET_PARAM_DMA_INPUT2_ERR(dev, init_val_isp_capture.dma2_input.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_DMA2_INPUT); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_AA_CMD(dev, init_val_isp_capture.aa.cmd); IS_ISP_SET_PARAM_AA_TARGET(dev, init_val_isp_capture.aa.target); IS_ISP_SET_PARAM_AA_MODE(dev, init_val_isp_capture.aa.mode); 
IS_ISP_SET_PARAM_AA_FACE(dev, init_val_isp_capture.aa.face); IS_ISP_SET_PARAM_AA_WIN_POS_X(dev, init_val_isp_capture.aa.win_pos_x); IS_ISP_SET_PARAM_AA_WIN_POS_Y(dev, init_val_isp_capture.aa.win_pos_y); IS_ISP_SET_PARAM_AA_ERR(dev, init_val_isp_capture.aa.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_AA); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_FLASH_CMD(dev, init_val_isp_capture.flash.cmd); IS_ISP_SET_PARAM_FLASH_REDEYE(dev, init_val_isp_capture.flash.redeye); IS_ISP_SET_PARAM_FLASH_ERR(dev, init_val_isp_capture.flash.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_FLASH); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_AWB_CMD(dev, init_val_isp_capture.awb.cmd); IS_ISP_SET_PARAM_AWB_ILLUMINATION(dev, init_val_isp_capture.awb.illumination); IS_ISP_SET_PARAM_AWB_ERR(dev, init_val_isp_capture.awb.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_AWB); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_EFFECT_CMD(dev, init_val_isp_capture.effect.cmd); IS_ISP_SET_PARAM_EFFECT_ERR(dev, init_val_isp_capture.effect.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_IMAGE_EFFECT); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_ISO_CMD(dev, init_val_isp_capture.iso.cmd); IS_ISP_SET_PARAM_ISO_VALUE(dev, init_val_isp_capture.iso.value); IS_ISP_SET_PARAM_ISO_ERR(dev, init_val_isp_capture.iso.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_ISO); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_ADJUST_CMD(dev, init_val_isp_capture.adjust.cmd); IS_ISP_SET_PARAM_ADJUST_CONTRAST(dev, init_val_isp_capture.adjust.contrast); IS_ISP_SET_PARAM_ADJUST_SATURATION(dev, init_val_isp_capture.adjust.saturation); IS_ISP_SET_PARAM_ADJUST_SHARPNESS(dev, init_val_isp_capture.adjust.sharpness); IS_ISP_SET_PARAM_ADJUST_EXPOSURE(dev, init_val_isp_capture.adjust.exposure); IS_ISP_SET_PARAM_ADJUST_BRIGHTNESS(dev, init_val_isp_capture.adjust.brightness); IS_ISP_SET_PARAM_ADJUST_HUE(dev, init_val_isp_capture.adjust.hue); IS_ISP_SET_PARAM_ADJUST_SHUTTER_TIME_MIN(dev, init_val_isp_capture.adjust.shutter_time_min); IS_ISP_SET_PARAM_ADJUST_SHUTTER_TIME_MAX(dev, init_val_isp_capture.adjust.shutter_time_max); IS_ISP_SET_PARAM_ADJUST_ERR(dev, init_val_isp_capture.adjust.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_METERING_CMD(dev, init_val_isp_capture.metering.cmd); IS_ISP_SET_PARAM_METERING_WIN_POS_X(dev, init_val_isp_capture.metering.win_pos_x); IS_ISP_SET_PARAM_METERING_WIN_POS_Y(dev, init_val_isp_capture.metering.win_pos_y); IS_ISP_SET_PARAM_METERING_WIN_WIDTH(dev, init_val_isp_capture.metering.win_width); IS_ISP_SET_PARAM_METERING_WIN_HEIGHT(dev, init_val_isp_capture.metering.win_height); IS_ISP_SET_PARAM_METERING_ERR(dev, init_val_isp_capture.metering.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_METERING); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_AFC_CMD(dev, init_val_isp_capture.afc.cmd); IS_ISP_SET_PARAM_AFC_MANUAL(dev, init_val_isp_capture.afc.manual); IS_ISP_SET_PARAM_AFC_ERR(dev, init_val_isp_capture.afc.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_AFC); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_OTF_OUTPUT_CMD(dev, init_val_isp_capture.otf_output.cmd); IS_ISP_SET_PARAM_OTF_OUTPUT_WIDTH(dev, init_val_isp_capture.otf_output.width); IS_ISP_SET_PARAM_OTF_OUTPUT_HEIGHT(dev, init_val_isp_capture.otf_output.height); IS_ISP_SET_PARAM_OTF_OUTPUT_FORMAT(dev, init_val_isp_capture.otf_output.format); IS_ISP_SET_PARAM_OTF_OUTPUT_BITWIDTH(dev, init_val_isp_capture.otf_output.bitwidth); IS_ISP_SET_PARAM_OTF_OUTPUT_ORDER(dev, init_val_isp_capture.otf_output.order); IS_ISP_SET_PARAM_OTF_OUTPUT_ERR(dev, init_val_isp_capture.otf_output.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_OTF_OUTPUT); 
IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_DMA_OUTPUT1_CMD(dev, init_val_isp_capture.dma1_output.cmd); IS_ISP_SET_PARAM_DMA_OUTPUT1_WIDTH(dev, init_val_isp_capture.dma1_output.width); IS_ISP_SET_PARAM_DMA_OUTPUT1_HEIGHT(dev, init_val_isp_capture.dma1_output.height); IS_ISP_SET_PARAM_DMA_OUTPUT1_FORMAT(dev, init_val_isp_capture.dma1_output.format); IS_ISP_SET_PARAM_DMA_OUTPUT1_BITWIDTH(dev, init_val_isp_capture.dma1_output.bitwidth); IS_ISP_SET_PARAM_DMA_OUTPUT1_PLANE(dev, init_val_isp_capture.dma1_output.plane); IS_ISP_SET_PARAM_DMA_OUTPUT1_ORDER(dev, init_val_isp_capture.dma1_output.order); IS_ISP_SET_PARAM_DMA_OUTPUT1_BUFFER_NUMBER(dev, init_val_isp_capture.dma1_output.buffer_number); IS_ISP_SET_PARAM_DMA_OUTPUT1_BUFFER_ADDRESS(dev, init_val_isp_capture.dma1_output.buffer_address); IS_ISP_SET_PARAM_DMA_OUTPUT1_ERR(dev, init_val_isp_capture.dma1_output.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_DMA1_OUTPUT); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_DMA_OUTPUT2_CMD(dev, init_val_isp_capture.dma2_output.cmd); IS_ISP_SET_PARAM_DMA_OUTPUT2_WIDTH(dev, init_val_isp_capture.dma2_output.width); IS_ISP_SET_PARAM_DMA_OUTPUT2_HEIGHT(dev, init_val_isp_capture.dma2_output.height); IS_ISP_SET_PARAM_DMA_OUTPUT2_FORMAT(dev, init_val_isp_capture.dma2_output.format); IS_ISP_SET_PARAM_DMA_OUTPUT2_BITWIDTH(dev, init_val_isp_capture.dma2_output.bitwidth); IS_ISP_SET_PARAM_DMA_OUTPUT2_PLANE(dev, init_val_isp_capture.dma2_output.plane); IS_ISP_SET_PARAM_DMA_OUTPUT2_ORDER(dev, init_val_isp_capture.dma2_output.order); IS_ISP_SET_PARAM_DMA_OUTPUT2_BUFFER_NUMBER(dev, init_val_isp_capture.dma2_output.buffer_number); IS_ISP_SET_PARAM_DMA_OUTPUT2_BUFFER_ADDRESS(dev, init_val_isp_capture.dma2_output.buffer_address); IS_ISP_SET_PARAM_DMA_OUTPUT2_ERR(dev, init_val_isp_capture.dma2_output.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_DMA2_OUTPUT); IS_INC_PARAM_NUM(dev); /* DRC */ IS_DRC_SET_PARAM_CONTROL_CMD(dev, init_val_drc_capture.control.cmd); IS_DRC_SET_PARAM_CONTROL_BYPASS(dev, init_val_drc_capture.control.bypass); IS_DRC_SET_PARAM_CONTROL_ERR(dev, init_val_drc_capture.control.err); IS_SET_PARAM_BIT(dev, PARAM_DRC_CONTROL); IS_INC_PARAM_NUM(dev); IS_DRC_SET_PARAM_OTF_INPUT_CMD(dev, init_val_drc_capture.otf_input.cmd); IS_DRC_SET_PARAM_OTF_INPUT_WIDTH(dev, init_val_drc_capture.otf_input.width); IS_DRC_SET_PARAM_OTF_INPUT_HEIGHT(dev, init_val_drc_capture.otf_input.height); IS_DRC_SET_PARAM_OTF_INPUT_FORMAT(dev, init_val_drc_capture.otf_input.format); IS_DRC_SET_PARAM_OTF_INPUT_BITWIDTH(dev, init_val_drc_capture.otf_input.bitwidth); IS_DRC_SET_PARAM_OTF_INPUT_ORDER(dev, init_val_drc_capture.otf_input.order); IS_DRC_SET_PARAM_OTF_INPUT_ERR(dev, init_val_drc_capture.otf_input.err); IS_SET_PARAM_BIT(dev, PARAM_DRC_OTF_INPUT); IS_INC_PARAM_NUM(dev); IS_DRC_SET_PARAM_DMA_INPUT_CMD(dev, init_val_drc_capture.dma_input.cmd); IS_DRC_SET_PARAM_DMA_INPUT_WIDTH(dev, init_val_drc_capture.dma_input.width); IS_DRC_SET_PARAM_DMA_INPUT_HEIGHT(dev, init_val_drc_capture.dma_input.height); IS_DRC_SET_PARAM_DMA_INPUT_FORMAT(dev, init_val_drc_capture.dma_input.format); IS_DRC_SET_PARAM_DMA_INPUT_BITWIDTH(dev, init_val_drc_capture.dma_input.bitwidth); IS_DRC_SET_PARAM_DMA_INPUT_PLANE(dev, init_val_drc_capture.dma_input.plane); IS_DRC_SET_PARAM_DMA_INPUT_ORDER(dev, init_val_drc_capture.dma_input.order); IS_DRC_SET_PARAM_DMA_INPUT_BUFFERNUM(dev, init_val_drc_capture.dma_input.buffer_number); IS_DRC_SET_PARAM_DMA_INPUT_BUFFERADDR(dev, init_val_drc_capture.dma_input.buffer_address); IS_DRC_SET_PARAM_DMA_INPUT_ERR(dev, init_val_drc_capture.dma_input.err); 
IS_SET_PARAM_BIT(dev, PARAM_DRC_DMA_INPUT); IS_INC_PARAM_NUM(dev); IS_DRC_SET_PARAM_OTF_OUTPUT_CMD(dev, init_val_drc_capture.otf_output.cmd); IS_DRC_SET_PARAM_OTF_OUTPUT_WIDTH(dev, init_val_drc_capture.otf_output.width); IS_DRC_SET_PARAM_OTF_OUTPUT_HEIGHT(dev, init_val_drc_capture.otf_output.height); IS_DRC_SET_PARAM_OTF_OUTPUT_FORMAT(dev, init_val_drc_capture.otf_output.format); IS_DRC_SET_PARAM_OTF_OUTPUT_BITWIDTH(dev, init_val_drc_capture.otf_output.bitwidth); IS_DRC_SET_PARAM_OTF_OUTPUT_ORDER(dev, init_val_drc_capture.otf_output.order); IS_DRC_SET_PARAM_OTF_OUTPUT_ERR(dev, init_val_drc_capture.otf_output.err); IS_SET_PARAM_BIT(dev, PARAM_DRC_OTF_OUTPUT); IS_INC_PARAM_NUM(dev); /* FD */ IS_FD_SET_PARAM_CONTROL_CMD(dev, init_val_fd_capture.control.cmd); IS_FD_SET_PARAM_CONTROL_BYPASS(dev, init_val_fd_capture.control.bypass); IS_FD_SET_PARAM_CONTROL_ERR(dev, init_val_fd_capture.control.err); IS_SET_PARAM_BIT(dev, PARAM_FD_CONTROL); IS_INC_PARAM_NUM(dev); IS_FD_SET_PARAM_OTF_INPUT_CMD(dev, init_val_fd_capture.otf_input.cmd); IS_FD_SET_PARAM_OTF_INPUT_WIDTH(dev, init_val_fd_capture.otf_input.width); IS_FD_SET_PARAM_OTF_INPUT_HEIGHT(dev, init_val_fd_capture.otf_input.height); IS_FD_SET_PARAM_OTF_INPUT_FORMAT(dev, init_val_fd_capture.otf_input.format); IS_FD_SET_PARAM_OTF_INPUT_BITWIDTH(dev, init_val_fd_capture.otf_input.bitwidth); IS_FD_SET_PARAM_OTF_INPUT_ORDER(dev, init_val_fd_capture.otf_input.order); IS_FD_SET_PARAM_OTF_INPUT_ERR(dev, init_val_fd_capture.otf_input.err); IS_SET_PARAM_BIT(dev, PARAM_FD_OTF_INPUT); IS_INC_PARAM_NUM(dev); IS_FD_SET_PARAM_DMA_INPUT_CMD(dev, init_val_fd_capture.dma_input.cmd); IS_FD_SET_PARAM_DMA_INPUT_WIDTH(dev, init_val_fd_capture.dma_input.width); IS_FD_SET_PARAM_DMA_INPUT_HEIGHT(dev, init_val_fd_capture.dma_input.height); IS_FD_SET_PARAM_DMA_INPUT_FORMAT(dev, init_val_fd_capture.dma_input.format); IS_FD_SET_PARAM_DMA_INPUT_BITWIDTH(dev, init_val_fd_capture.dma_input.bitwidth); IS_FD_SET_PARAM_DMA_INPUT_PLANE(dev, init_val_fd_capture.dma_input.plane); IS_FD_SET_PARAM_DMA_INPUT_ORDER(dev, init_val_fd_capture.dma_input.order); IS_FD_SET_PARAM_DMA_INPUT_BUFFERNUM(dev, init_val_fd_capture.dma_input.buffer_number); IS_FD_SET_PARAM_DMA_INPUT_BUFFERADDR(dev, init_val_fd_capture.dma_input.buffer_address); IS_FD_SET_PARAM_DMA_INPUT_ERR(dev, init_val_fd_capture.dma_input.err); IS_SET_PARAM_BIT(dev, PARAM_FD_DMA_INPUT); IS_INC_PARAM_NUM(dev); IS_FD_SET_PARAM_FD_CONFIG_CMD(dev, init_val_fd_capture.config.cmd); IS_FD_SET_PARAM_FD_CONFIG_MAX_NUMBER(dev, init_val_fd_capture.config.max_number); IS_FD_SET_PARAM_FD_CONFIG_ROLL_ANGLE(dev, init_val_fd_capture.config.roll_angle); IS_FD_SET_PARAM_FD_CONFIG_YAW_ANGLE(dev, init_val_fd_capture.config.yaw_angle); IS_FD_SET_PARAM_FD_CONFIG_SMILE_MODE(dev, init_val_fd_capture.config.smile_mode); IS_FD_SET_PARAM_FD_CONFIG_BLINK_MODE(dev, init_val_fd_capture.config.blink_mode); IS_FD_SET_PARAM_FD_CONFIG_EYE_DETECT(dev, init_val_fd_capture.config.eye_detect); IS_FD_SET_PARAM_FD_CONFIG_MOUTH_DETECT(dev, init_val_fd_capture.config.mouth_detect); IS_FD_SET_PARAM_FD_CONFIG_ORIENTATION(dev, init_val_fd_capture.config.orientation); IS_FD_SET_PARAM_FD_CONFIG_ORIENTATION_VALUE(dev, init_val_fd_capture.config.orientation_value); IS_FD_SET_PARAM_FD_CONFIG_ERR(dev, init_val_fd_capture.config.err); IS_SET_PARAM_BIT(dev, PARAM_FD_CONFIG); IS_INC_PARAM_NUM(dev); break; case ISS_CAPTURE_VIDEO: IS_SET_PARAM_GLOBAL_SHOTMODE_CMD(dev, 1); IS_SET_PARAM_BIT(dev, PARAM_GLOBAL_SHOTMODE); IS_INC_PARAM_NUM(dev); IS_SENSOR_SET_FRAME_RATE(dev,
DEFAULT_CAPTURE_VIDEO_FRAMERATE); IS_SET_PARAM_BIT(dev, PARAM_SENSOR_FRAME_RATE); IS_INC_PARAM_NUM(dev); /* ISP */ IS_ISP_SET_PARAM_CONTROL_CMD(dev, init_val_isp_camcording.control.cmd); IS_ISP_SET_PARAM_CONTROL_BYPASS(dev, init_val_isp_camcording.control.bypass); IS_ISP_SET_PARAM_CONTROL_ERR(dev, init_val_isp_camcording.control.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_CONTROL); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_OTF_INPUT_CMD(dev, init_val_isp_camcording.otf_input.cmd); IS_ISP_SET_PARAM_OTF_INPUT_WIDTH(dev, init_val_isp_camcording.otf_input.width); IS_ISP_SET_PARAM_OTF_INPUT_HEIGHT(dev, init_val_isp_camcording.otf_input.height); IS_ISP_SET_PARAM_OTF_INPUT_FORMAT(dev, init_val_isp_camcording.otf_input.format); IS_ISP_SET_PARAM_OTF_INPUT_BITWIDTH(dev, init_val_isp_camcording.otf_input.bitwidth); IS_ISP_SET_PARAM_OTF_INPUT_ORDER(dev, init_val_isp_camcording.otf_input.order); IS_ISP_SET_PARAM_OTF_INPUT_ERR(dev, init_val_isp_camcording.otf_input.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_OTF_INPUT); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_DMA_INPUT1_CMD(dev, init_val_isp_camcording.dma1_input.cmd); IS_ISP_SET_PARAM_DMA_INPUT1_WIDTH(dev, init_val_isp_camcording.dma1_input.width); IS_ISP_SET_PARAM_DMA_INPUT1_HEIGHT(dev, init_val_isp_camcording.dma1_input.height); IS_ISP_SET_PARAM_DMA_INPUT1_FORMAT(dev, init_val_isp_camcording.dma1_input.format); IS_ISP_SET_PARAM_DMA_INPUT1_BITWIDTH(dev, init_val_isp_camcording.dma1_input.bitwidth); IS_ISP_SET_PARAM_DMA_INPUT1_PLANE(dev, init_val_isp_camcording.dma1_input.plane); IS_ISP_SET_PARAM_DMA_INPUT1_ORDER(dev, init_val_isp_camcording.dma1_input.order); IS_ISP_SET_PARAM_DMA_INPUT1_BUFFERNUM(dev, init_val_isp_camcording.dma1_input.buffer_number); IS_ISP_SET_PARAM_DMA_INPUT1_BUFFERADDR(dev, init_val_isp_camcording.dma1_input.buffer_address); IS_ISP_SET_PARAM_DMA_INPUT1_ERR(dev, init_val_isp_camcording.dma1_input.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_DMA1_INPUT); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_DMA_INPUT2_CMD(dev, init_val_isp_camcording.dma2_input.cmd); IS_ISP_SET_PARAM_DMA_INPUT2_WIDTH(dev, init_val_isp_camcording.dma2_input.width); IS_ISP_SET_PARAM_DMA_INPUT2_HEIGHT(dev, init_val_isp_camcording.dma2_input.height); IS_ISP_SET_PARAM_DMA_INPUT2_FORMAT(dev, init_val_isp_camcording.dma2_input.format); IS_ISP_SET_PARAM_DMA_INPUT2_BITWIDTH(dev, init_val_isp_camcording.dma2_input.bitwidth); IS_ISP_SET_PARAM_DMA_INPUT2_PLANE(dev, init_val_isp_camcording.dma2_input.plane); IS_ISP_SET_PARAM_DMA_INPUT2_ORDER(dev, init_val_isp_camcording.dma2_input.order); IS_ISP_SET_PARAM_DMA_INPUT2_BUFFERNUM(dev, init_val_isp_camcording.dma2_input.buffer_number); IS_ISP_SET_PARAM_DMA_INPUT2_BUFFERADDR(dev, init_val_isp_camcording.dma2_input.buffer_address); IS_ISP_SET_PARAM_DMA_INPUT2_ERR(dev, init_val_isp_camcording.dma2_input.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_DMA2_INPUT); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_AA_CMD(dev, init_val_isp_camcording.aa.cmd); IS_ISP_SET_PARAM_AA_TARGET(dev, init_val_isp_camcording.aa.target); IS_ISP_SET_PARAM_AA_MODE(dev, init_val_isp_camcording.aa.mode); IS_ISP_SET_PARAM_AA_FACE(dev, init_val_isp_camcording.aa.face); IS_ISP_SET_PARAM_AA_WIN_POS_X(dev, init_val_isp_camcording.aa.win_pos_x); IS_ISP_SET_PARAM_AA_WIN_POS_Y(dev, init_val_isp_camcording.aa.win_pos_y); IS_ISP_SET_PARAM_AA_ERR(dev, init_val_isp_camcording.aa.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_AA); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_FLASH_CMD(dev, init_val_isp_camcording.flash.cmd); IS_ISP_SET_PARAM_FLASH_REDEYE(dev, init_val_isp_camcording.flash.redeye);
IS_ISP_SET_PARAM_FLASH_ERR(dev, init_val_isp_camcording.flash.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_FLASH); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_AWB_CMD(dev, init_val_isp_camcording.awb.cmd); IS_ISP_SET_PARAM_AWB_ILLUMINATION(dev, init_val_isp_camcording.awb.illumination); IS_ISP_SET_PARAM_AWB_ERR(dev, init_val_isp_camcording.awb.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_AWB); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_EFFECT_CMD(dev, init_val_isp_camcording.effect.cmd); IS_ISP_SET_PARAM_EFFECT_ERR(dev, init_val_isp_camcording.effect.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_IMAGE_EFFECT); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_ISO_CMD(dev, init_val_isp_camcording.iso.cmd); IS_ISP_SET_PARAM_ISO_VALUE(dev, init_val_isp_camcording.iso.value); IS_ISP_SET_PARAM_ISO_ERR(dev, init_val_isp_camcording.iso.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_ISO); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_ADJUST_CMD(dev, init_val_isp_camcording.adjust.cmd); IS_ISP_SET_PARAM_ADJUST_CONTRAST(dev, init_val_isp_camcording.adjust.contrast); IS_ISP_SET_PARAM_ADJUST_SATURATION(dev, init_val_isp_camcording.adjust.saturation); IS_ISP_SET_PARAM_ADJUST_SHARPNESS(dev, init_val_isp_camcording.adjust.sharpness); IS_ISP_SET_PARAM_ADJUST_EXPOSURE(dev, init_val_isp_camcording.adjust.exposure); IS_ISP_SET_PARAM_ADJUST_BRIGHTNESS(dev, init_val_isp_camcording.adjust.brightness); IS_ISP_SET_PARAM_ADJUST_HUE(dev, init_val_isp_camcording.adjust.hue); IS_ISP_SET_PARAM_ADJUST_SHUTTER_TIME_MIN(dev, init_val_isp_camcording.adjust.shutter_time_min); IS_ISP_SET_PARAM_ADJUST_SHUTTER_TIME_MAX(dev, init_val_isp_camcording.adjust.shutter_time_max); IS_ISP_SET_PARAM_ADJUST_ERR(dev, init_val_isp_camcording.adjust.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_ADJUST); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_METERING_CMD(dev, init_val_isp_camcording.metering.cmd); IS_ISP_SET_PARAM_METERING_WIN_POS_X(dev, init_val_isp_camcording.metering.win_pos_x); IS_ISP_SET_PARAM_METERING_WIN_POS_Y(dev, init_val_isp_camcording.metering.win_pos_y); IS_ISP_SET_PARAM_METERING_WIN_WIDTH(dev, init_val_isp_camcording.metering.win_width); IS_ISP_SET_PARAM_METERING_WIN_HEIGHT(dev, init_val_isp_camcording.metering.win_height); IS_ISP_SET_PARAM_METERING_ERR(dev, init_val_isp_camcording.metering.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_METERING); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_AFC_CMD(dev, init_val_isp_camcording.afc.cmd); IS_ISP_SET_PARAM_AFC_MANUAL(dev, init_val_isp_camcording.afc.manual); IS_ISP_SET_PARAM_AFC_ERR(dev, init_val_isp_camcording.afc.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_AFC); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_OTF_OUTPUT_CMD(dev, init_val_isp_camcording.otf_output.cmd); IS_ISP_SET_PARAM_OTF_OUTPUT_WIDTH(dev, init_val_isp_camcording.otf_output.width); IS_ISP_SET_PARAM_OTF_OUTPUT_HEIGHT(dev, init_val_isp_camcording.otf_output.height); IS_ISP_SET_PARAM_OTF_OUTPUT_FORMAT(dev, init_val_isp_camcording.otf_output.format); IS_ISP_SET_PARAM_OTF_OUTPUT_BITWIDTH(dev, init_val_isp_camcording.otf_output.bitwidth); IS_ISP_SET_PARAM_OTF_OUTPUT_ORDER(dev, init_val_isp_camcording.otf_output.order); IS_ISP_SET_PARAM_OTF_OUTPUT_ERR(dev, init_val_isp_camcording.otf_output.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_OTF_OUTPUT); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_DMA_OUTPUT1_CMD(dev, init_val_isp_camcording.dma1_output.cmd); IS_ISP_SET_PARAM_DMA_OUTPUT1_WIDTH(dev, init_val_isp_camcording.dma1_output.width); IS_ISP_SET_PARAM_DMA_OUTPUT1_HEIGHT(dev, init_val_isp_camcording.dma1_output.height); IS_ISP_SET_PARAM_DMA_OUTPUT1_FORMAT(dev, init_val_isp_camcording.dma1_output.format); 
IS_ISP_SET_PARAM_DMA_OUTPUT1_BITWIDTH(dev, init_val_isp_camcording.dma1_output.bitwidth); IS_ISP_SET_PARAM_DMA_OUTPUT1_PLANE(dev, init_val_isp_camcording.dma1_output.plane); IS_ISP_SET_PARAM_DMA_OUTPUT1_ORDER(dev, init_val_isp_camcording.dma1_output.order); IS_ISP_SET_PARAM_DMA_OUTPUT1_BUFFER_NUMBER(dev, init_val_isp_camcording.dma1_output.buffer_number); IS_ISP_SET_PARAM_DMA_OUTPUT1_BUFFER_ADDRESS(dev, init_val_isp_camcording.dma1_output.buffer_address); IS_ISP_SET_PARAM_DMA_OUTPUT1_ERR(dev, init_val_isp_camcording.dma1_output.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_DMA1_OUTPUT); IS_INC_PARAM_NUM(dev); IS_ISP_SET_PARAM_DMA_OUTPUT2_CMD(dev, init_val_isp_camcording.dma2_output.cmd); IS_ISP_SET_PARAM_DMA_OUTPUT2_WIDTH(dev, init_val_isp_camcording.dma2_output.width); IS_ISP_SET_PARAM_DMA_OUTPUT2_HEIGHT(dev, init_val_isp_camcording.dma2_output.height); IS_ISP_SET_PARAM_DMA_OUTPUT2_FORMAT(dev, init_val_isp_camcording.dma2_output.format); IS_ISP_SET_PARAM_DMA_OUTPUT2_BITWIDTH(dev, init_val_isp_camcording.dma2_output.bitwidth); IS_ISP_SET_PARAM_DMA_OUTPUT2_PLANE(dev, init_val_isp_camcording.dma2_output.plane); IS_ISP_SET_PARAM_DMA_OUTPUT2_ORDER(dev, init_val_isp_camcording.dma2_output.order); IS_ISP_SET_PARAM_DMA_OUTPUT2_BUFFER_NUMBER(dev, init_val_isp_camcording.dma2_output.buffer_number); IS_ISP_SET_PARAM_DMA_OUTPUT2_BUFFER_ADDRESS(dev, init_val_isp_camcording.dma2_output.buffer_address); IS_ISP_SET_PARAM_DMA_OUTPUT2_ERR(dev, init_val_isp_camcording.dma2_output.err); IS_SET_PARAM_BIT(dev, PARAM_ISP_DMA2_OUTPUT); IS_INC_PARAM_NUM(dev); /* DRC */ IS_DRC_SET_PARAM_CONTROL_CMD(dev, init_val_drc_camcording.control.cmd); IS_DRC_SET_PARAM_CONTROL_BYPASS(dev, init_val_drc_camcording.control.bypass); IS_DRC_SET_PARAM_CONTROL_ERR(dev, init_val_drc_camcording.control.err); IS_SET_PARAM_BIT(dev, PARAM_DRC_CONTROL); IS_INC_PARAM_NUM(dev); IS_DRC_SET_PARAM_OTF_INPUT_CMD(dev, init_val_drc_camcording.otf_input.cmd); IS_DRC_SET_PARAM_OTF_INPUT_WIDTH(dev, init_val_drc_camcording.otf_input.width); IS_DRC_SET_PARAM_OTF_INPUT_HEIGHT(dev, init_val_drc_camcording.otf_input.height); IS_DRC_SET_PARAM_OTF_INPUT_FORMAT(dev, init_val_drc_camcording.otf_input.format); IS_DRC_SET_PARAM_OTF_INPUT_BITWIDTH(dev, init_val_drc_camcording.otf_input.bitwidth); IS_DRC_SET_PARAM_OTF_INPUT_ORDER(dev, init_val_drc_camcording.otf_input.order); IS_DRC_SET_PARAM_OTF_INPUT_ERR(dev, init_val_drc_camcording.otf_input.err); IS_SET_PARAM_BIT(dev, PARAM_DRC_OTF_INPUT); IS_INC_PARAM_NUM(dev); IS_DRC_SET_PARAM_DMA_INPUT_CMD(dev, init_val_drc_camcording.dma_input.cmd); IS_DRC_SET_PARAM_DMA_INPUT_WIDTH(dev, init_val_drc_camcording.dma_input.width); IS_DRC_SET_PARAM_DMA_INPUT_HEIGHT(dev, init_val_drc_camcording.dma_input.height); IS_DRC_SET_PARAM_DMA_INPUT_FORMAT(dev, init_val_drc_camcording.dma_input.format); IS_DRC_SET_PARAM_DMA_INPUT_BITWIDTH(dev, init_val_drc_camcording.dma_input.bitwidth); IS_DRC_SET_PARAM_DMA_INPUT_PLANE(dev, init_val_drc_camcording.dma_input.plane); IS_DRC_SET_PARAM_DMA_INPUT_ORDER(dev, init_val_drc_camcording.dma_input.order); IS_DRC_SET_PARAM_DMA_INPUT_BUFFERNUM(dev, init_val_drc_camcording.dma_input.buffer_number); IS_DRC_SET_PARAM_DMA_INPUT_BUFFERADDR(dev, init_val_drc_camcording.dma_input.buffer_address); IS_DRC_SET_PARAM_DMA_INPUT_ERR(dev, init_val_drc_camcording.dma_input.err); IS_SET_PARAM_BIT(dev, PARAM_DRC_DMA_INPUT); IS_INC_PARAM_NUM(dev); IS_DRC_SET_PARAM_OTF_OUTPUT_CMD(dev, init_val_drc_camcording.otf_output.cmd); IS_DRC_SET_PARAM_OTF_OUTPUT_WIDTH(dev, init_val_drc_camcording.otf_output.width); 
IS_DRC_SET_PARAM_OTF_OUTPUT_HEIGHT(dev, init_val_drc_camcording.otf_output.height); IS_DRC_SET_PARAM_OTF_OUTPUT_FORMAT(dev, init_val_drc_camcording.otf_output.format); IS_DRC_SET_PARAM_OTF_OUTPUT_BITWIDTH(dev, init_val_drc_camcording.otf_output.bitwidth); IS_DRC_SET_PARAM_OTF_OUTPUT_ORDER(dev, init_val_drc_camcording.otf_output.order); IS_DRC_SET_PARAM_OTF_OUTPUT_ERR(dev, init_val_drc_camcording.otf_output.err); IS_SET_PARAM_BIT(dev, PARAM_DRC_OTF_OUTPUT); IS_INC_PARAM_NUM(dev); /* FD */ IS_FD_SET_PARAM_CONTROL_CMD(dev, init_val_fd_camcording.control.cmd); IS_FD_SET_PARAM_CONTROL_BYPASS(dev, init_val_fd_camcording.control.bypass); IS_FD_SET_PARAM_CONTROL_ERR(dev, init_val_fd_camcording.control.err); IS_SET_PARAM_BIT(dev, PARAM_FD_CONTROL); IS_INC_PARAM_NUM(dev); IS_FD_SET_PARAM_OTF_INPUT_CMD(dev, init_val_fd_camcording.otf_input.cmd); IS_FD_SET_PARAM_OTF_INPUT_WIDTH(dev, init_val_fd_camcording.otf_input.width); IS_FD_SET_PARAM_OTF_INPUT_HEIGHT(dev, init_val_fd_camcording.otf_input.height); IS_FD_SET_PARAM_OTF_INPUT_FORMAT(dev, init_val_fd_camcording.otf_input.format); IS_FD_SET_PARAM_OTF_INPUT_BITWIDTH(dev, init_val_fd_camcording.otf_input.bitwidth); IS_FD_SET_PARAM_OTF_INPUT_ORDER(dev, init_val_fd_camcording.otf_input.order); IS_FD_SET_PARAM_OTF_INPUT_ERR(dev, init_val_fd_camcording.otf_input.err); IS_SET_PARAM_BIT(dev, PARAM_FD_OTF_INPUT); IS_INC_PARAM_NUM(dev); IS_FD_SET_PARAM_DMA_INPUT_CMD(dev, init_val_fd_camcording.dma_input.cmd); IS_FD_SET_PARAM_DMA_INPUT_WIDTH(dev, init_val_fd_camcording.dma_input.width); IS_FD_SET_PARAM_DMA_INPUT_HEIGHT(dev, init_val_fd_camcording.dma_input.height); IS_FD_SET_PARAM_DMA_INPUT_FORMAT(dev, init_val_fd_camcording.dma_input.format); IS_FD_SET_PARAM_DMA_INPUT_BITWIDTH(dev, init_val_fd_camcording.dma_input.bitwidth); IS_FD_SET_PARAM_DMA_INPUT_PLANE(dev, init_val_fd_camcording.dma_input.plane); IS_FD_SET_PARAM_DMA_INPUT_ORDER(dev, init_val_fd_camcording.dma_input.order); IS_FD_SET_PARAM_DMA_INPUT_BUFFERNUM(dev, init_val_fd_camcording.dma_input.buffer_number); IS_FD_SET_PARAM_DMA_INPUT_BUFFERADDR(dev, init_val_fd_camcording.dma_input.buffer_address); IS_FD_SET_PARAM_DMA_INPUT_ERR(dev, init_val_fd_camcording.dma_input.err); IS_SET_PARAM_BIT(dev, PARAM_FD_DMA_INPUT); IS_INC_PARAM_NUM(dev); IS_FD_SET_PARAM_FD_CONFIG_CMD(dev, init_val_fd_camcording.config.cmd); IS_FD_SET_PARAM_FD_CONFIG_MAX_NUMBER(dev, init_val_fd_camcording.config.max_number); IS_FD_SET_PARAM_FD_CONFIG_ROLL_ANGLE(dev, init_val_fd_camcording.config.roll_angle); IS_FD_SET_PARAM_FD_CONFIG_YAW_ANGLE(dev, init_val_fd_camcording.config.yaw_angle); IS_FD_SET_PARAM_FD_CONFIG_SMILE_MODE(dev, init_val_fd_camcording.config.smile_mode); IS_FD_SET_PARAM_FD_CONFIG_BLINK_MODE(dev, init_val_fd_camcording.config.blink_mode); IS_FD_SET_PARAM_FD_CONFIG_EYE_DETECT(dev, init_val_fd_camcording.config.eye_detect); IS_FD_SET_PARAM_FD_CONFIG_MOUTH_DETECT(dev, init_val_fd_camcording.config.mouth_detect); IS_FD_SET_PARAM_FD_CONFIG_ORIENTATION(dev, init_val_fd_camcording.config.orientation); IS_FD_SET_PARAM_FD_CONFIG_ORIENTATION_VALUE(dev, init_val_fd_camcording.config.orientation_value); IS_FD_SET_PARAM_FD_CONFIG_ERR(dev, init_val_fd_camcording.config.err); IS_SET_PARAM_BIT(dev, PARAM_FD_CONFIG); IS_INC_PARAM_NUM(dev); break; } }
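The switch above repeats one bookkeeping idiom for every parameter block: write the block's fields, set that block's bit in a dirty bitmap, and bump a changed-block counter so the firmware knows how much of the shared parameter region to re-read; setting the wrong bit for a block would desynchronize host and firmware. Below is a minimal standalone sketch of that bookkeeping. struct is_param_shadow, update_param() and the constants are invented for illustration only; the driver's IS_* macros write into the FIMC-IS shared-memory region instead.

#include <stdint.h>
#include <stdio.h>

#define MAX_PARAMS 64                     /* hypothetical block count */

struct is_param_shadow {
        uint64_t dirty_bits;              /* analogue of IS_SET_PARAM_BIT() */
        unsigned int num_changed;         /* analogue of IS_INC_PARAM_NUM() */
        uint32_t block[MAX_PARAMS];       /* stand-in for the real parameter structs */
};

/* Write one parameter block, then flag and count it exactly once. */
static void update_param(struct is_param_shadow *s, unsigned int idx,
                         uint32_t value)
{
        s->block[idx] = value;            /* the IS_*_SET_PARAM_*() field writes */
        s->dirty_bits |= 1ULL << idx;     /* IS_SET_PARAM_BIT(dev, PARAM_...) */
        s->num_changed++;                 /* IS_INC_PARAM_NUM(dev) */
}

int main(void)
{
        struct is_param_shadow s = { 0 };

        update_param(&s, 3, 1280);        /* e.g. an OTF input block */
        update_param(&s, 4, 720);         /* e.g. an OTF output block */
        printf("dirty=0x%llx changed=%u\n",
               (unsigned long long)s.dirty_bits, s.num_changed);
        return 0;
}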
gpl-2.0
Stefan-Schmidt/linux-2.6
fs/hfsplus/catalog.c
56
10886
/* * linux/fs/hfsplus/catalog.c * * Copyright (C) 2001 * Brad Boyer (flar@allandria.com) * (C) 2003 Ardis Technologies <roman@ardistech.com> * * Handling of catalog records */ #include "hfsplus_fs.h" #include "hfsplus_raw.h" int hfsplus_cat_case_cmp_key(const hfsplus_btree_key *k1, const hfsplus_btree_key *k2) { __be32 k1p, k2p; k1p = k1->cat.parent; k2p = k2->cat.parent; if (k1p != k2p) return be32_to_cpu(k1p) < be32_to_cpu(k2p) ? -1 : 1; return hfsplus_strcasecmp(&k1->cat.name, &k2->cat.name); } int hfsplus_cat_bin_cmp_key(const hfsplus_btree_key *k1, const hfsplus_btree_key *k2) { __be32 k1p, k2p; k1p = k1->cat.parent; k2p = k2->cat.parent; if (k1p != k2p) return be32_to_cpu(k1p) < be32_to_cpu(k2p) ? -1 : 1; return hfsplus_strcmp(&k1->cat.name, &k2->cat.name); } void hfsplus_cat_build_key(struct super_block *sb, hfsplus_btree_key *key, u32 parent, struct qstr *str) { int len; key->cat.parent = cpu_to_be32(parent); if (str) { hfsplus_asc2uni(sb, &key->cat.name, str->name, str->len); len = be16_to_cpu(key->cat.name.length); } else { key->cat.name.length = 0; len = 0; } key->key_len = cpu_to_be16(6 + 2 * len); } static void hfsplus_cat_build_key_uni(hfsplus_btree_key *key, u32 parent, struct hfsplus_unistr *name) { int ustrlen; ustrlen = be16_to_cpu(name->length); key->cat.parent = cpu_to_be32(parent); key->cat.name.length = cpu_to_be16(ustrlen); ustrlen *= 2; memcpy(key->cat.name.unicode, name->unicode, ustrlen); key->key_len = cpu_to_be16(6 + ustrlen); } void hfsplus_cat_set_perms(struct inode *inode, struct hfsplus_perm *perms) { if (inode->i_flags & S_IMMUTABLE) perms->rootflags |= HFSPLUS_FLG_IMMUTABLE; else perms->rootflags &= ~HFSPLUS_FLG_IMMUTABLE; if (inode->i_flags & S_APPEND) perms->rootflags |= HFSPLUS_FLG_APPEND; else perms->rootflags &= ~HFSPLUS_FLG_APPEND; perms->userflags = HFSPLUS_I(inode)->userflags; perms->mode = cpu_to_be16(inode->i_mode); perms->owner = cpu_to_be32(inode->i_uid); perms->group = cpu_to_be32(inode->i_gid); if (S_ISREG(inode->i_mode)) perms->dev = cpu_to_be32(inode->i_nlink); else if (S_ISBLK(inode->i_mode) || S_ISCHR(inode->i_mode)) perms->dev = cpu_to_be32(inode->i_rdev); else perms->dev = 0; } static int hfsplus_cat_build_record(hfsplus_cat_entry *entry, u32 cnid, struct inode *inode) { struct hfsplus_sb_info *sbi = HFSPLUS_SB(inode->i_sb); if (S_ISDIR(inode->i_mode)) { struct hfsplus_cat_folder *folder; folder = &entry->folder; memset(folder, 0, sizeof(*folder)); folder->type = cpu_to_be16(HFSPLUS_FOLDER); folder->id = cpu_to_be32(inode->i_ino); HFSPLUS_I(inode)->create_date = folder->create_date = folder->content_mod_date = folder->attribute_mod_date = folder->access_date = hfsp_now2mt(); hfsplus_cat_set_perms(inode, &folder->permissions); if (inode == sbi->hidden_dir) /* invisible and namelocked */ folder->user_info.frFlags = cpu_to_be16(0x5000); return sizeof(*folder); } else { struct hfsplus_cat_file *file; file = &entry->file; memset(file, 0, sizeof(*file)); file->type = cpu_to_be16(HFSPLUS_FILE); file->flags = cpu_to_be16(HFSPLUS_FILE_THREAD_EXISTS); file->id = cpu_to_be32(cnid); HFSPLUS_I(inode)->create_date = file->create_date = file->content_mod_date = file->attribute_mod_date = file->access_date = hfsp_now2mt(); if (cnid == inode->i_ino) { hfsplus_cat_set_perms(inode, &file->permissions); if (S_ISLNK(inode->i_mode)) { file->user_info.fdType = cpu_to_be32(HFSP_SYMLINK_TYPE); file->user_info.fdCreator = cpu_to_be32(HFSP_SYMLINK_CREATOR); } else { file->user_info.fdType = cpu_to_be32(sbi->type); file->user_info.fdCreator = 
cpu_to_be32(sbi->creator); } if ((file->permissions.rootflags | file->permissions.userflags) & HFSPLUS_FLG_IMMUTABLE) file->flags |= cpu_to_be16(HFSPLUS_FILE_LOCKED); } else { file->user_info.fdType = cpu_to_be32(HFSP_HARDLINK_TYPE); file->user_info.fdCreator = cpu_to_be32(HFSP_HFSPLUS_CREATOR); file->user_info.fdFlags = cpu_to_be16(0x100); file->create_date = HFSPLUS_I(sbi->hidden_dir)->create_date; file->permissions.dev = cpu_to_be32(HFSPLUS_I(inode)->linkid); } return sizeof(*file); } } static int hfsplus_fill_cat_thread(struct super_block *sb, hfsplus_cat_entry *entry, int type, u32 parentid, struct qstr *str) { entry->type = cpu_to_be16(type); entry->thread.reserved = 0; entry->thread.parentID = cpu_to_be32(parentid); hfsplus_asc2uni(sb, &entry->thread.nodeName, str->name, str->len); return 10 + be16_to_cpu(entry->thread.nodeName.length) * 2; } /* Try to get a catalog entry for given catalog id */ int hfsplus_find_cat(struct super_block *sb, u32 cnid, struct hfs_find_data *fd) { hfsplus_cat_entry tmp; int err; u16 type; hfsplus_cat_build_key(sb, fd->search_key, cnid, NULL); err = hfs_brec_read(fd, &tmp, sizeof(hfsplus_cat_entry)); if (err) return err; type = be16_to_cpu(tmp.type); if (type != HFSPLUS_FOLDER_THREAD && type != HFSPLUS_FILE_THREAD) { printk(KERN_ERR "hfs: found bad thread record in catalog\n"); return -EIO; } if (be16_to_cpu(tmp.thread.nodeName.length) > 255) { printk(KERN_ERR "hfs: catalog name length corrupted\n"); return -EIO; } hfsplus_cat_build_key_uni(fd->search_key, be32_to_cpu(tmp.thread.parentID), &tmp.thread.nodeName); return hfs_brec_find(fd); } int hfsplus_create_cat(u32 cnid, struct inode *dir, struct qstr *str, struct inode *inode) { struct super_block *sb = dir->i_sb; struct hfs_find_data fd; hfsplus_cat_entry entry; int entry_size; int err; dprint(DBG_CAT_MOD, "create_cat: %s,%u(%d)\n", str->name, cnid, inode->i_nlink); hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL); entry_size = hfsplus_fill_cat_thread(sb, &entry, S_ISDIR(inode->i_mode) ? HFSPLUS_FOLDER_THREAD : HFSPLUS_FILE_THREAD, dir->i_ino, str); err = hfs_brec_find(&fd); if (err != -ENOENT) { if (!err) err = -EEXIST; goto err2; } err = hfs_brec_insert(&fd, &entry, entry_size); if (err) goto err2; hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str); entry_size = hfsplus_cat_build_record(&entry, cnid, inode); err = hfs_brec_find(&fd); if (err != -ENOENT) { /* panic? */ if (!err) err = -EEXIST; goto err1; } err = hfs_brec_insert(&fd, &entry, entry_size); if (err) goto err1; dir->i_size++; dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(dir); hfs_find_exit(&fd); return 0; err1: hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL); if (!hfs_brec_find(&fd)) hfs_brec_remove(&fd); err2: hfs_find_exit(&fd); return err; } int hfsplus_delete_cat(u32 cnid, struct inode *dir, struct qstr *str) { struct super_block *sb = dir->i_sb; struct hfs_find_data fd; struct hfsplus_fork_raw fork; struct list_head *pos; int err, off; u16 type; dprint(DBG_CAT_MOD, "delete_cat: %s,%u\n", str ? 
str->name : NULL, cnid); hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &fd); if (!str) { int len; hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL); err = hfs_brec_find(&fd); if (err) goto out; off = fd.entryoffset + offsetof(struct hfsplus_cat_thread, nodeName); fd.search_key->cat.parent = cpu_to_be32(dir->i_ino); hfs_bnode_read(fd.bnode, &fd.search_key->cat.name.length, off, 2); len = be16_to_cpu(fd.search_key->cat.name.length) * 2; hfs_bnode_read(fd.bnode, &fd.search_key->cat.name.unicode, off + 2, len); fd.search_key->key_len = cpu_to_be16(6 + len); } else hfsplus_cat_build_key(sb, fd.search_key, dir->i_ino, str); err = hfs_brec_find(&fd); if (err) goto out; type = hfs_bnode_read_u16(fd.bnode, fd.entryoffset); if (type == HFSPLUS_FILE) { #if 0 off = fd.entryoffset + offsetof(hfsplus_cat_file, data_fork); hfs_bnode_read(fd.bnode, &fork, off, sizeof(fork)); hfsplus_free_fork(sb, cnid, &fork, HFSPLUS_TYPE_DATA); #endif off = fd.entryoffset + offsetof(struct hfsplus_cat_file, rsrc_fork); hfs_bnode_read(fd.bnode, &fork, off, sizeof(fork)); hfsplus_free_fork(sb, cnid, &fork, HFSPLUS_TYPE_RSRC); } list_for_each(pos, &HFSPLUS_I(dir)->open_dir_list) { struct hfsplus_readdir_data *rd = list_entry(pos, struct hfsplus_readdir_data, list); if (fd.tree->keycmp(fd.search_key, (void *)&rd->key) < 0) rd->file->f_pos--; } err = hfs_brec_remove(&fd); if (err) goto out; hfsplus_cat_build_key(sb, fd.search_key, cnid, NULL); err = hfs_brec_find(&fd); if (err) goto out; err = hfs_brec_remove(&fd); if (err) goto out; dir->i_size--; dir->i_mtime = dir->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(dir); out: hfs_find_exit(&fd); return err; } int hfsplus_rename_cat(u32 cnid, struct inode *src_dir, struct qstr *src_name, struct inode *dst_dir, struct qstr *dst_name) { struct super_block *sb = src_dir->i_sb; struct hfs_find_data src_fd, dst_fd; hfsplus_cat_entry entry; int entry_size, type; int err = 0; dprint(DBG_CAT_MOD, "rename_cat: %u - %lu,%s - %lu,%s\n", cnid, src_dir->i_ino, src_name->name, dst_dir->i_ino, dst_name->name); hfs_find_init(HFSPLUS_SB(sb)->cat_tree, &src_fd); dst_fd = src_fd; /* find the old dir entry and read the data */ hfsplus_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name); err = hfs_brec_find(&src_fd); if (err) goto out; hfs_bnode_read(src_fd.bnode, &entry, src_fd.entryoffset, src_fd.entrylength); /* create new dir entry with the data from the old entry */ hfsplus_cat_build_key(sb, dst_fd.search_key, dst_dir->i_ino, dst_name); err = hfs_brec_find(&dst_fd); if (err != -ENOENT) { if (!err) err = -EEXIST; goto out; } err = hfs_brec_insert(&dst_fd, &entry, src_fd.entrylength); if (err) goto out; dst_dir->i_size++; dst_dir->i_mtime = dst_dir->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(dst_dir); /* finally remove the old entry */ hfsplus_cat_build_key(sb, src_fd.search_key, src_dir->i_ino, src_name); err = hfs_brec_find(&src_fd); if (err) goto out; err = hfs_brec_remove(&src_fd); if (err) goto out; src_dir->i_size--; src_dir->i_mtime = src_dir->i_ctime = CURRENT_TIME_SEC; mark_inode_dirty(src_dir); /* remove old thread entry */ hfsplus_cat_build_key(sb, src_fd.search_key, cnid, NULL); err = hfs_brec_find(&src_fd); if (err) goto out; type = hfs_bnode_read_u16(src_fd.bnode, src_fd.entryoffset); err = hfs_brec_remove(&src_fd); if (err) goto out; /* create new thread entry */ hfsplus_cat_build_key(sb, dst_fd.search_key, cnid, NULL); entry_size = hfsplus_fill_cat_thread(sb, &entry, type, dst_dir->i_ino, dst_name); err = hfs_brec_find(&dst_fd); if (err != -ENOENT) { if (!err) err = 
-EEXIST; goto out; } err = hfs_brec_insert(&dst_fd, &entry, entry_size); out: hfs_bnode_put(dst_fd.bnode); hfs_find_exit(&src_fd); return err; }
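hfsplus_find_cat(), hfsplus_create_cat(), hfsplus_delete_cat() and hfsplus_rename_cat() above all lean on the catalog B-tree's key ordering: keys compare first on the parent directory's CNID and only then on the name, so a directory's entries sit together in the tree, and a folder's thread record (parent set to the folder's own CNID, empty name) sorts just ahead of its children. A standalone sketch of that two-level comparison, simplified to host byte order and ASCII names (the real keys hold big-endian CNIDs and UTF-16 strings compared via hfsplus_strcasecmp() or hfsplus_strcmp(); struct cat_key and cat_cmp_key() here are invented for illustration):

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <strings.h>    /* POSIX strcasecmp() */

struct cat_key {
        uint32_t parent;                  /* CNID of the containing directory */
        char name[32];                    /* ASCII stand-in for hfsplus_unistr */
};

static int cat_cmp_key(const struct cat_key *k1, const struct cat_key *k2,
                       int casefold)
{
        if (k1->parent != k2->parent)     /* primary: parent CNID */
                return k1->parent < k2->parent ? -1 : 1;
        /* secondary: the name, folded or binary like the two kernel variants */
        return casefold ? strcasecmp(k1->name, k2->name)
                        : strcmp(k1->name, k2->name);
}

int main(void)
{
        struct cat_key thread = { 19, "" };       /* thread record: empty name */
        struct cat_key a = { 19, "Notes" }, b = { 19, "notes" };

        printf("thread first: %d\n", cat_cmp_key(&thread, &a, 1)); /* < 0 */
        printf("casefolded:   %d\n", cat_cmp_key(&a, &b, 1));      /* == 0 */
        printf("binary:       %d\n", cat_cmp_key(&a, &b, 0));      /* != 0 */
        return 0;
}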
gpl-2.0
GaloisInc/linux-deadline
drivers/spi/amba-pl022.c
56
65403
/* * drivers/spi/amba-pl022.c * * A driver for the ARM PL022 PrimeCell SSP/SPI bus master. * * Copyright (C) 2008-2009 ST-Ericsson AB * Copyright (C) 2006 STMicroelectronics Pvt. Ltd. * * Author: Linus Walleij <linus.walleij@stericsson.com> * * Initial version inspired by: * linux-2.6.17-rc3-mm1/drivers/spi/pxa2xx_spi.c * Initial adoption to PL022 by: * Sachin Verma <sachin.verma@st.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ /* * TODO: * - add timeout on polled transfers */ #include <linux/init.h> #include <linux/module.h> #include <linux/device.h> #include <linux/ioport.h> #include <linux/errno.h> #include <linux/interrupt.h> #include <linux/spi/spi.h> #include <linux/workqueue.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/amba/bus.h> #include <linux/amba/pl022.h> #include <linux/io.h> #include <linux/slab.h> #include <linux/dmaengine.h> #include <linux/dma-mapping.h> #include <linux/scatterlist.h> /* * This macro is used to define some register default values. * reg is masked with mask, then OR:ed with an (again masked) * val shifted sb steps to the left. */ #define SSP_WRITE_BITS(reg, val, mask, sb) \ ((reg) = (((reg) & ~(mask)) | (((val)<<(sb)) & (mask)))) /* * This macro is also used to define some default values. * It will just shift val by sb steps to the left and mask * the result with mask.
*/ #define GEN_MASK_BITS(val, mask, sb) \ (((val)<<(sb)) & (mask)) #define DRIVE_TX 0 #define DO_NOT_DRIVE_TX 1 #define DO_NOT_QUEUE_DMA 0 #define QUEUE_DMA 1 #define RX_TRANSFER 1 #define TX_TRANSFER 2 /* * Macros to access SSP Registers with their offsets */ #define SSP_CR0(r) (r + 0x000) #define SSP_CR1(r) (r + 0x004) #define SSP_DR(r) (r + 0x008) #define SSP_SR(r) (r + 0x00C) #define SSP_CPSR(r) (r + 0x010) #define SSP_IMSC(r) (r + 0x014) #define SSP_RIS(r) (r + 0x018) #define SSP_MIS(r) (r + 0x01C) #define SSP_ICR(r) (r + 0x020) #define SSP_DMACR(r) (r + 0x024) #define SSP_ITCR(r) (r + 0x080) #define SSP_ITIP(r) (r + 0x084) #define SSP_ITOP(r) (r + 0x088) #define SSP_TDR(r) (r + 0x08C) #define SSP_PID0(r) (r + 0xFE0) #define SSP_PID1(r) (r + 0xFE4) #define SSP_PID2(r) (r + 0xFE8) #define SSP_PID3(r) (r + 0xFEC) #define SSP_CID0(r) (r + 0xFF0) #define SSP_CID1(r) (r + 0xFF4) #define SSP_CID2(r) (r + 0xFF8) #define SSP_CID3(r) (r + 0xFFC) /* * SSP Control Register 0 - SSP_CR0 */ #define SSP_CR0_MASK_DSS (0x0FUL << 0) #define SSP_CR0_MASK_FRF (0x3UL << 4) #define SSP_CR0_MASK_SPO (0x1UL << 6) #define SSP_CR0_MASK_SPH (0x1UL << 7) #define SSP_CR0_MASK_SCR (0xFFUL << 8) /* * The ST version of this block moves some bits * in SSP_CR0 and extends it to 32 bits */ #define SSP_CR0_MASK_DSS_ST (0x1FUL << 0) #define SSP_CR0_MASK_HALFDUP_ST (0x1UL << 5) #define SSP_CR0_MASK_CSS_ST (0x1FUL << 16) #define SSP_CR0_MASK_FRF_ST (0x3UL << 21) /* * SSP Control Register 1 - SSP_CR1 */ #define SSP_CR1_MASK_LBM (0x1UL << 0) #define SSP_CR1_MASK_SSE (0x1UL << 1) #define SSP_CR1_MASK_MS (0x1UL << 2) #define SSP_CR1_MASK_SOD (0x1UL << 3) /* * The ST version of this block adds some bits * in SSP_CR1 */ #define SSP_CR1_MASK_RENDN_ST (0x1UL << 4) #define SSP_CR1_MASK_TENDN_ST (0x1UL << 5) #define SSP_CR1_MASK_MWAIT_ST (0x1UL << 6) #define SSP_CR1_MASK_RXIFLSEL_ST (0x7UL << 7) #define SSP_CR1_MASK_TXIFLSEL_ST (0x7UL << 10) /* This one is only in the PL023 variant */ #define SSP_CR1_MASK_FBCLKDEL_ST (0x7UL << 13) /* * SSP Status Register - SSP_SR */ #define SSP_SR_MASK_TFE (0x1UL << 0) /* Transmit FIFO empty */ #define SSP_SR_MASK_TNF (0x1UL << 1) /* Transmit FIFO not full */ #define SSP_SR_MASK_RNE (0x1UL << 2) /* Receive FIFO not empty */ #define SSP_SR_MASK_RFF (0x1UL << 3) /* Receive FIFO full */ #define SSP_SR_MASK_BSY (0x1UL << 4) /* Busy Flag */ /* * SSP Clock Prescale Register - SSP_CPSR */ #define SSP_CPSR_MASK_CPSDVSR (0xFFUL << 0) /* * SSP Interrupt Mask Set/Clear Register - SSP_IMSC */ #define SSP_IMSC_MASK_RORIM (0x1UL << 0) /* Receive Overrun Interrupt mask */ #define SSP_IMSC_MASK_RTIM (0x1UL << 1) /* Receive timeout Interrupt mask */ #define SSP_IMSC_MASK_RXIM (0x1UL << 2) /* Receive FIFO Interrupt mask */ #define SSP_IMSC_MASK_TXIM (0x1UL << 3) /* Transmit FIFO Interrupt mask */ /* * SSP Raw Interrupt Status Register - SSP_RIS */ /* Receive Overrun Raw Interrupt status */ #define SSP_RIS_MASK_RORRIS (0x1UL << 0) /* Receive Timeout Raw Interrupt status */ #define SSP_RIS_MASK_RTRIS (0x1UL << 1) /* Receive FIFO Raw Interrupt status */ #define SSP_RIS_MASK_RXRIS (0x1UL << 2) /* Transmit FIFO Raw Interrupt status */ #define SSP_RIS_MASK_TXRIS (0x1UL << 3) /* * SSP Masked Interrupt Status Register - SSP_MIS */ /* Receive Overrun Masked Interrupt status */ #define SSP_MIS_MASK_RORMIS (0x1UL << 0) /* Receive Timeout Masked Interrupt status */ #define SSP_MIS_MASK_RTMIS (0x1UL << 1) /* Receive FIFO Masked Interrupt status */ #define SSP_MIS_MASK_RXMIS (0x1UL << 2) /* Transmit FIFO Masked Interrupt status */
#define SSP_MIS_MASK_TXMIS (0x1UL << 3) /* * SSP Interrupt Clear Register - SSP_ICR */ /* Receive Overrun Raw Clear Interrupt bit */ #define SSP_ICR_MASK_RORIC (0x1UL << 0) /* Receive Timeout Clear Interrupt bit */ #define SSP_ICR_MASK_RTIC (0x1UL << 1) /* * SSP DMA Control Register - SSP_DMACR */ /* Receive DMA Enable bit */ #define SSP_DMACR_MASK_RXDMAE (0x1UL << 0) /* Transmit DMA Enable bit */ #define SSP_DMACR_MASK_TXDMAE (0x1UL << 1) /* * SSP Integration Test control Register - SSP_ITCR */ #define SSP_ITCR_MASK_ITEN (0x1UL << 0) #define SSP_ITCR_MASK_TESTFIFO (0x1UL << 1) /* * SSP Integration Test Input Register - SSP_ITIP */ #define ITIP_MASK_SSPRXD (0x1UL << 0) #define ITIP_MASK_SSPFSSIN (0x1UL << 1) #define ITIP_MASK_SSPCLKIN (0x1UL << 2) #define ITIP_MASK_RXDMAC (0x1UL << 3) #define ITIP_MASK_TXDMAC (0x1UL << 4) #define ITIP_MASK_SSPTXDIN (0x1UL << 5) /* * SSP Integration Test output Register - SSP_ITOP */ #define ITOP_MASK_SSPTXD (0x1UL << 0) #define ITOP_MASK_SSPFSSOUT (0x1UL << 1) #define ITOP_MASK_SSPCLKOUT (0x1UL << 2) #define ITOP_MASK_SSPOEn (0x1UL << 3) #define ITOP_MASK_SSPCTLOEn (0x1UL << 4) #define ITOP_MASK_RORINTR (0x1UL << 5) #define ITOP_MASK_RTINTR (0x1UL << 6) #define ITOP_MASK_RXINTR (0x1UL << 7) #define ITOP_MASK_TXINTR (0x1UL << 8) #define ITOP_MASK_INTR (0x1UL << 9) #define ITOP_MASK_RXDMABREQ (0x1UL << 10) #define ITOP_MASK_RXDMASREQ (0x1UL << 11) #define ITOP_MASK_TXDMABREQ (0x1UL << 12) #define ITOP_MASK_TXDMASREQ (0x1UL << 13) /* * SSP Test Data Register - SSP_TDR */ #define TDR_MASK_TESTDATA (0xFFFFFFFF) /* * Message State * we use the spi_message.state (void *) pointer to * hold a single state value, that's why all this * (void *) casting is done here. */ #define STATE_START ((void *) 0) #define STATE_RUNNING ((void *) 1) #define STATE_DONE ((void *) 2) #define STATE_ERROR ((void *) -1) /* * Queue State */ #define QUEUE_RUNNING (0) #define QUEUE_STOPPED (1) /* * SSP State - Whether Enabled or Disabled */ #define SSP_DISABLED (0) #define SSP_ENABLED (1) /* * SSP DMA State - Whether DMA Enabled or Disabled */ #define SSP_DMA_DISABLED (0) #define SSP_DMA_ENABLED (1) /* * SSP Clock Defaults */ #define SSP_DEFAULT_CLKRATE 0x2 #define SSP_DEFAULT_PRESCALE 0x40 /* * SSP Clock Parameter ranges */ #define CPSDVR_MIN 0x02 #define CPSDVR_MAX 0xFE #define SCR_MIN 0x00 #define SCR_MAX 0xFF /* * SSP Interrupt related Macros */ #define DEFAULT_SSP_REG_IMSC 0x0UL #define DISABLE_ALL_INTERRUPTS DEFAULT_SSP_REG_IMSC #define ENABLE_ALL_INTERRUPTS (~DEFAULT_SSP_REG_IMSC) #define CLEAR_ALL_INTERRUPTS 0x3 /* * The type of reading going on on this chip */ enum ssp_reading { READING_NULL, READING_U8, READING_U16, READING_U32 }; /** * The type of writing going on on this chip */ enum ssp_writing { WRITING_NULL, WRITING_U8, WRITING_U16, WRITING_U32 }; /** * struct vendor_data - vendor-specific config parameters * for PL022 derivates * @fifodepth: depth of FIFOs (both) * @max_bpw: maximum number of bits per word * @unidir: supports unidirection transfers * @extended_cr: 32 bit wide control register 0 with extra * features and extra features in CR1 as found in the ST variants * @pl023: supports a subset of the ST extensions called "PL023" */ struct vendor_data { int fifodepth; int max_bpw; bool unidir; bool extended_cr; bool pl023; }; /** * struct pl022 - This is the private SSP driver data structure * @adev: AMBA device model hookup * @vendor: Vendor data for the IP block * @phybase: The physical memory where the SSP device resides * @virtbase: The virtual memory where the SSP is 
mapped * @master: SPI framework hookup * @master_info: controller-specific data from machine setup * @regs: SSP controller register's virtual address * @pump_messages: Work struct for scheduling work to the workqueue * @lock: spinlock to synchronise access to driver data * @workqueue: a workqueue on which any spi_message request is queued * @busy: workqueue is busy * @run: workqueue is running * @pump_transfers: Tasklet used in Interrupt Transfer mode * @cur_msg: Pointer to current spi_message being processed * @cur_transfer: Pointer to current spi_transfer * @cur_chip: pointer to current client's chip (assigned from controller_state) * @tx: current position in TX buffer to be read * @tx_end: end position in TX buffer to be read * @rx: current position in RX buffer to be written * @rx_end: end position in RX buffer to be written * @readingtype: the type of read currently going on * @writingtype: the type of write currently going on */ struct pl022 { struct amba_device *adev; struct vendor_data *vendor; resource_size_t phybase; void __iomem *virtbase; struct clk *clk; struct spi_master *master; struct pl022_ssp_controller *master_info; /* Driver message queue */ struct workqueue_struct *workqueue; struct work_struct pump_messages; spinlock_t queue_lock; struct list_head queue; int busy; int run; /* Message transfer pump */ struct tasklet_struct pump_transfers; struct spi_message *cur_msg; struct spi_transfer *cur_transfer; struct chip_data *cur_chip; void *tx; void *tx_end; void *rx; void *rx_end; enum ssp_reading read; enum ssp_writing write; u32 exp_fifo_level; /* DMA settings */ #ifdef CONFIG_DMA_ENGINE struct dma_chan *dma_rx_channel; struct dma_chan *dma_tx_channel; struct sg_table sgt_rx; struct sg_table sgt_tx; char *dummypage; #endif }; /** * struct chip_data - To maintain runtime state of SSP for each client chip * @cr0: Value of control register CR0 of SSP - on later ST variants this * register is 32 bits wide rather than just 16 * @cr1: Value of control register CR1 of SSP * @dmacr: Value of DMA control Register of SSP * @cpsr: Value of Clock prescale register * @n_bytes: how many bytes (power of 2) required for a given data width of client * @enable_dma: Whether to enable DMA or not * @write: function ptr to be used to write when doing xfer for this chip * @read: function ptr to be used to read when doing xfer for this chip * @cs_control: chip select callback provided by chip * @xfer_type: polling/interrupt/DMA * * Runtime state of the SSP controller, maintained per chip. * This would be set according to the current message that would be served */ struct chip_data { u32 cr0; u16 cr1; u16 dmacr; u16 cpsr; u8 n_bytes; bool enable_dma; enum ssp_reading read; enum ssp_writing write; void (*cs_control) (u32 command); int xfer_type; }; /** * null_cs_control - Dummy chip select function * @command: select/deselect the chip * * If no chip select function is provided by client this is used as dummy * chip select */ static void null_cs_control(u32 command) { pr_debug("pl022: dummy chip select control, CS=0x%x\n", command); } /** * giveback - current spi_message is over, schedule next message and call * callback of this message.
Assumes that caller already * set message->status; dma and pio irqs are blocked * @pl022: SSP driver private data structure */ static void giveback(struct pl022 *pl022) { struct spi_transfer *last_transfer; unsigned long flags; struct spi_message *msg; void (*curr_cs_control) (u32 command); /* * This local reference to the chip select function * is needed because we set curr_chip to NULL * as a step toward termininating the message. */ curr_cs_control = pl022->cur_chip->cs_control; spin_lock_irqsave(&pl022->queue_lock, flags); msg = pl022->cur_msg; pl022->cur_msg = NULL; pl022->cur_transfer = NULL; pl022->cur_chip = NULL; queue_work(pl022->workqueue, &pl022->pump_messages); spin_unlock_irqrestore(&pl022->queue_lock, flags); last_transfer = list_entry(msg->transfers.prev, struct spi_transfer, transfer_list); /* Delay if requested before any change in chip select */ if (last_transfer->delay_usecs) /* * FIXME: This runs in interrupt context. * Is this really smart? */ udelay(last_transfer->delay_usecs); /* * Drop chip select UNLESS cs_change is true or we are returning * a message with an error, or next message is for another chip */ if (!last_transfer->cs_change) curr_cs_control(SSP_CHIP_DESELECT); else { struct spi_message *next_msg; /* Holding of cs was hinted, but we need to make sure * the next message is for the same chip. Don't waste * time with the following tests unless this was hinted. * * We cannot postpone this until pump_messages, because * after calling msg->complete (below) the driver that * sent the current message could be unloaded, which * could invalidate the cs_control() callback... */ /* get a pointer to the next message, if any */ spin_lock_irqsave(&pl022->queue_lock, flags); if (list_empty(&pl022->queue)) next_msg = NULL; else next_msg = list_entry(pl022->queue.next, struct spi_message, queue); spin_unlock_irqrestore(&pl022->queue_lock, flags); /* see if the next and current messages point * to the same chip */ if (next_msg && next_msg->spi != msg->spi) next_msg = NULL; if (!next_msg || msg->state == STATE_ERROR) curr_cs_control(SSP_CHIP_DESELECT); } msg->state = NULL; if (msg->complete) msg->complete(msg->context); /* This message is completed, so let's turn off the clocks! 
*/ clk_disable(pl022->clk); amba_pclk_disable(pl022->adev); } /** * flush - flush the FIFO to reach a clean state * @pl022: SSP driver private data structure */ static int flush(struct pl022 *pl022) { unsigned long limit = loops_per_jiffy << 1; dev_dbg(&pl022->adev->dev, "flush\n"); do { while (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) readw(SSP_DR(pl022->virtbase)); } while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_BSY) && limit--); pl022->exp_fifo_level = 0; return limit; } /** * restore_state - Load configuration of current chip * @pl022: SSP driver private data structure */ static void restore_state(struct pl022 *pl022) { struct chip_data *chip = pl022->cur_chip; if (pl022->vendor->extended_cr) writel(chip->cr0, SSP_CR0(pl022->virtbase)); else writew(chip->cr0, SSP_CR0(pl022->virtbase)); writew(chip->cr1, SSP_CR1(pl022->virtbase)); writew(chip->dmacr, SSP_DMACR(pl022->virtbase)); writew(chip->cpsr, SSP_CPSR(pl022->virtbase)); writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); } /* * Default SSP Register Values */ #define DEFAULT_SSP_REG_CR0 ( \ GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \ GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 4) | \ GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \ ) /* ST versions have slightly different bit layout */ #define DEFAULT_SSP_REG_CR0_ST ( \ GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \ GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP_ST, 5) | \ GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \ GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS_ST, 16) | \ GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF_ST, 21) \ ) /* The PL023 version is slightly different again */ #define DEFAULT_SSP_REG_CR0_ST_PL023 ( \ GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS_ST, 0) | \ GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \ GEN_MASK_BITS(SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) \ ) #define DEFAULT_SSP_REG_CR1 ( \ GEN_MASK_BITS(LOOPBACK_DISABLED, SSP_CR1_MASK_LBM, 0) | \ GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \ GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \ GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) \ ) /* ST versions extend this register to use all 16 bits */ #define DEFAULT_SSP_REG_CR1_ST ( \ DEFAULT_SSP_REG_CR1 | \ GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \ GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \ GEN_MASK_BITS(SSP_MWIRE_WAIT_ZERO, SSP_CR1_MASK_MWAIT_ST, 6) |\ GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \ GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) \ ) /* * The PL023 variant has further differences: no loopback mode, no microwire * support, and a new clock feedback delay setting. 
*/ #define DEFAULT_SSP_REG_CR1_ST_PL023 ( \ GEN_MASK_BITS(SSP_DISABLED, SSP_CR1_MASK_SSE, 1) | \ GEN_MASK_BITS(SSP_MASTER, SSP_CR1_MASK_MS, 2) | \ GEN_MASK_BITS(DO_NOT_DRIVE_TX, SSP_CR1_MASK_SOD, 3) | \ GEN_MASK_BITS(SSP_RX_MSB, SSP_CR1_MASK_RENDN_ST, 4) | \ GEN_MASK_BITS(SSP_TX_MSB, SSP_CR1_MASK_TENDN_ST, 5) | \ GEN_MASK_BITS(SSP_RX_1_OR_MORE_ELEM, SSP_CR1_MASK_RXIFLSEL_ST, 7) | \ GEN_MASK_BITS(SSP_TX_1_OR_MORE_EMPTY_LOC, SSP_CR1_MASK_TXIFLSEL_ST, 10) | \ GEN_MASK_BITS(SSP_FEEDBACK_CLK_DELAY_NONE, SSP_CR1_MASK_FBCLKDEL_ST, 13) \ ) #define DEFAULT_SSP_REG_CPSR ( \ GEN_MASK_BITS(SSP_DEFAULT_PRESCALE, SSP_CPSR_MASK_CPSDVSR, 0) \ ) #define DEFAULT_SSP_REG_DMACR (\ GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0) | \ GEN_MASK_BITS(SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1) \ ) /** * load_ssp_default_config - Load default configuration for SSP * @pl022: SSP driver private data structure */ static void load_ssp_default_config(struct pl022 *pl022) { if (pl022->vendor->pl023) { writel(DEFAULT_SSP_REG_CR0_ST_PL023, SSP_CR0(pl022->virtbase)); writew(DEFAULT_SSP_REG_CR1_ST_PL023, SSP_CR1(pl022->virtbase)); } else if (pl022->vendor->extended_cr) { writel(DEFAULT_SSP_REG_CR0_ST, SSP_CR0(pl022->virtbase)); writew(DEFAULT_SSP_REG_CR1_ST, SSP_CR1(pl022->virtbase)); } else { writew(DEFAULT_SSP_REG_CR0, SSP_CR0(pl022->virtbase)); writew(DEFAULT_SSP_REG_CR1, SSP_CR1(pl022->virtbase)); } writew(DEFAULT_SSP_REG_DMACR, SSP_DMACR(pl022->virtbase)); writew(DEFAULT_SSP_REG_CPSR, SSP_CPSR(pl022->virtbase)); writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); } /** * This will write to TX and read from RX according to the parameters * set in pl022. */ static void readwriter(struct pl022 *pl022) { /* * The FIFO depth is different inbetween primecell variants. * I believe filling in too much in the FIFO might cause * errons in 8bit wide transfers on ARM variants (just 8 words * FIFO, means only 8x8 = 64 bits in FIFO) at least. * * To prevent this issue, the TX FIFO is only filled to the * unused RX FIFO fill length, regardless of what the TX * FIFO status flag indicates. */ dev_dbg(&pl022->adev->dev, "%s, rx: %p, rxend: %p, tx: %p, txend: %p\n", __func__, pl022->rx, pl022->rx_end, pl022->tx, pl022->tx_end); /* Read as much as you can */ while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) && (pl022->rx < pl022->rx_end)) { switch (pl022->read) { case READING_NULL: readw(SSP_DR(pl022->virtbase)); break; case READING_U8: *(u8 *) (pl022->rx) = readw(SSP_DR(pl022->virtbase)) & 0xFFU; break; case READING_U16: *(u16 *) (pl022->rx) = (u16) readw(SSP_DR(pl022->virtbase)); break; case READING_U32: *(u32 *) (pl022->rx) = readl(SSP_DR(pl022->virtbase)); break; } pl022->rx += (pl022->cur_chip->n_bytes); pl022->exp_fifo_level--; } /* * Write as much as possible up to the RX FIFO size */ while ((pl022->exp_fifo_level < pl022->vendor->fifodepth) && (pl022->tx < pl022->tx_end)) { switch (pl022->write) { case WRITING_NULL: writew(0x0, SSP_DR(pl022->virtbase)); break; case WRITING_U8: writew(*(u8 *) (pl022->tx), SSP_DR(pl022->virtbase)); break; case WRITING_U16: writew((*(u16 *) (pl022->tx)), SSP_DR(pl022->virtbase)); break; case WRITING_U32: writel(*(u32 *) (pl022->tx), SSP_DR(pl022->virtbase)); break; } pl022->tx += (pl022->cur_chip->n_bytes); pl022->exp_fifo_level++; /* * This inner reader takes care of things appearing in the RX * FIFO as we're transmitting. 
This will happen a lot since the * clock starts running when you put things into the TX FIFO, * and then things are continously clocked into the RX FIFO. */ while ((readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RNE) && (pl022->rx < pl022->rx_end)) { switch (pl022->read) { case READING_NULL: readw(SSP_DR(pl022->virtbase)); break; case READING_U8: *(u8 *) (pl022->rx) = readw(SSP_DR(pl022->virtbase)) & 0xFFU; break; case READING_U16: *(u16 *) (pl022->rx) = (u16) readw(SSP_DR(pl022->virtbase)); break; case READING_U32: *(u32 *) (pl022->rx) = readl(SSP_DR(pl022->virtbase)); break; } pl022->rx += (pl022->cur_chip->n_bytes); pl022->exp_fifo_level--; } } /* * When we exit here the TX FIFO should be full and the RX FIFO * should be empty */ } /** * next_transfer - Move to the Next transfer in the current spi message * @pl022: SSP driver private data structure * * This function moves though the linked list of spi transfers in the * current spi message and returns with the state of current spi * message i.e whether its last transfer is done(STATE_DONE) or * Next transfer is ready(STATE_RUNNING) */ static void *next_transfer(struct pl022 *pl022) { struct spi_message *msg = pl022->cur_msg; struct spi_transfer *trans = pl022->cur_transfer; /* Move to next transfer */ if (trans->transfer_list.next != &msg->transfers) { pl022->cur_transfer = list_entry(trans->transfer_list.next, struct spi_transfer, transfer_list); return STATE_RUNNING; } return STATE_DONE; } /* * This DMA functionality is only compiled in if we have * access to the generic DMA devices/DMA engine. */ #ifdef CONFIG_DMA_ENGINE static void unmap_free_dma_scatter(struct pl022 *pl022) { /* Unmap and free the SG tables */ dma_unmap_sg(&pl022->adev->dev, pl022->sgt_tx.sgl, pl022->sgt_tx.nents, DMA_TO_DEVICE); dma_unmap_sg(&pl022->adev->dev, pl022->sgt_rx.sgl, pl022->sgt_rx.nents, DMA_FROM_DEVICE); sg_free_table(&pl022->sgt_rx); sg_free_table(&pl022->sgt_tx); } static void dma_callback(void *data) { struct pl022 *pl022 = data; struct spi_message *msg = pl022->cur_msg; BUG_ON(!pl022->sgt_rx.sgl); #ifdef VERBOSE_DEBUG /* * Optionally dump out buffers to inspect contents, this is * good if you want to convince yourself that the loopback * read/write contents are the same, when adopting to a new * DMA engine. 
*/ { struct scatterlist *sg; unsigned int i; dma_sync_sg_for_cpu(&pl022->adev->dev, pl022->sgt_rx.sgl, pl022->sgt_rx.nents, DMA_FROM_DEVICE); for_each_sg(pl022->sgt_rx.sgl, sg, pl022->sgt_rx.nents, i) { dev_dbg(&pl022->adev->dev, "SPI RX SG ENTRY: %d", i); print_hex_dump(KERN_ERR, "SPI RX: ", DUMP_PREFIX_OFFSET, 16, 1, sg_virt(sg), sg_dma_len(sg), 1); } for_each_sg(pl022->sgt_tx.sgl, sg, pl022->sgt_tx.nents, i) { dev_dbg(&pl022->adev->dev, "SPI TX SG ENTRY: %d", i); print_hex_dump(KERN_ERR, "SPI TX: ", DUMP_PREFIX_OFFSET, 16, 1, sg_virt(sg), sg_dma_len(sg), 1); } } #endif unmap_free_dma_scatter(pl022); /* Update total bytes transfered */ msg->actual_length += pl022->cur_transfer->len; if (pl022->cur_transfer->cs_change) pl022->cur_chip-> cs_control(SSP_CHIP_DESELECT); /* Move to next transfer */ msg->state = next_transfer(pl022); tasklet_schedule(&pl022->pump_transfers); } static void setup_dma_scatter(struct pl022 *pl022, void *buffer, unsigned int length, struct sg_table *sgtab) { struct scatterlist *sg; int bytesleft = length; void *bufp = buffer; int mapbytes; int i; if (buffer) { for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { /* * If there are less bytes left than what fits * in the current page (plus page alignment offset) * we just feed in this, else we stuff in as much * as we can. */ if (bytesleft < (PAGE_SIZE - offset_in_page(bufp))) mapbytes = bytesleft; else mapbytes = PAGE_SIZE - offset_in_page(bufp); sg_set_page(sg, virt_to_page(bufp), mapbytes, offset_in_page(bufp)); bufp += mapbytes; bytesleft -= mapbytes; dev_dbg(&pl022->adev->dev, "set RX/TX target page @ %p, %d bytes, %d left\n", bufp, mapbytes, bytesleft); } } else { /* Map the dummy buffer on every page */ for_each_sg(sgtab->sgl, sg, sgtab->nents, i) { if (bytesleft < PAGE_SIZE) mapbytes = bytesleft; else mapbytes = PAGE_SIZE; sg_set_page(sg, virt_to_page(pl022->dummypage), mapbytes, 0); bytesleft -= mapbytes; dev_dbg(&pl022->adev->dev, "set RX/TX to dummy page %d bytes, %d left\n", mapbytes, bytesleft); } } BUG_ON(bytesleft); } /** * configure_dma - configures the channels for the next transfer * @pl022: SSP driver's private data structure */ static int configure_dma(struct pl022 *pl022) { struct dma_slave_config rx_conf = { .src_addr = SSP_DR(pl022->phybase), .direction = DMA_FROM_DEVICE, .src_maxburst = pl022->vendor->fifodepth >> 1, }; struct dma_slave_config tx_conf = { .dst_addr = SSP_DR(pl022->phybase), .direction = DMA_TO_DEVICE, .dst_maxburst = pl022->vendor->fifodepth >> 1, }; unsigned int pages; int ret; int sglen; struct dma_chan *rxchan = pl022->dma_rx_channel; struct dma_chan *txchan = pl022->dma_tx_channel; struct dma_async_tx_descriptor *rxdesc; struct dma_async_tx_descriptor *txdesc; dma_cookie_t cookie; /* Check that the channels are available */ if (!rxchan || !txchan) return -ENODEV; switch (pl022->read) { case READING_NULL: /* Use the same as for writing */ rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; break; case READING_U8: rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; break; case READING_U16: rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; break; case READING_U32: rx_conf.src_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES; break; } switch (pl022->write) { case WRITING_NULL: /* Use the same as for reading */ tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_UNDEFINED; break; case WRITING_U8: tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; break; case WRITING_U16: tx_conf.dst_addr_width = DMA_SLAVE_BUSWIDTH_2_BYTES; break; case WRITING_U32: tx_conf.dst_addr_width = 
DMA_SLAVE_BUSWIDTH_4_BYTES; break; } /* SPI peculiarity: we need to read and write the same width */ if (rx_conf.src_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) rx_conf.src_addr_width = tx_conf.dst_addr_width; if (tx_conf.dst_addr_width == DMA_SLAVE_BUSWIDTH_UNDEFINED) tx_conf.dst_addr_width = rx_conf.src_addr_width; BUG_ON(rx_conf.src_addr_width != tx_conf.dst_addr_width); rxchan->device->device_control(rxchan, DMA_SLAVE_CONFIG, (unsigned long) &rx_conf); txchan->device->device_control(txchan, DMA_SLAVE_CONFIG, (unsigned long) &tx_conf); /* Create sglists for the transfers */ pages = (pl022->cur_transfer->len >> PAGE_SHIFT) + 1; dev_dbg(&pl022->adev->dev, "using %d pages for transfer\n", pages); ret = sg_alloc_table(&pl022->sgt_rx, pages, GFP_KERNEL); if (ret) goto err_alloc_rx_sg; ret = sg_alloc_table(&pl022->sgt_tx, pages, GFP_KERNEL); if (ret) goto err_alloc_tx_sg; /* Fill in the scatterlists for the RX+TX buffers */ setup_dma_scatter(pl022, pl022->rx, pl022->cur_transfer->len, &pl022->sgt_rx); setup_dma_scatter(pl022, pl022->tx, pl022->cur_transfer->len, &pl022->sgt_tx); /* Map DMA buffers */ sglen = dma_map_sg(&pl022->adev->dev, pl022->sgt_rx.sgl, pl022->sgt_rx.nents, DMA_FROM_DEVICE); if (!sglen) goto err_rx_sgmap; sglen = dma_map_sg(&pl022->adev->dev, pl022->sgt_tx.sgl, pl022->sgt_tx.nents, DMA_TO_DEVICE); if (!sglen) goto err_tx_sgmap; /* Send both scatterlists */ rxdesc = rxchan->device->device_prep_slave_sg(rxchan, pl022->sgt_rx.sgl, pl022->sgt_rx.nents, DMA_FROM_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!rxdesc) goto err_rxdesc; txdesc = txchan->device->device_prep_slave_sg(txchan, pl022->sgt_tx.sgl, pl022->sgt_tx.nents, DMA_TO_DEVICE, DMA_PREP_INTERRUPT | DMA_CTRL_ACK); if (!txdesc) goto err_txdesc; /* Put the callback on the RX transfer only, that should finish last */ rxdesc->callback = dma_callback; rxdesc->callback_param = pl022; /* Submit and fire RX and TX with TX last so we're ready to read! */ cookie = rxdesc->tx_submit(rxdesc); if (dma_submit_error(cookie)) goto err_submit_rx; cookie = txdesc->tx_submit(txdesc); if (dma_submit_error(cookie)) goto err_submit_tx; rxchan->device->device_issue_pending(rxchan); txchan->device->device_issue_pending(txchan); return 0; err_submit_tx: err_submit_rx: err_txdesc: txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0); err_rxdesc: rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0); dma_unmap_sg(&pl022->adev->dev, pl022->sgt_tx.sgl, pl022->sgt_tx.nents, DMA_TO_DEVICE); err_tx_sgmap: dma_unmap_sg(&pl022->adev->dev, pl022->sgt_rx.sgl, pl022->sgt_rx.nents, DMA_FROM_DEVICE); err_rx_sgmap: sg_free_table(&pl022->sgt_tx); err_alloc_tx_sg: sg_free_table(&pl022->sgt_rx); err_alloc_rx_sg: return -ENOMEM; } static int __init pl022_dma_probe(struct pl022 *pl022) { dma_cap_mask_t mask; /* Try to acquire a generic DMA engine slave channel */ dma_cap_zero(mask); dma_cap_set(DMA_SLAVE, mask); /* * We need both RX and TX channels to do DMA, else do none * of them.
*/ pl022->dma_rx_channel = dma_request_channel(mask, pl022->master_info->dma_filter, pl022->master_info->dma_rx_param); if (!pl022->dma_rx_channel) { dev_err(&pl022->adev->dev, "no RX DMA channel!\n"); goto err_no_rxchan; } pl022->dma_tx_channel = dma_request_channel(mask, pl022->master_info->dma_filter, pl022->master_info->dma_tx_param); if (!pl022->dma_tx_channel) { dev_err(&pl022->adev->dev, "no TX DMA channel!\n"); goto err_no_txchan; } pl022->dummypage = kmalloc(PAGE_SIZE, GFP_KERNEL); if (!pl022->dummypage) { dev_err(&pl022->adev->dev, "no DMA dummypage!\n"); goto err_no_dummypage; } dev_info(&pl022->adev->dev, "setup for DMA on RX %s, TX %s\n", dma_chan_name(pl022->dma_rx_channel), dma_chan_name(pl022->dma_tx_channel)); return 0; err_no_dummypage: dma_release_channel(pl022->dma_tx_channel); err_no_txchan: dma_release_channel(pl022->dma_rx_channel); pl022->dma_rx_channel = NULL; err_no_rxchan: return -ENODEV; } static void terminate_dma(struct pl022 *pl022) { struct dma_chan *rxchan = pl022->dma_rx_channel; struct dma_chan *txchan = pl022->dma_tx_channel; rxchan->device->device_control(rxchan, DMA_TERMINATE_ALL, 0); txchan->device->device_control(txchan, DMA_TERMINATE_ALL, 0); unmap_free_dma_scatter(pl022); } static void pl022_dma_remove(struct pl022 *pl022) { if (pl022->busy) terminate_dma(pl022); if (pl022->dma_tx_channel) dma_release_channel(pl022->dma_tx_channel); if (pl022->dma_rx_channel) dma_release_channel(pl022->dma_rx_channel); kfree(pl022->dummypage); } #else static inline int configure_dma(struct pl022 *pl022) { return -ENODEV; } static inline int pl022_dma_probe(struct pl022 *pl022) { return 0; } static inline void pl022_dma_remove(struct pl022 *pl022) { } #endif /** * pl022_interrupt_handler - Interrupt handler for SSP controller * * This function handles interrupts generated for an interrupt based transfer. * If a receive overrun (ROR) interrupt is there then we disable SSP, flag the * current message's state as STATE_ERROR and schedule the tasklet * pump_transfers which will do the postprocessing of the current message by * calling giveback(). Otherwise it reads data from RX FIFO till there is no * more data, and writes data in TX FIFO till it is not full. If we complete * the transfer we move to the next transfer and schedule the tasklet. */ static irqreturn_t pl022_interrupt_handler(int irq, void *dev_id) { struct pl022 *pl022 = dev_id; struct spi_message *msg = pl022->cur_msg; u16 irq_status = 0; u16 flag = 0; if (unlikely(!msg)) { dev_err(&pl022->adev->dev, "bad message state in interrupt handler"); /* Never fail */ return IRQ_HANDLED; } /* Read the Interrupt Status Register */ irq_status = readw(SSP_MIS(pl022->virtbase)); if (unlikely(!irq_status)) return IRQ_NONE; /* * This handles the FIFO interrupts, the timeout * interrupts are flatly ignored, they cannot be * trusted. */ if (unlikely(irq_status & SSP_MIS_MASK_RORMIS)) { /* * Overrun interrupt - bail out since our Data has been * corrupted */ dev_err(&pl022->adev->dev, "FIFO overrun\n"); if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_RFF) dev_err(&pl022->adev->dev, "RXFIFO is full\n"); if (readw(SSP_SR(pl022->virtbase)) & SSP_SR_MASK_TNF) dev_err(&pl022->adev->dev, "TXFIFO is full\n"); /* * Disable and clear interrupts, disable SSP, * mark message with bad status so it can be * retried. 
*/ writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); msg->state = STATE_ERROR; /* Schedule message queue handler */ tasklet_schedule(&pl022->pump_transfers); return IRQ_HANDLED; } readwriter(pl022); if ((pl022->tx == pl022->tx_end) && (flag == 0)) { flag = 1; /* Disable Transmit interrupt */ writew(readw(SSP_IMSC(pl022->virtbase)) & (~SSP_IMSC_MASK_TXIM), SSP_IMSC(pl022->virtbase)); } /* * Since all transactions must write as much as shall be read, * we can conclude the entire transaction once RX is complete. * At this point, all TX will always be finished. */ if (pl022->rx >= pl022->rx_end) { writew(DISABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); writew(CLEAR_ALL_INTERRUPTS, SSP_ICR(pl022->virtbase)); if (unlikely(pl022->rx > pl022->rx_end)) { dev_warn(&pl022->adev->dev, "read %u surplus " "bytes (did you request an odd " "number of bytes on a 16bit bus?)\n", (u32) (pl022->rx - pl022->rx_end)); } /* Update total bytes transfered */ msg->actual_length += pl022->cur_transfer->len; if (pl022->cur_transfer->cs_change) pl022->cur_chip-> cs_control(SSP_CHIP_DESELECT); /* Move to next transfer */ msg->state = next_transfer(pl022); tasklet_schedule(&pl022->pump_transfers); return IRQ_HANDLED; } return IRQ_HANDLED; } /** * This sets up the pointers to memory for the next message to * send out on the SPI bus. */ static int set_up_next_transfer(struct pl022 *pl022, struct spi_transfer *transfer) { int residue; /* Sanity check the message for this bus width */ residue = pl022->cur_transfer->len % pl022->cur_chip->n_bytes; if (unlikely(residue != 0)) { dev_err(&pl022->adev->dev, "message of %u bytes to transmit but the current " "chip bus has a data width of %u bytes!\n", pl022->cur_transfer->len, pl022->cur_chip->n_bytes); dev_err(&pl022->adev->dev, "skipping this message\n"); return -EIO; } pl022->tx = (void *)transfer->tx_buf; pl022->tx_end = pl022->tx + pl022->cur_transfer->len; pl022->rx = (void *)transfer->rx_buf; pl022->rx_end = pl022->rx + pl022->cur_transfer->len; pl022->write = pl022->tx ? pl022->cur_chip->write : WRITING_NULL; pl022->read = pl022->rx ? pl022->cur_chip->read : READING_NULL; return 0; } /** * pump_transfers - Tasklet function which schedules next transfer * when running in interrupt or DMA transfer mode. * @data: SSP driver private data structure * */ static void pump_transfers(unsigned long data) { struct pl022 *pl022 = (struct pl022 *) data; struct spi_message *message = NULL; struct spi_transfer *transfer = NULL; struct spi_transfer *previous = NULL; /* Get current state information */ message = pl022->cur_msg; transfer = pl022->cur_transfer; /* Handle for abort */ if (message->state == STATE_ERROR) { message->status = -EIO; giveback(pl022); return; } /* Handle end of message */ if (message->state == STATE_DONE) { message->status = 0; giveback(pl022); return; } /* Delay if requested at end of transfer before CS change */ if (message->state == STATE_RUNNING) { previous = list_entry(transfer->transfer_list.prev, struct spi_transfer, transfer_list); if (previous->delay_usecs) /* * FIXME: This runs in interrupt context. * Is this really smart? 
*/ udelay(previous->delay_usecs); /* Drop chip select only if cs_change is requested */ if (previous->cs_change) pl022->cur_chip->cs_control(SSP_CHIP_SELECT); } else { /* STATE_START */ message->state = STATE_RUNNING; } if (set_up_next_transfer(pl022, transfer)) { message->state = STATE_ERROR; message->status = -EIO; giveback(pl022); return; } /* Flush the FIFOs and let's go! */ flush(pl022); if (pl022->cur_chip->enable_dma) { if (configure_dma(pl022)) { dev_dbg(&pl022->adev->dev, "configuration of DMA failed, fall back to interrupt mode\n"); goto err_config_dma; } return; } err_config_dma: writew(ENABLE_ALL_INTERRUPTS, SSP_IMSC(pl022->virtbase)); } static void do_interrupt_dma_transfer(struct pl022 *pl022) { u32 irqflags = ENABLE_ALL_INTERRUPTS; /* Enable target chip */ pl022->cur_chip->cs_control(SSP_CHIP_SELECT); if (set_up_next_transfer(pl022, pl022->cur_transfer)) { /* Error path */ pl022->cur_msg->state = STATE_ERROR; pl022->cur_msg->status = -EIO; giveback(pl022); return; } /* If we're using DMA, set up DMA here */ if (pl022->cur_chip->enable_dma) { /* Configure DMA transfer */ if (configure_dma(pl022)) { dev_dbg(&pl022->adev->dev, "configuration of DMA failed, fall back to interrupt mode\n"); goto err_config_dma; } /* Disable interrupts in DMA mode, IRQ from DMA controller */ irqflags = DISABLE_ALL_INTERRUPTS; } err_config_dma: /* Enable SSP, turn on interrupts */ writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), SSP_CR1(pl022->virtbase)); writew(irqflags, SSP_IMSC(pl022->virtbase)); } static void do_polling_transfer(struct pl022 *pl022) { struct spi_message *message = NULL; struct spi_transfer *transfer = NULL; struct spi_transfer *previous = NULL; struct chip_data *chip; chip = pl022->cur_chip; message = pl022->cur_msg; while (message->state != STATE_DONE) { /* Handle for abort */ if (message->state == STATE_ERROR) break; transfer = pl022->cur_transfer; /* Delay if requested at end of transfer */ if (message->state == STATE_RUNNING) { previous = list_entry(transfer->transfer_list.prev, struct spi_transfer, transfer_list); if (previous->delay_usecs) udelay(previous->delay_usecs); if (previous->cs_change) pl022->cur_chip->cs_control(SSP_CHIP_SELECT); } else { /* STATE_START */ message->state = STATE_RUNNING; pl022->cur_chip->cs_control(SSP_CHIP_SELECT); } /* Configuration Changing Per Transfer */ if (set_up_next_transfer(pl022, transfer)) { /* Error path */ message->state = STATE_ERROR; break; } /* Flush FIFOs and enable SSP */ flush(pl022); writew((readw(SSP_CR1(pl022->virtbase)) | SSP_CR1_MASK_SSE), SSP_CR1(pl022->virtbase)); dev_dbg(&pl022->adev->dev, "polling transfer ongoing ...\n"); /* FIXME: insert a timeout so we don't hang here indefinately */ while (pl022->tx < pl022->tx_end || pl022->rx < pl022->rx_end) readwriter(pl022); /* Update total byte transfered */ message->actual_length += pl022->cur_transfer->len; if (pl022->cur_transfer->cs_change) pl022->cur_chip->cs_control(SSP_CHIP_DESELECT); /* Move to next transfer */ message->state = next_transfer(pl022); } /* Handle end of message */ if (message->state == STATE_DONE) message->status = 0; else message->status = -EIO; giveback(pl022); return; } /** * pump_messages - Workqueue function which processes spi message queue * @data: pointer to private data of SSP driver * * This function checks if there is any spi message in the queue that * needs processing and delegate control to appropriate function * do_polling_transfer()/do_interrupt_dma_transfer() * based on the kind of the transfer * */ static void 
pump_messages(struct work_struct *work) { struct pl022 *pl022 = container_of(work, struct pl022, pump_messages); unsigned long flags; /* Lock queue and check for queue work */ spin_lock_irqsave(&pl022->queue_lock, flags); if (list_empty(&pl022->queue) || pl022->run == QUEUE_STOPPED) { pl022->busy = 0; spin_unlock_irqrestore(&pl022->queue_lock, flags); return; } /* Make sure we are not already running a message */ if (pl022->cur_msg) { spin_unlock_irqrestore(&pl022->queue_lock, flags); return; } /* Extract head of queue */ pl022->cur_msg = list_entry(pl022->queue.next, struct spi_message, queue); list_del_init(&pl022->cur_msg->queue); pl022->busy = 1; spin_unlock_irqrestore(&pl022->queue_lock, flags); /* Initial message state */ pl022->cur_msg->state = STATE_START; pl022->cur_transfer = list_entry(pl022->cur_msg->transfers.next, struct spi_transfer, transfer_list); /* Setup the SPI using the per chip configuration */ pl022->cur_chip = spi_get_ctldata(pl022->cur_msg->spi); /* * We enable the clocks here, then the clocks will be disabled when * giveback() is called in each method (poll/interrupt/DMA) */ amba_pclk_enable(pl022->adev); clk_enable(pl022->clk); restore_state(pl022); flush(pl022); if (pl022->cur_chip->xfer_type == POLLING_TRANSFER) do_polling_transfer(pl022); else do_interrupt_dma_transfer(pl022); } static int __init init_queue(struct pl022 *pl022) { INIT_LIST_HEAD(&pl022->queue); spin_lock_init(&pl022->queue_lock); pl022->run = QUEUE_STOPPED; pl022->busy = 0; tasklet_init(&pl022->pump_transfers, pump_transfers, (unsigned long)pl022); INIT_WORK(&pl022->pump_messages, pump_messages); pl022->workqueue = create_singlethread_workqueue( dev_name(pl022->master->dev.parent)); if (pl022->workqueue == NULL) return -EBUSY; return 0; } static int start_queue(struct pl022 *pl022) { unsigned long flags; spin_lock_irqsave(&pl022->queue_lock, flags); if (pl022->run == QUEUE_RUNNING || pl022->busy) { spin_unlock_irqrestore(&pl022->queue_lock, flags); return -EBUSY; } pl022->run = QUEUE_RUNNING; pl022->cur_msg = NULL; pl022->cur_transfer = NULL; pl022->cur_chip = NULL; spin_unlock_irqrestore(&pl022->queue_lock, flags); queue_work(pl022->workqueue, &pl022->pump_messages); return 0; } static int stop_queue(struct pl022 *pl022) { unsigned long flags; unsigned limit = 500; int status = 0; spin_lock_irqsave(&pl022->queue_lock, flags); /* This is a bit lame, but is optimized for the common execution path. * A wait_queue on the pl022->busy could be used, but then the common * execution path (pump_messages) would be required to call wake_up or * friends on every SPI message. Do this instead */ while (!list_empty(&pl022->queue) && pl022->busy && limit--) { spin_unlock_irqrestore(&pl022->queue_lock, flags); msleep(10); spin_lock_irqsave(&pl022->queue_lock, flags); } if (!list_empty(&pl022->queue) || pl022->busy) status = -EBUSY; else pl022->run = QUEUE_STOPPED; spin_unlock_irqrestore(&pl022->queue_lock, flags); return status; } static int destroy_queue(struct pl022 *pl022) { int status; status = stop_queue(pl022); /* we are unloading the module or failing to load (only two calls * to this routine), and neither call can handle a return value. * However, destroy_workqueue calls flush_workqueue, and that will * block until all work is done. If the reason that stop_queue * timed out is that the work will never finish, then it does no * good to call destroy_workqueue, so return anyway. 
*/ if (status != 0) return status; destroy_workqueue(pl022->workqueue); return 0; } static int verify_controller_parameters(struct pl022 *pl022, struct pl022_config_chip const *chip_info) { if ((chip_info->iface < SSP_INTERFACE_MOTOROLA_SPI) || (chip_info->iface > SSP_INTERFACE_UNIDIRECTIONAL)) { dev_err(&pl022->adev->dev, "interface is configured incorrectly\n"); return -EINVAL; } if ((chip_info->iface == SSP_INTERFACE_UNIDIRECTIONAL) && (!pl022->vendor->unidir)) { dev_err(&pl022->adev->dev, "unidirectional mode not supported in this " "hardware version\n"); return -EINVAL; } if ((chip_info->hierarchy != SSP_MASTER) && (chip_info->hierarchy != SSP_SLAVE)) { dev_err(&pl022->adev->dev, "hierarchy is configured incorrectly\n"); return -EINVAL; } if ((chip_info->com_mode != INTERRUPT_TRANSFER) && (chip_info->com_mode != DMA_TRANSFER) && (chip_info->com_mode != POLLING_TRANSFER)) { dev_err(&pl022->adev->dev, "Communication mode is configured incorrectly\n"); return -EINVAL; } if ((chip_info->rx_lev_trig < SSP_RX_1_OR_MORE_ELEM) || (chip_info->rx_lev_trig > SSP_RX_32_OR_MORE_ELEM)) { dev_err(&pl022->adev->dev, "RX FIFO Trigger Level is configured incorrectly\n"); return -EINVAL; } if ((chip_info->tx_lev_trig < SSP_TX_1_OR_MORE_EMPTY_LOC) || (chip_info->tx_lev_trig > SSP_TX_32_OR_MORE_EMPTY_LOC)) { dev_err(&pl022->adev->dev, "TX FIFO Trigger Level is configured incorrectly\n"); return -EINVAL; } if (chip_info->iface == SSP_INTERFACE_NATIONAL_MICROWIRE) { if ((chip_info->ctrl_len < SSP_BITS_4) || (chip_info->ctrl_len > SSP_BITS_32)) { dev_err(&pl022->adev->dev, "CTRL LEN is configured incorrectly\n"); return -EINVAL; } if ((chip_info->wait_state != SSP_MWIRE_WAIT_ZERO) && (chip_info->wait_state != SSP_MWIRE_WAIT_ONE)) { dev_err(&pl022->adev->dev, "Wait State is configured incorrectly\n"); return -EINVAL; } /* Half duplex is only available in the ST Micro version */ if (pl022->vendor->extended_cr) { if ((chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) && (chip_info->duplex != SSP_MICROWIRE_CHANNEL_HALF_DUPLEX)) { dev_err(&pl022->adev->dev, "Microwire duplex mode is configured incorrectly\n"); return -EINVAL; } } else { if (chip_info->duplex != SSP_MICROWIRE_CHANNEL_FULL_DUPLEX) { dev_err(&pl022->adev->dev, "Microwire half duplex mode requested," " but this is only available in the" " ST version of PL022\n"); return -EINVAL; } } } return 0; } /** * pl022_transfer - transfer function registered to SPI master framework * @spi: spi device which is requesting transfer * @msg: spi message which is to be handled and queued to the driver queue * * This function is registered to the SPI framework for this SPI master * controller. It will queue the spi_message in the driver's queue if * the queue is not stopped and return.
*/ static int pl022_transfer(struct spi_device *spi, struct spi_message *msg) { struct pl022 *pl022 = spi_master_get_devdata(spi->master); unsigned long flags; spin_lock_irqsave(&pl022->queue_lock, flags); if (pl022->run == QUEUE_STOPPED) { spin_unlock_irqrestore(&pl022->queue_lock, flags); return -ESHUTDOWN; } msg->actual_length = 0; msg->status = -EINPROGRESS; msg->state = STATE_START; list_add_tail(&msg->queue, &pl022->queue); if (pl022->run == QUEUE_RUNNING && !pl022->busy) queue_work(pl022->workqueue, &pl022->pump_messages); spin_unlock_irqrestore(&pl022->queue_lock, flags); return 0; } static int calculate_effective_freq(struct pl022 *pl022, int freq, struct ssp_clock_params *clk_freq) { /* Lets calculate the frequency parameters */ u16 cpsdvsr = 2; u16 scr = 0; bool freq_found = false; u32 rate; u32 max_tclk; u32 min_tclk; rate = clk_get_rate(pl022->clk); /* cpsdvscr = 2 & scr 0 */ max_tclk = (rate / (CPSDVR_MIN * (1 + SCR_MIN))); /* cpsdvsr = 254 & scr = 255 */ min_tclk = (rate / (CPSDVR_MAX * (1 + SCR_MAX))); if ((freq <= max_tclk) && (freq >= min_tclk)) { while (cpsdvsr <= CPSDVR_MAX && !freq_found) { while (scr <= SCR_MAX && !freq_found) { if ((rate / (cpsdvsr * (1 + scr))) > freq) scr += 1; else { /* * This bool is made true when * effective frequency >= * target frequency is found */ freq_found = true; if ((rate / (cpsdvsr * (1 + scr))) != freq) { if (scr == SCR_MIN) { cpsdvsr -= 2; scr = SCR_MAX; } else scr -= 1; } } } if (!freq_found) { cpsdvsr += 2; scr = SCR_MIN; } } if (cpsdvsr != 0) { dev_dbg(&pl022->adev->dev, "SSP Effective Frequency is %u\n", (rate / (cpsdvsr * (1 + scr)))); clk_freq->cpsdvsr = (u8) (cpsdvsr & 0xFF); clk_freq->scr = (u8) (scr & 0xFF); dev_dbg(&pl022->adev->dev, "SSP cpsdvsr = %d, scr = %d\n", clk_freq->cpsdvsr, clk_freq->scr); } } else { dev_err(&pl022->adev->dev, "controller data is incorrect: out of range frequency"); return -EINVAL; } return 0; } /* * A piece of default chip info unless the platform * supplies it. */ static const struct pl022_config_chip pl022_default_chip_info = { .com_mode = POLLING_TRANSFER, .iface = SSP_INTERFACE_MOTOROLA_SPI, .hierarchy = SSP_SLAVE, .slave_tx_disable = DO_NOT_DRIVE_TX, .rx_lev_trig = SSP_RX_1_OR_MORE_ELEM, .tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC, .ctrl_len = SSP_BITS_8, .wait_state = SSP_MWIRE_WAIT_ZERO, .duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, .cs_control = null_cs_control, }; /** * pl022_setup - setup function registered to SPI master framework * @spi: spi device which is requesting setup * * This function is registered to the SPI framework for this SPI master * controller. If it is the first time when setup is called by this device, * this function will initialize the runtime state for this chip and save * the same in the device structure. Else it will update the runtime info * with the updated chip info. Nothing is really being written to the * controller hardware here, that is not done until the actual transfer * commence. 
*/ static int pl022_setup(struct spi_device *spi) { struct pl022_config_chip const *chip_info; struct chip_data *chip; struct ssp_clock_params clk_freq; int status = 0; struct pl022 *pl022 = spi_master_get_devdata(spi->master); unsigned int bits = spi->bits_per_word; u32 tmp; if (!spi->max_speed_hz) return -EINVAL; /* Get controller_state if one is supplied */ chip = spi_get_ctldata(spi); if (chip == NULL) { chip = kzalloc(sizeof(struct chip_data), GFP_KERNEL); if (!chip) { dev_err(&spi->dev, "cannot allocate controller state\n"); return -ENOMEM; } dev_dbg(&spi->dev, "allocated memory for controller's runtime state\n"); } /* Get controller data if one is supplied */ chip_info = spi->controller_data; if (chip_info == NULL) { chip_info = &pl022_default_chip_info; /* spi_board_info.controller_data not is supplied */ dev_dbg(&spi->dev, "using default controller_data settings\n"); } else dev_dbg(&spi->dev, "using user supplied controller_data settings\n"); /* * We can override with custom divisors, else we use the board * frequency setting */ if ((0 == chip_info->clk_freq.cpsdvsr) && (0 == chip_info->clk_freq.scr)) { status = calculate_effective_freq(pl022, spi->max_speed_hz, &clk_freq); if (status < 0) goto err_config_params; } else { memcpy(&clk_freq, &chip_info->clk_freq, sizeof(clk_freq)); if ((clk_freq.cpsdvsr % 2) != 0) clk_freq.cpsdvsr = clk_freq.cpsdvsr - 1; } if ((clk_freq.cpsdvsr < CPSDVR_MIN) || (clk_freq.cpsdvsr > CPSDVR_MAX)) { dev_err(&spi->dev, "cpsdvsr is configured incorrectly\n"); goto err_config_params; } status = verify_controller_parameters(pl022, chip_info); if (status) { dev_err(&spi->dev, "controller data is incorrect"); goto err_config_params; } /* Now set controller state based on controller data */ chip->xfer_type = chip_info->com_mode; if (!chip_info->cs_control) { chip->cs_control = null_cs_control; dev_warn(&spi->dev, "chip select function is NULL for this chip\n"); } else chip->cs_control = chip_info->cs_control; if (bits <= 3) { /* PL022 doesn't support less than 4-bits */ status = -ENOTSUPP; goto err_config_params; } else if (bits <= 8) { dev_dbg(&spi->dev, "4 <= n <=8 bits per word\n"); chip->n_bytes = 1; chip->read = READING_U8; chip->write = WRITING_U8; } else if (bits <= 16) { dev_dbg(&spi->dev, "9 <= n <= 16 bits per word\n"); chip->n_bytes = 2; chip->read = READING_U16; chip->write = WRITING_U16; } else { if (pl022->vendor->max_bpw >= 32) { dev_dbg(&spi->dev, "17 <= n <= 32 bits per word\n"); chip->n_bytes = 4; chip->read = READING_U32; chip->write = WRITING_U32; } else { dev_err(&spi->dev, "illegal data size for this controller!\n"); dev_err(&spi->dev, "a standard pl022 can only handle " "1 <= n <= 16 bit words\n"); status = -ENOTSUPP; goto err_config_params; } } /* Now Initialize all register settings required for this chip */ chip->cr0 = 0; chip->cr1 = 0; chip->dmacr = 0; chip->cpsr = 0; if ((chip_info->com_mode == DMA_TRANSFER) && ((pl022->master_info)->enable_dma)) { chip->enable_dma = true; dev_dbg(&spi->dev, "DMA mode set in controller state\n"); if (status < 0) goto err_config_params; SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, SSP_DMACR_MASK_RXDMAE, 0); SSP_WRITE_BITS(chip->dmacr, SSP_DMA_ENABLED, SSP_DMACR_MASK_TXDMAE, 1); } else { chip->enable_dma = false; dev_dbg(&spi->dev, "DMA mode NOT set in controller state\n"); SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, SSP_DMACR_MASK_RXDMAE, 0); SSP_WRITE_BITS(chip->dmacr, SSP_DMA_DISABLED, SSP_DMACR_MASK_TXDMAE, 1); } chip->cpsr = clk_freq.cpsdvsr; /* Special setup for the ST micro extended control 
registers */ if (pl022->vendor->extended_cr) { u32 etx; if (pl022->vendor->pl023) { /* These bits are only in the PL023 */ SSP_WRITE_BITS(chip->cr1, chip_info->clkdelay, SSP_CR1_MASK_FBCLKDEL_ST, 13); } else { /* These bits are in the PL022 but not PL023 */ SSP_WRITE_BITS(chip->cr0, chip_info->duplex, SSP_CR0_MASK_HALFDUP_ST, 5); SSP_WRITE_BITS(chip->cr0, chip_info->ctrl_len, SSP_CR0_MASK_CSS_ST, 16); SSP_WRITE_BITS(chip->cr0, chip_info->iface, SSP_CR0_MASK_FRF_ST, 21); SSP_WRITE_BITS(chip->cr1, chip_info->wait_state, SSP_CR1_MASK_MWAIT_ST, 6); } SSP_WRITE_BITS(chip->cr0, bits - 1, SSP_CR0_MASK_DSS_ST, 0); if (spi->mode & SPI_LSB_FIRST) { tmp = SSP_RX_LSB; etx = SSP_TX_LSB; } else { tmp = SSP_RX_MSB; etx = SSP_TX_MSB; } SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_RENDN_ST, 4); SSP_WRITE_BITS(chip->cr1, etx, SSP_CR1_MASK_TENDN_ST, 5); SSP_WRITE_BITS(chip->cr1, chip_info->rx_lev_trig, SSP_CR1_MASK_RXIFLSEL_ST, 7); SSP_WRITE_BITS(chip->cr1, chip_info->tx_lev_trig, SSP_CR1_MASK_TXIFLSEL_ST, 10); } else { SSP_WRITE_BITS(chip->cr0, bits - 1, SSP_CR0_MASK_DSS, 0); SSP_WRITE_BITS(chip->cr0, chip_info->iface, SSP_CR0_MASK_FRF, 4); } /* Stuff that is common for all versions */ if (spi->mode & SPI_CPOL) tmp = SSP_CLK_POL_IDLE_HIGH; else tmp = SSP_CLK_POL_IDLE_LOW; SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPO, 6); if (spi->mode & SPI_CPHA) tmp = SSP_CLK_SECOND_EDGE; else tmp = SSP_CLK_FIRST_EDGE; SSP_WRITE_BITS(chip->cr0, tmp, SSP_CR0_MASK_SPH, 7); SSP_WRITE_BITS(chip->cr0, clk_freq.scr, SSP_CR0_MASK_SCR, 8); /* Loopback is available on all versions except PL023 */ if (!pl022->vendor->pl023) { if (spi->mode & SPI_LOOP) tmp = LOOPBACK_ENABLED; else tmp = LOOPBACK_DISABLED; SSP_WRITE_BITS(chip->cr1, tmp, SSP_CR1_MASK_LBM, 0); } SSP_WRITE_BITS(chip->cr1, SSP_DISABLED, SSP_CR1_MASK_SSE, 1); SSP_WRITE_BITS(chip->cr1, chip_info->hierarchy, SSP_CR1_MASK_MS, 2); SSP_WRITE_BITS(chip->cr1, chip_info->slave_tx_disable, SSP_CR1_MASK_SOD, 3); /* Save controller_state */ spi_set_ctldata(spi, chip); return status; err_config_params: spi_set_ctldata(spi, NULL); kfree(chip); return status; } /** * pl022_cleanup - cleanup function registered to SPI master framework * @spi: spi device which is requesting cleanup * * This function is registered to the SPI framework for this SPI master * controller. It will free the runtime state of chip. 
*/ static void pl022_cleanup(struct spi_device *spi) { struct chip_data *chip = spi_get_ctldata(spi); spi_set_ctldata(spi, NULL); kfree(chip); } static int __devinit pl022_probe(struct amba_device *adev, struct amba_id *id) { struct device *dev = &adev->dev; struct pl022_ssp_controller *platform_info = adev->dev.platform_data; struct spi_master *master; struct pl022 *pl022 = NULL; /* Data for this driver */ int status = 0; dev_info(&adev->dev, "ARM PL022 driver, device ID: 0x%08x\n", adev->periphid); if (platform_info == NULL) { dev_err(&adev->dev, "probe - no platform data supplied\n"); status = -ENODEV; goto err_no_pdata; } /* Allocate master with space for data */ master = spi_alloc_master(dev, sizeof(struct pl022)); if (master == NULL) { dev_err(&adev->dev, "probe - cannot alloc SPI master\n"); status = -ENOMEM; goto err_no_master; } pl022 = spi_master_get_devdata(master); pl022->master = master; pl022->master_info = platform_info; pl022->adev = adev; pl022->vendor = id->data; /* * Bus Number Which has been Assigned to this SSP controller * on this board */ master->bus_num = platform_info->bus_id; master->num_chipselect = platform_info->num_chipselect; master->cleanup = pl022_cleanup; master->setup = pl022_setup; master->transfer = pl022_transfer; /* * Supports mode 0-3, loopback, and active low CS. Transfers are * always MS bit first on the original pl022. */ master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH | SPI_LOOP; if (pl022->vendor->extended_cr) master->mode_bits |= SPI_LSB_FIRST; dev_dbg(&adev->dev, "BUSNO: %d\n", master->bus_num); status = amba_request_regions(adev, NULL); if (status) goto err_no_ioregion; pl022->phybase = adev->res.start; pl022->virtbase = ioremap(adev->res.start, resource_size(&adev->res)); if (pl022->virtbase == NULL) { status = -ENOMEM; goto err_no_ioremap; } printk(KERN_INFO "pl022: mapped registers from 0x%08x to %p\n", adev->res.start, pl022->virtbase); pl022->clk = clk_get(&adev->dev, NULL); if (IS_ERR(pl022->clk)) { status = PTR_ERR(pl022->clk); dev_err(&adev->dev, "could not retrieve SSP/SPI bus clock\n"); goto err_no_clk; } /* Disable SSP */ writew((readw(SSP_CR1(pl022->virtbase)) & (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase)); load_ssp_default_config(pl022); status = request_irq(adev->irq[0], pl022_interrupt_handler, 0, "pl022", pl022); if (status < 0) { dev_err(&adev->dev, "probe - cannot get IRQ (%d)\n", status); goto err_no_irq; } /* Get DMA channels */ if (platform_info->enable_dma) { status = pl022_dma_probe(pl022); if (status != 0) goto err_no_dma; } /* Initialize and start queue */ status = init_queue(pl022); if (status != 0) { dev_err(&adev->dev, "probe - problem initializing queue\n"); goto err_init_queue; } status = start_queue(pl022); if (status != 0) { dev_err(&adev->dev, "probe - problem starting queue\n"); goto err_start_queue; } /* Register with the SPI framework */ amba_set_drvdata(adev, pl022); status = spi_register_master(master); if (status != 0) { dev_err(&adev->dev, "probe - problem registering spi master\n"); goto err_spi_register; } dev_dbg(dev, "probe succeeded\n"); /* Disable the silicon block pclk and clock it when needed */ amba_pclk_disable(adev); return 0; err_spi_register: err_start_queue: err_init_queue: destroy_queue(pl022); pl022_dma_remove(pl022); err_no_dma: free_irq(adev->irq[0], pl022); err_no_irq: clk_put(pl022->clk); err_no_clk: iounmap(pl022->virtbase); err_no_ioremap: amba_release_regions(adev); err_no_ioregion: spi_master_put(master); err_no_master: err_no_pdata: return status; } static int __devexit pl022_remove(struct amba_device *adev) { struct pl022 *pl022 = amba_get_drvdata(adev); int status = 0; if (!pl022) return 0; /* Remove the queue */ status = destroy_queue(pl022); if (status != 0) { dev_err(&adev->dev, "queue remove failed (%d)\n", status); return status; } load_ssp_default_config(pl022); pl022_dma_remove(pl022); free_irq(adev->irq[0], pl022); clk_disable(pl022->clk); clk_put(pl022->clk); iounmap(pl022->virtbase); amba_release_regions(adev); tasklet_disable(&pl022->pump_transfers); spi_unregister_master(pl022->master); spi_master_put(pl022->master); amba_set_drvdata(adev, NULL); dev_dbg(&adev->dev, "remove succeeded\n"); return 0; } #ifdef CONFIG_PM static int pl022_suspend(struct amba_device *adev, pm_message_t state) { struct pl022 *pl022 = amba_get_drvdata(adev); int status = 0; status = stop_queue(pl022); if (status) { dev_warn(&adev->dev, "suspend cannot stop queue\n"); return status; } amba_pclk_enable(adev); load_ssp_default_config(pl022); amba_pclk_disable(adev); dev_dbg(&adev->dev, "suspended\n"); return 0; } static int pl022_resume(struct amba_device *adev) { struct pl022 *pl022 = amba_get_drvdata(adev); int status = 0; /* Start the queue running */ status = start_queue(pl022); if (status) dev_err(&adev->dev, "problem starting queue (%d)\n", status); else dev_dbg(&adev->dev, "resumed\n"); return status; } #else #define pl022_suspend NULL #define pl022_resume NULL #endif /* CONFIG_PM */ static struct vendor_data vendor_arm = { .fifodepth = 8, .max_bpw = 16, .unidir = false, .extended_cr = false, .pl023 = false, }; static struct vendor_data vendor_st = { .fifodepth = 32, .max_bpw = 32, .unidir = false, .extended_cr = true, .pl023 = false, }; static struct vendor_data vendor_st_pl023 = { .fifodepth = 32, .max_bpw = 32, .unidir = false, .extended_cr = true, .pl023 = true, }; static struct amba_id pl022_ids[] = { { /* * ARM PL022 variant, this has a 16bit wide * and 8 locations deep TX/RX FIFO */ .id = 0x00041022, .mask = 0x000fffff, .data = &vendor_arm, }, { /* * ST Micro derivative, this has 32bit wide * and 32 locations deep TX/RX FIFO */ .id = 0x01080022, .mask = 0xffffffff, .data = &vendor_st, }, { /* * ST-Ericsson derivative "PL023" (this is not * an official ARM number), this is a PL022 SSP block * stripped to SPI mode only, it has 32bit wide * and 32 locations deep TX/RX FIFO but no extended * CR0/CR1 register */ .id = 0x00080023, .mask = 0xffffffff, .data = &vendor_st_pl023, }, { 0, 0 }, }; static struct amba_driver pl022_driver = { .drv = { .name = "ssp-pl022", }, .id_table = pl022_ids, .probe = pl022_probe, .remove = __devexit_p(pl022_remove), .suspend = pl022_suspend, .resume = pl022_resume, }; static int __init pl022_init(void) { return amba_driver_register(&pl022_driver); } subsys_initcall(pl022_init); static void __exit pl022_exit(void) { amba_driver_unregister(&pl022_driver); } module_exit(pl022_exit); MODULE_AUTHOR("Linus Walleij <linus.walleij@stericsson.com>"); MODULE_DESCRIPTION("PL022 SSP Controller Driver"); MODULE_LICENSE("GPL");
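/*
 * Usage sketch, kept inside #if 0 so it is never built: a minimal example
 * of how a board file could hand a per-chip configuration to this driver.
 * The "spidev" modalias, bus number, chip select and speed are made-up
 * example values; struct pl022_config_chip, struct spi_board_info and
 * spi_register_board_info() are the real interfaces referenced above.
 */
#if 0
static struct pl022_config_chip example_chip_info = {
	/* Poll for completion; INTERRUPT_TRANSFER or DMA_TRANSFER also work */
	.com_mode = POLLING_TRANSFER,
	.iface = SSP_INTERFACE_MOTOROLA_SPI,
	.hierarchy = SSP_MASTER,
	.slave_tx_disable = DO_NOT_DRIVE_TX,
	.rx_lev_trig = SSP_RX_1_OR_MORE_ELEM,
	.tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC,
	.ctrl_len = SSP_BITS_8,
	.wait_state = SSP_MWIRE_WAIT_ZERO,
	.duplex = SSP_MICROWIRE_CHANNEL_FULL_DUPLEX,
	/* Leaving clk_freq zeroed makes pl022_setup() derive the divisors
	 * from spi_board_info.max_speed_hz via calculate_effective_freq() */
	.cs_control = NULL, /* falls back to null_cs_control() with a warning */
};

static struct spi_board_info example_board_info[] __initdata = {
	{
		.modalias = "spidev",		/* hypothetical client driver */
		.max_speed_hz = 1000000,	/* example: 1 MHz */
		.bus_num = 0,	/* must match pl022_ssp_controller.bus_id */
		.chip_select = 0,
		.mode = SPI_MODE_0,
		.controller_data = &example_chip_info,
	},
};

/* Typically called from the board's init_machine hook: */
/* spi_register_board_info(example_board_info, ARRAY_SIZE(example_board_info)); */
#endif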
gpl-2.0
stevezilla/u-boot-amherst
drivers/net/fm/p5020.c
56
2402
/* * Copyright 2011 Freescale Semiconductor, Inc. * * SPDX-License-Identifier: GPL-2.0+ */ #include <common.h> #include <phy.h> #include <fm_eth.h> #include <asm/io.h> #include <asm/immap_85xx.h> #include <asm/fsl_serdes.h> static u32 port_to_devdisr[] = { [FM1_DTSEC1] = FSL_CORENET_DEVDISR2_DTSEC1_1, [FM1_DTSEC2] = FSL_CORENET_DEVDISR2_DTSEC1_2, [FM1_DTSEC3] = FSL_CORENET_DEVDISR2_DTSEC1_3, [FM1_DTSEC4] = FSL_CORENET_DEVDISR2_DTSEC1_4, [FM1_DTSEC5] = FSL_CORENET_DEVDISR2_DTSEC1_5, [FM1_10GEC1] = FSL_CORENET_DEVDISR2_10GEC1, }; static int is_device_disabled(enum fm_port port) { ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR); u32 devdisr2 = in_be32(&gur->devdisr2); return port_to_devdisr[port] & devdisr2; } void fman_disable_port(enum fm_port port) { ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR); /* don't allow disabling of DTSEC1 as its needed for MDIO */ if (port == FM1_DTSEC1) return; setbits_be32(&gur->devdisr2, port_to_devdisr[port]); } void fman_enable_port(enum fm_port port) { ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR); clrbits_be32(&gur->devdisr2, port_to_devdisr[port]); } phy_interface_t fman_port_enet_if(enum fm_port port) { ccsr_gur_t *gur = (void *)(CONFIG_SYS_MPC85xx_GUTS_ADDR); u32 rcwsr11 = in_be32(&gur->rcwsr[11]); if (is_device_disabled(port)) return PHY_INTERFACE_MODE_NONE; if ((port == FM1_10GEC1) && (is_serdes_configured(XAUI_FM1))) return PHY_INTERFACE_MODE_XGMII; /* handle RGMII first */ if ((port == FM1_DTSEC4) && ((rcwsr11 & FSL_CORENET_RCWSR11_EC1) == FSL_CORENET_RCWSR11_EC1_FM1_DTSEC4_RGMII)) return PHY_INTERFACE_MODE_RGMII; if ((port == FM1_DTSEC4) && ((rcwsr11 & FSL_CORENET_RCWSR11_EC1) == FSL_CORENET_RCWSR11_EC1_FM1_DTSEC4_MII)) return PHY_INTERFACE_MODE_MII; if ((port == FM1_DTSEC5) && ((rcwsr11 & FSL_CORENET_RCWSR11_EC2) == FSL_CORENET_RCWSR11_EC2_FM1_DTSEC5_RGMII)) return PHY_INTERFACE_MODE_RGMII; if ((port == FM1_DTSEC5) && ((rcwsr11 & FSL_CORENET_RCWSR11_EC2) == FSL_CORENET_RCWSR11_EC2_FM1_DTSEC5_MII)) return PHY_INTERFACE_MODE_MII; switch (port) { case FM1_DTSEC1: case FM1_DTSEC2: case FM1_DTSEC3: case FM1_DTSEC4: case FM1_DTSEC5: if (is_serdes_configured(SGMII_FM1_DTSEC1 + port - FM1_DTSEC1)) return PHY_INTERFACE_MODE_SGMII; break; default: return PHY_INTERFACE_MODE_NONE; } return PHY_INTERFACE_MODE_NONE; }
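/*
 * Usage sketch, kept inside #if 0 so it is never built: board code could
 * walk the DTSEC ports with the helpers above and power down any port that
 * came up unconfigured. This assumes the FM1_DTSEC1..FM1_DTSEC5 enum
 * values are contiguous; the printf() reporting is just for illustration.
 */
#if 0
static void board_audit_fman_ports(void)
{
	enum fm_port port;

	for (port = FM1_DTSEC1; port <= FM1_DTSEC5; port++) {
		if (fman_port_enet_if(port) == PHY_INTERFACE_MODE_NONE) {
			/* No SerDes/RCW configuration for this port */
			fman_disable_port(port);
			printf("FM1 port %d disabled\n", port);
		}
	}
}
#endif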
gpl-2.0
poranmeloge/test-github
stm32_rtt_wifi/bsp/efm32/Libraries/CMSIS/DSP_Lib/Source/MatrixFunctions/arm_mat_trans_f32.c
56
6170
/* ---------------------------------------------------------------------- * Copyright (C) 2010 ARM Limited. All rights reserved. * * $Date: 15. February 2012 * $Revision: V1.1.0 * * Project: CMSIS DSP Library * Title: arm_mat_trans_f32.c * * Description: Floating-point matrix transpose. * * Target Processor: Cortex-M4/Cortex-M3/Cortex-M0 * * Version 1.1.0 2012/02/15 * Updated with more optimizations, bug fixes and minor API changes. * * Version 1.0.10 2011/7/15 * Big Endian support added and Merged M0 and M3/M4 Source code. * * Version 1.0.3 2010/11/29 * Re-organized the CMSIS folders and updated documentation. * * Version 1.0.2 2010/11/11 * Documentation updated. * * Version 1.0.1 2010/10/05 * Production release and review comments incorporated. * * Version 1.0.0 2010/09/20 * Production release and review comments incorporated. * * Version 0.0.5 2010/04/26 * incorporated review comments and updated with latest CMSIS layer * * Version 0.0.3 2010/03/10 * Initial version * -------------------------------------------------------------------- */ /** * @defgroup MatrixTrans Matrix Transpose * * Transposes a matrix. * Transposing an <code>M x N</code> matrix flips it around the center diagonal and results in an <code>N x M</code> matrix. * \image html MatrixTranspose.gif "Transpose of a 3 x 3 matrix" */ #include "arm_math.h" /** * @ingroup groupMatrix */ /** * @addtogroup MatrixTrans * @{ */ /** * @brief Floating-point matrix transpose. * @param[in] *pSrc points to the input matrix * @param[out] *pDst points to the output matrix * @return The function returns either <code>ARM_MATH_SIZE_MISMATCH</code> * or <code>ARM_MATH_SUCCESS</code> based on the outcome of size checking. */ arm_status arm_mat_trans_f32( const arm_matrix_instance_f32 * pSrc, arm_matrix_instance_f32 * pDst) { float32_t *pIn = pSrc->pData; /* input data matrix pointer */ float32_t *pOut = pDst->pData; /* output data matrix pointer */ float32_t *px; /* Temporary output data matrix pointer */ uint16_t nRows = pSrc->numRows; /* number of rows */ uint16_t nColumns = pSrc->numCols; /* number of columns */ #ifndef ARM_MATH_CM0 /* Run the below code for Cortex-M4 and Cortex-M3 */ uint16_t blkCnt, i = 0u, row = nRows; /* loop counters */ arm_status status; /* status of matrix transpose */ #ifdef ARM_MATH_MATRIX_CHECK /* Check for matrix mismatch condition */ if((pSrc->numRows != pDst->numCols) || (pSrc->numCols != pDst->numRows)) { /* Set status as ARM_MATH_SIZE_MISMATCH */ status = ARM_MATH_SIZE_MISMATCH; } else #endif /* #ifdef ARM_MATH_MATRIX_CHECK */ { /* Matrix transpose by exchanging the rows with columns */ /* row loop */ do { /* Loop Unrolling */ blkCnt = nColumns >> 2; /* The pointer px is set to starting address of the column being processed */ px = pOut + i; /* First part of the processing with loop unrolling. Compute 4 outputs at a time. ** a second loop below computes the remaining 1 to 3 samples.
*/ while(blkCnt > 0u) /* column loop */ { /* Read and store the input element in the destination */ *px = *pIn++; /* Update the pointer px to point to the next row of the transposed matrix */ px += nRows; /* Read and store the input element in the destination */ *px = *pIn++; /* Update the pointer px to point to the next row of the transposed matrix */ px += nRows; /* Read and store the input element in the destination */ *px = *pIn++; /* Update the pointer px to point to the next row of the transposed matrix */ px += nRows; /* Read and store the input element in the destination */ *px = *pIn++; /* Update the pointer px to point to the next row of the transposed matrix */ px += nRows; /* Decrement the column loop counter */ blkCnt--; } /* Perform matrix transpose for last 3 samples here. */ blkCnt = nColumns % 0x4u; while(blkCnt > 0u) { /* Read and store the input element in the destination */ *px = *pIn++; /* Update the pointer px to point to the next row of the transposed matrix */ px += nRows; /* Decrement the column loop counter */ blkCnt--; } #else /* Run the below code for Cortex-M0 */ uint16_t col, i = 0u, row = nRows; /* loop counters */ arm_status status; /* status of matrix transpose */ #ifdef ARM_MATH_MATRIX_CHECK /* Check for matrix mismatch condition */ if((pSrc->numRows != pDst->numCols) || (pSrc->numCols != pDst->numRows)) { /* Set status as ARM_MATH_SIZE_MISMATCH */ status = ARM_MATH_SIZE_MISMATCH; } else #endif /* #ifdef ARM_MATH_MATRIX_CHECK */ { /* Matrix transpose by exchanging the rows with columns */ /* row loop */ do { /* The pointer px is set to starting address of the column being processed */ px = pOut + i; /* Initialize column loop counter */ col = nColumns; while(col > 0u) { /* Read and store the input element in the destination */ *px = *pIn++; /* Update the pointer px to point to the next row of the transposed matrix */ px += nRows; /* Decrement the column loop counter */ col--; } #endif /* #ifndef ARM_MATH_CM0 */ i++; /* Decrement the row loop counter */ row--; } while(row > 0u); /* row loop end */ /* Set status as ARM_MATH_SUCCESS */ status = ARM_MATH_SUCCESS; } /* Return to application */ return (status); } /** * @} end of MatrixTrans group */
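/*
 * Editor's note: a usage sketch for arm_mat_trans_f32(), assuming the
 * CMSIS-DSP headers and library are linked in; the data values are chosen
 * only for illustration.
 */
#include "arm_math.h"

void transpose_example(void)
{
	float32_t in[2 * 3] = { 1.0f, 2.0f, 3.0f,
				4.0f, 5.0f, 6.0f };	/* 2x3 source */
	float32_t out[3 * 2];				/* 3x2 destination */
	arm_matrix_instance_f32 src, dst;
	arm_status status;

	arm_mat_init_f32(&src, 2, 3, in);
	arm_mat_init_f32(&dst, 3, 2, out);

	status = arm_mat_trans_f32(&src, &dst);
	/* on success, out holds { 1, 4, 2, 5, 3, 6 } */
	(void)status;
}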
gpl-2.0
slayher/htc-kernel-mecha
drivers/usb/serial/sierra.c
312
30090
/* USB Driver for Sierra Wireless Copyright (C) 2006, 2007, 2008 Kevin Lloyd <klloyd@sierrawireless.com>, Copyright (C) 2008, 2009 Elina Pasheva, Matthew Safar, Rory Filer <linux@sierrawireless.com> IMPORTANT DISCLAIMER: This driver is not commercially supported by Sierra Wireless. Use at your own risk. This driver is free software; you can redistribute it and/or modify it under the terms of Version 2 of the GNU General Public License as published by the Free Software Foundation. Portions based on the option driver by Matthias Urlichs <smurf@smurf.noris.de>, who based his on the Keyspan driver by Hugh Blemings <hugh@blemings.org> */ #define DRIVER_VERSION "v.1.3.8" #define DRIVER_AUTHOR "Kevin Lloyd, Elina Pasheva, Matthew Safar, Rory Filer" #define DRIVER_DESC "USB Driver for Sierra Wireless USB modems" #include <linux/kernel.h> #include <linux/jiffies.h> #include <linux/errno.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/usb/serial.h> #define SWIMS_USB_REQUEST_SetPower 0x00 #define SWIMS_USB_REQUEST_SetNmea 0x07 #define N_IN_URB 8 #define N_OUT_URB 64 #define IN_BUFLEN 4096 #define MAX_TRANSFER (PAGE_SIZE - 512) /* MAX_TRANSFER is chosen so that the VM is not stressed by allocations > PAGE_SIZE and the number of packets in a page is an integer. 512 is the largest possible packet on EHCI */ static int debug; static int nmea; /* Used in interface blacklisting */ struct sierra_iface_info { const u32 infolen; /* number of interface numbers on blacklist */ const u8 *ifaceinfo; /* pointer to the array holding the numbers */ }; struct sierra_intf_private { spinlock_t susp_lock; unsigned int suspended:1; int in_flight; }; static int sierra_set_power_state(struct usb_device *udev, __u16 swiState) { int result; dev_dbg(&udev->dev, "%s\n", __func__); result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), SWIMS_USB_REQUEST_SetPower, /* __u8 request */ USB_TYPE_VENDOR, /* __u8 request type */ swiState, /* __u16 value */ 0, /* __u16 index */ NULL, /* void *data */ 0, /* __u16 size */ USB_CTRL_SET_TIMEOUT); /* int timeout */ return result; } static int sierra_vsc_set_nmea(struct usb_device *udev, __u16 enable) { int result; dev_dbg(&udev->dev, "%s\n", __func__); result = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), SWIMS_USB_REQUEST_SetNmea, /* __u8 request */ USB_TYPE_VENDOR, /* __u8 request type */ enable, /* __u16 value */ 0x0000, /* __u16 index */ NULL, /* void *data */ 0, /* __u16 size */ USB_CTRL_SET_TIMEOUT); /* int timeout */ return result; } static int sierra_calc_num_ports(struct usb_serial *serial) { int num_ports = 0; u8 ifnum, numendpoints; dev_dbg(&serial->dev->dev, "%s\n", __func__); ifnum = serial->interface->cur_altsetting->desc.bInterfaceNumber; numendpoints = serial->interface->cur_altsetting->desc.bNumEndpoints; /* Dummy interface present on some SKUs should be ignored */ if (ifnum == 0x99) num_ports = 0; else if (numendpoints <= 3) num_ports = 1; else num_ports = (numendpoints-1)/2; return num_ports; } static int is_blacklisted(const u8 ifnum, const struct sierra_iface_info *blacklist) { const u8 *info; int i; if (blacklist) { info = blacklist->ifaceinfo; for (i = 0; i < blacklist->infolen; i++) { if (info[i] == ifnum) return 1; } } return 0; } static int sierra_calc_interface(struct usb_serial *serial) { int interface; struct usb_interface *p_interface; struct usb_host_interface *p_host_interface; dev_dbg(&serial->dev->dev, "%s\n", __func__); /* Get the interface structure pointer from the serial struct
*/ p_interface = serial->interface; /* Get a pointer to the host interface structure */ p_host_interface = p_interface->cur_altsetting; /* read the interface descriptor for this active altsetting * to find out the interface number we are on */ interface = p_host_interface->desc.bInterfaceNumber; return interface; } static int sierra_probe(struct usb_serial *serial, const struct usb_device_id *id) { int result = 0; struct usb_device *udev; struct sierra_intf_private *data; u8 ifnum; udev = serial->dev; dev_dbg(&udev->dev, "%s\n", __func__); ifnum = sierra_calc_interface(serial); /* * If this interface supports more than 1 alternate * select the 2nd one */ if (serial->interface->num_altsetting == 2) { dev_dbg(&udev->dev, "Selecting alt setting for interface %d\n", ifnum); /* We know the alternate setting is 1 for the MC8785 */ usb_set_interface(udev, ifnum, 1); } /* ifnum could have changed - by calling usb_set_interface */ ifnum = sierra_calc_interface(serial); if (is_blacklisted(ifnum, (struct sierra_iface_info *)id->driver_info)) { dev_dbg(&serial->dev->dev, "Ignoring blacklisted interface #%d\n", ifnum); return -ENODEV; } data = serial->private = kzalloc(sizeof(struct sierra_intf_private), GFP_KERNEL); if (!data) return -ENOMEM; spin_lock_init(&data->susp_lock); return result; } static const u8 direct_ip_non_serial_ifaces[] = { 7, 8, 9, 10, 11 }; static const struct sierra_iface_info direct_ip_interface_blacklist = { .infolen = ARRAY_SIZE(direct_ip_non_serial_ifaces), .ifaceinfo = direct_ip_non_serial_ifaces, }; static struct usb_device_id id_table [] = { { USB_DEVICE(0x0F3D, 0x0112) }, /* Airprime/Sierra PC 5220 */ { USB_DEVICE(0x03F0, 0x1B1D) }, /* HP ev2200 a.k.a MC5720 */ { USB_DEVICE(0x03F0, 0x1E1D) }, /* HP hs2300 a.k.a MC8775 */ { USB_DEVICE(0x1199, 0x0017) }, /* Sierra Wireless EM5625 */ { USB_DEVICE(0x1199, 0x0018) }, /* Sierra Wireless MC5720 */ { USB_DEVICE(0x1199, 0x0218) }, /* Sierra Wireless MC5720 */ { USB_DEVICE(0x1199, 0x0020) }, /* Sierra Wireless MC5725 */ { USB_DEVICE(0x1199, 0x0220) }, /* Sierra Wireless MC5725 */ { USB_DEVICE(0x1199, 0x0022) }, /* Sierra Wireless EM5725 */ { USB_DEVICE(0x1199, 0x0024) }, /* Sierra Wireless MC5727 */ { USB_DEVICE(0x1199, 0x0224) }, /* Sierra Wireless MC5727 */ { USB_DEVICE(0x1199, 0x0019) }, /* Sierra Wireless AirCard 595 */ { USB_DEVICE(0x1199, 0x0021) }, /* Sierra Wireless AirCard 597E */ { USB_DEVICE(0x1199, 0x0112) }, /* Sierra Wireless AirCard 580 */ { USB_DEVICE(0x1199, 0x0120) }, /* Sierra Wireless USB Dongle 595U */ /* Sierra Wireless C597 */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0023, 0xFF, 0xFF, 0xFF) }, /* Sierra Wireless T598 */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x0025, 0xFF, 0xFF, 0xFF) }, { USB_DEVICE(0x1199, 0x0026) }, /* Sierra Wireless T11 */ { USB_DEVICE(0x1199, 0x0027) }, /* Sierra Wireless AC402 */ { USB_DEVICE(0x1199, 0x0028) }, /* Sierra Wireless MC5728 */ { USB_DEVICE(0x1199, 0x0029) }, /* Sierra Wireless Device */ { USB_DEVICE(0x1199, 0x6802) }, /* Sierra Wireless MC8755 */ { USB_DEVICE(0x1199, 0x6803) }, /* Sierra Wireless MC8765 */ { USB_DEVICE(0x1199, 0x6804) }, /* Sierra Wireless MC8755 */ { USB_DEVICE(0x1199, 0x6805) }, /* Sierra Wireless MC8765 */ { USB_DEVICE(0x1199, 0x6808) }, /* Sierra Wireless MC8755 */ { USB_DEVICE(0x1199, 0x6809) }, /* Sierra Wireless MC8765 */ { USB_DEVICE(0x1199, 0x6812) }, /* Sierra Wireless MC8775 & AC 875U */ { USB_DEVICE(0x1199, 0x6813) }, /* Sierra Wireless MC8775 */ { USB_DEVICE(0x1199, 0x6815) }, /* Sierra Wireless MC8775 */ { USB_DEVICE(0x1199, 0x6816) }, /* 
Sierra Wireless MC8775 */ { USB_DEVICE(0x1199, 0x6820) }, /* Sierra Wireless AirCard 875 */ { USB_DEVICE(0x1199, 0x6821) }, /* Sierra Wireless AirCard 875U */ { USB_DEVICE(0x1199, 0x6822) }, /* Sierra Wireless AirCard 875E */ { USB_DEVICE(0x1199, 0x6832) }, /* Sierra Wireless MC8780 */ { USB_DEVICE(0x1199, 0x6833) }, /* Sierra Wireless MC8781 */ { USB_DEVICE(0x1199, 0x6834) }, /* Sierra Wireless MC8780 */ { USB_DEVICE(0x1199, 0x6835) }, /* Sierra Wireless MC8781 */ { USB_DEVICE(0x1199, 0x6838) }, /* Sierra Wireless MC8780 */ { USB_DEVICE(0x1199, 0x6839) }, /* Sierra Wireless MC8781 */ { USB_DEVICE(0x1199, 0x683A) }, /* Sierra Wireless MC8785 */ { USB_DEVICE(0x1199, 0x683B) }, /* Sierra Wireless MC8785 Composite */ /* Sierra Wireless MC8790, MC8791, MC8792 Composite */ { USB_DEVICE(0x1199, 0x683C) }, { USB_DEVICE(0x1199, 0x683D) }, /* Sierra Wireless MC8791 Composite */ /* Sierra Wireless MC8790, MC8791, MC8792 */ { USB_DEVICE(0x1199, 0x683E) }, { USB_DEVICE(0x1199, 0x6850) }, /* Sierra Wireless AirCard 880 */ { USB_DEVICE(0x1199, 0x6851) }, /* Sierra Wireless AirCard 881 */ { USB_DEVICE(0x1199, 0x6852) }, /* Sierra Wireless AirCard 880 E */ { USB_DEVICE(0x1199, 0x6853) }, /* Sierra Wireless AirCard 881 E */ { USB_DEVICE(0x1199, 0x6855) }, /* Sierra Wireless AirCard 880 U */ { USB_DEVICE(0x1199, 0x6856) }, /* Sierra Wireless AirCard 881 U */ { USB_DEVICE(0x1199, 0x6859) }, /* Sierra Wireless AirCard 885 E */ { USB_DEVICE(0x1199, 0x685A) }, /* Sierra Wireless AirCard 885 E */ /* Sierra Wireless C885 */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6880, 0xFF, 0xFF, 0xFF)}, /* Sierra Wireless C888, Air Card 501, USB 303, USB 304 */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6890, 0xFF, 0xFF, 0xFF)}, /* Sierra Wireless C22/C33 */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6891, 0xFF, 0xFF, 0xFF)}, /* Sierra Wireless HSPA Non-Composite Device */ { USB_DEVICE_AND_INTERFACE_INFO(0x1199, 0x6892, 0xFF, 0xFF, 0xFF)}, { USB_DEVICE(0x1199, 0x6893) }, /* Sierra Wireless Device */ { USB_DEVICE(0x1199, 0x68A3), /* Sierra Wireless Direct IP modems */ .driver_info = (kernel_ulong_t)&direct_ip_interface_blacklist }, { } }; MODULE_DEVICE_TABLE(usb, id_table); static struct usb_driver sierra_driver = { .name = "sierra", .probe = usb_serial_probe, .disconnect = usb_serial_disconnect, .suspend = usb_serial_suspend, .resume = usb_serial_resume, .id_table = id_table, .no_dynamic_id = 1, .supports_autosuspend = 1, }; struct sierra_port_private { spinlock_t lock; /* lock the structure */ int outstanding_urbs; /* number of out urbs in flight */ struct usb_anchor active; struct usb_anchor delayed; /* Input endpoints and buffers for this port */ struct urb *in_urbs[N_IN_URB]; /* Settings for the port */ int rts_state; /* Handshaking pins (outputs) */ int dtr_state; int cts_state; /* Handshaking pins (inputs) */ int dsr_state; int dcd_state; int ri_state; unsigned int opened:1; }; static int sierra_send_setup(struct usb_serial_port *port) { struct usb_serial *serial = port->serial; struct sierra_port_private *portdata; __u16 interface = 0; int val = 0; int do_send = 0; int retval; dev_dbg(&port->dev, "%s\n", __func__); portdata = usb_get_serial_port_data(port); if (portdata->dtr_state) val |= 0x01; if (portdata->rts_state) val |= 0x02; /* If composite device then properly report interface */ if (serial->num_ports == 1) { interface = sierra_calc_interface(serial); /* Control message is sent only to interfaces with * interrupt_in endpoints */ if (port->interrupt_in_urb) { /* send control message */ do_send = 1; } } /* 
Otherwise we need to do non-composite mapping */ else { if (port->bulk_out_endpointAddress == 2) interface = 0; else if (port->bulk_out_endpointAddress == 4) interface = 1; else if (port->bulk_out_endpointAddress == 5) interface = 2; do_send = 1; } if (!do_send) return 0; usb_autopm_get_interface(serial->interface); retval = usb_control_msg(serial->dev, usb_rcvctrlpipe(serial->dev, 0), 0x22, 0x21, val, interface, NULL, 0, USB_CTRL_SET_TIMEOUT); usb_autopm_put_interface(serial->interface); return retval; } static void sierra_set_termios(struct tty_struct *tty, struct usb_serial_port *port, struct ktermios *old_termios) { dev_dbg(&port->dev, "%s\n", __func__); tty_termios_copy_hw(tty->termios, old_termios); sierra_send_setup(port); } static int sierra_tiocmget(struct tty_struct *tty, struct file *file) { struct usb_serial_port *port = tty->driver_data; unsigned int value; struct sierra_port_private *portdata; dev_dbg(&port->dev, "%s\n", __func__); portdata = usb_get_serial_port_data(port); value = ((portdata->rts_state) ? TIOCM_RTS : 0) | ((portdata->dtr_state) ? TIOCM_DTR : 0) | ((portdata->cts_state) ? TIOCM_CTS : 0) | ((portdata->dsr_state) ? TIOCM_DSR : 0) | ((portdata->dcd_state) ? TIOCM_CAR : 0) | ((portdata->ri_state) ? TIOCM_RNG : 0); return value; } static int sierra_tiocmset(struct tty_struct *tty, struct file *file, unsigned int set, unsigned int clear) { struct usb_serial_port *port = tty->driver_data; struct sierra_port_private *portdata; portdata = usb_get_serial_port_data(port); if (set & TIOCM_RTS) portdata->rts_state = 1; if (set & TIOCM_DTR) portdata->dtr_state = 1; if (clear & TIOCM_RTS) portdata->rts_state = 0; if (clear & TIOCM_DTR) portdata->dtr_state = 0; return sierra_send_setup(port); } static void sierra_release_urb(struct urb *urb) { struct usb_serial_port *port; if (urb) { port = urb->context; dev_dbg(&port->dev, "%s: %p\n", __func__, urb); kfree(urb->transfer_buffer); usb_free_urb(urb); } } static void sierra_outdat_callback(struct urb *urb) { struct usb_serial_port *port = urb->context; struct sierra_port_private *portdata = usb_get_serial_port_data(port); struct sierra_intf_private *intfdata; int status = urb->status; dev_dbg(&port->dev, "%s - port %d\n", __func__, port->number); intfdata = port->serial->private; /* free up the transfer buffer, as usb_free_urb() does not do this */ kfree(urb->transfer_buffer); usb_autopm_put_interface_async(port->serial->interface); if (status) dev_dbg(&port->dev, "%s - nonzero write bulk status " "received: %d\n", __func__, status); spin_lock(&portdata->lock); --portdata->outstanding_urbs; spin_unlock(&portdata->lock); spin_lock(&intfdata->susp_lock); --intfdata->in_flight; spin_unlock(&intfdata->susp_lock); usb_serial_port_softint(port); } /* Write */ static int sierra_write(struct tty_struct *tty, struct usb_serial_port *port, const unsigned char *buf, int count) { struct sierra_port_private *portdata = usb_get_serial_port_data(port); struct sierra_intf_private *intfdata; struct usb_serial *serial = port->serial; unsigned long flags; unsigned char *buffer; struct urb *urb; size_t writesize = min((size_t)count, (size_t)MAX_TRANSFER); int retval = 0; /* verify that we actually have some data to write */ if (count == 0) return 0; portdata = usb_get_serial_port_data(port); intfdata = serial->private; dev_dbg(&port->dev, "%s: write (%zd bytes)\n", __func__, writesize); spin_lock_irqsave(&portdata->lock, flags); dev_dbg(&port->dev, "%s - outstanding_urbs: %d\n", __func__, portdata->outstanding_urbs); if
(portdata->outstanding_urbs > N_OUT_URB) { spin_unlock_irqrestore(&portdata->lock, flags); dev_dbg(&port->dev, "%s - write limit hit\n", __func__); return 0; } portdata->outstanding_urbs++; dev_dbg(&port->dev, "%s - 1, outstanding_urbs: %d\n", __func__, portdata->outstanding_urbs); spin_unlock_irqrestore(&portdata->lock, flags); retval = usb_autopm_get_interface_async(serial->interface); if (retval < 0) { spin_lock_irqsave(&portdata->lock, flags); portdata->outstanding_urbs--; spin_unlock_irqrestore(&portdata->lock, flags); goto error_simple; } buffer = kmalloc(writesize, GFP_ATOMIC); if (!buffer) { dev_err(&port->dev, "out of memory\n"); retval = -ENOMEM; goto error_no_buffer; } urb = usb_alloc_urb(0, GFP_ATOMIC); if (!urb) { dev_err(&port->dev, "no more free urbs\n"); retval = -ENOMEM; goto error_no_urb; } memcpy(buffer, buf, writesize); usb_serial_debug_data(debug, &port->dev, __func__, writesize, buffer); usb_fill_bulk_urb(urb, serial->dev, usb_sndbulkpipe(serial->dev, port->bulk_out_endpointAddress), buffer, writesize, sierra_outdat_callback, port); /* Handle the need to send a zero length packet */ urb->transfer_flags |= URB_ZERO_PACKET; spin_lock_irqsave(&intfdata->susp_lock, flags); if (intfdata->suspended) { usb_anchor_urb(urb, &portdata->delayed); spin_unlock_irqrestore(&intfdata->susp_lock, flags); goto skip_power; } else { usb_anchor_urb(urb, &portdata->active); } /* send it down the pipe */ retval = usb_submit_urb(urb, GFP_ATOMIC); if (retval) { usb_unanchor_urb(urb); spin_unlock_irqrestore(&intfdata->susp_lock, flags); dev_err(&port->dev, "%s - usb_submit_urb(write bulk) failed " "with status = %d\n", __func__, retval); goto error; } else { intfdata->in_flight++; spin_unlock_irqrestore(&intfdata->susp_lock, flags); } skip_power: /* we are done with this urb, so let the host driver * really free it when it is finished with it */ usb_free_urb(urb); return writesize; error: usb_free_urb(urb); error_no_urb: kfree(buffer); error_no_buffer: spin_lock_irqsave(&portdata->lock, flags); --portdata->outstanding_urbs; dev_dbg(&port->dev, "%s - 2. outstanding_urbs: %d\n", __func__, portdata->outstanding_urbs); spin_unlock_irqrestore(&portdata->lock, flags); usb_autopm_put_interface_async(serial->interface); error_simple: return retval; } static void sierra_indat_callback(struct urb *urb) { int err; int endpoint; struct usb_serial_port *port; struct tty_struct *tty; unsigned char *data = urb->transfer_buffer; int status = urb->status; endpoint = usb_pipeendpoint(urb->pipe); port = urb->context; dev_dbg(&port->dev, "%s: %p\n", __func__, urb); if (status) { dev_dbg(&port->dev, "%s: nonzero status: %d on" " endpoint %02x\n", __func__, status, endpoint); } else { if (urb->actual_length) { tty = tty_port_tty_get(&port->port); tty_buffer_request_room(tty, urb->actual_length); tty_insert_flip_string(tty, data, urb->actual_length); tty_flip_buffer_push(tty); tty_kref_put(tty); usb_serial_debug_data(debug, &port->dev, __func__, urb->actual_length, data); } else { dev_dbg(&port->dev, "%s: empty read urb" " received\n", __func__); } } /* Resubmit urb so we continue receiving */ if (port->port.count && status != -ESHUTDOWN && status != -EPERM) { usb_mark_last_busy(port->serial->dev); err = usb_submit_urb(urb, GFP_ATOMIC); if (err) dev_err(&port->dev, "resubmit read urb failed." 
"(%d)\n", err); } return; } static void sierra_instat_callback(struct urb *urb) { int err; int status = urb->status; struct usb_serial_port *port = urb->context; struct sierra_port_private *portdata = usb_get_serial_port_data(port); struct usb_serial *serial = port->serial; dev_dbg(&port->dev, "%s: urb %p port %p has data %p\n", __func__, urb, port, portdata); if (status == 0) { struct usb_ctrlrequest *req_pkt = (struct usb_ctrlrequest *)urb->transfer_buffer; if (!req_pkt) { dev_dbg(&port->dev, "%s: NULL req_pkt\n", __func__); return; } if ((req_pkt->bRequestType == 0xA1) && (req_pkt->bRequest == 0x20)) { int old_dcd_state; unsigned char signals = *((unsigned char *) urb->transfer_buffer + sizeof(struct usb_ctrlrequest)); struct tty_struct *tty; dev_dbg(&port->dev, "%s: signal x%x\n", __func__, signals); old_dcd_state = portdata->dcd_state; portdata->cts_state = 1; portdata->dcd_state = ((signals & 0x01) ? 1 : 0); portdata->dsr_state = ((signals & 0x02) ? 1 : 0); portdata->ri_state = ((signals & 0x08) ? 1 : 0); tty = tty_port_tty_get(&port->port); if (tty && !C_CLOCAL(tty) && old_dcd_state && !portdata->dcd_state) tty_hangup(tty); tty_kref_put(tty); } else { dev_dbg(&port->dev, "%s: type %x req %x\n", __func__, req_pkt->bRequestType, req_pkt->bRequest); } } else dev_dbg(&port->dev, "%s: error %d\n", __func__, status); /* Resubmit urb so we continue receiving IRQ data */ if (port->port.count && status != -ESHUTDOWN && status != -ENOENT) { usb_mark_last_busy(serial->dev); urb->dev = serial->dev; err = usb_submit_urb(urb, GFP_ATOMIC); if (err) dev_err(&port->dev, "%s: resubmit intr urb " "failed. (%d)\n", __func__, err); } } static int sierra_write_room(struct tty_struct *tty) { struct usb_serial_port *port = tty->driver_data; struct sierra_port_private *portdata = usb_get_serial_port_data(port); unsigned long flags; dev_dbg(&port->dev, "%s - port %d\n", __func__, port->number); /* try to give a good number back based on if we have any free urbs at * this point in time */ spin_lock_irqsave(&portdata->lock, flags); if (portdata->outstanding_urbs > N_OUT_URB * 2 / 3) { spin_unlock_irqrestore(&portdata->lock, flags); dev_dbg(&port->dev, "%s - write limit hit\n", __func__); return 0; } spin_unlock_irqrestore(&portdata->lock, flags); return 2048; } static void sierra_stop_rx_urbs(struct usb_serial_port *port) { int i; struct sierra_port_private *portdata = usb_get_serial_port_data(port); for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++) usb_kill_urb(portdata->in_urbs[i]); usb_kill_urb(port->interrupt_in_urb); } static int sierra_submit_rx_urbs(struct usb_serial_port *port, gfp_t mem_flags) { int ok_cnt; int err = -EINVAL; int i; struct urb *urb; struct sierra_port_private *portdata = usb_get_serial_port_data(port); ok_cnt = 0; for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++) { urb = portdata->in_urbs[i]; if (!urb) continue; err = usb_submit_urb(urb, mem_flags); if (err) { dev_err(&port->dev, "%s: submit urb failed: %d\n", __func__, err); } else { ok_cnt++; } } if (ok_cnt && port->interrupt_in_urb) { err = usb_submit_urb(port->interrupt_in_urb, mem_flags); if (err) { dev_err(&port->dev, "%s: submit intr urb failed: %d\n", __func__, err); } } if (ok_cnt > 0) /* at least one rx urb submitted */ return 0; else return err; } static struct urb *sierra_setup_urb(struct usb_serial *serial, int endpoint, int dir, void *ctx, int len, gfp_t mem_flags, usb_complete_t callback) { struct urb *urb; u8 *buf; if (endpoint == -1) return NULL; urb = usb_alloc_urb(0, mem_flags); if (urb == NULL) { 
dev_dbg(&serial->dev->dev, "%s: alloc for endpoint %d failed\n", __func__, endpoint); return NULL; } buf = kmalloc(len, mem_flags); if (buf) { /* Fill URB using supplied data */ usb_fill_bulk_urb(urb, serial->dev, usb_sndbulkpipe(serial->dev, endpoint) | dir, buf, len, callback, ctx); /* debug */ dev_dbg(&serial->dev->dev, "%s %c u : %p d:%p\n", __func__, dir == USB_DIR_IN ? 'i' : 'o', urb, buf); } else { dev_dbg(&serial->dev->dev, "%s %c u:%p d:%p\n", __func__, dir == USB_DIR_IN ? 'i' : 'o', urb, buf); sierra_release_urb(urb); urb = NULL; } return urb; } static void sierra_close(struct usb_serial_port *port) { int i; struct usb_serial *serial = port->serial; struct sierra_port_private *portdata; struct sierra_intf_private *intfdata = port->serial->private; dev_dbg(&port->dev, "%s\n", __func__); portdata = usb_get_serial_port_data(port); portdata->rts_state = 0; portdata->dtr_state = 0; if (serial->dev) { mutex_lock(&serial->disc_mutex); if (!serial->disconnected) { serial->interface->needs_remote_wakeup = 0; usb_autopm_get_interface(serial->interface); sierra_send_setup(port); } mutex_unlock(&serial->disc_mutex); spin_lock_irq(&intfdata->susp_lock); portdata->opened = 0; spin_unlock_irq(&intfdata->susp_lock); /* Stop reading urbs */ sierra_stop_rx_urbs(port); /* .. and release them */ for (i = 0; i < N_IN_URB; i++) { sierra_release_urb(portdata->in_urbs[i]); portdata->in_urbs[i] = NULL; } } } static int sierra_open(struct tty_struct *tty, struct usb_serial_port *port) { struct sierra_port_private *portdata; struct usb_serial *serial = port->serial; struct sierra_intf_private *intfdata = serial->private; int i; int err; int endpoint; struct urb *urb; portdata = usb_get_serial_port_data(port); dev_dbg(&port->dev, "%s\n", __func__); /* Set some sane defaults */ portdata->rts_state = 1; portdata->dtr_state = 1; endpoint = port->bulk_in_endpointAddress; for (i = 0; i < ARRAY_SIZE(portdata->in_urbs); i++) { urb = sierra_setup_urb(serial, endpoint, USB_DIR_IN, port, IN_BUFLEN, GFP_KERNEL, sierra_indat_callback); portdata->in_urbs[i] = urb; } /* clear halt condition */ usb_clear_halt(serial->dev, usb_sndbulkpipe(serial->dev, endpoint) | USB_DIR_IN); err = sierra_submit_rx_urbs(port, GFP_KERNEL); if (err) { /* get rid of everything as in close */ sierra_close(port); /* restore balance for autopm */ usb_autopm_put_interface(serial->interface); return err; } sierra_send_setup(port); serial->interface->needs_remote_wakeup = 1; spin_lock_irq(&intfdata->susp_lock); portdata->opened = 1; spin_unlock_irq(&intfdata->susp_lock); usb_autopm_put_interface(serial->interface); return 0; } static void sierra_dtr_rts(struct usb_serial_port *port, int on) { struct usb_serial *serial = port->serial; struct sierra_port_private *portdata; portdata = usb_get_serial_port_data(port); portdata->rts_state = on; portdata->dtr_state = on; if (serial->dev) { mutex_lock(&serial->disc_mutex); if (!serial->disconnected) sierra_send_setup(port); mutex_unlock(&serial->disc_mutex); } } static int sierra_startup(struct usb_serial *serial) { struct usb_serial_port *port; struct sierra_port_private *portdata; int i; dev_dbg(&serial->dev->dev, "%s\n", __func__); /* Set Device mode to D0 */ sierra_set_power_state(serial->dev, 0x0000); /* Check NMEA and set */ if (nmea) sierra_vsc_set_nmea(serial->dev, 1); /* Now setup per port private data */ for (i = 0; i < serial->num_ports; i++) { port = serial->port[i]; portdata = kzalloc(sizeof(*portdata), GFP_KERNEL); if (!portdata) { dev_dbg(&port->dev, "%s: kmalloc for " "sierra_port_private 
(%d) failed!\n", __func__, i); return -ENOMEM; } spin_lock_init(&portdata->lock); init_usb_anchor(&portdata->active); init_usb_anchor(&portdata->delayed); /* Set the port private data pointer */ usb_set_serial_port_data(port, portdata); } return 0; } static void sierra_release(struct usb_serial *serial) { int i; struct usb_serial_port *port; struct sierra_port_private *portdata; dev_dbg(&serial->dev->dev, "%s\n", __func__); for (i = 0; i < serial->num_ports; ++i) { port = serial->port[i]; if (!port) continue; portdata = usb_get_serial_port_data(port); if (!portdata) continue; kfree(portdata); } } #ifdef CONFIG_PM static void stop_read_write_urbs(struct usb_serial *serial) { int i; struct usb_serial_port *port; struct sierra_port_private *portdata; /* Stop reading/writing urbs */ for (i = 0; i < serial->num_ports; ++i) { port = serial->port[i]; portdata = usb_get_serial_port_data(port); sierra_stop_rx_urbs(port); usb_kill_anchored_urbs(&portdata->active); } } static int sierra_suspend(struct usb_serial *serial, pm_message_t message) { struct sierra_intf_private *intfdata; int b; if (serial->dev->auto_pm) { intfdata = serial->private; spin_lock_irq(&intfdata->susp_lock); b = intfdata->in_flight; if (b) { spin_unlock_irq(&intfdata->susp_lock); return -EBUSY; } else { intfdata->suspended = 1; spin_unlock_irq(&intfdata->susp_lock); } } stop_read_write_urbs(serial); return 0; } static int sierra_resume(struct usb_serial *serial) { struct usb_serial_port *port; struct sierra_intf_private *intfdata = serial->private; struct sierra_port_private *portdata; struct urb *urb; int ec = 0; int i, err; spin_lock_irq(&intfdata->susp_lock); for (i = 0; i < serial->num_ports; i++) { port = serial->port[i]; portdata = usb_get_serial_port_data(port); while ((urb = usb_get_from_anchor(&portdata->delayed))) { usb_anchor_urb(urb, &portdata->active); intfdata->in_flight++; err = usb_submit_urb(urb, GFP_ATOMIC); if (err < 0) { intfdata->in_flight--; usb_unanchor_urb(urb); usb_scuttle_anchored_urbs(&portdata->delayed); break; } } if (portdata->opened) { err = sierra_submit_rx_urbs(port, GFP_ATOMIC); if (err) ec++; } } intfdata->suspended = 0; spin_unlock_irq(&intfdata->susp_lock); return ec ? -EIO : 0; } #else #define sierra_suspend NULL #define sierra_resume NULL #endif static struct usb_serial_driver sierra_device = { .driver = { .owner = THIS_MODULE, .name = "sierra", }, .description = "Sierra USB modem", .id_table = id_table, .usb_driver = &sierra_driver, .calc_num_ports = sierra_calc_num_ports, .probe = sierra_probe, .open = sierra_open, .close = sierra_close, .dtr_rts = sierra_dtr_rts, .write = sierra_write, .write_room = sierra_write_room, .set_termios = sierra_set_termios, .tiocmget = sierra_tiocmget, .tiocmset = sierra_tiocmset, .attach = sierra_startup, .release = sierra_release, .suspend = sierra_suspend, .resume = sierra_resume, .read_int_callback = sierra_instat_callback, }; /* Functions used by new usb-serial code.
*/ static int __init sierra_init(void) { int retval; retval = usb_serial_register(&sierra_device); if (retval) goto failed_device_register; retval = usb_register(&sierra_driver); if (retval) goto failed_driver_register; printk(KERN_INFO KBUILD_MODNAME ": " DRIVER_VERSION ":" DRIVER_DESC "\n"); return 0; failed_driver_register: usb_serial_deregister(&sierra_device); failed_device_register: return retval; } static void __exit sierra_exit(void) { usb_deregister(&sierra_driver); usb_serial_deregister(&sierra_device); } module_init(sierra_init); module_exit(sierra_exit); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_VERSION(DRIVER_VERSION); MODULE_LICENSE("GPL"); module_param(nmea, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(nmea, "NMEA streaming"); module_param(debug, bool, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Debug messages");
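/*
 * Editor's note: a stand-alone model (illustration only, not driver code)
 * of the URB accounting policy used by sierra_write() and
 * sierra_write_room() above: writes are refused once more than N_OUT_URB
 * URBs are in flight, and write_room() starts reporting 0 at two thirds
 * of that limit so the tty layer backs off early.
 */
#include <stdio.h>

#define N_OUT_URB 64

static int outstanding;	/* models portdata->outstanding_urbs */

static int model_write_room(void)
{
	return (outstanding > N_OUT_URB * 2 / 3) ? 0 : 2048;
}

static int model_write(int nbytes)
{
	if (outstanding > N_OUT_URB)
		return 0;	/* write limit hit */
	outstanding++;		/* the completion callback decrements this */
	return nbytes;
}

int main(void)
{
	int i;

	for (i = 0; i < 50; i++)
		model_write(64);
	printf("room after 50 writes: %d\n", model_write_room()); /* 0 */
	return 0;
}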
gpl-2.0
icalik/linux
drivers/net/wireless/ath/ath10k/htt_tx.c
312
16555
/* * Copyright (c) 2005-2011 Atheros Communications Inc. * Copyright (c) 2011-2013 Qualcomm Atheros, Inc. * * Permission to use, copy, modify, and/or distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #include <linux/etherdevice.h> #include "htt.h" #include "mac.h" #include "hif.h" #include "txrx.h" #include "debug.h" void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt) { htt->num_pending_tx--; if (htt->num_pending_tx == htt->max_num_pending_tx - 1) ieee80211_wake_queues(htt->ar->hw); } static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt) { spin_lock_bh(&htt->tx_lock); __ath10k_htt_tx_dec_pending(htt); spin_unlock_bh(&htt->tx_lock); } static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt) { int ret = 0; spin_lock_bh(&htt->tx_lock); if (htt->num_pending_tx >= htt->max_num_pending_tx) { ret = -EBUSY; goto exit; } htt->num_pending_tx++; if (htt->num_pending_tx == htt->max_num_pending_tx) ieee80211_stop_queues(htt->ar->hw); exit: spin_unlock_bh(&htt->tx_lock); return ret; } int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb) { struct ath10k *ar = htt->ar; int ret; lockdep_assert_held(&htt->tx_lock); ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC); ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret); return ret; } void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id) { struct ath10k *ar = htt->ar; lockdep_assert_held(&htt->tx_lock); ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id); idr_remove(&htt->pending_tx, msdu_id); } int ath10k_htt_tx_alloc(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n", htt->max_num_pending_tx); spin_lock_init(&htt->tx_lock); idr_init(&htt->pending_tx); htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev, sizeof(struct ath10k_htt_txbuf), 4, 0); if (!htt->tx_pool) { idr_destroy(&htt->pending_tx); return -ENOMEM; } return 0; } static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx) { struct ath10k *ar = ctx; struct ath10k_htt *htt = &ar->htt; struct htt_tx_done tx_done = {0}; ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id); tx_done.discard = 1; tx_done.msdu_id = msdu_id; spin_lock_bh(&htt->tx_lock); ath10k_txrx_tx_unref(htt, &tx_done); spin_unlock_bh(&htt->tx_lock); return 0; } void ath10k_htt_tx_free(struct ath10k_htt *htt) { idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar); idr_destroy(&htt->pending_tx); dma_pool_destroy(htt->tx_pool); } void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb) { dev_kfree_skb_any(skb); } int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; struct sk_buff *skb; struct htt_cmd *cmd; int len = 0; int ret; len += sizeof(cmd->hdr); len += sizeof(cmd->ver_req); skb = ath10k_htc_alloc_skb(ar, len); if (!skb) return -ENOMEM; 
skb_put(skb, len); cmd = (struct htt_cmd *)skb->data; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ; ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { dev_kfree_skb_any(skb); return ret; } return 0; } int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie) { struct ath10k *ar = htt->ar; struct htt_stats_req *req; struct sk_buff *skb; struct htt_cmd *cmd; int len = 0, ret; len += sizeof(cmd->hdr); len += sizeof(cmd->stats_req); skb = ath10k_htc_alloc_skb(ar, len); if (!skb) return -ENOMEM; skb_put(skb, len); cmd = (struct htt_cmd *)skb->data; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ; req = &cmd->stats_req; memset(req, 0, sizeof(*req)); /* currently we support only max 8 bit masks so no need to worry * about endian support */ req->upload_types[0] = mask; req->reset_types[0] = mask; req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID; req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff); req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32); ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { ath10k_warn(ar, "failed to send htt type stats request: %d", ret); dev_kfree_skb_any(skb); return ret; } return 0; } int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt) { struct ath10k *ar = htt->ar; struct sk_buff *skb; struct htt_cmd *cmd; struct htt_rx_ring_setup_ring *ring; const int num_rx_ring = 1; u16 flags; u32 fw_idx; int len; int ret; /* * the HW expects the buffer to be an integral number of 4-byte * "words" */ BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4)); BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0); len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr) + (sizeof(*ring) * num_rx_ring); skb = ath10k_htc_alloc_skb(ar, len); if (!skb) return -ENOMEM; skb_put(skb, len); cmd = (struct htt_cmd *)skb->data; ring = &cmd->rx_setup.rings[0]; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG; cmd->rx_setup.hdr.num_rings = 1; /* FIXME: do we need all of this? 
*/ flags = 0; flags |= HTT_RX_RING_FLAGS_MAC80211_HDR; flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD; flags |= HTT_RX_RING_FLAGS_PPDU_START; flags |= HTT_RX_RING_FLAGS_PPDU_END; flags |= HTT_RX_RING_FLAGS_MPDU_START; flags |= HTT_RX_RING_FLAGS_MPDU_END; flags |= HTT_RX_RING_FLAGS_MSDU_START; flags |= HTT_RX_RING_FLAGS_MSDU_END; flags |= HTT_RX_RING_FLAGS_RX_ATTENTION; flags |= HTT_RX_RING_FLAGS_FRAG_INFO; flags |= HTT_RX_RING_FLAGS_UNICAST_RX; flags |= HTT_RX_RING_FLAGS_MULTICAST_RX; flags |= HTT_RX_RING_FLAGS_CTRL_RX; flags |= HTT_RX_RING_FLAGS_MGMT_RX; flags |= HTT_RX_RING_FLAGS_NULL_RX; flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX; fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr); ring->fw_idx_shadow_reg_paddr = __cpu_to_le32(htt->rx_ring.alloc_idx.paddr); ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr); ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size); ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE); ring->flags = __cpu_to_le16(flags); ring->fw_idx_init_val = __cpu_to_le16(fw_idx); #define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4) ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status)); ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload)); ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start)); ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end)); ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start)); ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end)); ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start)); ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end)); ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention)); ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info)); #undef desc_offset ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { dev_kfree_skb_any(skb); return ret; } return 0; } int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, u8 max_subfrms_ampdu, u8 max_subfrms_amsdu) { struct ath10k *ar = htt->ar; struct htt_aggr_conf *aggr_conf; struct sk_buff *skb; struct htt_cmd *cmd; int len; int ret; /* Firmware defaults are: amsdu = 3 and ampdu = 64 */ if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64) return -EINVAL; if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31) return -EINVAL; len = sizeof(cmd->hdr); len += sizeof(cmd->aggr_conf); skb = ath10k_htc_alloc_skb(ar, len); if (!skb) return -ENOMEM; skb_put(skb, len); cmd = (struct htt_cmd *)skb->data; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG; aggr_conf = &cmd->aggr_conf; aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu; aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu; ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d", aggr_conf->max_num_amsdu_subframes, aggr_conf->max_num_ampdu_subframes); ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb); if (ret) { dev_kfree_skb_any(skb); return ret; } return 0; } int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) { struct ath10k *ar = htt->ar; struct device *dev = ar->dev; struct sk_buff *txdesc = NULL; struct htt_cmd *cmd; struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); u8 vdev_id = skb_cb->vdev_id; int len = 0; int msdu_id = -1; int res; res = ath10k_htt_tx_inc_pending(htt); if (res) goto err; len += sizeof(cmd->hdr); len += sizeof(cmd->mgmt_tx); spin_lock_bh(&htt->tx_lock); res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); if (res < 0) { spin_unlock_bh(&htt->tx_lock); goto err_tx_dec; } msdu_id = res; spin_unlock_bh(&htt->tx_lock); txdesc = ath10k_htc_alloc_skb(ar, len); if 
(!txdesc) { res = -ENOMEM; goto err_free_msdu_id; } skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len, DMA_TO_DEVICE); res = dma_mapping_error(dev, skb_cb->paddr); if (res) goto err_free_txdesc; skb_put(txdesc, len); cmd = (struct htt_cmd *)txdesc->data; cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX; cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr); cmd->mgmt_tx.len = __cpu_to_le32(msdu->len); cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id); cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id); memcpy(cmd->mgmt_tx.hdr, msdu->data, min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN)); skb_cb->htt.txbuf = NULL; res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc); if (res) goto err_unmap_msdu; return 0; err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_txdesc: dev_kfree_skb_any(txdesc); err_free_msdu_id: spin_lock_bh(&htt->tx_lock); ath10k_htt_tx_free_msdu_id(htt, msdu_id); spin_unlock_bh(&htt->tx_lock); err_tx_dec: ath10k_htt_tx_dec_pending(htt); err: return res; } int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) { struct ath10k *ar = htt->ar; struct device *dev = ar->dev; struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data; struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu); struct ath10k_hif_sg_item sg_items[2]; struct htt_data_tx_desc_frag *frags; u8 vdev_id = skb_cb->vdev_id; u8 tid = skb_cb->htt.tid; int prefetch_len; int res; u8 flags0 = 0; u16 msdu_id, flags1 = 0; dma_addr_t paddr; u32 frags_paddr; bool use_frags; res = ath10k_htt_tx_inc_pending(htt); if (res) goto err; spin_lock_bh(&htt->tx_lock); res = ath10k_htt_tx_alloc_msdu_id(htt, msdu); if (res < 0) { spin_unlock_bh(&htt->tx_lock); goto err_tx_dec; } msdu_id = res; spin_unlock_bh(&htt->tx_lock); prefetch_len = min(htt->prefetch_len, msdu->len); prefetch_len = roundup(prefetch_len, 4); /* Since HTT 3.0 there is no separate mgmt tx command. However in case * of mgmt tx using TX_FRM there is no tx fragment list. Instead of a tx * fragment list, the host driver specifies the frame pointer directly. */ use_frags = htt->target_version_major < 3 || !ieee80211_is_mgmt(hdr->frame_control); skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC, &paddr); if (!skb_cb->htt.txbuf) { res = -ENOMEM; goto err_free_msdu_id; } skb_cb->htt.txbuf_paddr = paddr; if ((ieee80211_is_action(hdr->frame_control) || ieee80211_is_deauth(hdr->frame_control) || ieee80211_is_disassoc(hdr->frame_control)) && ieee80211_has_protected(hdr->frame_control)) skb_put(msdu, IEEE80211_CCMP_MIC_LEN); skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len, DMA_TO_DEVICE); res = dma_mapping_error(dev, skb_cb->paddr); if (res) goto err_free_txbuf; if (likely(use_frags)) { frags = skb_cb->htt.txbuf->frags; frags[0].paddr = __cpu_to_le32(skb_cb->paddr); frags[0].len = __cpu_to_le32(msdu->len); frags[1].paddr = 0; frags[1].len = 0; flags0 |= SM(ATH10K_HW_TXRX_NATIVE_WIFI, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); frags_paddr = skb_cb->htt.txbuf_paddr; } else { flags0 |= SM(ATH10K_HW_TXRX_MGMT, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE); frags_paddr = skb_cb->paddr; } /* Normally all commands go through HTC which manages tx credits for * each endpoint and notifies when tx is completed. * * HTT endpoint is creditless so there's no need to care about HTC * flags. In that case it is trivial to fill the HTC header here. * * MSDU transmission is considered completed upon HTT event. This * implies no relevant resources can be freed until after the event is * received.
That's why the HTC tx completion handler itself is ignored by * setting transfer_context to NULL for all sg items. * * There is simply no point in pushing HTT TX_FRM through HTC tx path * as it's a waste of resources. By bypassing HTC it is possible to * avoid extra memory allocations, compress data structures and thus * improve performance. */ skb_cb->htt.txbuf->htc_hdr.eid = htt->eid; skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16( sizeof(skb_cb->htt.txbuf->cmd_hdr) + sizeof(skb_cb->htt.txbuf->cmd_tx) + prefetch_len); skb_cb->htt.txbuf->htc_hdr.flags = 0; if (!ieee80211_has_protected(hdr->frame_control)) flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT; flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT; flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); if (msdu->ip_summed == CHECKSUM_PARTIAL) { flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; } /* Prevent firmware from sending up tx inspection requests. There's * nothing ath10k can do with frames requested for inspection so force * it to simply rely on a regular tx completion with discard status. */ flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED; skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM; skb_cb->htt.txbuf->cmd_tx.flags0 = flags0; skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1); skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len); skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id); skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr); skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID); skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq); trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid); ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n", flags0, flags1, msdu->len, msdu_id, frags_paddr, (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq); ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ", msdu->data, msdu->len); trace_ath10k_tx_hdr(ar, msdu->data, msdu->len); trace_ath10k_tx_payload(ar, msdu->data, msdu->len); sg_items[0].transfer_id = 0; sg_items[0].transfer_context = NULL; sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr; sg_items[0].paddr = skb_cb->htt.txbuf_paddr + sizeof(skb_cb->htt.txbuf->frags); sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) + sizeof(skb_cb->htt.txbuf->cmd_hdr) + sizeof(skb_cb->htt.txbuf->cmd_tx); sg_items[1].transfer_id = 0; sg_items[1].transfer_context = NULL; sg_items[1].vaddr = msdu->data; sg_items[1].paddr = skb_cb->paddr; sg_items[1].len = prefetch_len; res = ath10k_hif_tx_sg(htt->ar, htt->ar->htc.endpoint[htt->eid].ul_pipe_id, sg_items, ARRAY_SIZE(sg_items)); if (res) goto err_unmap_msdu; return 0; err_unmap_msdu: dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); err_free_txbuf: dma_pool_free(htt->tx_pool, skb_cb->htt.txbuf, skb_cb->htt.txbuf_paddr); err_free_msdu_id: spin_lock_bh(&htt->tx_lock); ath10k_htt_tx_free_msdu_id(htt, msdu_id); spin_unlock_bh(&htt->tx_lock); err_tx_dec: ath10k_htt_tx_dec_pending(htt); err: return res; }
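/*
 * Editor's note: ath10k_htt_tx_alloc_msdu_id() above leans on the kernel
 * IDR to hand out a unique id per pending frame. A plain-C model of that
 * life cycle (allocate on tx, free on completion); the fixed-size table
 * stands in for the idr and is purely illustrative.
 */
#include <stdio.h>

#define MAX_PENDING 8

static void *pending[MAX_PENDING];	/* non-NULL slot means id in use */

static int msdu_id_alloc(void *skb)
{
	int id;

	for (id = 0; id < MAX_PENDING; id++)
		if (!pending[id]) {
			pending[id] = skb;
			return id;
		}
	return -1;			/* like idr_alloc() returning an error */
}

static void msdu_id_free(int id)
{
	if (id >= 0 && id < MAX_PENDING)
		pending[id] = NULL;
}

int main(void)
{
	char frame[] = "frame";
	int id = msdu_id_alloc(frame);

	printf("allocated msdu_id %d\n", id);
	msdu_id_free(id);		/* done on tx completion */
	return 0;
}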
gpl-2.0
rastomanchik/android_kernel_xiaomi_armani
fs/ext4/file.c
824
7445
/* * linux/fs/ext4/file.c * * Copyright (C) 1992, 1993, 1994, 1995 * Remy Card (card@masi.ibp.fr) * Laboratoire MASI - Institut Blaise Pascal * Universite Pierre et Marie Curie (Paris VI) * * from * * linux/fs/minix/file.c * * Copyright (C) 1991, 1992 Linus Torvalds * * ext4 fs regular file handling primitives * * 64-bit file support on 64-bit platforms by Jakub Jelinek * (jj@sunsite.ms.mff.cuni.cz) */ #include <linux/time.h> #include <linux/fs.h> #include <linux/jbd2.h> #include <linux/mount.h> #include <linux/path.h> #include <linux/quotaops.h> #include "ext4.h" #include "ext4_jbd2.h" #include "xattr.h" #include "acl.h" /* * Called when an inode is released. Note that this is different * from ext4_file_open: open gets called at every open, but release * gets called only when /all/ the files are closed. */ static int ext4_release_file(struct inode *inode, struct file *filp) { if (ext4_test_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE)) { ext4_alloc_da_blocks(inode); ext4_clear_inode_state(inode, EXT4_STATE_DA_ALLOC_CLOSE); } /* if we are the last writer on the inode, drop the block reservation */ if ((filp->f_mode & FMODE_WRITE) && (atomic_read(&inode->i_writecount) == 1) && !EXT4_I(inode)->i_reserved_data_blocks) { down_write(&EXT4_I(inode)->i_data_sem); ext4_discard_preallocations(inode); up_write(&EXT4_I(inode)->i_data_sem); } if (is_dx(inode) && filp->private_data) ext4_htree_free_dir_info(filp->private_data); return 0; } static void ext4_aiodio_wait(struct inode *inode) { wait_queue_head_t *wq = ext4_ioend_wq(inode); wait_event(*wq, (atomic_read(&EXT4_I(inode)->i_aiodio_unwritten) == 0)); } /* * This tests whether the IO in question is block-aligned or not. * Ext4 utilizes unwritten extents when hole-filling during direct IO, and they * are converted to written only after the IO is complete. Until they are * mapped, these blocks appear as holes, so dio_zero_block() will assume that * it needs to zero out portions of the start and/or end block. If 2 AIO * threads are at work on the same unwritten block, they must be synchronized * or one thread will zero the other's data, causing corruption. */ static int ext4_unaligned_aio(struct inode *inode, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct super_block *sb = inode->i_sb; int blockmask = sb->s_blocksize - 1; size_t count = iov_length(iov, nr_segs); loff_t final_size = pos + count; if (pos >= i_size_read(inode)) return 0; if ((pos & blockmask) || (final_size & blockmask)) return 1; return 0; } static ssize_t ext4_file_write(struct kiocb *iocb, const struct iovec *iov, unsigned long nr_segs, loff_t pos) { struct inode *inode = iocb->ki_filp->f_path.dentry->d_inode; int unaligned_aio = 0; int ret; /* * If we have encountered a bitmap-format file, the size limit * is smaller than s_maxbytes, which is for extent-mapped files. 
*/ if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) { struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); size_t length = iov_length(iov, nr_segs); if ((pos > sbi->s_bitmap_maxbytes || (pos == sbi->s_bitmap_maxbytes && length > 0))) return -EFBIG; if (pos + length > sbi->s_bitmap_maxbytes) { nr_segs = iov_shorten((struct iovec *)iov, nr_segs, sbi->s_bitmap_maxbytes - pos); } } else if (unlikely((iocb->ki_filp->f_flags & O_DIRECT) && !is_sync_kiocb(iocb))) { unaligned_aio = ext4_unaligned_aio(inode, iov, nr_segs, pos); } /* Unaligned direct AIO must be serialized; see comment above */ if (unaligned_aio) { static unsigned long unaligned_warn_time; /* Warn about this once per day */ if (printk_timed_ratelimit(&unaligned_warn_time, 60*60*24*HZ)) ext4_msg(inode->i_sb, KERN_WARNING, "Unaligned AIO/DIO on inode %ld by %s; " "performance will be poor.", inode->i_ino, current->comm); mutex_lock(ext4_aio_mutex(inode)); ext4_aiodio_wait(inode); } ret = generic_file_aio_write(iocb, iov, nr_segs, pos); if (unaligned_aio) mutex_unlock(ext4_aio_mutex(inode)); return ret; } static const struct vm_operations_struct ext4_file_vm_ops = { .fault = filemap_fault, .page_mkwrite = ext4_page_mkwrite, }; static int ext4_file_mmap(struct file *file, struct vm_area_struct *vma) { struct address_space *mapping = file->f_mapping; if (!mapping->a_ops->readpage) return -ENOEXEC; file_accessed(file); vma->vm_ops = &ext4_file_vm_ops; vma->vm_flags |= VM_CAN_NONLINEAR; return 0; } static int ext4_file_open(struct inode * inode, struct file * filp) { struct super_block *sb = inode->i_sb; struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); struct ext4_inode_info *ei = EXT4_I(inode); struct vfsmount *mnt = filp->f_path.mnt; struct path path; char buf[64], *cp; if (unlikely(!(sbi->s_mount_flags & EXT4_MF_MNTDIR_SAMPLED) && !(sb->s_flags & MS_RDONLY))) { sbi->s_mount_flags |= EXT4_MF_MNTDIR_SAMPLED; /* * Sample where the filesystem has been mounted and * store it in the superblock for sysadmin convenience * when trying to sort through large numbers of block * devices or filesystem images. */ memset(buf, 0, sizeof(buf)); path.mnt = mnt; path.dentry = mnt->mnt_root; cp = d_path(&path, buf, sizeof(buf)); if (!IS_ERR(cp)) { strlcpy(sbi->s_es->s_last_mounted, cp, sizeof(sbi->s_es->s_last_mounted)); ext4_mark_super_dirty(sb); } } /* * Set up the jbd2_inode if we are opening the inode for * writing and the journal is present */ if (sbi->s_journal && !ei->jinode && (filp->f_mode & FMODE_WRITE)) { struct jbd2_inode *jinode = jbd2_alloc_inode(GFP_KERNEL); spin_lock(&inode->i_lock); if (!ei->jinode) { if (!jinode) { spin_unlock(&inode->i_lock); return -ENOMEM; } ei->jinode = jinode; jbd2_journal_init_jbd_inode(ei->jinode, inode); jinode = NULL; } spin_unlock(&inode->i_lock); if (unlikely(jinode != NULL)) jbd2_free_inode(jinode); } return dquot_file_open(inode, filp); } /* * ext4_llseek() copied from generic_file_llseek() to handle both * block-mapped and extent-mapped maxbytes values. This should * otherwise be identical with generic_file_llseek(). 
*/ loff_t ext4_llseek(struct file *file, loff_t offset, int origin) { struct inode *inode = file->f_mapping->host; loff_t maxbytes; if (!(ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) maxbytes = EXT4_SB(inode->i_sb)->s_bitmap_maxbytes; else maxbytes = inode->i_sb->s_maxbytes; return generic_file_llseek_size(file, offset, origin, maxbytes); } const struct file_operations ext4_file_operations = { .llseek = ext4_llseek, .read = do_sync_read, .write = do_sync_write, .aio_read = generic_file_aio_read, .aio_write = ext4_file_write, .unlocked_ioctl = ext4_ioctl, #ifdef CONFIG_COMPAT .compat_ioctl = ext4_compat_ioctl, #endif .mmap = ext4_file_mmap, .open = ext4_file_open, .release = ext4_release_file, .fsync = ext4_sync_file, .splice_read = generic_file_splice_read, .splice_write = generic_file_splice_write, .fallocate = ext4_fallocate, }; const struct inode_operations ext4_file_inode_operations = { .setattr = ext4_setattr, .getattr = ext4_getattr, #ifdef CONFIG_EXT4_FS_XATTR .setxattr = generic_setxattr, .getxattr = generic_getxattr, .listxattr = ext4_listxattr, .removexattr = generic_removexattr, #endif .get_acl = ext4_get_acl, .fiemap = ext4_fiemap, };
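/*
 * Editor's note: a worked example (illustrative, user-space) of the
 * alignment test in ext4_unaligned_aio() above. With a 4096-byte block,
 * an I/O counts as unaligned if its start or end offset falls inside a
 * block; the i_size shortcut of the real function is omitted here.
 */
#include <stdio.h>

static int is_unaligned(long long pos, long long count, int blocksize)
{
	long long blockmask = blocksize - 1;
	long long final_size = pos + count;

	return (pos & blockmask) || (final_size & blockmask);
}

int main(void)
{
	printf("%d\n", is_unaligned(4096, 8192, 4096)); /* 0: both ends aligned */
	printf("%d\n", is_unaligned(4096, 100, 4096));  /* 1: end is mid-block */
	printf("%d\n", is_unaligned(10, 4086, 4096));   /* 1: start is mid-block */
	return 0;
}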
gpl-2.0
yajnab/android_kernel_semc_msm7x30
drivers/video/svgalib.c
824
19839
/* * Common utility functions for VGA-based graphics cards. * * Copyright (c) 2006-2007 Ondrej Zajicek <santiago@crfreenet.org> * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive for * more details. * * Some parts are based on David Boucher's viafb (http://davesdomain.org.uk/viafb/) */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/fb.h> #include <linux/svga.h> #include <linux/slab.h> #include <asm/types.h> #include <asm/io.h> /* Write a CRT register value spread across multiple registers */ void svga_wcrt_multi(const struct vga_regset *regset, u32 value) { u8 regval, bitval, bitnum; while (regset->regnum != VGA_REGSET_END_VAL) { regval = vga_rcrt(NULL, regset->regnum); bitnum = regset->lowbit; while (bitnum <= regset->highbit) { bitval = 1 << bitnum; regval = regval & ~bitval; if (value & 1) regval = regval | bitval; bitnum ++; value = value >> 1; } vga_wcrt(NULL, regset->regnum, regval); regset ++; } } /* Write a sequencer register value spread across multiple registers */ void svga_wseq_multi(const struct vga_regset *regset, u32 value) { u8 regval, bitval, bitnum; while (regset->regnum != VGA_REGSET_END_VAL) { regval = vga_rseq(NULL, regset->regnum); bitnum = regset->lowbit; while (bitnum <= regset->highbit) { bitval = 1 << bitnum; regval = regval & ~bitval; if (value & 1) regval = regval | bitval; bitnum ++; value = value >> 1; } vga_wseq(NULL, regset->regnum, regval); regset ++; } } static unsigned int svga_regset_size(const struct vga_regset *regset) { u8 count = 0; while (regset->regnum != VGA_REGSET_END_VAL) { count += regset->highbit - regset->lowbit + 1; regset ++; } return 1 << count; } /* ------------------------------------------------------------------------- */ /* Set graphics controller registers to sane values */ void svga_set_default_gfx_regs(void) { /* All standard GFX registers (GR00 - GR08) */ vga_wgfx(NULL, VGA_GFX_SR_VALUE, 0x00); vga_wgfx(NULL, VGA_GFX_SR_ENABLE, 0x00); vga_wgfx(NULL, VGA_GFX_COMPARE_VALUE, 0x00); vga_wgfx(NULL, VGA_GFX_DATA_ROTATE, 0x00); vga_wgfx(NULL, VGA_GFX_PLANE_READ, 0x00); vga_wgfx(NULL, VGA_GFX_MODE, 0x00); /* vga_wgfx(NULL, VGA_GFX_MODE, 0x20); */ /* vga_wgfx(NULL, VGA_GFX_MODE, 0x40); */ vga_wgfx(NULL, VGA_GFX_MISC, 0x05); /* vga_wgfx(NULL, VGA_GFX_MISC, 0x01); */ vga_wgfx(NULL, VGA_GFX_COMPARE_MASK, 0x0F); vga_wgfx(NULL, VGA_GFX_BIT_MASK, 0xFF); } /* Set attribute controller registers to sane values */ void svga_set_default_atc_regs(void) { u8 count; vga_r(NULL, 0x3DA); vga_w(NULL, VGA_ATT_W, 0x00); /* All standard ATC registers (AR00 - AR14) */ for (count = 0; count <= 0xF; count ++) svga_wattr(count, count); svga_wattr(VGA_ATC_MODE, 0x01); /* svga_wattr(VGA_ATC_MODE, 0x41); */ svga_wattr(VGA_ATC_OVERSCAN, 0x00); svga_wattr(VGA_ATC_PLANE_ENABLE, 0x0F); svga_wattr(VGA_ATC_PEL, 0x00); svga_wattr(VGA_ATC_COLOR_PAGE, 0x00); vga_r(NULL, 0x3DA); vga_w(NULL, VGA_ATT_W, 0x20); } /* Set sequencer registers to sane values */ void svga_set_default_seq_regs(void) { /* Standard sequencer registers (SR01 - SR04), SR00 is not set */ vga_wseq(NULL, VGA_SEQ_CLOCK_MODE, VGA_SR01_CHAR_CLK_8DOTS); vga_wseq(NULL, VGA_SEQ_PLANE_WRITE, VGA_SR02_ALL_PLANES); vga_wseq(NULL, VGA_SEQ_CHARACTER_MAP, 0x00); /* vga_wseq(NULL, VGA_SEQ_MEMORY_MODE, VGA_SR04_EXT_MEM | VGA_SR04_SEQ_MODE | VGA_SR04_CHN_4M); */ vga_wseq(NULL, VGA_SEQ_MEMORY_MODE, VGA_SR04_EXT_MEM | VGA_SR04_SEQ_MODE); } /* Set CRTC registers to sane values */ 
void svga_set_default_crt_regs(void) { /* Standard CRT registers CR03 CR08 CR09 CR14 CR17 */ svga_wcrt_mask(0x03, 0x80, 0x80); /* Enable vertical retrace EVRA */ vga_wcrt(NULL, VGA_CRTC_PRESET_ROW, 0); svga_wcrt_mask(VGA_CRTC_MAX_SCAN, 0, 0x1F); vga_wcrt(NULL, VGA_CRTC_UNDERLINE, 0); vga_wcrt(NULL, VGA_CRTC_MODE, 0xE3); } void svga_set_textmode_vga_regs(void) { /* svga_wseq_mask(0x1, 0x00, 0x01); */ /* Switch 8/9 pixel per char */ vga_wseq(NULL, VGA_SEQ_MEMORY_MODE, VGA_SR04_EXT_MEM); vga_wseq(NULL, VGA_SEQ_PLANE_WRITE, 0x03); vga_wcrt(NULL, VGA_CRTC_MAX_SCAN, 0x0f); /* 0x4f */ vga_wcrt(NULL, VGA_CRTC_UNDERLINE, 0x1f); svga_wcrt_mask(VGA_CRTC_MODE, 0x23, 0x7f); vga_wcrt(NULL, VGA_CRTC_CURSOR_START, 0x0d); vga_wcrt(NULL, VGA_CRTC_CURSOR_END, 0x0e); vga_wcrt(NULL, VGA_CRTC_CURSOR_HI, 0x00); vga_wcrt(NULL, VGA_CRTC_CURSOR_LO, 0x00); vga_wgfx(NULL, VGA_GFX_MODE, 0x10); /* Odd/even memory mode */ vga_wgfx(NULL, VGA_GFX_MISC, 0x0E); /* Misc graphics register - text mode enable */ vga_wgfx(NULL, VGA_GFX_COMPARE_MASK, 0x00); vga_r(NULL, 0x3DA); vga_w(NULL, VGA_ATT_W, 0x00); svga_wattr(0x10, 0x0C); /* Attribute Mode Control Register - text mode, blinking and line graphics */ svga_wattr(0x13, 0x08); /* Horizontal Pixel Panning Register */ vga_r(NULL, 0x3DA); vga_w(NULL, VGA_ATT_W, 0x20); } #if 0 void svga_dump_var(struct fb_var_screeninfo *var, int node) { pr_debug("fb%d: var.vmode : 0x%X\n", node, var->vmode); pr_debug("fb%d: var.xres : %d\n", node, var->xres); pr_debug("fb%d: var.yres : %d\n", node, var->yres); pr_debug("fb%d: var.bits_per_pixel: %d\n", node, var->bits_per_pixel); pr_debug("fb%d: var.xres_virtual : %d\n", node, var->xres_virtual); pr_debug("fb%d: var.yres_virtual : %d\n", node, var->yres_virtual); pr_debug("fb%d: var.left_margin : %d\n", node, var->left_margin); pr_debug("fb%d: var.right_margin : %d\n", node, var->right_margin); pr_debug("fb%d: var.upper_margin : %d\n", node, var->upper_margin); pr_debug("fb%d: var.lower_margin : %d\n", node, var->lower_margin); pr_debug("fb%d: var.hsync_len : %d\n", node, var->hsync_len); pr_debug("fb%d: var.vsync_len : %d\n", node, var->vsync_len); pr_debug("fb%d: var.sync : 0x%X\n", node, var->sync); pr_debug("fb%d: var.pixclock : %d\n\n", node, var->pixclock); } #endif /* 0 */ /* ------------------------------------------------------------------------- */ void svga_settile(struct fb_info *info, struct fb_tilemap *map) { const u8 *font = map->data; u8 __iomem *fb = (u8 __iomem *)info->screen_base; int i, c; if ((map->width != 8) || (map->height != 16) || (map->depth != 1) || (map->length != 256)) { printk(KERN_ERR "fb%d: unsupported font parameters: width %d, height %d, depth %d, length %d\n", info->node, map->width, map->height, map->depth, map->length); return; } fb += 2; for (c = 0; c < map->length; c++) { for (i = 0; i < map->height; i++) { fb_writeb(font[i], fb + i * 4); // fb[i * 4] = font[i]; } fb += 128; font += map->height; } } /* Copy area in text (tileblit) mode */ void svga_tilecopy(struct fb_info *info, struct fb_tilearea *area) { int dx, dy; /* colstride is halved in this function because u16 are used */ int colstride = 1 << (info->fix.type_aux & FB_AUX_TEXT_SVGA_MASK); int rowstride = colstride * (info->var.xres_virtual / 8); u16 __iomem *fb = (u16 __iomem *) info->screen_base; u16 __iomem *src, *dst; if ((area->sy > area->dy) || ((area->sy == area->dy) && (area->sx > area->dx))) { src = fb + area->sx * colstride + area->sy * rowstride; dst = fb + area->dx * colstride + area->dy * rowstride; } else { src = fb + (area->sx + 
area->width - 1) * colstride + (area->sy + area->height - 1) * rowstride; dst = fb + (area->dx + area->width - 1) * colstride + (area->dy + area->height - 1) * rowstride; colstride = -colstride; rowstride = -rowstride; } for (dy = 0; dy < area->height; dy++) { u16 __iomem *src2 = src; u16 __iomem *dst2 = dst; for (dx = 0; dx < area->width; dx++) { fb_writew(fb_readw(src2), dst2); // *dst2 = *src2; src2 += colstride; dst2 += colstride; } src += rowstride; dst += rowstride; } } /* Fill area in text (tileblit) mode */ void svga_tilefill(struct fb_info *info, struct fb_tilerect *rect) { int dx, dy; int colstride = 2 << (info->fix.type_aux & FB_AUX_TEXT_SVGA_MASK); int rowstride = colstride * (info->var.xres_virtual / 8); int attr = (0x0F & rect->bg) << 4 | (0x0F & rect->fg); u8 __iomem *fb = (u8 __iomem *)info->screen_base; fb += rect->sx * colstride + rect->sy * rowstride; for (dy = 0; dy < rect->height; dy++) { u8 __iomem *fb2 = fb; for (dx = 0; dx < rect->width; dx++) { fb_writeb(rect->index, fb2); fb_writeb(attr, fb2 + 1); fb2 += colstride; } fb += rowstride; } } /* Write text in text (tileblit) mode */ void svga_tileblit(struct fb_info *info, struct fb_tileblit *blit) { int dx, dy, i; int colstride = 2 << (info->fix.type_aux & FB_AUX_TEXT_SVGA_MASK); int rowstride = colstride * (info->var.xres_virtual / 8); int attr = (0x0F & blit->bg) << 4 | (0x0F & blit->fg); u8 __iomem *fb = (u8 __iomem *)info->screen_base; fb += blit->sx * colstride + blit->sy * rowstride; i=0; for (dy=0; dy < blit->height; dy ++) { u8 __iomem *fb2 = fb; for (dx = 0; dx < blit->width; dx ++) { fb_writeb(blit->indices[i], fb2); fb_writeb(attr, fb2 + 1); fb2 += colstride; i ++; if (i == blit->length) return; } fb += rowstride; } } /* Set cursor in text (tileblit) mode */ void svga_tilecursor(struct fb_info *info, struct fb_tilecursor *cursor) { u8 cs = 0x0d; u8 ce = 0x0e; u16 pos = cursor->sx + (info->var.xoffset / 8) + (cursor->sy + (info->var.yoffset / 16)) * (info->var.xres_virtual / 8); if (! cursor -> mode) return; svga_wcrt_mask(0x0A, 0x20, 0x20); /* disable cursor */ if (cursor -> shape == FB_TILE_CURSOR_NONE) return; switch (cursor -> shape) { case FB_TILE_CURSOR_UNDERLINE: cs = 0x0d; break; case FB_TILE_CURSOR_LOWER_THIRD: cs = 0x09; break; case FB_TILE_CURSOR_LOWER_HALF: cs = 0x07; break; case FB_TILE_CURSOR_TWO_THIRDS: cs = 0x05; break; case FB_TILE_CURSOR_BLOCK: cs = 0x01; break; } /* set cursor position */ vga_wcrt(NULL, 0x0E, pos >> 8); vga_wcrt(NULL, 0x0F, pos & 0xFF); vga_wcrt(NULL, 0x0B, ce); /* set cursor end */ vga_wcrt(NULL, 0x0A, cs); /* set cursor start and enable it */ } int svga_get_tilemax(struct fb_info *info) { return 256; } /* Get capabilities of accelerator based on the mode */ void svga_get_caps(struct fb_info *info, struct fb_blit_caps *caps, struct fb_var_screeninfo *var) { if (var->bits_per_pixel == 0) { /* can only support 256 8x16 bitmap */ caps->x = 1 << (8 - 1); caps->y = 1 << (16 - 1); caps->len = 256; } else { caps->x = (var->bits_per_pixel == 4) ? 1 << (8 - 1) : ~(u32)0; caps->y = ~(u32)0; caps->len = ~(u32)0; } } EXPORT_SYMBOL(svga_get_caps); /* ------------------------------------------------------------------------- */ /* * Compute PLL settings (M, N, R) * F_VCO = (F_BASE * M) / N * F_OUT = F_VCO / (2^R) */ static inline u32 abs_diff(u32 a, u32 b) { return (a > b) ? 
(a - b) : (b - a); } int svga_compute_pll(const struct svga_pll *pll, u32 f_wanted, u16 *m, u16 *n, u16 *r, int node) { u16 am, an, ar; u32 f_vco, f_current, delta_current, delta_best; pr_debug("fb%d: ideal frequency: %d kHz\n", node, (unsigned int) f_wanted); ar = pll->r_max; f_vco = f_wanted << ar; /* overflow check */ if ((f_vco >> ar) != f_wanted) return -EINVAL; /* It is usually better to have greater VCO clock because of better frequency stability. So first try r_max, then r smaller. */ while ((ar > pll->r_min) && (f_vco > pll->f_vco_max)) { ar--; f_vco = f_vco >> 1; } /* VCO bounds check */ if ((f_vco < pll->f_vco_min) || (f_vco > pll->f_vco_max)) return -EINVAL; delta_best = 0xFFFFFFFF; *m = 0; *n = 0; *r = ar; am = pll->m_min; an = pll->n_min; while ((am <= pll->m_max) && (an <= pll->n_max)) { f_current = (pll->f_base * am) / an; delta_current = abs_diff (f_current, f_vco); if (delta_current < delta_best) { delta_best = delta_current; *m = am; *n = an; } if (f_current <= f_vco) { am ++; } else { an ++; } } f_current = (pll->f_base * *m) / *n; pr_debug("fb%d: found frequency: %d kHz (VCO %d kHz)\n", node, (int) (f_current >> ar), (int) f_current); pr_debug("fb%d: m = %d n = %d r = %d\n", node, (unsigned int) *m, (unsigned int) *n, (unsigned int) *r); return 0; } /* ------------------------------------------------------------------------- */ /* Check CRT timing values */ int svga_check_timings(const struct svga_timing_regs *tm, struct fb_var_screeninfo *var, int node) { u32 value; var->xres = (var->xres+7)&~7; var->left_margin = (var->left_margin+7)&~7; var->right_margin = (var->right_margin+7)&~7; var->hsync_len = (var->hsync_len+7)&~7; /* Check horizontal total */ value = var->xres + var->left_margin + var->right_margin + var->hsync_len; if (((value / 8) - 5) >= svga_regset_size (tm->h_total_regs)) return -EINVAL; /* Check horizontal display and blank start */ value = var->xres; if (((value / 8) - 1) >= svga_regset_size (tm->h_display_regs)) return -EINVAL; if (((value / 8) - 1) >= svga_regset_size (tm->h_blank_start_regs)) return -EINVAL; /* Check horizontal sync start */ value = var->xres + var->right_margin; if (((value / 8) - 1) >= svga_regset_size (tm->h_sync_start_regs)) return -EINVAL; /* Check horizontal blank end (or length) */ value = var->left_margin + var->right_margin + var->hsync_len; if ((value == 0) || ((value / 8) >= svga_regset_size (tm->h_blank_end_regs))) return -EINVAL; /* Check horizontal sync end (or length) */ value = var->hsync_len; if ((value == 0) || ((value / 8) >= svga_regset_size (tm->h_sync_end_regs))) return -EINVAL; /* Check vertical total */ value = var->yres + var->upper_margin + var->lower_margin + var->vsync_len; if ((value - 1) >= svga_regset_size(tm->v_total_regs)) return -EINVAL; /* Check vertical display and blank start */ value = var->yres; if ((value - 1) >= svga_regset_size(tm->v_display_regs)) return -EINVAL; if ((value - 1) >= svga_regset_size(tm->v_blank_start_regs)) return -EINVAL; /* Check vertical sync start */ value = var->yres + var->lower_margin; if ((value - 1) >= svga_regset_size(tm->v_sync_start_regs)) return -EINVAL; /* Check vertical blank end (or length) */ value = var->upper_margin + var->lower_margin + var->vsync_len; if ((value == 0) || (value >= svga_regset_size (tm->v_blank_end_regs))) return -EINVAL; /* Check vertical sync end (or length) */ value = var->vsync_len; if ((value == 0) || (value >= svga_regset_size (tm->v_sync_end_regs))) return -EINVAL; return 0; } /* Set CRT timing registers */ void 
svga_set_timings(const struct svga_timing_regs *tm, struct fb_var_screeninfo *var, u32 hmul, u32 hdiv, u32 vmul, u32 vdiv, u32 hborder, int node) { u8 regval; u32 value; value = var->xres + var->left_margin + var->right_margin + var->hsync_len; value = (value * hmul) / hdiv; pr_debug("fb%d: horizontal total : %d\n", node, value); svga_wcrt_multi(tm->h_total_regs, (value / 8) - 5); value = var->xres; value = (value * hmul) / hdiv; pr_debug("fb%d: horizontal display : %d\n", node, value); svga_wcrt_multi(tm->h_display_regs, (value / 8) - 1); value = var->xres; value = (value * hmul) / hdiv; pr_debug("fb%d: horizontal blank start: %d\n", node, value); svga_wcrt_multi(tm->h_blank_start_regs, (value / 8) - 1 + hborder); value = var->xres + var->left_margin + var->right_margin + var->hsync_len; value = (value * hmul) / hdiv; pr_debug("fb%d: horizontal blank end : %d\n", node, value); svga_wcrt_multi(tm->h_blank_end_regs, (value / 8) - 1 - hborder); value = var->xres + var->right_margin; value = (value * hmul) / hdiv; pr_debug("fb%d: horizontal sync start : %d\n", node, value); svga_wcrt_multi(tm->h_sync_start_regs, (value / 8)); value = var->xres + var->right_margin + var->hsync_len; value = (value * hmul) / hdiv; pr_debug("fb%d: horizontal sync end : %d\n", node, value); svga_wcrt_multi(tm->h_sync_end_regs, (value / 8)); value = var->yres + var->upper_margin + var->lower_margin + var->vsync_len; value = (value * vmul) / vdiv; pr_debug("fb%d: vertical total : %d\n", node, value); svga_wcrt_multi(tm->v_total_regs, value - 2); value = var->yres; value = (value * vmul) / vdiv; pr_debug("fb%d: vertical display : %d\n", node, value); svga_wcrt_multi(tm->v_display_regs, value - 1); value = var->yres; value = (value * vmul) / vdiv; pr_debug("fb%d: vertical blank start : %d\n", node, value); svga_wcrt_multi(tm->v_blank_start_regs, value); value = var->yres + var->upper_margin + var->lower_margin + var->vsync_len; value = (value * vmul) / vdiv; pr_debug("fb%d: vertical blank end : %d\n", node, value); svga_wcrt_multi(tm->v_blank_end_regs, value - 2); value = var->yres + var->lower_margin; value = (value * vmul) / vdiv; pr_debug("fb%d: vertical sync start : %d\n", node, value); svga_wcrt_multi(tm->v_sync_start_regs, value); value = var->yres + var->lower_margin + var->vsync_len; value = (value * vmul) / vdiv; pr_debug("fb%d: vertical sync end : %d\n", node, value); svga_wcrt_multi(tm->v_sync_end_regs, value); /* Set horizontal and vertical sync pulse polarity in misc register */ regval = vga_r(NULL, VGA_MIS_R); if (var->sync & FB_SYNC_HOR_HIGH_ACT) { pr_debug("fb%d: positive horizontal sync\n", node); regval = regval & ~0x80; } else { pr_debug("fb%d: negative horizontal sync\n", node); regval = regval | 0x80; } if (var->sync & FB_SYNC_VERT_HIGH_ACT) { pr_debug("fb%d: positive vertical sync\n", node); regval = regval & ~0x40; } else { pr_debug("fb%d: negative vertical sync\n\n", node); regval = regval | 0x40; } vga_w(NULL, VGA_MIS_W, regval); } /* ------------------------------------------------------------------------- */ static inline int match_format(const struct svga_fb_format *frm, struct fb_var_screeninfo *var) { int i = 0; int stored = -EINVAL; while (frm->bits_per_pixel != SVGA_FORMAT_END_VAL) { if ((var->bits_per_pixel == frm->bits_per_pixel) && (var->red.length <= frm->red.length) && (var->green.length <= frm->green.length) && (var->blue.length <= frm->blue.length) && (var->transp.length <= frm->transp.length) && (var->nonstd == frm->nonstd)) return i; if (var->bits_per_pixel == 
frm->bits_per_pixel) stored = i; i++; frm++; } return stored; } int svga_match_format(const struct svga_fb_format *frm, struct fb_var_screeninfo *var, struct fb_fix_screeninfo *fix) { int i = match_format(frm, var); if (i >= 0) { var->bits_per_pixel = frm[i].bits_per_pixel; var->red = frm[i].red; var->green = frm[i].green; var->blue = frm[i].blue; var->transp = frm[i].transp; var->nonstd = frm[i].nonstd; if (fix != NULL) { fix->type = frm[i].type; fix->type_aux = frm[i].type_aux; fix->visual = frm[i].visual; fix->xpanstep = frm[i].xpanstep; } } return i; } EXPORT_SYMBOL(svga_wcrt_multi); EXPORT_SYMBOL(svga_wseq_multi); EXPORT_SYMBOL(svga_set_default_gfx_regs); EXPORT_SYMBOL(svga_set_default_atc_regs); EXPORT_SYMBOL(svga_set_default_seq_regs); EXPORT_SYMBOL(svga_set_default_crt_regs); EXPORT_SYMBOL(svga_set_textmode_vga_regs); EXPORT_SYMBOL(svga_settile); EXPORT_SYMBOL(svga_tilecopy); EXPORT_SYMBOL(svga_tilefill); EXPORT_SYMBOL(svga_tileblit); EXPORT_SYMBOL(svga_tilecursor); EXPORT_SYMBOL(svga_get_tilemax); EXPORT_SYMBOL(svga_compute_pll); EXPORT_SYMBOL(svga_check_timings); EXPORT_SYMBOL(svga_set_timings); EXPORT_SYMBOL(svga_match_format); MODULE_AUTHOR("Ondrej Zajicek <santiago@crfreenet.org>"); MODULE_DESCRIPTION("Common utility functions for VGA-based graphics cards"); MODULE_LICENSE("GPL");
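/*
 * Editor's note: a minimal host-side sketch (not part of svgalib.c) of the
 * bit-scatter technique svga_wcrt_multi() above uses: successive bits of one
 * logical value are written into the (lowbit..highbit) slices of several
 * registers described by a regset table. The struct layout, the fake register
 * file and the CR06/CR07 example mapping are illustrative assumptions (the
 * mapping follows the standard VGA overflow layout); it builds with any C99
 * compiler and needs no kernel headers.
 */
#include <stdio.h>
#include <stdint.h>

#define REGSET_END 0xFF

struct regset { uint8_t regnum, lowbit, highbit; };

static uint8_t regs[256];	/* stand-in for the CRT register file */

static void wcrt_multi(const struct regset *rs, uint32_t value)
{
	while (rs->regnum != REGSET_END) {
		uint8_t regval = regs[rs->regnum];
		for (uint8_t bit = rs->lowbit; bit <= rs->highbit; bit++) {
			regval &= (uint8_t)~(1u << bit);
			if (value & 1)
				regval |= (uint8_t)(1u << bit);
			value >>= 1;	/* consume one bit of the value */
		}
		regs[rs->regnum] = regval;
		rs++;
	}
}

int main(void)
{
	/* vertical total: bits 0-7 in CR06, bit 8 in CR07[0], bit 9 in CR07[5] */
	const struct regset v_total[] = {
		{ 0x06, 0, 7 }, { 0x07, 0, 0 }, { 0x07, 5, 5 },
		{ REGSET_END, 0, 0 }
	};

	wcrt_multi(v_total, 0x20D);	/* 10-bit value 525 */
	printf("CR06=%02X CR07=%02X\n", regs[0x06], regs[0x07]);
	return 0;
}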
gpl-2.0
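/*
 * Editor's note: a standalone sketch of the M/N/R search performed by
 * svga_compute_pll() in the svgalib.c record above, modeling
 * F_VCO = (F_BASE * M) / N and F_OUT = F_VCO / 2^R. The pll limits in main()
 * are made-up example numbers, not any real chip's constraints, and the
 * kernel's shift-overflow check is omitted for brevity.
 */
#include <stdio.h>
#include <stdint.h>

struct pll {
	uint32_t f_base, f_vco_min, f_vco_max;		/* kHz */
	uint16_t m_min, m_max, n_min, n_max, r_min, r_max;
};

static uint32_t absdiff(uint32_t a, uint32_t b) { return a > b ? a - b : b - a; }

static int compute_pll(const struct pll *p, uint32_t f_wanted,
		       uint16_t *m, uint16_t *n, uint16_t *r)
{
	uint16_t ar = p->r_max;
	uint32_t f_vco = f_wanted << ar;

	/* prefer a high VCO clock; lower R only while the VCO is out of range */
	while (ar > p->r_min && f_vco > p->f_vco_max) {
		ar--;
		f_vco >>= 1;
	}
	if (f_vco < p->f_vco_min || f_vco > p->f_vco_max)
		return -1;

	uint32_t best = UINT32_MAX;
	uint16_t am = p->m_min, an = p->n_min;
	*r = ar;
	/* two-pointer walk: raise M while under target, raise N while over */
	while (am <= p->m_max && an <= p->n_max) {
		uint32_t f = p->f_base * am / an;
		if (absdiff(f, f_vco) < best) {
			best = absdiff(f, f_vco);
			*m = am;
			*n = an;
		}
		if (f <= f_vco)
			am++;
		else
			an++;
	}
	return 0;
}

int main(void)
{
	/* example limits only: 14318 kHz base, VCO 150-600 MHz, R up to 3 */
	struct pll p = { 14318, 150000, 600000, 1, 127, 1, 31, 0, 3 };
	uint16_t m = 0, n = 0, r = 0;

	if (compute_pll(&p, 65000, &m, &n, &r) == 0)
		printf("m=%u n=%u r=%u -> %u kHz\n",
		       m, n, r, (p.f_base * m / n) >> r);
	return 0;
}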
Andiry/linux-test
net/netfilter/nf_conntrack_proto_udp.c
824
10222
/* (C) 1999-2001 Paul `Rusty' Russell * (C) 2002-2004 Netfilter Core Team <coreteam@netfilter.org> * (C) 2006-2012 Patrick McHardy <kaber@trash.net> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/types.h> #include <linux/timer.h> #include <linux/module.h> #include <linux/udp.h> #include <linux/seq_file.h> #include <linux/skbuff.h> #include <linux/ipv6.h> #include <net/ip6_checksum.h> #include <net/checksum.h> #include <linux/netfilter.h> #include <linux/netfilter_ipv4.h> #include <linux/netfilter_ipv6.h> #include <net/netfilter/nf_conntrack_l4proto.h> #include <net/netfilter/nf_conntrack_ecache.h> #include <net/netfilter/nf_log.h> #include <net/netfilter/ipv4/nf_conntrack_ipv4.h> #include <net/netfilter/ipv6/nf_conntrack_ipv6.h> static unsigned int udp_timeouts[UDP_CT_MAX] = { [UDP_CT_UNREPLIED] = 30*HZ, [UDP_CT_REPLIED] = 180*HZ, }; static inline struct nf_udp_net *udp_pernet(struct net *net) { return &net->ct.nf_ct_proto.udp; } static bool udp_pkt_to_tuple(const struct sk_buff *skb, unsigned int dataoff, struct nf_conntrack_tuple *tuple) { const struct udphdr *hp; struct udphdr _hdr; /* Actually only need first 8 bytes. */ hp = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); if (hp == NULL) return false; tuple->src.u.udp.port = hp->source; tuple->dst.u.udp.port = hp->dest; return true; } static bool udp_invert_tuple(struct nf_conntrack_tuple *tuple, const struct nf_conntrack_tuple *orig) { tuple->src.u.udp.port = orig->dst.u.udp.port; tuple->dst.u.udp.port = orig->src.u.udp.port; return true; } /* Print out the per-protocol part of the tuple. */ static void udp_print_tuple(struct seq_file *s, const struct nf_conntrack_tuple *tuple) { seq_printf(s, "sport=%hu dport=%hu ", ntohs(tuple->src.u.udp.port), ntohs(tuple->dst.u.udp.port)); } static unsigned int *udp_get_timeouts(struct net *net) { return udp_pernet(net)->timeouts; } /* Returns verdict for packet, and may modify conntrack type */ static int udp_packet(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info ctinfo, u_int8_t pf, unsigned int hooknum, unsigned int *timeouts) { /* If we've seen traffic both ways, this is some kind of UDP stream. Extend the timeout. */ if (test_bit(IPS_SEEN_REPLY_BIT, &ct->status)) { nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[UDP_CT_REPLIED]); /* Also, more likely to be important, and not a probe */ if (!test_and_set_bit(IPS_ASSURED_BIT, &ct->status)) nf_conntrack_event_cache(IPCT_ASSURED, ct); } else { nf_ct_refresh_acct(ct, ctinfo, skb, timeouts[UDP_CT_UNREPLIED]); } return NF_ACCEPT; } /* Called when a new connection for this protocol is found. */ static bool udp_new(struct nf_conn *ct, const struct sk_buff *skb, unsigned int dataoff, unsigned int *timeouts) { return true; } static int udp_error(struct net *net, struct nf_conn *tmpl, struct sk_buff *skb, unsigned int dataoff, enum ip_conntrack_info *ctinfo, u_int8_t pf, unsigned int hooknum) { unsigned int udplen = skb->len - dataoff; const struct udphdr *hdr; struct udphdr _hdr; /* Header is too small?
*/ hdr = skb_header_pointer(skb, dataoff, sizeof(_hdr), &_hdr); if (hdr == NULL) { if (LOG_INVALID(net, IPPROTO_UDP)) nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "nf_ct_udp: short packet "); return -NF_ACCEPT; } /* Truncated/malformed packets */ if (ntohs(hdr->len) > udplen || ntohs(hdr->len) < sizeof(*hdr)) { if (LOG_INVALID(net, IPPROTO_UDP)) nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "nf_ct_udp: truncated/malformed packet "); return -NF_ACCEPT; } /* Packet with no checksum */ if (!hdr->check) return NF_ACCEPT; /* Checksum invalid? Ignore. * We skip checking packets on the outgoing path * because the checksum is assumed to be correct. * FIXME: Source route IP option packets --RR */ if (net->ct.sysctl_checksum && hooknum == NF_INET_PRE_ROUTING && nf_checksum(skb, hooknum, dataoff, IPPROTO_UDP, pf)) { if (LOG_INVALID(net, IPPROTO_UDP)) nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "nf_ct_udp: bad UDP checksum "); return -NF_ACCEPT; } return NF_ACCEPT; } #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) #include <linux/netfilter/nfnetlink.h> #include <linux/netfilter/nfnetlink_cttimeout.h> static int udp_timeout_nlattr_to_obj(struct nlattr *tb[], struct net *net, void *data) { unsigned int *timeouts = data; struct nf_udp_net *un = udp_pernet(net); /* set default timeouts for UDP. */ timeouts[UDP_CT_UNREPLIED] = un->timeouts[UDP_CT_UNREPLIED]; timeouts[UDP_CT_REPLIED] = un->timeouts[UDP_CT_REPLIED]; if (tb[CTA_TIMEOUT_UDP_UNREPLIED]) { timeouts[UDP_CT_UNREPLIED] = ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDP_UNREPLIED])) * HZ; } if (tb[CTA_TIMEOUT_UDP_REPLIED]) { timeouts[UDP_CT_REPLIED] = ntohl(nla_get_be32(tb[CTA_TIMEOUT_UDP_REPLIED])) * HZ; } return 0; } static int udp_timeout_obj_to_nlattr(struct sk_buff *skb, const void *data) { const unsigned int *timeouts = data; if (nla_put_be32(skb, CTA_TIMEOUT_UDP_UNREPLIED, htonl(timeouts[UDP_CT_UNREPLIED] / HZ)) || nla_put_be32(skb, CTA_TIMEOUT_UDP_REPLIED, htonl(timeouts[UDP_CT_REPLIED] / HZ))) goto nla_put_failure; return 0; nla_put_failure: return -ENOSPC; } static const struct nla_policy udp_timeout_nla_policy[CTA_TIMEOUT_UDP_MAX+1] = { [CTA_TIMEOUT_UDP_UNREPLIED] = { .type = NLA_U32 }, [CTA_TIMEOUT_UDP_REPLIED] = { .type = NLA_U32 }, }; #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ #ifdef CONFIG_SYSCTL static struct ctl_table udp_sysctl_table[] = { { .procname = "nf_conntrack_udp_timeout", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "nf_conntrack_udp_timeout_stream", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { } }; #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT static struct ctl_table udp_compat_sysctl_table[] = { { .procname = "ip_conntrack_udp_timeout", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { .procname = "ip_conntrack_udp_timeout_stream", .maxlen = sizeof(unsigned int), .mode = 0644, .proc_handler = proc_dointvec_jiffies, }, { } }; #endif /* CONFIG_NF_CONNTRACK_PROC_COMPAT */ #endif /* CONFIG_SYSCTL */ static int udp_kmemdup_sysctl_table(struct nf_proto_net *pn, struct nf_udp_net *un) { #ifdef CONFIG_SYSCTL if (pn->ctl_table) return 0; pn->ctl_table = kmemdup(udp_sysctl_table, sizeof(udp_sysctl_table), GFP_KERNEL); if (!pn->ctl_table) return -ENOMEM; pn->ctl_table[0].data = &un->timeouts[UDP_CT_UNREPLIED]; pn->ctl_table[1].data = &un->timeouts[UDP_CT_REPLIED]; #endif return 0; } static int udp_kmemdup_compat_sysctl_table(struct nf_proto_net *pn, struct nf_udp_net *un) { #ifdef CONFIG_SYSCTL #ifdef 
CONFIG_NF_CONNTRACK_PROC_COMPAT pn->ctl_compat_table = kmemdup(udp_compat_sysctl_table, sizeof(udp_compat_sysctl_table), GFP_KERNEL); if (!pn->ctl_compat_table) return -ENOMEM; pn->ctl_compat_table[0].data = &un->timeouts[UDP_CT_UNREPLIED]; pn->ctl_compat_table[1].data = &un->timeouts[UDP_CT_REPLIED]; #endif #endif return 0; } static int udp_init_net(struct net *net, u_int16_t proto) { int ret; struct nf_udp_net *un = udp_pernet(net); struct nf_proto_net *pn = &un->pn; if (!pn->users) { int i; for (i = 0; i < UDP_CT_MAX; i++) un->timeouts[i] = udp_timeouts[i]; } if (proto == AF_INET) { ret = udp_kmemdup_compat_sysctl_table(pn, un); if (ret < 0) return ret; ret = udp_kmemdup_sysctl_table(pn, un); if (ret < 0) nf_ct_kfree_compat_sysctl_table(pn); } else ret = udp_kmemdup_sysctl_table(pn, un); return ret; } static struct nf_proto_net *udp_get_net_proto(struct net *net) { return &net->ct.nf_ct_proto.udp.pn; } struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4 __read_mostly = { .l3proto = PF_INET, .l4proto = IPPROTO_UDP, .name = "udp", .pkt_to_tuple = udp_pkt_to_tuple, .invert_tuple = udp_invert_tuple, .print_tuple = udp_print_tuple, .packet = udp_packet, .get_timeouts = udp_get_timeouts, .new = udp_new, .error = udp_error, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nla_policy = nf_ct_port_nla_policy, #endif #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) .ctnl_timeout = { .nlattr_to_obj = udp_timeout_nlattr_to_obj, .obj_to_nlattr = udp_timeout_obj_to_nlattr, .nlattr_max = CTA_TIMEOUT_UDP_MAX, .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, .nla_policy = udp_timeout_nla_policy, }, #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ .init_net = udp_init_net, .get_net_proto = udp_get_net_proto, }; EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp4); struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6 __read_mostly = { .l3proto = PF_INET6, .l4proto = IPPROTO_UDP, .name = "udp", .pkt_to_tuple = udp_pkt_to_tuple, .invert_tuple = udp_invert_tuple, .print_tuple = udp_print_tuple, .packet = udp_packet, .get_timeouts = udp_get_timeouts, .new = udp_new, .error = udp_error, #if IS_ENABLED(CONFIG_NF_CT_NETLINK) .tuple_to_nlattr = nf_ct_port_tuple_to_nlattr, .nlattr_to_tuple = nf_ct_port_nlattr_to_tuple, .nlattr_tuple_size = nf_ct_port_nlattr_tuple_size, .nla_policy = nf_ct_port_nla_policy, #endif #if IS_ENABLED(CONFIG_NF_CT_NETLINK_TIMEOUT) .ctnl_timeout = { .nlattr_to_obj = udp_timeout_nlattr_to_obj, .obj_to_nlattr = udp_timeout_obj_to_nlattr, .nlattr_max = CTA_TIMEOUT_UDP_MAX, .obj_size = sizeof(unsigned int) * CTA_TIMEOUT_UDP_MAX, .nla_policy = udp_timeout_nla_policy, }, #endif /* CONFIG_NF_CT_NETLINK_TIMEOUT */ .init_net = udp_init_net, .get_net_proto = udp_get_net_proto, }; EXPORT_SYMBOL_GPL(nf_conntrack_l4proto_udp6);
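/*
 * Editor's note: a userspace sketch (not kernel code) of the timeout policy
 * udp_packet() above implements. A flow starts with the short "unreplied"
 * timeout; once traffic has been seen in both directions it is treated as a
 * stream, refreshed with the long timeout, and marked assured exactly once.
 * HZ and the flow struct here are stand-ins for illustration.
 */
#include <stdio.h>
#include <stdbool.h>

#define HZ 100
enum { UDP_CT_UNREPLIED = 30 * HZ, UDP_CT_REPLIED = 180 * HZ };

struct flow { bool seen_reply, assured; unsigned int timeout; };

static void udp_seen_packet(struct flow *f)
{
	if (f->seen_reply) {
		f->timeout = UDP_CT_REPLIED;	/* stream: extend timeout */
		if (!f->assured) {
			f->assured = true;	/* event fires only once */
			printf("flow became assured\n");
		}
	} else {
		f->timeout = UDP_CT_UNREPLIED;	/* still a possible probe */
	}
}

int main(void)
{
	struct flow f = { 0 };

	udp_seen_packet(&f);		/* original direction only */
	printf("timeout=%us\n", f.timeout / HZ);	/* 30 */
	f.seen_reply = true;		/* reply observed */
	udp_seen_packet(&f);
	printf("timeout=%us\n", f.timeout / HZ);	/* 180 */
	return 0;
}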
gpl-2.0
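/*
 * Editor's note: a standalone sketch of the length sanity checks udp_error()
 * in the record above applies before a UDP packet is tracked. Lengths are
 * host-order here for clarity; the kernel works on the wire header with
 * ntohs(). The helper name is hypothetical.
 */
#include <stdio.h>

#define UDP_HDR_SIZE 8	/* sizeof(struct udphdr) */

/* hdr_len is the UDP header's length field after ntohs(); datalen is the
 * number of bytes actually present from the UDP header onwards. */
static int udp_check_len(unsigned int hdr_len, unsigned int datalen)
{
	if (hdr_len > datalen)
		return -1;	/* truncated: claims more data than present */
	if (hdr_len < UDP_HDR_SIZE)
		return -1;	/* malformed: shorter than the header itself */
	return 0;
}

int main(void)
{
	printf("%d %d %d\n",
	       udp_check_len(100, 100),	/* ok */
	       udp_check_len(200, 100),	/* truncated */
	       udp_check_len(4, 100));	/* malformed */
	return 0;
}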
Makevelli/Make-Kernel
drivers/tty/serial/cpm_uart/cpm_uart_core.c
2104
35729
/* * Driver for CPM (SCC/SMC) serial ports; core driver * * Based on arch/ppc/cpm2_io/uart.c by Dan Malek * Based on ppc8xx.c by Thomas Gleixner * Based on drivers/serial/amba.c by Russell King * * Maintainer: Kumar Gala (galak@kernel.crashing.org) (CPM2) * Pantelis Antoniou (panto@intracom.gr) (CPM1) * * Copyright (C) 2004, 2007 Freescale Semiconductor, Inc. * (C) 2004 Intracom, S.A. * (C) 2005-2006 MontaVista Software, Inc. * Vitaly Bordug <vbordug@ru.mvista.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/module.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/ioport.h> #include <linux/init.h> #include <linux/serial.h> #include <linux/console.h> #include <linux/sysrq.h> #include <linux/device.h> #include <linux/bootmem.h> #include <linux/dma-mapping.h> #include <linux/fs_uart_pd.h> #include <linux/of_platform.h> #include <linux/gpio.h> #include <linux/of_gpio.h> #include <linux/clk.h> #include <asm/io.h> #include <asm/irq.h> #include <asm/delay.h> #include <asm/fs_pd.h> #include <asm/udbg.h> #if defined(CONFIG_SERIAL_CPM_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #include <linux/serial_core.h> #include <linux/kernel.h> #include "cpm_uart.h" /**************************************************************/ static int cpm_uart_tx_pump(struct uart_port *port); static void cpm_uart_init_smc(struct uart_cpm_port *pinfo); static void cpm_uart_init_scc(struct uart_cpm_port *pinfo); static void cpm_uart_initbd(struct uart_cpm_port *pinfo); /**************************************************************/ #define HW_BUF_SPD_THRESHOLD 2400 /* * Check, if transmit buffers are processed */ static unsigned int cpm_uart_tx_empty(struct uart_port *port) { struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; cbd_t __iomem *bdp = pinfo->tx_bd_base; int ret = 0; while (1) { if (in_be16(&bdp->cbd_sc) & BD_SC_READY) break; if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP) { ret = TIOCSER_TEMT; break; } bdp++; } pr_debug("CPM uart[%d]:tx_empty: %d\n", port->line, ret); return ret; } static void cpm_uart_set_mctrl(struct uart_port *port, unsigned int mctrl) { struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; if (pinfo->gpios[GPIO_RTS] >= 0) gpio_set_value(pinfo->gpios[GPIO_RTS], !(mctrl & TIOCM_RTS)); if (pinfo->gpios[GPIO_DTR] >= 0) gpio_set_value(pinfo->gpios[GPIO_DTR], !(mctrl & TIOCM_DTR)); } static unsigned int cpm_uart_get_mctrl(struct uart_port *port) { struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; unsigned int mctrl = TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; if (pinfo->gpios[GPIO_CTS] >= 0) { if (gpio_get_value(pinfo->gpios[GPIO_CTS])) mctrl &= ~TIOCM_CTS; } if (pinfo->gpios[GPIO_DSR] >= 0) { if (gpio_get_value(pinfo->gpios[GPIO_DSR])) mctrl &= ~TIOCM_DSR; } if (pinfo->gpios[GPIO_DCD] >= 0) { if (gpio_get_value(pinfo->gpios[GPIO_DCD])) mctrl &= ~TIOCM_CAR; } if 
(pinfo->gpios[GPIO_RI] >= 0) { if (!gpio_get_value(pinfo->gpios[GPIO_RI])) mctrl |= TIOCM_RNG; } return mctrl; } /* * Stop transmitter */ static void cpm_uart_stop_tx(struct uart_port *port) { struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; smc_t __iomem *smcp = pinfo->smcp; scc_t __iomem *sccp = pinfo->sccp; pr_debug("CPM uart[%d]:stop tx\n", port->line); if (IS_SMC(pinfo)) clrbits8(&smcp->smc_smcm, SMCM_TX); else clrbits16(&sccp->scc_sccm, UART_SCCM_TX); } /* * Start transmitter */ static void cpm_uart_start_tx(struct uart_port *port) { struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; smc_t __iomem *smcp = pinfo->smcp; scc_t __iomem *sccp = pinfo->sccp; pr_debug("CPM uart[%d]:start tx\n", port->line); if (IS_SMC(pinfo)) { if (in_8(&smcp->smc_smcm) & SMCM_TX) return; } else { if (in_be16(&sccp->scc_sccm) & UART_SCCM_TX) return; } if (cpm_uart_tx_pump(port) != 0) { if (IS_SMC(pinfo)) { setbits8(&smcp->smc_smcm, SMCM_TX); } else { setbits16(&sccp->scc_sccm, UART_SCCM_TX); } } } /* * Stop receiver */ static void cpm_uart_stop_rx(struct uart_port *port) { struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; smc_t __iomem *smcp = pinfo->smcp; scc_t __iomem *sccp = pinfo->sccp; pr_debug("CPM uart[%d]:stop rx\n", port->line); if (IS_SMC(pinfo)) clrbits8(&smcp->smc_smcm, SMCM_RX); else clrbits16(&sccp->scc_sccm, UART_SCCM_RX); } /* * Enable Modem status interrupts */ static void cpm_uart_enable_ms(struct uart_port *port) { pr_debug("CPM uart[%d]:enable ms\n", port->line); } /* * Generate a break. */ static void cpm_uart_break_ctl(struct uart_port *port, int break_state) { struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; pr_debug("CPM uart[%d]:break ctrl, break_state: %d\n", port->line, break_state); if (break_state) cpm_line_cr_cmd(pinfo, CPM_CR_STOP_TX); else cpm_line_cr_cmd(pinfo, CPM_CR_RESTART_TX); } /* * Transmit characters, refill buffer descriptor, if possible */ static void cpm_uart_int_tx(struct uart_port *port) { pr_debug("CPM uart[%d]:TX INT\n", port->line); cpm_uart_tx_pump(port); } #ifdef CONFIG_CONSOLE_POLL static int serial_polled; #endif /* * Receive characters */ static void cpm_uart_int_rx(struct uart_port *port) { int i; unsigned char ch; u8 *cp; struct tty_port *tport = &port->state->port; struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; cbd_t __iomem *bdp; u16 status; unsigned int flg; pr_debug("CPM uart[%d]:RX INT\n", port->line); /* Just loop through the closed BDs and copy the characters into * the buffer. 
*/ bdp = pinfo->rx_cur; for (;;) { #ifdef CONFIG_CONSOLE_POLL if (unlikely(serial_polled)) { serial_polled = 0; return; } #endif /* get status */ status = in_be16(&bdp->cbd_sc); /* If this one is empty, return happy */ if (status & BD_SC_EMPTY) break; /* get number of characters, and check space in the flip buffer */ i = in_be16(&bdp->cbd_datlen); /* If there is not enough room in the tty flip buffer, we try * again later, which will be the next rx-interrupt or a timeout */ if (tty_buffer_request_room(tport, i) < i) { printk(KERN_WARNING "No room in flip buffer\n"); return; } /* get pointer */ cp = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo); /* loop through the buffer */ while (i-- > 0) { ch = *cp++; port->icount.rx++; flg = TTY_NORMAL; if (status & (BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV)) goto handle_error; if (uart_handle_sysrq_char(port, ch)) continue; #ifdef CONFIG_CONSOLE_POLL if (unlikely(serial_polled)) { serial_polled = 0; return; } #endif error_return: tty_insert_flip_char(tport, ch, flg); } /* End while (i--) */ /* This BD is ready to be used again. Clear status, get next */ clrbits16(&bdp->cbd_sc, BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV | BD_SC_ID); setbits16(&bdp->cbd_sc, BD_SC_EMPTY); if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP) bdp = pinfo->rx_bd_base; else bdp++; } /* End for (;;) */ /* Write back buffer pointer */ pinfo->rx_cur = bdp; /* activate BH processing */ tty_flip_buffer_push(tport); return; /* Error processing */ handle_error: /* Statistics */ if (status & BD_SC_BR) port->icount.brk++; if (status & BD_SC_PR) port->icount.parity++; if (status & BD_SC_FR) port->icount.frame++; if (status & BD_SC_OV) port->icount.overrun++; /* Mask out ignored conditions */ status &= port->read_status_mask; /* Handle the remaining ones */ if (status & BD_SC_BR) flg = TTY_BREAK; else if (status & BD_SC_PR) flg = TTY_PARITY; else if (status & BD_SC_FR) flg = TTY_FRAME; /* overrun does not affect the current character! */ if (status & BD_SC_OV) { ch = 0; flg = TTY_OVERRUN; /* We skip this buffer */ /* CHECK: is there really nothing sensible in it? */ /* ASSUMPTION: it contains nothing valid */ i = 0; } #ifdef SUPPORT_SYSRQ port->sysrq = 0; #endif goto error_return; } /* * Asynchronous mode interrupt handler */ static irqreturn_t cpm_uart_int(int irq, void *data) { u8 events; struct uart_port *port = data; struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; smc_t __iomem *smcp = pinfo->smcp; scc_t __iomem *sccp = pinfo->sccp; pr_debug("CPM uart[%d]:IRQ\n", port->line); if (IS_SMC(pinfo)) { events = in_8(&smcp->smc_smce); out_8(&smcp->smc_smce, events); if (events & SMCM_BRKE) uart_handle_break(port); if (events & SMCM_RX) cpm_uart_int_rx(port); if (events & SMCM_TX) cpm_uart_int_tx(port); } else { events = in_be16(&sccp->scc_scce); out_be16(&sccp->scc_scce, events); if (events & UART_SCCM_BRKE) uart_handle_break(port); if (events & UART_SCCM_RX) cpm_uart_int_rx(port); if (events & UART_SCCM_TX) cpm_uart_int_tx(port); } return (events) ? IRQ_HANDLED : IRQ_NONE; } static int cpm_uart_startup(struct uart_port *port) { int retval; struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; pr_debug("CPM uart[%d]:startup\n", port->line); /* If the port is not the console, make sure rx is disabled.
*/ if (!(pinfo->flags & FLAG_CONSOLE)) { /* Disable UART rx */ if (IS_SMC(pinfo)) { clrbits16(&pinfo->smcp->smc_smcmr, SMCMR_REN); clrbits8(&pinfo->smcp->smc_smcm, SMCM_RX); } else { clrbits32(&pinfo->sccp->scc_gsmrl, SCC_GSMRL_ENR); clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_RX); } cpm_uart_initbd(pinfo); cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX); } /* Install interrupt handler. */ retval = request_irq(port->irq, cpm_uart_int, 0, "cpm_uart", port); if (retval) return retval; /* Startup rx-int */ if (IS_SMC(pinfo)) { setbits8(&pinfo->smcp->smc_smcm, SMCM_RX); setbits16(&pinfo->smcp->smc_smcmr, (SMCMR_REN | SMCMR_TEN)); } else { setbits16(&pinfo->sccp->scc_sccm, UART_SCCM_RX); setbits32(&pinfo->sccp->scc_gsmrl, (SCC_GSMRL_ENR | SCC_GSMRL_ENT)); } return 0; } inline void cpm_uart_wait_until_send(struct uart_cpm_port *pinfo) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(pinfo->wait_closing); } /* * Shutdown the uart */ static void cpm_uart_shutdown(struct uart_port *port) { struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; pr_debug("CPM uart[%d]:shutdown\n", port->line); /* free interrupt handler */ free_irq(port->irq, port); /* If the port is not the console, disable Rx and Tx. */ if (!(pinfo->flags & FLAG_CONSOLE)) { /* Wait for all the BDs marked sent */ while(!cpm_uart_tx_empty(port)) { set_current_state(TASK_UNINTERRUPTIBLE); schedule_timeout(2); } if (pinfo->wait_closing) cpm_uart_wait_until_send(pinfo); /* Stop uarts */ if (IS_SMC(pinfo)) { smc_t __iomem *smcp = pinfo->smcp; clrbits16(&smcp->smc_smcmr, SMCMR_REN | SMCMR_TEN); clrbits8(&smcp->smc_smcm, SMCM_RX | SMCM_TX); } else { scc_t __iomem *sccp = pinfo->sccp; clrbits32(&sccp->scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); clrbits16(&sccp->scc_sccm, UART_SCCM_TX | UART_SCCM_RX); } /* Shut them really down and reinit buffer descriptors */ if (IS_SMC(pinfo)) { out_be16(&pinfo->smcup->smc_brkcr, 0); cpm_line_cr_cmd(pinfo, CPM_CR_STOP_TX); } else { out_be16(&pinfo->sccup->scc_brkcr, 0); cpm_line_cr_cmd(pinfo, CPM_CR_GRA_STOP_TX); } cpm_uart_initbd(pinfo); } } static void cpm_uart_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { int baud; unsigned long flags; u16 cval, scval, prev_mode; int bits, sbits; struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; smc_t __iomem *smcp = pinfo->smcp; scc_t __iomem *sccp = pinfo->sccp; int maxidl; pr_debug("CPM uart[%d]:set_termios\n", port->line); baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk / 16); if (baud < HW_BUF_SPD_THRESHOLD || (pinfo->port.state && pinfo->port.state->port.low_latency)) pinfo->rx_fifosize = 1; else pinfo->rx_fifosize = RX_BUF_SIZE; /* MAXIDL is the timeout after which a receive buffer is closed * when not full if no more characters are received. * We calculate it from the baudrate so that the duration is * always the same at standard rates: about 4ms. */ maxidl = baud / 2400; if (maxidl < 1) maxidl = 1; if (maxidl > 0x10) maxidl = 0x10; /* Character length programmed into the mode register is the * sum of: 1 start bit, number of data bits, 0 or 1 parity bit, * 1 or 2 stop bits, minus 1. * The value 'bits' counts this for us. 
*/ cval = 0; scval = 0; /* byte size */ switch (termios->c_cflag & CSIZE) { case CS5: bits = 5; break; case CS6: bits = 6; break; case CS7: bits = 7; break; case CS8: bits = 8; break; /* Never happens, but GCC is too dumb to figure it out */ default: bits = 8; break; } sbits = bits - 5; if (termios->c_cflag & CSTOPB) { cval |= SMCMR_SL; /* Two stops */ scval |= SCU_PSMR_SL; bits++; } if (termios->c_cflag & PARENB) { cval |= SMCMR_PEN; scval |= SCU_PSMR_PEN; bits++; if (!(termios->c_cflag & PARODD)) { cval |= SMCMR_PM_EVEN; scval |= (SCU_PSMR_REVP | SCU_PSMR_TEVP); } } /* * Update the timeout */ uart_update_timeout(port, termios->c_cflag, baud); /* * Set up parity check flag */ #define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK)) port->read_status_mask = (BD_SC_EMPTY | BD_SC_OV); if (termios->c_iflag & INPCK) port->read_status_mask |= BD_SC_FR | BD_SC_PR; if ((termios->c_iflag & BRKINT) || (termios->c_iflag & PARMRK)) port->read_status_mask |= BD_SC_BR; /* * Characters to ignore */ port->ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) port->ignore_status_mask |= BD_SC_PR | BD_SC_FR; if (termios->c_iflag & IGNBRK) { port->ignore_status_mask |= BD_SC_BR; /* * If we're ignoring parity and break indicators, ignore * overruns too (for real raw support). */ if (termios->c_iflag & IGNPAR) port->ignore_status_mask |= BD_SC_OV; } /* * !!! ignore all characters if CREAD is not set */ if ((termios->c_cflag & CREAD) == 0) port->read_status_mask &= ~BD_SC_EMPTY; spin_lock_irqsave(&port->lock, flags); /* Start bit has not been added (so don't, because we would just * subtract it later), and we need to add one for the number of * stop bits (there is always at least one). */ bits++; if (IS_SMC(pinfo)) { /* * MRBLR can be changed while an SMC/SCC is operating only * if it is done in a single bus cycle with one 16-bit move * (not two 8-bit bus cycles back-to-back). This occurs when * the cp shifts control to the next RxBD, so the change does * not take effect immediately. To guarantee the exact RxBD * on which the change occurs, change MRBLR only while the * SMC/SCC receiver is disabled. */ out_be16(&pinfo->smcup->smc_mrblr, pinfo->rx_fifosize); out_be16(&pinfo->smcup->smc_maxidl, maxidl); /* Set the mode register. We want to keep a copy of the * enables, because we want to put them back if they were * present. */ prev_mode = in_be16(&smcp->smc_smcmr) & (SMCMR_REN | SMCMR_TEN); /* Output in *one* operation, so we don't interrupt RX/TX if they * were already enabled. */ out_be16(&smcp->smc_smcmr, smcr_mk_clen(bits) | cval | SMCMR_SM_UART | prev_mode); } else { out_be16(&pinfo->sccup->scc_genscc.scc_mrblr, pinfo->rx_fifosize); out_be16(&pinfo->sccup->scc_maxidl, maxidl); out_be16(&sccp->scc_psmr, (sbits << 12) | scval); } if (pinfo->clk) clk_set_rate(pinfo->clk, baud); else cpm_set_brg(pinfo->brg - 1, baud); spin_unlock_irqrestore(&port->lock, flags); } static const char *cpm_uart_type(struct uart_port *port) { pr_debug("CPM uart[%d]:uart_type\n", port->line); return port->type == PORT_CPM ? "CPM UART" : NULL; } /* * verify the new serial_struct (for TIOCSSERIAL).
*/ static int cpm_uart_verify_port(struct uart_port *port, struct serial_struct *ser) { int ret = 0; pr_debug("CPM uart[%d]:verify_port\n", port->line); if (ser->type != PORT_UNKNOWN && ser->type != PORT_CPM) ret = -EINVAL; if (ser->irq < 0 || ser->irq >= nr_irqs) ret = -EINVAL; if (ser->baud_base < 9600) ret = -EINVAL; return ret; } /* * Transmit characters, refill buffer descriptor, if possible */ static int cpm_uart_tx_pump(struct uart_port *port) { cbd_t __iomem *bdp; u8 *p; int count; struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; struct circ_buf *xmit = &port->state->xmit; /* Handle xon/xoff */ if (port->x_char) { /* Pick next descriptor and fill from buffer */ bdp = pinfo->tx_cur; p = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo); *p++ = port->x_char; out_be16(&bdp->cbd_datlen, 1); setbits16(&bdp->cbd_sc, BD_SC_READY); /* Get next BD. */ if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP) bdp = pinfo->tx_bd_base; else bdp++; pinfo->tx_cur = bdp; port->icount.tx++; port->x_char = 0; return 1; } if (uart_circ_empty(xmit) || uart_tx_stopped(port)) { cpm_uart_stop_tx(port); return 0; } /* Pick next descriptor and fill from buffer */ bdp = pinfo->tx_cur; while (!(in_be16(&bdp->cbd_sc) & BD_SC_READY) && xmit->tail != xmit->head) { count = 0; p = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo); while (count < pinfo->tx_fifosize) { *p++ = xmit->buf[xmit->tail]; xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); port->icount.tx++; count++; if (xmit->head == xmit->tail) break; } out_be16(&bdp->cbd_datlen, count); setbits16(&bdp->cbd_sc, BD_SC_READY); /* Get next BD. */ if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP) bdp = pinfo->tx_bd_base; else bdp++; } pinfo->tx_cur = bdp; if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(port); if (uart_circ_empty(xmit)) { cpm_uart_stop_tx(port); return 0; } return 1; } /* * init buffer descriptors */ static void cpm_uart_initbd(struct uart_cpm_port *pinfo) { int i; u8 *mem_addr; cbd_t __iomem *bdp; pr_debug("CPM uart[%d]:initbd\n", pinfo->port.line); /* Set the physical address of the host memory * buffers in the buffer descriptors, and the * virtual address for us to work with. */ mem_addr = pinfo->mem_addr; bdp = pinfo->rx_cur = pinfo->rx_bd_base; for (i = 0; i < (pinfo->rx_nrfifos - 1); i++, bdp++) { out_be32(&bdp->cbd_bufaddr, cpu2cpm_addr(mem_addr, pinfo)); out_be16(&bdp->cbd_sc, BD_SC_EMPTY | BD_SC_INTRPT); mem_addr += pinfo->rx_fifosize; } out_be32(&bdp->cbd_bufaddr, cpu2cpm_addr(mem_addr, pinfo)); out_be16(&bdp->cbd_sc, BD_SC_WRAP | BD_SC_EMPTY | BD_SC_INTRPT); /* Set the physical address of the host memory * buffers in the buffer descriptors, and the * virtual address for us to work with. 
*/ mem_addr = pinfo->mem_addr + L1_CACHE_ALIGN(pinfo->rx_nrfifos * pinfo->rx_fifosize); bdp = pinfo->tx_cur = pinfo->tx_bd_base; for (i = 0; i < (pinfo->tx_nrfifos - 1); i++, bdp++) { out_be32(&bdp->cbd_bufaddr, cpu2cpm_addr(mem_addr, pinfo)); out_be16(&bdp->cbd_sc, BD_SC_INTRPT); mem_addr += pinfo->tx_fifosize; } out_be32(&bdp->cbd_bufaddr, cpu2cpm_addr(mem_addr, pinfo)); out_be16(&bdp->cbd_sc, BD_SC_WRAP | BD_SC_INTRPT); } static void cpm_uart_init_scc(struct uart_cpm_port *pinfo) { scc_t __iomem *scp; scc_uart_t __iomem *sup; pr_debug("CPM uart[%d]:init_scc\n", pinfo->port.line); scp = pinfo->sccp; sup = pinfo->sccup; /* Store address */ out_be16(&pinfo->sccup->scc_genscc.scc_rbase, (u8 __iomem *)pinfo->rx_bd_base - DPRAM_BASE); out_be16(&pinfo->sccup->scc_genscc.scc_tbase, (u8 __iomem *)pinfo->tx_bd_base - DPRAM_BASE); /* Set up the uart parameters in the * parameter ram. */ cpm_set_scc_fcr(sup); out_be16(&sup->scc_genscc.scc_mrblr, pinfo->rx_fifosize); out_be16(&sup->scc_maxidl, 0x10); out_be16(&sup->scc_brkcr, 1); out_be16(&sup->scc_parec, 0); out_be16(&sup->scc_frmec, 0); out_be16(&sup->scc_nosec, 0); out_be16(&sup->scc_brkec, 0); out_be16(&sup->scc_uaddr1, 0); out_be16(&sup->scc_uaddr2, 0); out_be16(&sup->scc_toseq, 0); out_be16(&sup->scc_char1, 0x8000); out_be16(&sup->scc_char2, 0x8000); out_be16(&sup->scc_char3, 0x8000); out_be16(&sup->scc_char4, 0x8000); out_be16(&sup->scc_char5, 0x8000); out_be16(&sup->scc_char6, 0x8000); out_be16(&sup->scc_char7, 0x8000); out_be16(&sup->scc_char8, 0x8000); out_be16(&sup->scc_rccm, 0xc0ff); /* Send the CPM an initialize command. */ cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX); /* Set UART mode, 8 bit, no parity, one stop. * Enable receive and transmit. */ out_be32(&scp->scc_gsmrh, 0); out_be32(&scp->scc_gsmrl, SCC_GSMRL_MODE_UART | SCC_GSMRL_TDCR_16 | SCC_GSMRL_RDCR_16); /* Enable rx interrupts and clear all pending events. */ out_be16(&scp->scc_sccm, 0); out_be16(&scp->scc_scce, 0xffff); out_be16(&scp->scc_dsr, 0x7e7e); out_be16(&scp->scc_psmr, 0x3000); setbits32(&scp->scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); } static void cpm_uart_init_smc(struct uart_cpm_port *pinfo) { smc_t __iomem *sp; smc_uart_t __iomem *up; pr_debug("CPM uart[%d]:init_smc\n", pinfo->port.line); sp = pinfo->smcp; up = pinfo->smcup; /* Store address */ out_be16(&pinfo->smcup->smc_rbase, (u8 __iomem *)pinfo->rx_bd_base - DPRAM_BASE); out_be16(&pinfo->smcup->smc_tbase, (u8 __iomem *)pinfo->tx_bd_base - DPRAM_BASE); /* * In case SMC1 is being relocated... */ #if defined (CONFIG_I2C_SPI_SMC1_UCODE_PATCH) out_be16(&up->smc_rbptr, in_be16(&pinfo->smcup->smc_rbase)); out_be16(&up->smc_tbptr, in_be16(&pinfo->smcup->smc_tbase)); out_be32(&up->smc_rstate, 0); out_be32(&up->smc_tstate, 0); out_be16(&up->smc_brkcr, 1); /* number of break chars */ out_be16(&up->smc_brkec, 0); #endif /* Set up the uart parameters in the * parameter ram. */ cpm_set_smc_fcr(up); /* Using idle character time requires some additional tuning. */ out_be16(&up->smc_mrblr, pinfo->rx_fifosize); out_be16(&up->smc_maxidl, 0x10); out_be16(&up->smc_brklen, 0); out_be16(&up->smc_brkec, 0); out_be16(&up->smc_brkcr, 1); cpm_line_cr_cmd(pinfo, CPM_CR_INIT_TRX); /* Set UART mode, 8 bit, no parity, one stop. * Enable receive and transmit. */ out_be16(&sp->smc_smcmr, smcr_mk_clen(9) | SMCMR_SM_UART); /* Enable only rx interrupts clear all pending events. */ out_8(&sp->smc_smcm, 0); out_8(&sp->smc_smce, 0xff); setbits16(&sp->smc_smcmr, SMCMR_REN | SMCMR_TEN); } /* * Initialize port. 
This is called from early_console stuff * so we have to be careful here ! */ static int cpm_uart_request_port(struct uart_port *port) { struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; int ret; pr_debug("CPM uart[%d]:request port\n", port->line); if (pinfo->flags & FLAG_CONSOLE) return 0; if (IS_SMC(pinfo)) { clrbits8(&pinfo->smcp->smc_smcm, SMCM_RX | SMCM_TX); clrbits16(&pinfo->smcp->smc_smcmr, SMCMR_REN | SMCMR_TEN); } else { clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_TX | UART_SCCM_RX); clrbits32(&pinfo->sccp->scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); } ret = cpm_uart_allocbuf(pinfo, 0); if (ret) return ret; cpm_uart_initbd(pinfo); if (IS_SMC(pinfo)) cpm_uart_init_smc(pinfo); else cpm_uart_init_scc(pinfo); return 0; } static void cpm_uart_release_port(struct uart_port *port) { struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; if (!(pinfo->flags & FLAG_CONSOLE)) cpm_uart_freebuf(pinfo); } /* * Configure/autoconfigure the port. */ static void cpm_uart_config_port(struct uart_port *port, int flags) { pr_debug("CPM uart[%d]:config_port\n", port->line); if (flags & UART_CONFIG_TYPE) { port->type = PORT_CPM; cpm_uart_request_port(port); } } #if defined(CONFIG_CONSOLE_POLL) || defined(CONFIG_SERIAL_CPM_CONSOLE) /* * Write a string to the serial port * Note that this is called with interrupts already disabled */ static void cpm_uart_early_write(struct uart_cpm_port *pinfo, const char *string, u_int count) { unsigned int i; cbd_t __iomem *bdp, *bdbase; unsigned char *cpm_outp_addr; /* Get the address of the host memory buffer. */ bdp = pinfo->tx_cur; bdbase = pinfo->tx_bd_base; /* * Now, do each character. This is not as bad as it looks * since this is a holding FIFO and not a transmitting FIFO. * We could add the complexity of filling the entire transmit * buffer, but we would just wait longer between accesses...... */ for (i = 0; i < count; i++, string++) { /* Wait for transmitter fifo to empty. * Ready indicates output is ready, and xmt is doing * that, not that it is ready for us to send. */ while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0) ; /* Send the character out. * If the buffer address is in the CPM DPRAM, don't * convert it. */ cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo); *cpm_outp_addr = *string; out_be16(&bdp->cbd_datlen, 1); setbits16(&bdp->cbd_sc, BD_SC_READY); if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP) bdp = bdbase; else bdp++; /* if a LF, also do CR... */ if (*string == 10) { while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0) ; cpm_outp_addr = cpm2cpu_addr(in_be32(&bdp->cbd_bufaddr), pinfo); *cpm_outp_addr = 13; out_be16(&bdp->cbd_datlen, 1); setbits16(&bdp->cbd_sc, BD_SC_READY); if (in_be16(&bdp->cbd_sc) & BD_SC_WRAP) bdp = bdbase; else bdp++; } } /* * Finally, Wait for transmitter & holding register to empty * and restore the IER */ while ((in_be16(&bdp->cbd_sc) & BD_SC_READY) != 0) ; pinfo->tx_cur = bdp; } #endif #ifdef CONFIG_CONSOLE_POLL /* Serial polling routines for writing and reading from the uart while * in an interrupt or debug context. */ #define GDB_BUF_SIZE 512 /* power of 2, please */ static char poll_buf[GDB_BUF_SIZE]; static char *pollp; static int poll_chars; static int poll_wait_key(char *obuf, struct uart_cpm_port *pinfo) { u_char c, *cp; volatile cbd_t *bdp; int i; /* Get the address of the host memory buffer. */ bdp = pinfo->rx_cur; while (bdp->cbd_sc & BD_SC_EMPTY) ; /* If the buffer address is in the CPM DPRAM, don't * convert it. 
*/ cp = cpm2cpu_addr(bdp->cbd_bufaddr, pinfo); if (obuf) { i = c = bdp->cbd_datlen; while (i-- > 0) *obuf++ = *cp++; } else c = *cp; bdp->cbd_sc &= ~(BD_SC_BR | BD_SC_FR | BD_SC_PR | BD_SC_OV | BD_SC_ID); bdp->cbd_sc |= BD_SC_EMPTY; if (bdp->cbd_sc & BD_SC_WRAP) bdp = pinfo->rx_bd_base; else bdp++; pinfo->rx_cur = (cbd_t *)bdp; return (int)c; } static int cpm_get_poll_char(struct uart_port *port) { struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; if (!serial_polled) { serial_polled = 1; poll_chars = 0; } if (poll_chars <= 0) { poll_chars = poll_wait_key(poll_buf, pinfo); pollp = poll_buf; } poll_chars--; return *pollp++; } static void cpm_put_poll_char(struct uart_port *port, unsigned char c) { struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; static char ch[2]; ch[0] = (char)c; cpm_uart_early_write(pinfo, ch, 1); } #endif /* CONFIG_CONSOLE_POLL */ static struct uart_ops cpm_uart_pops = { .tx_empty = cpm_uart_tx_empty, .set_mctrl = cpm_uart_set_mctrl, .get_mctrl = cpm_uart_get_mctrl, .stop_tx = cpm_uart_stop_tx, .start_tx = cpm_uart_start_tx, .stop_rx = cpm_uart_stop_rx, .enable_ms = cpm_uart_enable_ms, .break_ctl = cpm_uart_break_ctl, .startup = cpm_uart_startup, .shutdown = cpm_uart_shutdown, .set_termios = cpm_uart_set_termios, .type = cpm_uart_type, .release_port = cpm_uart_release_port, .request_port = cpm_uart_request_port, .config_port = cpm_uart_config_port, .verify_port = cpm_uart_verify_port, #ifdef CONFIG_CONSOLE_POLL .poll_get_char = cpm_get_poll_char, .poll_put_char = cpm_put_poll_char, #endif }; struct uart_cpm_port cpm_uart_ports[UART_NR]; static int cpm_uart_init_port(struct device_node *np, struct uart_cpm_port *pinfo) { const u32 *data; void __iomem *mem, *pram; int len; int ret; int i; data = of_get_property(np, "clock", NULL); if (data) { struct clk *clk = clk_get(NULL, (const char*)data); if (!IS_ERR(clk)) pinfo->clk = clk; } if (!pinfo->clk) { data = of_get_property(np, "fsl,cpm-brg", &len); if (!data || len != 4) { printk(KERN_ERR "CPM UART %s has no/invalid " "fsl,cpm-brg property.\n", np->name); return -EINVAL; } pinfo->brg = *data; } data = of_get_property(np, "fsl,cpm-command", &len); if (!data || len != 4) { printk(KERN_ERR "CPM UART %s has no/invalid " "fsl,cpm-command property.\n", np->name); return -EINVAL; } pinfo->command = *data; mem = of_iomap(np, 0); if (!mem) return -ENOMEM; if (of_device_is_compatible(np, "fsl,cpm1-scc-uart") || of_device_is_compatible(np, "fsl,cpm2-scc-uart")) { pinfo->sccp = mem; pinfo->sccup = pram = cpm_uart_map_pram(pinfo, np); } else if (of_device_is_compatible(np, "fsl,cpm1-smc-uart") || of_device_is_compatible(np, "fsl,cpm2-smc-uart")) { pinfo->flags |= FLAG_SMC; pinfo->smcp = mem; pinfo->smcup = pram = cpm_uart_map_pram(pinfo, np); } else { ret = -ENODEV; goto out_mem; } if (!pram) { ret = -ENOMEM; goto out_mem; } pinfo->tx_nrfifos = TX_NUM_FIFO; pinfo->tx_fifosize = TX_BUF_SIZE; pinfo->rx_nrfifos = RX_NUM_FIFO; pinfo->rx_fifosize = RX_BUF_SIZE; pinfo->port.uartclk = ppc_proc_freq; pinfo->port.mapbase = (unsigned long)mem; pinfo->port.type = PORT_CPM; pinfo->port.ops = &cpm_uart_pops, pinfo->port.iotype = UPIO_MEM; pinfo->port.fifosize = pinfo->tx_nrfifos * pinfo->tx_fifosize; spin_lock_init(&pinfo->port.lock); pinfo->port.irq = of_irq_to_resource(np, 0, NULL); if (pinfo->port.irq == NO_IRQ) { ret = -EINVAL; goto out_pram; } for (i = 0; i < NUM_GPIOS; i++) pinfo->gpios[i] = of_get_gpio(np, i); #ifdef CONFIG_PPC_EARLY_DEBUG_CPM udbg_putc = NULL; #endif return cpm_uart_request_port(&pinfo->port); out_pram: 
cpm_uart_unmap_pram(pinfo, pram); out_mem: iounmap(mem); return ret; } #ifdef CONFIG_SERIAL_CPM_CONSOLE /* * Print a string to the serial port trying not to disturb * any possible real use of the port... * * Note that this is called with interrupts already disabled */ static void cpm_uart_console_write(struct console *co, const char *s, u_int count) { struct uart_cpm_port *pinfo = &cpm_uart_ports[co->index]; unsigned long flags; int nolock = oops_in_progress; if (unlikely(nolock)) { local_irq_save(flags); } else { spin_lock_irqsave(&pinfo->port.lock, flags); } cpm_uart_early_write(pinfo, s, count); if (unlikely(nolock)) { local_irq_restore(flags); } else { spin_unlock_irqrestore(&pinfo->port.lock, flags); } } static int __init cpm_uart_console_setup(struct console *co, char *options) { int baud = 38400; int bits = 8; int parity = 'n'; int flow = 'n'; int ret; struct uart_cpm_port *pinfo; struct uart_port *port; struct device_node *np = NULL; int i = 0; if (co->index >= UART_NR) { printk(KERN_ERR "cpm_uart: console index %d too high\n", co->index); return -ENODEV; } do { np = of_find_node_by_type(np, "serial"); if (!np) return -ENODEV; if (!of_device_is_compatible(np, "fsl,cpm1-smc-uart") && !of_device_is_compatible(np, "fsl,cpm1-scc-uart") && !of_device_is_compatible(np, "fsl,cpm2-smc-uart") && !of_device_is_compatible(np, "fsl,cpm2-scc-uart")) i--; } while (i++ != co->index); pinfo = &cpm_uart_ports[co->index]; pinfo->flags |= FLAG_CONSOLE; port = &pinfo->port; ret = cpm_uart_init_port(np, pinfo); of_node_put(np); if (ret) return ret; if (options) { uart_parse_options(options, &baud, &parity, &bits, &flow); } else { if ((baud = uart_baudrate()) == -1) baud = 9600; } if (IS_SMC(pinfo)) { out_be16(&pinfo->smcup->smc_brkcr, 0); cpm_line_cr_cmd(pinfo, CPM_CR_STOP_TX); clrbits8(&pinfo->smcp->smc_smcm, SMCM_RX | SMCM_TX); clrbits16(&pinfo->smcp->smc_smcmr, SMCMR_REN | SMCMR_TEN); } else { out_be16(&pinfo->sccup->scc_brkcr, 0); cpm_line_cr_cmd(pinfo, CPM_CR_GRA_STOP_TX); clrbits16(&pinfo->sccp->scc_sccm, UART_SCCM_TX | UART_SCCM_RX); clrbits32(&pinfo->sccp->scc_gsmrl, SCC_GSMRL_ENR | SCC_GSMRL_ENT); } ret = cpm_uart_allocbuf(pinfo, 1); if (ret) return ret; cpm_uart_initbd(pinfo); if (IS_SMC(pinfo)) cpm_uart_init_smc(pinfo); else cpm_uart_init_scc(pinfo); uart_set_options(port, co, baud, parity, bits, flow); cpm_line_cr_cmd(pinfo, CPM_CR_RESTART_TX); return 0; } static struct uart_driver cpm_reg; static struct console cpm_scc_uart_console = { .name = "ttyCPM", .write = cpm_uart_console_write, .device = uart_console_device, .setup = cpm_uart_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &cpm_reg, }; static int __init cpm_uart_console_init(void) { register_console(&cpm_scc_uart_console); return 0; } console_initcall(cpm_uart_console_init); #define CPM_UART_CONSOLE &cpm_scc_uart_console #else #define CPM_UART_CONSOLE NULL #endif static struct uart_driver cpm_reg = { .owner = THIS_MODULE, .driver_name = "ttyCPM", .dev_name = "ttyCPM", .major = SERIAL_CPM_MAJOR, .minor = SERIAL_CPM_MINOR, .cons = CPM_UART_CONSOLE, .nr = UART_NR, }; static int probe_index; static int cpm_uart_probe(struct platform_device *ofdev) { int index = probe_index++; struct uart_cpm_port *pinfo = &cpm_uart_ports[index]; int ret; pinfo->port.line = index; if (index >= UART_NR) return -ENODEV; dev_set_drvdata(&ofdev->dev, pinfo); /* initialize the device pointer for the port */ pinfo->port.dev = &ofdev->dev; ret = cpm_uart_init_port(ofdev->dev.of_node, pinfo); if (ret) return ret; return uart_add_one_port(&cpm_reg, 
&pinfo->port); } static int cpm_uart_remove(struct platform_device *ofdev) { struct uart_cpm_port *pinfo = dev_get_drvdata(&ofdev->dev); return uart_remove_one_port(&cpm_reg, &pinfo->port); } static struct of_device_id cpm_uart_match[] = { { .compatible = "fsl,cpm1-smc-uart", }, { .compatible = "fsl,cpm1-scc-uart", }, { .compatible = "fsl,cpm2-smc-uart", }, { .compatible = "fsl,cpm2-scc-uart", }, {} }; static struct platform_driver cpm_uart_driver = { .driver = { .name = "cpm_uart", .owner = THIS_MODULE, .of_match_table = cpm_uart_match, }, .probe = cpm_uart_probe, .remove = cpm_uart_remove, }; static int __init cpm_uart_init(void) { int ret = uart_register_driver(&cpm_reg); if (ret) return ret; ret = platform_driver_register(&cpm_uart_driver); if (ret) uart_unregister_driver(&cpm_reg); return ret; } static void __exit cpm_uart_exit(void) { platform_driver_unregister(&cpm_uart_driver); uart_unregister_driver(&cpm_reg); } module_init(cpm_uart_init); module_exit(cpm_uart_exit); MODULE_AUTHOR("Kumar Gala/Antoniou Pantelis"); MODULE_DESCRIPTION("CPM SCC/SMC port driver $Revision: 0.01 $"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV(SERIAL_CPM_MAJOR, SERIAL_CPM_MINOR);
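One detail worth calling out in cpm_uart_console_write() above: when oops_in_progress is set, the port lock may already be held by the very context that crashed, so the console path must not block on it while the kernel prints its dying words. This driver simply skips the lock in that case; a closely related idiom seen in other serial drivers uses spin_trylock_irqsave() instead. A minimal sketch of that variant (my_port and my_hw_putchars() are hypothetical stand-ins, not symbols from this driver):

static struct { spinlock_t lock; } my_port;

static void my_console_write(struct console *co, const char *s, unsigned int count)
{
	unsigned long flags;
	int locked = 1;

	if (oops_in_progress)
		/* crashed context may hold the lock: print anyway, best effort */
		locked = spin_trylock_irqsave(&my_port.lock, flags);
	else
		spin_lock_irqsave(&my_port.lock, flags);

	my_hw_putchars(s, count);	/* hypothetical raw FIFO write */

	if (locked)
		spin_unlock_irqrestore(&my_port.lock, flags);
}

Either way the characters still reach the wire; the only thing given up during an oops is mutual exclusion with a lock holder that will never run again.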
gpl-2.0
xobs/adafruit-rpi-kernel
arch/arm/mach-s3c24xx/iotiming-s3c2412.c
2104
7719
/* * Copyright (c) 2006-2008 Simtec Electronics * http://armlinux.simtec.co.uk/ * Ben Dooks <ben@simtec.co.uk> * * S3C2412/S3C2443 (PL093 based) IO timing support * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/init.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/cpufreq.h> #include <linux/seq_file.h> #include <linux/device.h> #include <linux/delay.h> #include <linux/clk.h> #include <linux/err.h> #include <linux/slab.h> #include <linux/amba/pl093.h> #include <asm/mach/arch.h> #include <asm/mach/map.h> #include <plat/cpu.h> #include <plat/cpu-freq-core.h> #include <plat/clock.h> #include "s3c2412.h" #define print_ns(x) ((x) / 10), ((x) % 10) /** * s3c2412_print_timing - print timing information via printk. * @pfx: The prefix to print each line with. * @iot: The IO timing information */ static void s3c2412_print_timing(const char *pfx, struct s3c_iotimings *iot) { struct s3c2412_iobank_timing *bt; unsigned int bank; for (bank = 0; bank < MAX_BANKS; bank++) { bt = iot->bank[bank].io_2412; if (!bt) continue; printk(KERN_DEBUG "%s: %d: idcy=%d.%d wstrd=%d.%d wstwr=%d.%d " "wstoen=%d.%d wstwen=%d.%d wstbrd=%d.%d\n", pfx, bank, print_ns(bt->idcy), print_ns(bt->wstrd), print_ns(bt->wstwr), print_ns(bt->wstoen), print_ns(bt->wstwen), print_ns(bt->wstbrd)); } } /** * to_div - turn a cycle length into a divisor setting. * @cyc_tns: The cycle time in 10ths of nanoseconds. * @clk_tns: The clock period in 10ths of nanoseconds. */ static inline unsigned int to_div(unsigned int cyc_tns, unsigned int clk_tns) { return cyc_tns ? DIV_ROUND_UP(cyc_tns, clk_tns) : 0; } /** * calc_timing - calculate timing divisor value and check in range. * @hwtm: The hardware timing in 10ths of nanoseconds. * @clk_tns: The clock period in 10ths of nanoseconds. * @err: Pointer to err variable to update in event of failure. */ static unsigned int calc_timing(unsigned int hwtm, unsigned int clk_tns, unsigned int *err) { unsigned int ret = to_div(hwtm, clk_tns); if (ret > 0xf) *err = -EINVAL; return ret; } /** * s3c2412_calc_bank - calculate the bank divisor settings. * @cfg: The current frequency configuration. * @bt: The bank timing. */ static int s3c2412_calc_bank(struct s3c_cpufreq_config *cfg, struct s3c2412_iobank_timing *bt) { unsigned int hclk = cfg->freq.hclk_tns; int err = 0; bt->smbidcyr = calc_timing(bt->idcy, hclk, &err); bt->smbwstrd = calc_timing(bt->wstrd, hclk, &err); bt->smbwstwr = calc_timing(bt->wstwr, hclk, &err); bt->smbwstoen = calc_timing(bt->wstoen, hclk, &err); bt->smbwstwen = calc_timing(bt->wstwen, hclk, &err); bt->smbwstbrd = calc_timing(bt->wstbrd, hclk, &err); return err; } /** * s3c2412_iotiming_debugfs - debugfs show io bank timing information * @seq: The seq_file to write output to using seq_printf(). * @cfg: The current configuration. * @iob: The IO bank information to decode. */ void s3c2412_iotiming_debugfs(struct seq_file *seq, struct s3c_cpufreq_config *cfg, union s3c_iobank *iob) { struct s3c2412_iobank_timing *bt = iob->io_2412; seq_printf(seq, "\tRead: idcy=%d.%d wstrd=%d.%d wstwr=%d.%d " "wstoen=%d.%d wstwen=%d.%d wstbrd=%d.%d\n", print_ns(bt->idcy), print_ns(bt->wstrd), print_ns(bt->wstwr), print_ns(bt->wstoen), print_ns(bt->wstwen), print_ns(bt->wstbrd)); } /** * s3c2412_iotiming_calc - calculate all the bank divisor settings. * @cfg: The current frequency configuration.
* @iot: The bank timing information. * * Calculate the timing information for all the banks that are * configured as IO, using s3c2412_calc_bank(). */ int s3c2412_iotiming_calc(struct s3c_cpufreq_config *cfg, struct s3c_iotimings *iot) { struct s3c2412_iobank_timing *bt; int bank; int ret; for (bank = 0; bank < MAX_BANKS; bank++) { bt = iot->bank[bank].io_2412; if (!bt) continue; ret = s3c2412_calc_bank(cfg, bt); if (ret) { printk(KERN_ERR "%s: cannot calculate bank %d io\n", __func__, bank); goto err; } } return 0; err: return ret; } /** * s3c2412_iotiming_set - set the timing information * @cfg: The current frequency configuration. * @iot: The bank timing information. * * Set the IO bank information from the details calculated earlier from * calling s3c2412_iotiming_calc(). */ void s3c2412_iotiming_set(struct s3c_cpufreq_config *cfg, struct s3c_iotimings *iot) { struct s3c2412_iobank_timing *bt; void __iomem *regs; int bank; /* set the io timings from the specifier */ for (bank = 0; bank < MAX_BANKS; bank++) { bt = iot->bank[bank].io_2412; if (!bt) continue; regs = S3C2412_SSMC_BANK(bank); __raw_writel(bt->smbidcyr, regs + SMBIDCYR); __raw_writel(bt->smbwstrd, regs + SMBWSTRDR); __raw_writel(bt->smbwstwr, regs + SMBWSTWRR); __raw_writel(bt->smbwstoen, regs + SMBWSTOENR); __raw_writel(bt->smbwstwen, regs + SMBWSTWENR); __raw_writel(bt->smbwstbrd, regs + SMBWSTBRDR); } } static inline unsigned int s3c2412_decode_timing(unsigned int clock, u32 reg) { return (reg & 0xf) * clock; } static void s3c2412_iotiming_getbank(struct s3c_cpufreq_config *cfg, struct s3c2412_iobank_timing *bt, unsigned int bank) { unsigned long clk = cfg->freq.hclk_tns; /* ssmc clock??? */ void __iomem *regs = S3C2412_SSMC_BANK(bank); bt->idcy = s3c2412_decode_timing(clk, __raw_readl(regs + SMBIDCYR)); bt->wstrd = s3c2412_decode_timing(clk, __raw_readl(regs + SMBWSTRDR)); bt->wstwr = s3c2412_decode_timing(clk, __raw_readl(regs + SMBWSTWRR)); bt->wstoen = s3c2412_decode_timing(clk, __raw_readl(regs + SMBWSTOENR)); bt->wstwen = s3c2412_decode_timing(clk, __raw_readl(regs + SMBWSTWENR)); bt->wstbrd = s3c2412_decode_timing(clk, __raw_readl(regs + SMBWSTBRDR)); } /** * bank_is_io - return true if bank is (possibly) IO. * @bank: The bank number. * @bankcfg: The value of S3C2412_EBI_BANKCFG. */ static inline bool bank_is_io(unsigned int bank, u32 bankcfg) { if (bank < 2) return true; return !(bankcfg & (1 << bank)); } int s3c2412_iotiming_get(struct s3c_cpufreq_config *cfg, struct s3c_iotimings *timings) { struct s3c2412_iobank_timing *bt; u32 bankcfg = __raw_readl(S3C2412_EBI_BANKCFG); unsigned int bank; /* look through all banks to see what is currently set. */ for (bank = 0; bank < MAX_BANKS; bank++) { if (!bank_is_io(bank, bankcfg)) continue; bt = kzalloc(sizeof(struct s3c2412_iobank_timing), GFP_KERNEL); if (!bt) { printk(KERN_ERR "%s: no memory for bank\n", __func__); return -ENOMEM; } timings->bank[bank].io_2412 = bt; s3c2412_iotiming_getbank(cfg, bt, bank); } s3c2412_print_timing("get", timings); return 0; } /* this is in here as it is so small, it doesn't currently warrant a file * to itself. We expect that any s3c24xx needing this is going to also * need the iotiming support. */ void s3c2412_cpufreq_setrefresh(struct s3c_cpufreq_config *cfg) { struct s3c_cpufreq_board *board = cfg->board; u32 refresh; WARN_ON(board == NULL); /* Reduce both the refresh time (in ns) and the frequency (in MHz) * down to ensure that we do not overflow 32 bit numbers. * * This should work for HCLK up to 133MHz and refresh period up * to 30usec.
*/ refresh = (cfg->freq.hclk / 100) * (board->refresh / 10); refresh = DIV_ROUND_UP(refresh, (1000 * 1000)); /* apply scale */ refresh &= ((1 << 16) - 1); s3c_freq_dbg("%s: refresh value %u\n", __func__, (unsigned int)refresh); __raw_writel(refresh, S3C2412_REFRESH); }
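The refresh value computed above is just the refresh period expressed in HCLK cycles; the divisions by 100 and 10 exist only to keep the 32-bit intermediate product from overflowing. A standalone arithmetic check of that scaling (a sketch; the 133 MHz HCLK and 7800 ns period are illustrative values, not taken from any board file):

#include <stdio.h>
#include <stdint.h>

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

int main(void)
{
	uint32_t hclk = 133000000;   /* HCLK in Hz (assumed) */
	uint32_t period = 7800;      /* refresh period in ns (assumed) */

	/* unscaled, 133000000 * 7800 ~= 1.04e12 would overflow a u32 */
	uint32_t refresh = (hclk / 100) * (period / 10); /* ~1.04e9, fits */

	refresh = DIV_ROUND_UP(refresh, 1000 * 1000); /* -> HCLK cycles */
	refresh &= (1 << 16) - 1;                     /* 16-bit register field */

	printf("refresh = %u cycles\n", refresh);     /* prints 1038 */
	return 0;
}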
gpl-2.0
NoelMacwan/SXDNanhu
drivers/atm/iphase.c
2360
110587
/****************************************************************************** iphase.c: Device driver for Interphase ATM PCI adapter cards Author: Peter Wang <pwang@iphase.com> Some fixes: Arnaldo Carvalho de Melo <acme@conectiva.com.br> Interphase Corporation <www.iphase.com> Version: 1.0 ******************************************************************************* This software may be used and distributed according to the terms of the GNU General Public License (GPL), incorporated herein by reference. Drivers based on this skeleton fall under the GPL and must retain the authorship (implicit copyright) notice. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. Modified from an incomplete driver for Interphase 5575 1KVC 1M card which was originally written by Monalisa Agrawal at UNH. Now this driver supports a variety of variants of Interphase ATM PCI (i)Chip adapter card family (See www.iphase.com/products/ClassSheet.cfm?ClassID=ATM) in terms of PHY type, the size of control memory and the size of packet memory. The following are the change log and history: Bugfix Mona's UBR driver. Modify the basic memory allocation and dma logic. Port the driver to the latest kernel from 2.0.46. Complete the ABR logic of the driver, and added the ABR work- around for the hardware anomalies. Add the CBR support. Add the flow control logic to the driver to allow rate-limited VCs. Add 4K VC support to the board with 512K control memory. Add the support of all the variants of the Interphase ATM PCI (i)Chip adapter cards including x575 (155M OC3 and UTP155), x525 (25M UTP25) and x531 (DS3 and E3). Add SMP support.
Support and updates available at: ftp://ftp.iphase.com/pub/atm *******************************************************************************/ #include <linux/module.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/pci.h> #include <linux/errno.h> #include <linux/atm.h> #include <linux/atmdev.h> #include <linux/sonet.h> #include <linux/skbuff.h> #include <linux/time.h> #include <linux/delay.h> #include <linux/uio.h> #include <linux/init.h> #include <linux/wait.h> #include <linux/slab.h> #include <asm/system.h> #include <asm/io.h> #include <asm/atomic.h> #include <asm/uaccess.h> #include <asm/string.h> #include <asm/byteorder.h> #include <linux/vmalloc.h> #include <linux/jiffies.h> #include "iphase.h" #include "suni.h" #define swap_byte_order(x) (((x & 0xff) << 8) | ((x & 0xff00) >> 8)) #define PRIV(dev) ((struct suni_priv *) dev->phy_data) static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr); static void desc_dbg(IADEV *iadev); static IADEV *ia_dev[8]; static struct atm_dev *_ia_dev[8]; static int iadev_count; static void ia_led_timer(unsigned long arg); static DEFINE_TIMER(ia_timer, ia_led_timer, 0, 0); static int IA_TX_BUF = DFL_TX_BUFFERS, IA_TX_BUF_SZ = DFL_TX_BUF_SZ; static int IA_RX_BUF = DFL_RX_BUFFERS, IA_RX_BUF_SZ = DFL_RX_BUF_SZ; static uint IADebugFlag = /* IF_IADBG_ERR | IF_IADBG_CBR| IF_IADBG_INIT_ADAPTER |IF_IADBG_ABR | IF_IADBG_EVENT*/ 0; module_param(IA_TX_BUF, int, 0); module_param(IA_TX_BUF_SZ, int, 0); module_param(IA_RX_BUF, int, 0); module_param(IA_RX_BUF_SZ, int, 0); module_param(IADebugFlag, uint, 0644); MODULE_LICENSE("GPL"); /**************************** IA_LIB **********************************/ static void ia_init_rtn_q (IARTN_Q *que) { que->next = NULL; que->tail = NULL; } static void ia_enque_head_rtn_q (IARTN_Q *que, IARTN_Q * data) { data->next = NULL; if (que->next == NULL) que->next = que->tail = data; else { data->next = que->next; que->next = data; } return; } static int ia_enque_rtn_q (IARTN_Q *que, struct desc_tbl_t data) { IARTN_Q *entry = kmalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) return -1; entry->data = data; entry->next = NULL; if (que->next == NULL) que->next = que->tail = entry; else { que->tail->next = entry; que->tail = que->tail->next; } return 1; } static IARTN_Q * ia_deque_rtn_q (IARTN_Q *que) { IARTN_Q *tmpdata; if (que->next == NULL) return NULL; tmpdata = que->next; if ( que->next == que->tail) que->next = que->tail = NULL; else que->next = que->next->next; return tmpdata; } static void ia_hack_tcq(IADEV *dev) { u_short desc1; u_short tcq_wr; struct ia_vcc *iavcc_r = NULL; tcq_wr = readl(dev->seg_reg+TCQ_WR_PTR) & 0xffff; while (dev->host_tcq_wr != tcq_wr) { desc1 = *(u_short *)(dev->seg_ram + dev->host_tcq_wr); if (!desc1) ; else if (!dev->desc_tbl[desc1 -1].timestamp) { IF_ABR(printk(" Desc %d is reset at %ld\n", desc1 -1, jiffies);) *(u_short *) (dev->seg_ram + dev->host_tcq_wr) = 0; } else if (dev->desc_tbl[desc1 -1].timestamp) { if (!(iavcc_r = dev->desc_tbl[desc1 -1].iavcc)) { printk("IA: Fatal err in get_desc\n"); continue; } iavcc_r->vc_desc_cnt--; dev->desc_tbl[desc1 -1].timestamp = 0; IF_EVENT(printk("ia_hack: return_q skb = 0x%p desc = %d\n", dev->desc_tbl[desc1 -1].txskb, desc1);) if (iavcc_r->pcr < dev->rate_limit) { IA_SKB_STATE (dev->desc_tbl[desc1-1].txskb) |= IA_TX_DONE; if (ia_enque_rtn_q(&dev->tx_return_q, dev->desc_tbl[desc1 -1]) < 0) printk("ia_hack_tcq: No memory available\n"); } dev->desc_tbl[desc1 -1].iavcc = NULL; dev->desc_tbl[desc1 -1].txskb = NULL; } dev->host_tcq_wr 
+= 2; if (dev->host_tcq_wr > dev->ffL.tcq_ed) dev->host_tcq_wr = dev->ffL.tcq_st; } } /* ia_hack_tcq */ static u16 get_desc (IADEV *dev, struct ia_vcc *iavcc) { u_short desc_num, i; struct sk_buff *skb; struct ia_vcc *iavcc_r = NULL; unsigned long delta; static unsigned long timer = 0; int ltimeout; ia_hack_tcq (dev); if((time_after(jiffies,timer+50)) || ((dev->ffL.tcq_rd==dev->host_tcq_wr))) { timer = jiffies; i=0; while (i < dev->num_tx_desc) { if (!dev->desc_tbl[i].timestamp) { i++; continue; } ltimeout = dev->desc_tbl[i].iavcc->ltimeout; delta = jiffies - dev->desc_tbl[i].timestamp; if (delta >= ltimeout) { IF_ABR(printk("RECOVER run!! desc_tbl %d = %d delta = %ld, time = %ld\n", i,dev->desc_tbl[i].timestamp, delta, jiffies);) if (dev->ffL.tcq_rd == dev->ffL.tcq_st) dev->ffL.tcq_rd = dev->ffL.tcq_ed; else dev->ffL.tcq_rd -= 2; *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd) = i+1; if (!(skb = dev->desc_tbl[i].txskb) || !(iavcc_r = dev->desc_tbl[i].iavcc)) printk("Fatal err, desc table vcc or skb is NULL\n"); else iavcc_r->vc_desc_cnt--; dev->desc_tbl[i].timestamp = 0; dev->desc_tbl[i].iavcc = NULL; dev->desc_tbl[i].txskb = NULL; } i++; } /* while */ } if (dev->ffL.tcq_rd == dev->host_tcq_wr) return 0xFFFF; /* Get the next available descriptor number from TCQ */ desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd); while (!desc_num || (dev->desc_tbl[desc_num -1]).timestamp) { dev->ffL.tcq_rd += 2; if (dev->ffL.tcq_rd > dev->ffL.tcq_ed) dev->ffL.tcq_rd = dev->ffL.tcq_st; if (dev->ffL.tcq_rd == dev->host_tcq_wr) return 0xFFFF; desc_num = *(u_short *)(dev->seg_ram + dev->ffL.tcq_rd); } /* get system time */ dev->desc_tbl[desc_num -1].timestamp = jiffies; return desc_num; } static void clear_lockup (struct atm_vcc *vcc, IADEV *dev) { u_char foundLockUp; vcstatus_t *vcstatus; u_short *shd_tbl; u_short tempCellSlot, tempFract; struct main_vc *abr_vc = (struct main_vc *)dev->MAIN_VC_TABLE_ADDR; struct ext_vc *eabr_vc = (struct ext_vc *)dev->EXT_VC_TABLE_ADDR; u_int i; if (vcc->qos.txtp.traffic_class == ATM_ABR) { vcstatus = (vcstatus_t *) &(dev->testTable[vcc->vci]->vc_status); vcstatus->cnt++; foundLockUp = 0; if( vcstatus->cnt == 0x05 ) { abr_vc += vcc->vci; eabr_vc += vcc->vci; if( eabr_vc->last_desc ) { if( (abr_vc->status & 0x07) == ABR_STATE /* 0x2 */ ) { /* Wait for 10 Micro sec */ udelay(10); if ((eabr_vc->last_desc)&&((abr_vc->status & 0x07)==ABR_STATE)) foundLockUp = 1; } else { tempCellSlot = abr_vc->last_cell_slot; tempFract = abr_vc->fraction; if((tempCellSlot == dev->testTable[vcc->vci]->lastTime) && (tempFract == dev->testTable[vcc->vci]->fract)) foundLockUp = 1; dev->testTable[vcc->vci]->lastTime = tempCellSlot; dev->testTable[vcc->vci]->fract = tempFract; } } /* last descriptor */ vcstatus->cnt = 0; } /* vcstatus->cnt */ if (foundLockUp) { IF_ABR(printk("LOCK UP found\n");) writew(0xFFFD, dev->seg_reg+MODE_REG_0); /* Wait for 10 Micro sec */ udelay(10); abr_vc->status &= 0xFFF8; abr_vc->status |= 0x0001; /* state is idle */ shd_tbl = (u_short *)dev->ABR_SCHED_TABLE_ADDR; for( i = 0; ((i < dev->num_vc) && (shd_tbl[i])); i++ ); if (i < dev->num_vc) shd_tbl[i] = vcc->vci; else IF_ERR(printk("ABR Seg. may not continue on VC %x\n",vcc->vci);) writew(T_ONLINE, dev->seg_reg+MODE_REG_0); writew(~(TRANSMIT_DONE|TCQ_NOT_EMPTY), dev->seg_reg+SEG_MASK_REG); writew(TRANSMIT_DONE, dev->seg_reg+SEG_INTR_STATUS_REG); vcstatus->cnt = 0; } /* foundLockUp */ } /* if an ABR VC */ } /* ** Conversion of 24-bit cellrate (cells/sec) to 16-bit floating point format. 
** ** +----+----+------------------+-------------------------------+ ** | R | NZ | 5-bit exponent | 9-bit mantissa | ** +----+----+------------------+-------------------------------+ ** ** R = reserved (written as 0) ** NZ = 0 if 0 cells/sec; 1 otherwise ** ** if NZ = 1, rate = 1.mmmmmmmmm x 2^(eeeee) cells/sec */ static u16 cellrate_to_float(u32 cr) { #define NZ 0x4000 #define M_BITS 9 /* Number of bits in mantissa */ #define E_BITS 5 /* Number of bits in exponent */ #define M_MASK 0x1ff #define E_MASK 0x1f u16 flot; u32 tmp = cr & 0x00ffffff; int i = 0; if (cr == 0) return 0; while (tmp != 1) { tmp >>= 1; i++; } if (i == M_BITS) flot = NZ | (i << M_BITS) | (cr & M_MASK); else if (i < M_BITS) flot = NZ | (i << M_BITS) | ((cr << (M_BITS - i)) & M_MASK); else flot = NZ | (i << M_BITS) | ((cr >> (i - M_BITS)) & M_MASK); return flot; } #if 0 /* ** Conversion of 16-bit floating point format to 24-bit cellrate (cells/sec). */ static u32 float_to_cellrate(u16 rate) { u32 exp, mantissa, cps; if ((rate & NZ) == 0) return 0; exp = (rate >> M_BITS) & E_MASK; mantissa = rate & M_MASK; if (exp == 0) return 1; cps = (1 << M_BITS) | mantissa; if (exp == M_BITS) cps = cps; else if (exp > M_BITS) cps <<= (exp - M_BITS); else cps >>= (M_BITS - exp); return cps; } #endif static void init_abr_vc (IADEV *dev, srv_cls_param_t *srv_p) { srv_p->class_type = ATM_ABR; srv_p->pcr = dev->LineRate; srv_p->mcr = 0; srv_p->icr = 0x055cb7; srv_p->tbe = 0xffffff; srv_p->frtt = 0x3a; srv_p->rif = 0xf; srv_p->rdf = 0xb; srv_p->nrm = 0x4; srv_p->trm = 0x7; srv_p->cdf = 0x3; srv_p->adtf = 50; } static int ia_open_abr_vc(IADEV *dev, srv_cls_param_t *srv_p, struct atm_vcc *vcc, u8 flag) { f_vc_abr_entry *f_abr_vc; r_vc_abr_entry *r_abr_vc; u32 icr; u8 trm, nrm, crm; u16 adtf, air, *ptr16; f_abr_vc =(f_vc_abr_entry *)dev->MAIN_VC_TABLE_ADDR; f_abr_vc += vcc->vci; switch (flag) { case 1: /* FFRED initialization */ #if 0 /* sanity check */ if (srv_p->pcr == 0) return INVALID_PCR; if (srv_p->pcr > dev->LineRate) srv_p->pcr = dev->LineRate; if ((srv_p->mcr + dev->sum_mcr) > dev->LineRate) return MCR_UNAVAILABLE; if (srv_p->mcr > srv_p->pcr) return INVALID_MCR; if (!(srv_p->icr)) srv_p->icr = srv_p->pcr; if ((srv_p->icr < srv_p->mcr) || (srv_p->icr > srv_p->pcr)) return INVALID_ICR; if ((srv_p->tbe < MIN_TBE) || (srv_p->tbe > MAX_TBE)) return INVALID_TBE; if ((srv_p->frtt < MIN_FRTT) || (srv_p->frtt > MAX_FRTT)) return INVALID_FRTT; if (srv_p->nrm > MAX_NRM) return INVALID_NRM; if (srv_p->trm > MAX_TRM) return INVALID_TRM; if (srv_p->adtf > MAX_ADTF) return INVALID_ADTF; else if (srv_p->adtf == 0) srv_p->adtf = 1; if (srv_p->cdf > MAX_CDF) return INVALID_CDF; if (srv_p->rif > MAX_RIF) return INVALID_RIF; if (srv_p->rdf > MAX_RDF) return INVALID_RDF; #endif memset ((caddr_t)f_abr_vc, 0, sizeof(*f_abr_vc)); f_abr_vc->f_vc_type = ABR; nrm = 2 << srv_p->nrm; /* (2 ** (srv_p->nrm +1)) */ /* i.e 2**n = 2 << (n-1) */ f_abr_vc->f_nrm = nrm << 8 | nrm; trm = 100000/(2 << (16 - srv_p->trm)); if ( trm == 0) trm = 1; f_abr_vc->f_nrmexp =(((srv_p->nrm +1) & 0x0f) << 12)|(MRM << 8) | trm; crm = srv_p->tbe / nrm; if (crm == 0) crm = 1; f_abr_vc->f_crm = crm & 0xff; f_abr_vc->f_pcr = cellrate_to_float(srv_p->pcr); icr = min( srv_p->icr, (srv_p->tbe > srv_p->frtt) ? 
((srv_p->tbe/srv_p->frtt)*1000000) : (1000000/(srv_p->frtt/srv_p->tbe))); f_abr_vc->f_icr = cellrate_to_float(icr); adtf = (10000 * srv_p->adtf)/8192; if (adtf == 0) adtf = 1; f_abr_vc->f_cdf = ((7 - srv_p->cdf) << 12 | adtf) & 0xfff; f_abr_vc->f_mcr = cellrate_to_float(srv_p->mcr); f_abr_vc->f_acr = f_abr_vc->f_icr; f_abr_vc->f_status = 0x0042; break; case 0: /* RFRED initialization */ ptr16 = (u_short *)(dev->reass_ram + REASS_TABLE*dev->memSize); *(ptr16 + vcc->vci) = NO_AAL5_PKT | REASS_ABR; r_abr_vc = (r_vc_abr_entry*)(dev->reass_ram+ABR_VC_TABLE*dev->memSize); r_abr_vc += vcc->vci; r_abr_vc->r_status_rdf = (15 - srv_p->rdf) & 0x000f; air = srv_p->pcr << (15 - srv_p->rif); if (air == 0) air = 1; r_abr_vc->r_air = cellrate_to_float(air); dev->testTable[vcc->vci]->vc_status = VC_ACTIVE | VC_ABR; dev->sum_mcr += srv_p->mcr; dev->n_abr++; break; default: break; } return 0; } static int ia_cbr_setup (IADEV *dev, struct atm_vcc *vcc) { u32 rateLow=0, rateHigh, rate; int entries; struct ia_vcc *ia_vcc; int idealSlot =0, testSlot, toBeAssigned, inc; u32 spacing; u16 *SchedTbl, *TstSchedTbl; u16 cbrVC, vcIndex; u32 fracSlot = 0; u32 sp_mod = 0; u32 sp_mod2 = 0; /* IpAdjustTrafficParams */ if (vcc->qos.txtp.max_pcr <= 0) { IF_ERR(printk("PCR for CBR not defined\n");) return -1; } rate = vcc->qos.txtp.max_pcr; entries = rate / dev->Granularity; IF_CBR(printk("CBR: CBR entries=0x%x for rate=0x%x & Gran=0x%x\n", entries, rate, dev->Granularity);) if (entries < 1) IF_CBR(printk("CBR: Bandwidth smaller than granularity of CBR table\n");) rateLow = entries * dev->Granularity; rateHigh = (entries + 1) * dev->Granularity; if (3*(rate - rateLow) > (rateHigh - rate)) entries++; if (entries > dev->CbrRemEntries) { IF_CBR(printk("CBR: Not enough bandwidth to support this PCR.\n");) IF_CBR(printk("Entries = 0x%x, CbrRemEntries = 0x%x.\n", entries, dev->CbrRemEntries);) return -EBUSY; } ia_vcc = INPH_IA_VCC(vcc); ia_vcc->NumCbrEntry = entries; dev->sum_mcr += entries * dev->Granularity; /* IaFFrednInsertCbrSched */ // Starting at an arbitrary location, place the entries into the table // as smoothly as possible cbrVC = 0; spacing = dev->CbrTotEntries / entries; sp_mod = dev->CbrTotEntries % entries; // get modulo toBeAssigned = entries; fracSlot = 0; vcIndex = vcc->vci; IF_CBR(printk("Vci=0x%x,Spacing=0x%x,Sp_mod=0x%x\n",vcIndex,spacing,sp_mod);) while (toBeAssigned) { // If this is the first time, start the table loading for this connection // as close to entryPoint as possible. if (toBeAssigned == entries) { idealSlot = dev->CbrEntryPt; dev->CbrEntryPt += 2; // Adding 2 helps to prevent clumping if (dev->CbrEntryPt >= dev->CbrTotEntries) dev->CbrEntryPt -= dev->CbrTotEntries;// Wrap if necessary } else { idealSlot += (u32)(spacing + fracSlot); // Point to the next location // in the table that would be smoothest fracSlot = ((sp_mod + sp_mod2) / entries); // get new integer part sp_mod2 = ((sp_mod + sp_mod2) % entries); // calc new fractional part } if (idealSlot >= (int)dev->CbrTotEntries) idealSlot -= dev->CbrTotEntries; // Continuously check around this ideal value until a null // location is encountered. 
SchedTbl = (u16*)(dev->seg_ram+CBR_SCHED_TABLE*dev->memSize); inc = 0; testSlot = idealSlot; TstSchedTbl = (u16*)(SchedTbl+testSlot); //set index and read in value IF_CBR(printk("CBR Testslot 0x%x AT Location 0x%p, NumToAssign=%d\n", testSlot, TstSchedTbl,toBeAssigned);) memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC)); while (cbrVC) // If another VC at this location, we have to keep looking { inc++; testSlot = idealSlot - inc; if (testSlot < 0) { // Wrap if necessary testSlot += dev->CbrTotEntries; IF_CBR(printk("Testslot Wrap. STable Start=0x%p,Testslot=%d\n", SchedTbl,testSlot);) } TstSchedTbl = (u16 *)(SchedTbl + testSlot); // set table index memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC)); if (!cbrVC) break; testSlot = idealSlot + inc; if (testSlot >= (int)dev->CbrTotEntries) { // Wrap if necessary testSlot -= dev->CbrTotEntries; IF_CBR(printk("TotCbrEntries=%d",dev->CbrTotEntries);) IF_CBR(printk(" Testslot=0x%x ToBeAssgned=%d\n", testSlot, toBeAssigned);) } // set table index and read in value TstSchedTbl = (u16*)(SchedTbl + testSlot); IF_CBR(printk("Reading CBR Tbl from 0x%p, CbrVal=0x%x Iteration %d\n", TstSchedTbl,cbrVC,inc);) memcpy((caddr_t)&cbrVC,(caddr_t)TstSchedTbl,sizeof(cbrVC)); } /* while */ // Move this VCI number into this location of the CBR Sched table. memcpy((caddr_t)TstSchedTbl, (caddr_t)&vcIndex, sizeof(*TstSchedTbl)); dev->CbrRemEntries--; toBeAssigned--; } /* while */ /* IaFFrednCbrEnable */ dev->NumEnabledCBR++; if (dev->NumEnabledCBR == 1) { writew((CBR_EN | UBR_EN | ABR_EN | (0x23 << 2)), dev->seg_reg+STPARMS); IF_CBR(printk("CBR is enabled\n");) } return 0; } static void ia_cbrVc_close (struct atm_vcc *vcc) { IADEV *iadev; u16 *SchedTbl, NullVci = 0; u32 i, NumFound; iadev = INPH_IA_DEV(vcc->dev); iadev->NumEnabledCBR--; SchedTbl = (u16*)(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize); if (iadev->NumEnabledCBR == 0) { writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS); IF_CBR (printk("CBR support disabled\n");) } NumFound = 0; for (i=0; i < iadev->CbrTotEntries; i++) { if (*SchedTbl == vcc->vci) { iadev->CbrRemEntries++; *SchedTbl = NullVci; IF_CBR(NumFound++;) } SchedTbl++; } IF_CBR(printk("Exit ia_cbrVc_close, NumRemoved=%d\n",NumFound);) } static int ia_avail_descs(IADEV *iadev) { int tmp = 0; ia_hack_tcq(iadev); if (iadev->host_tcq_wr >= iadev->ffL.tcq_rd) tmp = (iadev->host_tcq_wr - iadev->ffL.tcq_rd) / 2; else tmp = (iadev->ffL.tcq_ed - iadev->ffL.tcq_rd + 2 + iadev->host_tcq_wr - iadev->ffL.tcq_st) / 2; return tmp; } static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb); static int ia_que_tx (IADEV *iadev) { struct sk_buff *skb; int num_desc; struct atm_vcc *vcc; num_desc = ia_avail_descs(iadev); while (num_desc && (skb = skb_dequeue(&iadev->tx_backlog))) { if (!(vcc = ATM_SKB(skb)->vcc)) { dev_kfree_skb_any(skb); printk("ia_que_tx: Null vcc\n"); break; } if (!test_bit(ATM_VF_READY,&vcc->flags)) { dev_kfree_skb_any(skb); printk("Free the SKB on closed vci %d \n", vcc->vci); break; } if (ia_pkt_tx (vcc, skb)) { skb_queue_head(&iadev->tx_backlog, skb); } num_desc--; } return 0; } static void ia_tx_poll (IADEV *iadev) { struct atm_vcc *vcc = NULL; struct sk_buff *skb = NULL, *skb1 = NULL; struct ia_vcc *iavcc; IARTN_Q * rtne; ia_hack_tcq(iadev); while ( (rtne = ia_deque_rtn_q(&iadev->tx_return_q))) { skb = rtne->data.txskb; if (!skb) { printk("ia_tx_poll: skb is null\n"); goto out; } vcc = ATM_SKB(skb)->vcc; if (!vcc) { printk("ia_tx_poll: vcc is null\n"); dev_kfree_skb_any(skb); goto out; } iavcc = 
INPH_IA_VCC(vcc); if (!iavcc) { printk("ia_tx_poll: iavcc is null\n"); dev_kfree_skb_any(skb); goto out; } skb1 = skb_dequeue(&iavcc->txing_skb); while (skb1 && (skb1 != skb)) { if (!(IA_SKB_STATE(skb1) & IA_TX_DONE)) { printk("IA_tx_intr: Vci %d lost pkt!!!\n", vcc->vci); } IF_ERR(printk("Releasing an SKB that does not match\n");) if ((vcc->pop) && (skb1->len != 0)) { vcc->pop(vcc, skb1); IF_EVENT(printk("Transmit Done - skb 0x%lx return\n", (long)skb1);) } else dev_kfree_skb_any(skb1); skb1 = skb_dequeue(&iavcc->txing_skb); } if (!skb1) { IF_EVENT(printk("IA: Vci %d - skb not found, requeued\n",vcc->vci);) ia_enque_head_rtn_q (&iadev->tx_return_q, rtne); break; } if ((vcc->pop) && (skb->len != 0)) { vcc->pop(vcc, skb); IF_EVENT(printk("Tx Done - skb 0x%lx return\n",(long)skb);) } else dev_kfree_skb_any(skb); kfree(rtne); } ia_que_tx(iadev); out: return; } #if 0 static void ia_eeprom_put (IADEV *iadev, u32 addr, u_short val) { u32 t; int i; /* * Issue a command to enable writes to the NOVRAM */ NVRAM_CMD (EXTEND + EWEN); NVRAM_CLR_CE; /* * issue the write command */ NVRAM_CMD(IAWRITE + addr); /* * Send the data, starting with D15, then D14, and so on for 16 bits */ for (i=15; i>=0; i--) { NVRAM_CLKOUT (val & 0x8000); val <<= 1; } NVRAM_CLR_CE; CFG_OR(NVCE); t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); while (!(t & NVDO)) t = readl(iadev->reg+IPHASE5575_EEPROM_ACCESS); NVRAM_CLR_CE; /* * disable writes again */ NVRAM_CMD(EXTEND + EWDS) NVRAM_CLR_CE; CFG_AND(~NVDI); } #endif static u16 ia_eeprom_get (IADEV *iadev, u32 addr) { u_short val; u32 t; int i; /* * Read the first bit that was clocked with the falling edge of the * last command data clock */ NVRAM_CMD(IAREAD + addr); /* * Now read the rest of the bits, the next bit read is D14, then D13, * and so on.
*/ val = 0; for (i=15; i>=0; i--) { NVRAM_CLKIN(t); val |= (t << i); } NVRAM_CLR_CE; CFG_AND(~NVDI); return val; } static void ia_hw_type(IADEV *iadev) { u_short memType = ia_eeprom_get(iadev, 25); iadev->memType = memType; if ((memType & MEM_SIZE_MASK) == MEM_SIZE_1M) { iadev->num_tx_desc = IA_TX_BUF; iadev->tx_buf_sz = IA_TX_BUF_SZ; iadev->num_rx_desc = IA_RX_BUF; iadev->rx_buf_sz = IA_RX_BUF_SZ; } else if ((memType & MEM_SIZE_MASK) == MEM_SIZE_512K) { if (IA_TX_BUF == DFL_TX_BUFFERS) iadev->num_tx_desc = IA_TX_BUF / 2; else iadev->num_tx_desc = IA_TX_BUF; iadev->tx_buf_sz = IA_TX_BUF_SZ; if (IA_RX_BUF == DFL_RX_BUFFERS) iadev->num_rx_desc = IA_RX_BUF / 2; else iadev->num_rx_desc = IA_RX_BUF; iadev->rx_buf_sz = IA_RX_BUF_SZ; } else { if (IA_TX_BUF == DFL_TX_BUFFERS) iadev->num_tx_desc = IA_TX_BUF / 8; else iadev->num_tx_desc = IA_TX_BUF; iadev->tx_buf_sz = IA_TX_BUF_SZ; if (IA_RX_BUF == DFL_RX_BUFFERS) iadev->num_rx_desc = IA_RX_BUF / 8; else iadev->num_rx_desc = IA_RX_BUF; iadev->rx_buf_sz = IA_RX_BUF_SZ; } iadev->rx_pkt_ram = TX_PACKET_RAM + (iadev->num_tx_desc * iadev->tx_buf_sz); IF_INIT(printk("BUF: tx=%d,sz=%d rx=%d sz= %d rx_pkt_ram=%d\n", iadev->num_tx_desc, iadev->tx_buf_sz, iadev->num_rx_desc, iadev->rx_buf_sz, iadev->rx_pkt_ram);) #if 0 if ((memType & FE_MASK) == FE_SINGLE_MODE) { iadev->phy_type = PHY_OC3C_S; else if ((memType & FE_MASK) == FE_UTP_OPTION) iadev->phy_type = PHY_UTP155; else iadev->phy_type = PHY_OC3C_M; #endif iadev->phy_type = memType & FE_MASK; IF_INIT(printk("memType = 0x%x iadev->phy_type = 0x%x\n", memType,iadev->phy_type);) if (iadev->phy_type == FE_25MBIT_PHY) iadev->LineRate = (u32)(((25600000/8)*26)/(27*53)); else if (iadev->phy_type == FE_DS3_PHY) iadev->LineRate = (u32)(((44736000/8)*26)/(27*53)); else if (iadev->phy_type == FE_E3_PHY) iadev->LineRate = (u32)(((34368000/8)*26)/(27*53)); else iadev->LineRate = (u32)(ATM_OC3_PCR); IF_INIT(printk("iadev->LineRate = %d \n", iadev->LineRate);) } static void IaFrontEndIntr(IADEV *iadev) { volatile IA_SUNI *suni; volatile ia_mb25_t *mb25; volatile suni_pm7345_t *suni_pm7345; if(iadev->phy_type & FE_25MBIT_PHY) { mb25 = (ia_mb25_t*)iadev->phy; iadev->carrier_detect = Boolean(mb25->mb25_intr_status & MB25_IS_GSB); } else if (iadev->phy_type & FE_DS3_PHY) { suni_pm7345 = (suni_pm7345_t *)iadev->phy; /* clear FRMR interrupts */ (void) suni_pm7345->suni_ds3_frm_intr_stat; iadev->carrier_detect = Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV)); } else if (iadev->phy_type & FE_E3_PHY ) { suni_pm7345 = (suni_pm7345_t *)iadev->phy; (void) suni_pm7345->suni_e3_frm_maint_intr_ind; iadev->carrier_detect = Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat&SUNI_E3_LOS)); } else { suni = (IA_SUNI *)iadev->phy; (void) suni->suni_rsop_status; iadev->carrier_detect = Boolean(!(suni->suni_rsop_status & SUNI_LOSV)); } if (iadev->carrier_detect) printk("IA: SUNI carrier detected\n"); else printk("IA: SUNI carrier lost signal\n"); return; } static void ia_mb25_init (IADEV *iadev) { volatile ia_mb25_t *mb25 = (ia_mb25_t*)iadev->phy; #if 0 mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC | MB25_MC_ENABLED; #endif mb25->mb25_master_ctrl = MB25_MC_DRIC | MB25_MC_DREC; mb25->mb25_diag_control = 0; /* * Initialize carrier detect state */ iadev->carrier_detect = Boolean(mb25->mb25_intr_status & MB25_IS_GSB); return; } static void ia_suni_pm7345_init (IADEV *iadev) { volatile suni_pm7345_t *suni_pm7345 = (suni_pm7345_t *)iadev->phy; if (iadev->phy_type & FE_DS3_PHY) { iadev->carrier_detect = 
Boolean(!(suni_pm7345->suni_ds3_frm_stat & SUNI_DS3_LOSV)); suni_pm7345->suni_ds3_frm_intr_enbl = 0x17; suni_pm7345->suni_ds3_frm_cfg = 1; suni_pm7345->suni_ds3_tran_cfg = 1; suni_pm7345->suni_config = 0; suni_pm7345->suni_splr_cfg = 0; suni_pm7345->suni_splt_cfg = 0; } else { iadev->carrier_detect = Boolean(!(suni_pm7345->suni_e3_frm_fram_intr_ind_stat & SUNI_E3_LOS)); suni_pm7345->suni_e3_frm_fram_options = 0x4; suni_pm7345->suni_e3_frm_maint_options = 0x20; suni_pm7345->suni_e3_frm_fram_intr_enbl = 0x1d; suni_pm7345->suni_e3_frm_maint_intr_enbl = 0x30; suni_pm7345->suni_e3_tran_stat_diag_options = 0x0; suni_pm7345->suni_e3_tran_fram_options = 0x1; suni_pm7345->suni_config = SUNI_PM7345_E3ENBL; suni_pm7345->suni_splr_cfg = 0x41; suni_pm7345->suni_splt_cfg = 0x41; } /* * Enable RSOP loss of signal interrupt. */ suni_pm7345->suni_intr_enbl = 0x28; /* * Clear error counters */ suni_pm7345->suni_id_reset = 0; /* * Clear "PMCTST" in master test register. */ suni_pm7345->suni_master_test = 0; suni_pm7345->suni_rxcp_ctrl = 0x2c; suni_pm7345->suni_rxcp_fctrl = 0x81; suni_pm7345->suni_rxcp_idle_pat_h1 = suni_pm7345->suni_rxcp_idle_pat_h2 = suni_pm7345->suni_rxcp_idle_pat_h3 = 0; suni_pm7345->suni_rxcp_idle_pat_h4 = 1; suni_pm7345->suni_rxcp_idle_mask_h1 = 0xff; suni_pm7345->suni_rxcp_idle_mask_h2 = 0xff; suni_pm7345->suni_rxcp_idle_mask_h3 = 0xff; suni_pm7345->suni_rxcp_idle_mask_h4 = 0xfe; suni_pm7345->suni_rxcp_cell_pat_h1 = suni_pm7345->suni_rxcp_cell_pat_h2 = suni_pm7345->suni_rxcp_cell_pat_h3 = 0; suni_pm7345->suni_rxcp_cell_pat_h4 = 1; suni_pm7345->suni_rxcp_cell_mask_h1 = suni_pm7345->suni_rxcp_cell_mask_h2 = suni_pm7345->suni_rxcp_cell_mask_h3 = suni_pm7345->suni_rxcp_cell_mask_h4 = 0xff; suni_pm7345->suni_txcp_ctrl = 0xa4; suni_pm7345->suni_txcp_intr_en_sts = 0x10; suni_pm7345->suni_txcp_idle_pat_h5 = 0x55; suni_pm7345->suni_config &= ~(SUNI_PM7345_LLB | SUNI_PM7345_CLB | SUNI_PM7345_DLB | SUNI_PM7345_PLB); #ifdef __SNMP__ suni_pm7345->suni_rxcp_intr_en_sts |= SUNI_OOCDE; #endif /* __SNMP__ */ return; } /***************************** IA_LIB END *****************************/ #ifdef CONFIG_ATM_IA_DEBUG static int tcnter = 0; static void xdump( u_char* cp, int length, char* prefix ) { int col, count; u_char prntBuf[120]; u_char* pBuf = prntBuf; count = 0; while(count < length){ pBuf += sprintf( pBuf, "%s", prefix ); for(col = 0;count + col < length && col < 16; col++){ if (col != 0 && (col % 4) == 0) pBuf += sprintf( pBuf, " " ); pBuf += sprintf( pBuf, "%02X ", cp[count + col] ); } while(col++ < 16){ /* pad end of buffer with blanks */ if ((col % 4) == 0) sprintf( pBuf, " " ); pBuf += sprintf( pBuf, " " ); } pBuf += sprintf( pBuf, " " ); for(col = 0;count + col < length && col < 16; col++){ if (isprint((int)cp[count + col])) pBuf += sprintf( pBuf, "%c", cp[count + col] ); else pBuf += sprintf( pBuf, "." ); } printk("%s\n", prntBuf); count += col; pBuf = prntBuf; } } /* close xdump(... 
*/ #endif /* CONFIG_ATM_IA_DEBUG */ static struct atm_dev *ia_boards = NULL; #define ACTUAL_RAM_BASE \ RAM_BASE*((iadev->mem)/(128 * 1024)) #define ACTUAL_SEG_RAM_BASE \ IPHASE5575_FRAG_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024)) #define ACTUAL_REASS_RAM_BASE \ IPHASE5575_REASS_CONTROL_RAM_BASE*((iadev->mem)/(128 * 1024)) /*-- some utilities and memory allocation stuff will come here -------------*/ static void desc_dbg(IADEV *iadev) { u_short tcq_wr_ptr, tcq_st_ptr, tcq_ed_ptr; u32 i; void __iomem *tmp; // regval = readl((u32)ia_cmds->maddr); tcq_wr_ptr = readw(iadev->seg_reg+TCQ_WR_PTR); printk("B_tcq_wr = 0x%x desc = %d last desc = %d\n", tcq_wr_ptr, readw(iadev->seg_ram+tcq_wr_ptr), readw(iadev->seg_ram+tcq_wr_ptr-2)); printk(" host_tcq_wr = 0x%x host_tcq_rd = 0x%x \n", iadev->host_tcq_wr, iadev->ffL.tcq_rd); tcq_st_ptr = readw(iadev->seg_reg+TCQ_ST_ADR); tcq_ed_ptr = readw(iadev->seg_reg+TCQ_ED_ADR); printk("tcq_st_ptr = 0x%x tcq_ed_ptr = 0x%x \n", tcq_st_ptr, tcq_ed_ptr); i = 0; while (tcq_st_ptr != tcq_ed_ptr) { tmp = iadev->seg_ram+tcq_st_ptr; printk("TCQ slot %d desc = %d Addr = %p\n", i++, readw(tmp), tmp); tcq_st_ptr += 2; } for(i=0; i <iadev->num_tx_desc; i++) printk("Desc_tbl[%d] = %d \n", i, iadev->desc_tbl[i].timestamp); } /*----------------------------- Receiving side stuff --------------------------*/ static void rx_excp_rcvd(struct atm_dev *dev) { #if 0 /* closing the receiving side will cause too many excp int */ IADEV *iadev; u_short state; u_short excpq_rd_ptr; //u_short *ptr; int vci, error = 1; iadev = INPH_IA_DEV(dev); state = readl(iadev->reass_reg + STATE_REG) & 0xffff; while((state & EXCPQ_EMPTY) != EXCPQ_EMPTY) { printk("state = %x \n", state); excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_RD_PTR) & 0xffff; printk("state = %x excpq_rd_ptr = %x \n", state, excpq_rd_ptr); if (excpq_rd_ptr == *(u16*)(iadev->reass_reg + EXCP_Q_WR_PTR)) IF_ERR(printk("excpq_rd_ptr is wrong!!!\n");) // TODO: update exception stat vci = readw(iadev->reass_ram+excpq_rd_ptr); error = readw(iadev->reass_ram+excpq_rd_ptr+2) & 0x0007; // pwang_test excpq_rd_ptr += 4; if (excpq_rd_ptr > (readw(iadev->reass_reg + EXCP_Q_ED_ADR)& 0xffff)) excpq_rd_ptr = readw(iadev->reass_reg + EXCP_Q_ST_ADR)& 0xffff; writew( excpq_rd_ptr, iadev->reass_reg + EXCP_Q_RD_PTR); state = readl(iadev->reass_reg + STATE_REG) & 0xffff; } #endif } static void free_desc(struct atm_dev *dev, int desc) { IADEV *iadev; iadev = INPH_IA_DEV(dev); writew(desc, iadev->reass_ram+iadev->rfL.fdq_wr); iadev->rfL.fdq_wr +=2; if (iadev->rfL.fdq_wr > iadev->rfL.fdq_ed) iadev->rfL.fdq_wr = iadev->rfL.fdq_st; writew(iadev->rfL.fdq_wr, iadev->reass_reg+FREEQ_WR_PTR); } static int rx_pkt(struct atm_dev *dev) { IADEV *iadev; struct atm_vcc *vcc; unsigned short status; struct rx_buf_desc __iomem *buf_desc_ptr; int desc; struct dle* wr_ptr; int len; struct sk_buff *skb; u_int buf_addr, dma_addr; iadev = INPH_IA_DEV(dev); if (iadev->rfL.pcq_rd == (readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff)) { printk(KERN_ERR DEV_LABEL "(itf %d) Receive queue empty\n", dev->number); return -EINVAL; } /* mask 1st 3 bits to get the actual descno.
*/ desc = readw(iadev->reass_ram+iadev->rfL.pcq_rd) & 0x1fff; IF_RX(printk("reass_ram = %p iadev->rfL.pcq_rd = 0x%x desc = %d\n", iadev->reass_ram, iadev->rfL.pcq_rd, desc); printk(" pcq_wr_ptr = 0x%x\n", readw(iadev->reass_reg+PCQ_WR_PTR)&0xffff);) /* update the read pointer - maybe we should do this in the end*/ if ( iadev->rfL.pcq_rd== iadev->rfL.pcq_ed) iadev->rfL.pcq_rd = iadev->rfL.pcq_st; else iadev->rfL.pcq_rd += 2; writew(iadev->rfL.pcq_rd, iadev->reass_reg+PCQ_RD_PTR); /* get the buffer desc entry. update stuff. - doesn't seem to be any update necessary */ buf_desc_ptr = iadev->RX_DESC_BASE_ADDR; /* make the ptr point to the corresponding buffer desc entry */ buf_desc_ptr += desc; if (!desc || (desc > iadev->num_rx_desc) || ((buf_desc_ptr->vc_index & 0xffff) > iadev->num_vc)) { free_desc(dev, desc); IF_ERR(printk("IA: bad descriptor desc = %d \n", desc);) return -1; } vcc = iadev->rx_open[buf_desc_ptr->vc_index & 0xffff]; if (!vcc) { free_desc(dev, desc); printk("IA: null vcc, drop PDU\n"); return -1; } /* might want to check the status bits for errors */ status = (u_short) (buf_desc_ptr->desc_mode); if (status & (RX_CER | RX_PTE | RX_OFL)) { atomic_inc(&vcc->stats->rx_err); IF_ERR(printk("IA: bad packet, dropping it");) if (status & RX_CER) { IF_ERR(printk(" cause: packet CRC error\n");) } else if (status & RX_PTE) { IF_ERR(printk(" cause: packet time out\n");) } else { IF_ERR(printk(" cause: buffer overflow\n");) } goto out_free_desc; } /* build DLE. */ buf_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo; dma_addr = (buf_desc_ptr->dma_start_hi << 16) | buf_desc_ptr->dma_start_lo; len = dma_addr - buf_addr; if (len > iadev->rx_buf_sz) { printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz); atomic_inc(&vcc->stats->rx_err); goto out_free_desc; } if (!(skb = atm_alloc_charge(vcc, len, GFP_ATOMIC))) { if (vcc->vci < 32) printk("Drop control packets\n"); goto out_free_desc; } skb_put(skb,len); // pwang_test ATM_SKB(skb)->vcc = vcc; ATM_DESC(skb) = desc; skb_queue_tail(&iadev->rx_dma_q, skb); /* Build the DLE structure */ wr_ptr = iadev->rx_dle_q.write; wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data, len, PCI_DMA_FROMDEVICE); wr_ptr->local_pkt_addr = buf_addr; wr_ptr->bytes = len; /* We don't know this do we ?? */ wr_ptr->mode = DMA_INT_ENABLE; /* should take care of wrap around here too. */ if(++wr_ptr == iadev->rx_dle_q.end) wr_ptr = iadev->rx_dle_q.start; iadev->rx_dle_q.write = wr_ptr; udelay(1); /* Increment transaction counter */ writel(1, iadev->dma+IPHASE5575_RX_COUNTER); out: return 0; out_free_desc: free_desc(dev, desc); goto out; } static void rx_intr(struct atm_dev *dev) { IADEV *iadev; u_short status; u_short state, i; iadev = INPH_IA_DEV(dev); status = readl(iadev->reass_reg+REASS_INTR_STATUS_REG) & 0xffff; IF_EVENT(printk("rx_intr: status = 0x%x\n", status);) if (status & RX_PKT_RCVD) { /* do something */ /* Basically received an interrupt for receiving a packet. A descriptor would have been written to the packet complete queue. Get all the descriptors and set up dma to move the packets till the packet complete queue is empty..
*/ state = readl(iadev->reass_reg + STATE_REG) & 0xffff; IF_EVENT(printk("Rx intr status: RX_PKT_RCVD %08x\n", status);) while(!(state & PCQ_EMPTY)) { rx_pkt(dev); state = readl(iadev->reass_reg + STATE_REG) & 0xffff; } iadev->rxing = 1; } if (status & RX_FREEQ_EMPT) { if (iadev->rxing) { iadev->rx_tmp_cnt = iadev->rx_pkt_cnt; iadev->rx_tmp_jif = jiffies; iadev->rxing = 0; } else if ((time_after(jiffies, iadev->rx_tmp_jif + 50)) && ((iadev->rx_pkt_cnt - iadev->rx_tmp_cnt) == 0)) { for (i = 1; i <= iadev->num_rx_desc; i++) free_desc(dev, i); printk("Test logic RUN!!!!\n"); writew( ~(RX_FREEQ_EMPT|RX_EXCP_RCVD),iadev->reass_reg+REASS_MASK_REG); iadev->rxing = 1; } IF_EVENT(printk("Rx intr status: RX_FREEQ_EMPT %08x\n", status);) } if (status & RX_EXCP_RCVD) { /* probably need to handle the exception queue also. */ IF_EVENT(printk("Rx intr status: RX_EXCP_RCVD %08x\n", status);) rx_excp_rcvd(dev); } if (status & RX_RAW_RCVD) { /* need to handle the raw incoming cells. This depends on whether we have programmed to receive the raw cells or not. Else ignore. */ IF_EVENT(printk("Rx intr status: RX_RAW_RCVD %08x\n", status);) } } static void rx_dle_intr(struct atm_dev *dev) { IADEV *iadev; struct atm_vcc *vcc; struct sk_buff *skb; int desc; u_short state; struct dle *dle, *cur_dle; u_int dle_lp; int len; iadev = INPH_IA_DEV(dev); /* free all the dles done, that is just update our own dle read pointer - do we really need to do this. Think not. */ /* DMA is done, just get all the receive buffers from the rx dma queue and push them up to the higher layer protocol. Also free the desc associated with the buffer. */ dle = iadev->rx_dle_q.read; dle_lp = readl(iadev->dma+IPHASE5575_RX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1); cur_dle = (struct dle*)(iadev->rx_dle_q.start + (dle_lp >> 4)); while(dle != cur_dle) { /* free the DMAed skb */ skb = skb_dequeue(&iadev->rx_dma_q); if (!skb) goto INCR_DLE; desc = ATM_DESC(skb); free_desc(dev, desc); if (!(len = skb->len)) { printk("rx_dle_intr: skb len 0\n"); dev_kfree_skb_any(skb); } else { struct cpcs_trailer *trailer; u_short length; struct ia_vcc *ia_vcc; pci_unmap_single(iadev->pci, iadev->rx_dle_q.write->sys_pkt_addr, len, PCI_DMA_FROMDEVICE); /* no VCC related housekeeping done as yet. lets see */ vcc = ATM_SKB(skb)->vcc; if (!vcc) { printk("IA: null vcc\n"); dev_kfree_skb_any(skb); goto INCR_DLE; } ia_vcc = INPH_IA_VCC(vcc); if (ia_vcc == NULL) { atomic_inc(&vcc->stats->rx_err); dev_kfree_skb_any(skb); atm_return(vcc, atm_guess_pdu2truesize(len)); goto INCR_DLE; } // get real pkt length pwang_test trailer = (struct cpcs_trailer*)((u_char *)skb->data + skb->len - sizeof(*trailer)); length = swap_byte_order(trailer->length); if ((length > iadev->rx_buf_sz) || (length > (skb->len - sizeof(struct cpcs_trailer)))) { atomic_inc(&vcc->stats->rx_err); IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)", length, skb->len);) dev_kfree_skb_any(skb); atm_return(vcc, atm_guess_pdu2truesize(len)); goto INCR_DLE; } skb_trim(skb, length); /* Display the packet */ IF_RXPKT(printk("\nDmad Recvd data: len = %d \n", skb->len); xdump(skb->data, skb->len, "RX: "); printk("\n");) IF_RX(printk("rx_dle_intr: skb push");) vcc->push(vcc,skb); atomic_inc(&vcc->stats->rx); iadev->rx_pkt_cnt++; } INCR_DLE: if (++dle == iadev->rx_dle_q.end) dle = iadev->rx_dle_q.start; } iadev->rx_dle_q.read = dle; /* if the interrupts are masked because there were no free desc available, unmask them now.
*/ if (!iadev->rxing) { state = readl(iadev->reass_reg + STATE_REG) & 0xffff; if (!(state & FREEQ_EMPTY)) { state = readl(iadev->reass_reg + REASS_MASK_REG) & 0xffff; writel(state & ~(RX_FREEQ_EMPT |/* RX_EXCP_RCVD |*/ RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG); iadev->rxing++; } } } static int open_rx(struct atm_vcc *vcc) { IADEV *iadev; u_short __iomem *vc_table; u_short __iomem *reass_ptr; IF_EVENT(printk("iadev: open_rx %d.%d\n", vcc->vpi, vcc->vci);) if (vcc->qos.rxtp.traffic_class == ATM_NONE) return 0; iadev = INPH_IA_DEV(vcc->dev); if (vcc->qos.rxtp.traffic_class == ATM_ABR) { if (iadev->phy_type & FE_25MBIT_PHY) { printk("IA: ABR not support\n"); return -EINVAL; } } /* Make only this VCI in the vc table valid and let all others be invalid entries */ vc_table = iadev->reass_ram+RX_VC_TABLE*iadev->memSize; vc_table += vcc->vci; /* mask the last 6 bits and OR it with 3 for 1K VCs */ *vc_table = vcc->vci << 6; /* Also keep a list of open rx vcs so that we can attach them with incoming PDUs later. */ if ((vcc->qos.rxtp.traffic_class == ATM_ABR) || (vcc->qos.txtp.traffic_class == ATM_ABR)) { srv_cls_param_t srv_p; init_abr_vc(iadev, &srv_p); ia_open_abr_vc(iadev, &srv_p, vcc, 0); } else { /* for UBR later may need to add CBR logic */ reass_ptr = iadev->reass_ram+REASS_TABLE*iadev->memSize; reass_ptr += vcc->vci; *reass_ptr = NO_AAL5_PKT; } if (iadev->rx_open[vcc->vci]) printk(KERN_CRIT DEV_LABEL "(itf %d): VCI %d already open\n", vcc->dev->number, vcc->vci); iadev->rx_open[vcc->vci] = vcc; return 0; } static int rx_init(struct atm_dev *dev) { IADEV *iadev; struct rx_buf_desc __iomem *buf_desc_ptr; unsigned long rx_pkt_start = 0; void *dle_addr; struct abr_vc_table *abr_vc_table; u16 *vc_table; u16 *reass_table; int i,j, vcsize_sel; u_short freeq_st_adr; u_short *freeq_start; iadev = INPH_IA_DEV(dev); // spin_lock_init(&iadev->rx_lock); /* Allocate 4k bytes - more aligned than needed (4k boundary) */ dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE, &iadev->rx_dle_dma); if (!dle_addr) { printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n"); goto err_out; } iadev->rx_dle_q.start = (struct dle *)dle_addr; iadev->rx_dle_q.read = iadev->rx_dle_q.start; iadev->rx_dle_q.write = iadev->rx_dle_q.start; iadev->rx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES); /* the end of the dle q points to the entry after the last DLE that can be used. 
*/ /* write the upper 20 bits of the start address to rx list address register */ /* We know this is 32bit bus addressed so the following is safe */ writel(iadev->rx_dle_dma & 0xfffff000, iadev->dma + IPHASE5575_RX_LIST_ADDR); IF_INIT(printk("Tx Dle list addr: 0x%p value: 0x%0x\n", iadev->dma+IPHASE5575_TX_LIST_ADDR, *(u32*)(iadev->dma+IPHASE5575_TX_LIST_ADDR)); printk("Rx Dle list addr: 0x%p value: 0x%0x\n", iadev->dma+IPHASE5575_RX_LIST_ADDR, *(u32*)(iadev->dma+IPHASE5575_RX_LIST_ADDR));) writew(0xffff, iadev->reass_reg+REASS_MASK_REG); writew(0, iadev->reass_reg+MODE_REG); writew(RESET_REASS, iadev->reass_reg+REASS_COMMAND_REG); /* Receive side control memory map ------------------------------- Buffer descr 0x0000 (736 - 23K) VP Table 0x5c00 (256 - 512) Except q 0x5e00 (128 - 512) Free buffer q 0x6000 (1K - 2K) Packet comp q 0x6800 (1K - 2K) Reass Table 0x7000 (1K - 2K) VC Table 0x7800 (1K - 2K) ABR VC Table 0x8000 (1K - 32K) */ /* Base address for Buffer Descriptor Table */ writew(RX_DESC_BASE >> 16, iadev->reass_reg+REASS_DESC_BASE); /* Set the buffer size register */ writew(iadev->rx_buf_sz, iadev->reass_reg+BUF_SIZE); /* Initialize each entry in the Buffer Descriptor Table */ iadev->RX_DESC_BASE_ADDR = iadev->reass_ram+RX_DESC_BASE*iadev->memSize; buf_desc_ptr = iadev->RX_DESC_BASE_ADDR; memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr)); buf_desc_ptr++; rx_pkt_start = iadev->rx_pkt_ram; for(i=1; i<=iadev->num_rx_desc; i++) { memset_io(buf_desc_ptr, 0, sizeof(*buf_desc_ptr)); buf_desc_ptr->buf_start_hi = rx_pkt_start >> 16; buf_desc_ptr->buf_start_lo = rx_pkt_start & 0x0000ffff; buf_desc_ptr++; rx_pkt_start += iadev->rx_buf_sz; } IF_INIT(printk("Rx Buffer desc ptr: 0x%p\n", buf_desc_ptr);) i = FREE_BUF_DESC_Q*iadev->memSize; writew(i >> 16, iadev->reass_reg+REASS_QUEUE_BASE); writew(i, iadev->reass_reg+FREEQ_ST_ADR); writew(i+iadev->num_rx_desc*sizeof(u_short), iadev->reass_reg+FREEQ_ED_ADR); writew(i, iadev->reass_reg+FREEQ_RD_PTR); writew(i+iadev->num_rx_desc*sizeof(u_short), iadev->reass_reg+FREEQ_WR_PTR); /* Fill the FREEQ with all the free descriptors. 
*/ freeq_st_adr = readw(iadev->reass_reg+FREEQ_ST_ADR); freeq_start = (u_short *)(iadev->reass_ram+freeq_st_adr); for(i=1; i<=iadev->num_rx_desc; i++) { *freeq_start = (u_short)i; freeq_start++; } IF_INIT(printk("freeq_start: 0x%p\n", freeq_start);) /* Packet Complete Queue */ i = (PKT_COMP_Q * iadev->memSize) & 0xffff; writew(i, iadev->reass_reg+PCQ_ST_ADR); writew(i+iadev->num_vc*sizeof(u_short), iadev->reass_reg+PCQ_ED_ADR); writew(i, iadev->reass_reg+PCQ_RD_PTR); writew(i, iadev->reass_reg+PCQ_WR_PTR); /* Exception Queue */ i = (EXCEPTION_Q * iadev->memSize) & 0xffff; writew(i, iadev->reass_reg+EXCP_Q_ST_ADR); writew(i + NUM_RX_EXCP * sizeof(RX_ERROR_Q), iadev->reass_reg+EXCP_Q_ED_ADR); writew(i, iadev->reass_reg+EXCP_Q_RD_PTR); writew(i, iadev->reass_reg+EXCP_Q_WR_PTR); /* Load local copy of FREEQ and PCQ ptrs */ iadev->rfL.fdq_st = readw(iadev->reass_reg+FREEQ_ST_ADR) & 0xffff; iadev->rfL.fdq_ed = readw(iadev->reass_reg+FREEQ_ED_ADR) & 0xffff ; iadev->rfL.fdq_rd = readw(iadev->reass_reg+FREEQ_RD_PTR) & 0xffff; iadev->rfL.fdq_wr = readw(iadev->reass_reg+FREEQ_WR_PTR) & 0xffff; iadev->rfL.pcq_st = readw(iadev->reass_reg+PCQ_ST_ADR) & 0xffff; iadev->rfL.pcq_ed = readw(iadev->reass_reg+PCQ_ED_ADR) & 0xffff; iadev->rfL.pcq_rd = readw(iadev->reass_reg+PCQ_RD_PTR) & 0xffff; iadev->rfL.pcq_wr = readw(iadev->reass_reg+PCQ_WR_PTR) & 0xffff; IF_INIT(printk("INIT:pcq_st:0x%x pcq_ed:0x%x pcq_rd:0x%x pcq_wr:0x%x", iadev->rfL.pcq_st, iadev->rfL.pcq_ed, iadev->rfL.pcq_rd, iadev->rfL.pcq_wr);) /* just for check - no VP TBL */ /* VP Table */ /* writew(0x0b80, iadev->reass_reg+VP_LKUP_BASE); */ /* initialize VP Table for invalid VPIs - I guess we can write all 1s or 0x000f in the entire memory space or something similar. */ /* This seems to work and looks right to me too !!! */ i = REASS_TABLE * iadev->memSize; writew((i >> 3), iadev->reass_reg+REASS_TABLE_BASE); /* initialize Reassembly table to I don't know what ???? */ reass_table = (u16 *)(iadev->reass_ram+i); j = REASS_TABLE_SZ * iadev->memSize; for(i=0; i < j; i++) *reass_table++ = NO_AAL5_PKT; i = 8*1024; vcsize_sel = 0; while (i != iadev->num_vc) { i /= 2; vcsize_sel++; } i = RX_VC_TABLE * iadev->memSize; writew(((i>>3) & 0xfff8) | vcsize_sel, iadev->reass_reg+VC_LKUP_BASE); vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize); j = RX_VC_TABLE_SZ * iadev->memSize; for(i = 0; i < j; i++) { /* shift the reassembly pointer by 3 + lower 3 bits of vc_lkup_base register (=3 for 1K VCs) and the last byte is those low 3 bits. Shall program this later. 
*/ *vc_table = (i << 6) | 15; /* for invalid VCI */ vc_table++; } /* ABR VC table */ i = ABR_VC_TABLE * iadev->memSize; writew(i >> 3, iadev->reass_reg+ABR_LKUP_BASE); i = ABR_VC_TABLE * iadev->memSize; abr_vc_table = (struct abr_vc_table *)(iadev->reass_ram+i); j = REASS_TABLE_SZ * iadev->memSize; memset ((char*)abr_vc_table, 0, j * sizeof(*abr_vc_table)); for(i = 0; i < j; i++) { abr_vc_table->rdf = 0x0003; abr_vc_table->air = 0x5eb1; abr_vc_table++; } /* Initialize other registers */ /* VP Filter Register set for VC Reassembly only */ writew(0xff00, iadev->reass_reg+VP_FILTER); writew(0, iadev->reass_reg+XTRA_RM_OFFSET); writew(0x1, iadev->reass_reg+PROTOCOL_ID); /* Packet Timeout Count related Registers : Set packet timeout to occur in about 3 seconds Set Packet Aging Interval count register to overflow in about 4 us */ writew(0xF6F8, iadev->reass_reg+PKT_TM_CNT ); i = (j >> 6) & 0xFF; j += 2 * (j - 1); i |= ((j << 2) & 0xFF00); writew(i, iadev->reass_reg+TMOUT_RANGE); /* initialize the desc_tbl */ for(i=0; i<iadev->num_tx_desc;i++) iadev->desc_tbl[i].timestamp = 0; /* to clear the interrupt status register - read it */ readw(iadev->reass_reg+REASS_INTR_STATUS_REG); /* Mask Register - clear it */ writew(~(RX_FREEQ_EMPT|RX_PKT_RCVD), iadev->reass_reg+REASS_MASK_REG); skb_queue_head_init(&iadev->rx_dma_q); iadev->rx_free_desc_qhead = NULL; iadev->rx_open = kzalloc(4 * iadev->num_vc, GFP_KERNEL); if (!iadev->rx_open) { printk(KERN_ERR DEV_LABEL "itf %d couldn't get free page\n", dev->number); goto err_free_dle; } iadev->rxing = 1; iadev->rx_pkt_cnt = 0; /* Mode Register */ writew(R_ONLINE, iadev->reass_reg+MODE_REG); return 0; err_free_dle: pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start, iadev->rx_dle_dma); err_out: return -ENOMEM; } /* The memory map suggested in appendix A and the coding for it. Keeping it around just in case we change our mind later.
Buffer descr 0x0000 (128 - 4K) UBR sched 0x1000 (1K - 4K) UBR Wait q 0x2000 (1K - 4K) Commn queues 0x3000 Packet Ready, Transmit comp(0x3100) (128 - 256) each extended VC 0x4000 (1K - 8K) ABR sched 0x6000 and ABR wait queue (1K - 2K) each CBR sched 0x7000 (as needed) VC table 0x8000 (1K - 32K) */ static void tx_intr(struct atm_dev *dev) { IADEV *iadev; unsigned short status; unsigned long flags; iadev = INPH_IA_DEV(dev); status = readl(iadev->seg_reg+SEG_INTR_STATUS_REG); if (status & TRANSMIT_DONE){ IF_EVENT(printk("Transmit Done Intr logic run\n");) spin_lock_irqsave(&iadev->tx_lock, flags); ia_tx_poll(iadev); spin_unlock_irqrestore(&iadev->tx_lock, flags); writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG); if (iadev->close_pending) wake_up(&iadev->close_wait); } if (status & TCQ_NOT_EMPTY) { IF_EVENT(printk("TCQ_NOT_EMPTY int received\n");) } } static void tx_dle_intr(struct atm_dev *dev) { IADEV *iadev; struct dle *dle, *cur_dle; struct sk_buff *skb; struct atm_vcc *vcc; struct ia_vcc *iavcc; u_int dle_lp; unsigned long flags; iadev = INPH_IA_DEV(dev); spin_lock_irqsave(&iadev->tx_lock, flags); dle = iadev->tx_dle_q.read; dle_lp = readl(iadev->dma+IPHASE5575_TX_LIST_ADDR) & (sizeof(struct dle)*DLE_ENTRIES - 1); cur_dle = (struct dle*)(iadev->tx_dle_q.start + (dle_lp >> 4)); while (dle != cur_dle) { /* free the DMAed skb */ skb = skb_dequeue(&iadev->tx_dma_q); if (!skb) break; /* Revenge of the 2 dle (skb + trailer) used in ia_pkt_tx() */ if (!((dle - iadev->tx_dle_q.start)%(2*sizeof(struct dle)))) { pci_unmap_single(iadev->pci, dle->sys_pkt_addr, skb->len, PCI_DMA_TODEVICE); } vcc = ATM_SKB(skb)->vcc; if (!vcc) { printk("tx_dle_intr: vcc is null\n"); spin_unlock_irqrestore(&iadev->tx_lock, flags); dev_kfree_skb_any(skb); return; } iavcc = INPH_IA_VCC(vcc); if (!iavcc) { printk("tx_dle_intr: iavcc is null\n"); spin_unlock_irqrestore(&iadev->tx_lock, flags); dev_kfree_skb_any(skb); return; } if (vcc->qos.txtp.pcr >= iadev->rate_limit) { if ((vcc->pop) && (skb->len != 0)) { vcc->pop(vcc, skb); } else { dev_kfree_skb_any(skb); } } else { /* Hold the rate-limited skb for flow control */ IA_SKB_STATE(skb) |= IA_DLED; skb_queue_tail(&iavcc->txing_skb, skb); } IF_EVENT(printk("tx_dle_intr: enque skb = 0x%p \n", skb);) if (++dle == iadev->tx_dle_q.end) dle = iadev->tx_dle_q.start; } iadev->tx_dle_q.read = dle; spin_unlock_irqrestore(&iadev->tx_lock, flags); } static int open_tx(struct atm_vcc *vcc) { struct ia_vcc *ia_vcc; IADEV *iadev; struct main_vc *vc; struct ext_vc *evc; int ret; IF_EVENT(printk("iadev: open_tx entered vcc->vci = %d\n", vcc->vci);) if (vcc->qos.txtp.traffic_class == ATM_NONE) return 0; iadev = INPH_IA_DEV(vcc->dev); if (iadev->phy_type & FE_25MBIT_PHY) { if (vcc->qos.txtp.traffic_class == ATM_ABR) { printk("IA: ABR not support\n"); return -EINVAL; } if (vcc->qos.txtp.traffic_class == ATM_CBR) { printk("IA: CBR not support\n"); return -EINVAL; } } ia_vcc = INPH_IA_VCC(vcc); memset((caddr_t)ia_vcc, 0, sizeof(*ia_vcc)); if (vcc->qos.txtp.max_sdu > (iadev->tx_buf_sz - sizeof(struct cpcs_trailer))){ printk("IA: SDU size (%d) over the configured SDU size %d\n", vcc->qos.txtp.max_sdu,iadev->tx_buf_sz); vcc->dev_data = NULL; kfree(ia_vcc); return -EINVAL; } ia_vcc->vc_desc_cnt = 0; ia_vcc->txing = 1; /* find pcr */ if (vcc->qos.txtp.max_pcr == ATM_MAX_PCR) vcc->qos.txtp.pcr = iadev->LineRate; else if ((vcc->qos.txtp.max_pcr == 0)&&( vcc->qos.txtp.pcr <= 0)) vcc->qos.txtp.pcr = iadev->LineRate; else if ((vcc->qos.txtp.max_pcr > vcc->qos.txtp.pcr) && (vcc->qos.txtp.max_pcr>
0)) vcc->qos.txtp.pcr = vcc->qos.txtp.max_pcr; if (vcc->qos.txtp.pcr > iadev->LineRate) vcc->qos.txtp.pcr = iadev->LineRate; ia_vcc->pcr = vcc->qos.txtp.pcr; if (ia_vcc->pcr > (iadev->LineRate / 6) ) ia_vcc->ltimeout = HZ / 10; else if (ia_vcc->pcr > (iadev->LineRate / 130)) ia_vcc->ltimeout = HZ; else if (ia_vcc->pcr <= 170) ia_vcc->ltimeout = 16 * HZ; else ia_vcc->ltimeout = 2700 * HZ / ia_vcc->pcr; if (ia_vcc->pcr < iadev->rate_limit) skb_queue_head_init (&ia_vcc->txing_skb); if (ia_vcc->pcr < iadev->rate_limit) { struct sock *sk = sk_atm(vcc); if (vcc->qos.txtp.max_sdu != 0) { if (ia_vcc->pcr > 60000) sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 5; else if (ia_vcc->pcr > 2000) sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 4; else sk->sk_sndbuf = vcc->qos.txtp.max_sdu * 3; } else sk->sk_sndbuf = 24576; } vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR; evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR; vc += vcc->vci; evc += vcc->vci; memset((caddr_t)vc, 0, sizeof(*vc)); memset((caddr_t)evc, 0, sizeof(*evc)); /* store the most significant 4 bits of vci as the last 4 bits of first part of atm header. store the last 12 bits of vci as first 12 bits of the second part of the atm header. */ evc->atm_hdr1 = (vcc->vci >> 12) & 0x000f; evc->atm_hdr2 = (vcc->vci & 0x0fff) << 4; /* check the following for different traffic classes */ if (vcc->qos.txtp.traffic_class == ATM_UBR) { vc->type = UBR; vc->status = CRC_APPEND; vc->acr = cellrate_to_float(iadev->LineRate); if (vcc->qos.txtp.pcr > 0) vc->acr = cellrate_to_float(vcc->qos.txtp.pcr); IF_UBR(printk("UBR: txtp.pcr = 0x%x f_rate = 0x%x\n", vcc->qos.txtp.max_pcr,vc->acr);) } else if (vcc->qos.txtp.traffic_class == ATM_ABR) { srv_cls_param_t srv_p; IF_ABR(printk("Tx ABR VCC\n");) init_abr_vc(iadev, &srv_p); if (vcc->qos.txtp.pcr > 0) srv_p.pcr = vcc->qos.txtp.pcr; if (vcc->qos.txtp.min_pcr > 0) { int tmpsum = iadev->sum_mcr+iadev->sum_cbr+vcc->qos.txtp.min_pcr; if (tmpsum > iadev->LineRate) return -EBUSY; srv_p.mcr = vcc->qos.txtp.min_pcr; iadev->sum_mcr += vcc->qos.txtp.min_pcr; } else srv_p.mcr = 0; if (vcc->qos.txtp.icr) srv_p.icr = vcc->qos.txtp.icr; if (vcc->qos.txtp.tbe) srv_p.tbe = vcc->qos.txtp.tbe; if (vcc->qos.txtp.frtt) srv_p.frtt = vcc->qos.txtp.frtt; if (vcc->qos.txtp.rif) srv_p.rif = vcc->qos.txtp.rif; if (vcc->qos.txtp.rdf) srv_p.rdf = vcc->qos.txtp.rdf; if (vcc->qos.txtp.nrm_pres) srv_p.nrm = vcc->qos.txtp.nrm; if (vcc->qos.txtp.trm_pres) srv_p.trm = vcc->qos.txtp.trm; if (vcc->qos.txtp.adtf_pres) srv_p.adtf = vcc->qos.txtp.adtf; if (vcc->qos.txtp.cdf_pres) srv_p.cdf = vcc->qos.txtp.cdf; if (srv_p.icr > srv_p.pcr) srv_p.icr = srv_p.pcr; IF_ABR(printk("ABR:vcc->qos.txtp.max_pcr = %d mcr = %d\n", srv_p.pcr, srv_p.mcr);) ia_open_abr_vc(iadev, &srv_p, vcc, 1); } else if (vcc->qos.txtp.traffic_class == ATM_CBR) { if (iadev->phy_type & FE_25MBIT_PHY) { printk("IA: CBR not support\n"); return -EINVAL; } if (vcc->qos.txtp.max_pcr > iadev->LineRate) { IF_CBR(printk("PCR is not available\n");) return -1; } vc->type = CBR; vc->status = CRC_APPEND; if ((ret = ia_cbr_setup (iadev, vcc)) < 0) { return ret; } } else printk("iadev: Non UBR, ABR and CBR traffic not supportedn"); iadev->testTable[vcc->vci]->vc_status |= VC_ACTIVE; IF_EVENT(printk("ia open_tx returning \n");) return 0; } static int tx_init(struct atm_dev *dev) { IADEV *iadev; struct tx_buf_desc *buf_desc_ptr; unsigned int tx_pkt_start; void *dle_addr; int i; u_short tcq_st_adr; u_short *tcq_start; u_short prq_st_adr; u_short *prq_start; struct main_vc *vc; struct ext_vc *evc; u_short tmp16; u32 
vcsize_sel; iadev = INPH_IA_DEV(dev); spin_lock_init(&iadev->tx_lock); IF_INIT(printk("Tx MASK REG: 0x%0x\n", readw(iadev->seg_reg+SEG_MASK_REG));) /* Allocate 4k (boundary aligned) bytes */ dle_addr = pci_alloc_consistent(iadev->pci, DLE_TOTAL_SIZE, &iadev->tx_dle_dma); if (!dle_addr) { printk(KERN_ERR DEV_LABEL "can't allocate DLEs\n"); goto err_out; } iadev->tx_dle_q.start = (struct dle*)dle_addr; iadev->tx_dle_q.read = iadev->tx_dle_q.start; iadev->tx_dle_q.write = iadev->tx_dle_q.start; iadev->tx_dle_q.end = (struct dle*)((unsigned long)dle_addr+sizeof(struct dle)*DLE_ENTRIES); /* write the upper 20 bits of the start address to tx list address register */ writel(iadev->tx_dle_dma & 0xfffff000, iadev->dma + IPHASE5575_TX_LIST_ADDR); writew(0xffff, iadev->seg_reg+SEG_MASK_REG); writew(0, iadev->seg_reg+MODE_REG_0); writew(RESET_SEG, iadev->seg_reg+SEG_COMMAND_REG); iadev->MAIN_VC_TABLE_ADDR = iadev->seg_ram+MAIN_VC_TABLE*iadev->memSize; iadev->EXT_VC_TABLE_ADDR = iadev->seg_ram+EXT_VC_TABLE*iadev->memSize; iadev->ABR_SCHED_TABLE_ADDR=iadev->seg_ram+ABR_SCHED_TABLE*iadev->memSize; /* Transmit side control memory map -------------------------------- Buffer descr 0x0000 (128 - 4K) Commn queues 0x1000 Transmit comp, Packet ready(0x1400) (512 - 1K) each TCQ - 4K, PRQ - 5K CBR Table 0x1800 (as needed) - 6K UBR Table 0x3000 (1K - 4K) - 12K UBR Wait queue 0x4000 (1K - 4K) - 16K ABR sched 0x5000 and ABR wait queue (1K - 2K) each ABR Tbl - 20K, ABR Wq - 22K extended VC 0x6000 (1K - 8K) - 24K VC Table 0x8000 (1K - 32K) - 32K Between 0x2000 (8K) and 0x3000 (12K) there is 4K space left for VBR Tbl and Wait q, which can be allotted later. */ /* Buffer Descriptor Table Base address */ writew(TX_DESC_BASE, iadev->seg_reg+SEG_DESC_BASE); /* initialize each entry in the buffer descriptor table */ buf_desc_ptr =(struct tx_buf_desc *)(iadev->seg_ram+TX_DESC_BASE); memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr)); buf_desc_ptr++; tx_pkt_start = TX_PACKET_RAM; for(i=1; i<=iadev->num_tx_desc; i++) { memset((caddr_t)buf_desc_ptr, 0, sizeof(*buf_desc_ptr)); buf_desc_ptr->desc_mode = AAL5; buf_desc_ptr->buf_start_hi = tx_pkt_start >> 16; buf_desc_ptr->buf_start_lo = tx_pkt_start & 0x0000ffff; buf_desc_ptr++; tx_pkt_start += iadev->tx_buf_sz; } iadev->tx_buf = kmalloc(iadev->num_tx_desc*sizeof(struct cpcs_trailer_desc), GFP_KERNEL); if (!iadev->tx_buf) { printk(KERN_ERR DEV_LABEL " couldn't get mem\n"); goto err_free_dle; } for (i= 0; i< iadev->num_tx_desc; i++) { struct cpcs_trailer *cpcs; cpcs = kmalloc(sizeof(*cpcs), GFP_KERNEL|GFP_DMA); if(!cpcs) { printk(KERN_ERR DEV_LABEL " couldn't get freepage\n"); goto err_free_tx_bufs; } iadev->tx_buf[i].cpcs = cpcs; iadev->tx_buf[i].dma_addr = pci_map_single(iadev->pci, cpcs, sizeof(*cpcs), PCI_DMA_TODEVICE); } iadev->desc_tbl = kmalloc(iadev->num_tx_desc * sizeof(struct desc_tbl_t), GFP_KERNEL); if (!iadev->desc_tbl) { printk(KERN_ERR DEV_LABEL " couldn't get mem\n"); goto err_free_all_tx_bufs; } /* Communication Queues base address */ i = TX_COMP_Q * iadev->memSize; writew(i >> 16, iadev->seg_reg+SEG_QUEUE_BASE); /* Transmit Complete Queue */ writew(i, iadev->seg_reg+TCQ_ST_ADR); writew(i, iadev->seg_reg+TCQ_RD_PTR); writew(i+iadev->num_tx_desc*sizeof(u_short),iadev->seg_reg+TCQ_WR_PTR); iadev->host_tcq_wr = i + iadev->num_tx_desc*sizeof(u_short); writew(i+2 * iadev->num_tx_desc * sizeof(u_short), iadev->seg_reg+TCQ_ED_ADR); /* Fill the TCQ with all the free descriptors. 
*/ tcq_st_adr = readw(iadev->seg_reg+TCQ_ST_ADR); tcq_start = (u_short *)(iadev->seg_ram+tcq_st_adr); for(i=1; i<=iadev->num_tx_desc; i++) { *tcq_start = (u_short)i; tcq_start++; } /* Packet Ready Queue */ i = PKT_RDY_Q * iadev->memSize; writew(i, iadev->seg_reg+PRQ_ST_ADR); writew(i+2 * iadev->num_tx_desc * sizeof(u_short), iadev->seg_reg+PRQ_ED_ADR); writew(i, iadev->seg_reg+PRQ_RD_PTR); writew(i, iadev->seg_reg+PRQ_WR_PTR); /* Load local copy of PRQ and TCQ ptrs */ iadev->ffL.prq_st = readw(iadev->seg_reg+PRQ_ST_ADR) & 0xffff; iadev->ffL.prq_ed = readw(iadev->seg_reg+PRQ_ED_ADR) & 0xffff; iadev->ffL.prq_wr = readw(iadev->seg_reg+PRQ_WR_PTR) & 0xffff; iadev->ffL.tcq_st = readw(iadev->seg_reg+TCQ_ST_ADR) & 0xffff; iadev->ffL.tcq_ed = readw(iadev->seg_reg+TCQ_ED_ADR) & 0xffff; iadev->ffL.tcq_rd = readw(iadev->seg_reg+TCQ_RD_PTR) & 0xffff; /* Just for safety initializing the queue to have desc 1 always */ /* Fill the PRQ with all the free descriptors. */ prq_st_adr = readw(iadev->seg_reg+PRQ_ST_ADR); prq_start = (u_short *)(iadev->seg_ram+prq_st_adr); for(i=1; i<=iadev->num_tx_desc; i++) { *prq_start = (u_short)0; /* desc 1 in all entries */ prq_start++; } /* CBR Table */ IF_INIT(printk("Start CBR Init\n");) #if 1 /* for 1K VC board, CBR_PTR_BASE is 0 */ writew(0,iadev->seg_reg+CBR_PTR_BASE); #else /* Charlie's logic is wrong ? */ tmp16 = (iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize)>>17; IF_INIT(printk("cbr_ptr_base = 0x%x ", tmp16);) writew(tmp16,iadev->seg_reg+CBR_PTR_BASE); #endif IF_INIT(printk("value in register = 0x%x\n", readw(iadev->seg_reg+CBR_PTR_BASE));) tmp16 = (CBR_SCHED_TABLE*iadev->memSize) >> 1; writew(tmp16, iadev->seg_reg+CBR_TAB_BEG); IF_INIT(printk("cbr_tab_beg = 0x%x in reg = 0x%x \n", tmp16, readw(iadev->seg_reg+CBR_TAB_BEG));) writew(tmp16, iadev->seg_reg+CBR_TAB_END+1); // CBR_PTR; tmp16 = (CBR_SCHED_TABLE*iadev->memSize + iadev->num_vc*6 - 2) >> 1; writew(tmp16, iadev->seg_reg+CBR_TAB_END); IF_INIT(printk("iadev->seg_reg = 0x%p CBR_PTR_BASE = 0x%x\n", iadev->seg_reg, readw(iadev->seg_reg+CBR_PTR_BASE));) IF_INIT(printk("CBR_TAB_BEG = 0x%x, CBR_TAB_END = 0x%x, CBR_PTR = 0x%x\n", readw(iadev->seg_reg+CBR_TAB_BEG), readw(iadev->seg_reg+CBR_TAB_END), readw(iadev->seg_reg+CBR_TAB_END+1));) /* Initialize the CBR Scheduling Table */ memset_io(iadev->seg_ram+CBR_SCHED_TABLE*iadev->memSize, 0, iadev->num_vc*6); iadev->CbrRemEntries = iadev->CbrTotEntries = iadev->num_vc*3; iadev->CbrEntryPt = 0; iadev->Granularity = MAX_ATM_155 / iadev->CbrTotEntries; iadev->NumEnabledCBR = 0; /* UBR scheduling Table and wait queue */ /* initialize all bytes of UBR scheduler table and wait queue to 0 - SCHEDSZ is 1K (# of entries). - UBR Table size is 4K - UBR wait queue is 4K since the table and wait queues are contiguous, all the bytes can be initialized by one memset. */ vcsize_sel = 0; i = 8*1024; while (i != iadev->num_vc) { i /= 2; vcsize_sel++; } i = MAIN_VC_TABLE * iadev->memSize; writew(vcsize_sel | ((i >> 8) & 0xfff8),iadev->seg_reg+VCT_BASE); i = EXT_VC_TABLE * iadev->memSize; writew((i >> 8) & 0xfffe, iadev->seg_reg+VCTE_BASE); i = UBR_SCHED_TABLE * iadev->memSize; writew((i & 0xffff) >> 11, iadev->seg_reg+UBR_SBPTR_BASE); i = UBR_WAIT_Q * iadev->memSize; writew((i >> 7) & 0xffff, iadev->seg_reg+UBRWQ_BASE); memset((caddr_t)(iadev->seg_ram+UBR_SCHED_TABLE*iadev->memSize), 0, iadev->num_vc*8); /* ABR scheduling Table(0x5000-0x57ff) and wait queue(0x5800-0x5fff)*/ /* initialize all bytes of ABR scheduler table and wait queue to 0 - SCHEDSZ is 1K (# of entries).
- ABR Table size is 2K - ABR wait queue is 2K since the table and wait queues are contiguous, all the bytes can be initialized by one memset. */ i = ABR_SCHED_TABLE * iadev->memSize; writew((i >> 11) & 0xffff, iadev->seg_reg+ABR_SBPTR_BASE); i = ABR_WAIT_Q * iadev->memSize; writew((i >> 7) & 0xffff, iadev->seg_reg+ABRWQ_BASE); i = ABR_SCHED_TABLE*iadev->memSize; memset((caddr_t)(iadev->seg_ram+i), 0, iadev->num_vc*4); vc = (struct main_vc *)iadev->MAIN_VC_TABLE_ADDR; evc = (struct ext_vc *)iadev->EXT_VC_TABLE_ADDR; iadev->testTable = kmalloc(sizeof(long)*iadev->num_vc, GFP_KERNEL); if (!iadev->testTable) { printk("Get freepage failed\n"); goto err_free_desc_tbl; } for(i=0; i<iadev->num_vc; i++) { memset((caddr_t)vc, 0, sizeof(*vc)); memset((caddr_t)evc, 0, sizeof(*evc)); iadev->testTable[i] = kmalloc(sizeof(struct testTable_t), GFP_KERNEL); if (!iadev->testTable[i]) goto err_free_test_tables; iadev->testTable[i]->lastTime = 0; iadev->testTable[i]->fract = 0; iadev->testTable[i]->vc_status = VC_UBR; vc++; evc++; } /* Other Initialization */ /* Max Rate Register */ if (iadev->phy_type & FE_25MBIT_PHY) { writew(RATE25, iadev->seg_reg+MAXRATE); writew((UBR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS); } else { writew(cellrate_to_float(iadev->LineRate),iadev->seg_reg+MAXRATE); writew((UBR_EN | ABR_EN | (0x23 << 2)), iadev->seg_reg+STPARMS); } /* Set Idle Header Registers to be sure */ writew(0, iadev->seg_reg+IDLEHEADHI); writew(0, iadev->seg_reg+IDLEHEADLO); /* Program ABR UBR Priority Register as PRI_ABR_UBR_EQUAL */ writew(0xaa00, iadev->seg_reg+ABRUBR_ARB); iadev->close_pending = 0; init_waitqueue_head(&iadev->close_wait); init_waitqueue_head(&iadev->timeout_wait); skb_queue_head_init(&iadev->tx_dma_q); ia_init_rtn_q(&iadev->tx_return_q); /* RM Cell Protocol ID and Message Type */ writew(RM_TYPE_4_0, iadev->seg_reg+RM_TYPE); skb_queue_head_init (&iadev->tx_backlog); /* Mode Register 1 */ writew(MODE_REG_1_VAL, iadev->seg_reg+MODE_REG_1); /* Mode Register 0 */ writew(T_ONLINE, iadev->seg_reg+MODE_REG_0); /* Interrupt Status Register - read to clear */ readw(iadev->seg_reg+SEG_INTR_STATUS_REG); /* Interrupt Mask Reg - don't mask TCQ_NOT_EMPTY interrupt generation */ writew(~(TRANSMIT_DONE | TCQ_NOT_EMPTY), iadev->seg_reg+SEG_MASK_REG); writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG); iadev->tx_pkt_cnt = 0; iadev->rate_limit = iadev->LineRate / 3; return 0; err_free_test_tables: while (--i >= 0) kfree(iadev->testTable[i]); kfree(iadev->testTable); err_free_desc_tbl: kfree(iadev->desc_tbl); err_free_all_tx_bufs: i = iadev->num_tx_desc; err_free_tx_bufs: while (--i >= 0) { struct cpcs_trailer_desc *desc = iadev->tx_buf + i; pci_unmap_single(iadev->pci, desc->dma_addr, sizeof(*desc->cpcs), PCI_DMA_TODEVICE); kfree(desc->cpcs); } kfree(iadev->tx_buf); err_free_dle: pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start, iadev->tx_dle_dma); err_out: return -ENOMEM; } static irqreturn_t ia_int(int irq, void *dev_id) { struct atm_dev *dev; IADEV *iadev; unsigned int status; int handled = 0; dev = dev_id; iadev = INPH_IA_DEV(dev); while( (status = readl(iadev->reg+IPHASE5575_BUS_STATUS_REG) & 0x7f)) { handled = 1; IF_EVENT(printk("ia_int: status = 0x%x\n", status);) if (status & STAT_REASSINT) { /* do something */ IF_EVENT(printk("REASSINT Bus status reg: %08x\n", status);) rx_intr(dev); } if (status & STAT_DLERINT) { /* Clear this bit by writing a 1 to it.
*/ *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLERINT; rx_dle_intr(dev); } if (status & STAT_SEGINT) { /* do something */ IF_EVENT(printk("IA: tx_intr \n");) tx_intr(dev); } if (status & STAT_DLETINT) { *(u_int *)(iadev->reg+IPHASE5575_BUS_STATUS_REG) = STAT_DLETINT; tx_dle_intr(dev); } if (status & (STAT_FEINT | STAT_ERRINT | STAT_MARKINT)) { if (status & STAT_FEINT) IaFrontEndIntr(iadev); } } return IRQ_RETVAL(handled); } /*----------------------------- entries --------------------------------*/ static int get_esi(struct atm_dev *dev) { IADEV *iadev; int i; u32 mac1; u16 mac2; iadev = INPH_IA_DEV(dev); mac1 = cpu_to_be32(le32_to_cpu(readl( iadev->reg+IPHASE5575_MAC1))); mac2 = cpu_to_be16(le16_to_cpu(readl(iadev->reg+IPHASE5575_MAC2))); IF_INIT(printk("ESI: 0x%08x%04x\n", mac1, mac2);) for (i=0; i<MAC1_LEN; i++) dev->esi[i] = mac1 >>(8*(MAC1_LEN-1-i)); for (i=0; i<MAC2_LEN; i++) dev->esi[i+MAC1_LEN] = mac2 >>(8*(MAC2_LEN - 1 -i)); return 0; } static int reset_sar(struct atm_dev *dev) { IADEV *iadev; int i, error = 1; unsigned int pci[64]; iadev = INPH_IA_DEV(dev); for(i=0; i<64; i++) if ((error = pci_read_config_dword(iadev->pci, i*4, &pci[i])) != PCIBIOS_SUCCESSFUL) return error; writel(0, iadev->reg+IPHASE5575_EXT_RESET); for(i=0; i<64; i++) if ((error = pci_write_config_dword(iadev->pci, i*4, pci[i])) != PCIBIOS_SUCCESSFUL) return error; udelay(5); return 0; } static int __devinit ia_init(struct atm_dev *dev) { IADEV *iadev; unsigned long real_base; void __iomem *base; unsigned short command; int error, i; /* The device has been identified and registered. Now we read necessary configuration info like memory base address, interrupt number etc */ IF_INIT(printk(">ia_init\n");) dev->ci_range.vpi_bits = 0; dev->ci_range.vci_bits = NR_VCI_LD; iadev = INPH_IA_DEV(dev); real_base = pci_resource_start (iadev->pci, 0); iadev->irq = iadev->pci->irq; error = pci_read_config_word(iadev->pci, PCI_COMMAND, &command); if (error) { printk(KERN_ERR DEV_LABEL "(itf %d): init error 0x%x\n", dev->number,error); return -EINVAL; } IF_INIT(printk(DEV_LABEL "(itf %d): rev.%d,realbase=0x%lx,irq=%d\n", dev->number, iadev->pci->revision, real_base, iadev->irq);) /* find mapping size of board */ iadev->pci_map_size = pci_resource_len(iadev->pci, 0); if (iadev->pci_map_size == 0x100000){ iadev->num_vc = 4096; dev->ci_range.vci_bits = NR_VCI_4K_LD; iadev->memSize = 4; } else if (iadev->pci_map_size == 0x40000) { iadev->num_vc = 1024; iadev->memSize = 1; } else { printk("Unknown pci_map_size = 0x%x\n", iadev->pci_map_size); return -EINVAL; } IF_INIT(printk (DEV_LABEL "map size: %i\n", iadev->pci_map_size);) /* enable bus mastering */ pci_set_master(iadev->pci); /* * Delay at least 1us before doing any mem accesses (how 'bout 10?) */ udelay(10); /* mapping the physical address to a virtual address in address space */ base = ioremap(real_base,iadev->pci_map_size); /* ioremap is not resolved ??? 
*/ if (!base) { printk(DEV_LABEL " (itf %d): can't set up page mapping\n", dev->number); return error; } IF_INIT(printk(DEV_LABEL " (itf %d): rev.%d,base=%p,irq=%d\n", dev->number, iadev->pci->revision, base, iadev->irq);) /* filling the iphase dev structure */ iadev->mem = iadev->pci_map_size /2; iadev->real_base = real_base; iadev->base = base; /* Bus Interface Control Registers */ iadev->reg = base + REG_BASE; /* Segmentation Control Registers */ iadev->seg_reg = base + SEG_BASE; /* Reassembly Control Registers */ iadev->reass_reg = base + REASS_BASE; /* Front end/ DMA control registers */ iadev->phy = base + PHY_BASE; iadev->dma = base + PHY_BASE; /* RAM - Segmentation RAm and Reassembly RAM */ iadev->ram = base + ACTUAL_RAM_BASE; iadev->seg_ram = base + ACTUAL_SEG_RAM_BASE; iadev->reass_ram = base + ACTUAL_REASS_RAM_BASE; /* lets print out the above */ IF_INIT(printk("Base addrs: %p %p %p \n %p %p %p %p\n", iadev->reg,iadev->seg_reg,iadev->reass_reg, iadev->phy, iadev->ram, iadev->seg_ram, iadev->reass_ram);) /* lets try reading the MAC address */ error = get_esi(dev); if (error) { iounmap(iadev->base); return error; } printk("IA: "); for (i=0; i < ESI_LEN; i++) printk("%s%02X",i ? "-" : "",dev->esi[i]); printk("\n"); /* reset SAR */ if (reset_sar(dev)) { iounmap(iadev->base); printk("IA: reset SAR fail, please try again\n"); return 1; } return 0; } static void ia_update_stats(IADEV *iadev) { if (!iadev->carrier_detect) return; iadev->rx_cell_cnt += readw(iadev->reass_reg+CELL_CTR0)&0xffff; iadev->rx_cell_cnt += (readw(iadev->reass_reg+CELL_CTR1) & 0xffff) << 16; iadev->drop_rxpkt += readw(iadev->reass_reg + DRP_PKT_CNTR ) & 0xffff; iadev->drop_rxcell += readw(iadev->reass_reg + ERR_CNTR) & 0xffff; iadev->tx_cell_cnt += readw(iadev->seg_reg + CELL_CTR_LO_AUTO)&0xffff; iadev->tx_cell_cnt += (readw(iadev->seg_reg+CELL_CTR_HIGH_AUTO)&0xffff)<<16; return; } static void ia_led_timer(unsigned long arg) { unsigned long flags; static u_char blinking[8] = {0, 0, 0, 0, 0, 0, 0, 0}; u_char i; static u32 ctrl_reg; for (i = 0; i < iadev_count; i++) { if (ia_dev[i]) { ctrl_reg = readl(ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG); if (blinking[i] == 0) { blinking[i]++; ctrl_reg &= (~CTRL_LED); writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG); ia_update_stats(ia_dev[i]); } else { blinking[i] = 0; ctrl_reg |= CTRL_LED; writel(ctrl_reg, ia_dev[i]->reg+IPHASE5575_BUS_CONTROL_REG); spin_lock_irqsave(&ia_dev[i]->tx_lock, flags); if (ia_dev[i]->close_pending) wake_up(&ia_dev[i]->close_wait); ia_tx_poll(ia_dev[i]); spin_unlock_irqrestore(&ia_dev[i]->tx_lock, flags); } } } mod_timer(&ia_timer, jiffies + HZ / 4); return; } static void ia_phy_put(struct atm_dev *dev, unsigned char value, unsigned long addr) { writel(value, INPH_IA_DEV(dev)->phy+addr); } static unsigned char ia_phy_get(struct atm_dev *dev, unsigned long addr) { return readl(INPH_IA_DEV(dev)->phy+addr); } static void ia_free_tx(IADEV *iadev) { int i; kfree(iadev->desc_tbl); for (i = 0; i < iadev->num_vc; i++) kfree(iadev->testTable[i]); kfree(iadev->testTable); for (i = 0; i < iadev->num_tx_desc; i++) { struct cpcs_trailer_desc *desc = iadev->tx_buf + i; pci_unmap_single(iadev->pci, desc->dma_addr, sizeof(*desc->cpcs), PCI_DMA_TODEVICE); kfree(desc->cpcs); } kfree(iadev->tx_buf); pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->tx_dle_q.start, iadev->tx_dle_dma); } static void ia_free_rx(IADEV *iadev) { kfree(iadev->rx_open); pci_free_consistent(iadev->pci, DLE_TOTAL_SIZE, iadev->rx_dle_q.start, iadev->rx_dle_dma); } static int 
__devinit ia_start(struct atm_dev *dev) { IADEV *iadev; int error; unsigned char phy; u32 ctrl_reg; IF_EVENT(printk(">ia_start\n");) iadev = INPH_IA_DEV(dev); if (request_irq(iadev->irq, &ia_int, IRQF_SHARED, DEV_LABEL, dev)) { printk(KERN_ERR DEV_LABEL "(itf %d): IRQ%d is already in use\n", dev->number, iadev->irq); error = -EAGAIN; goto err_out; } /* @@@ should release IRQ on error */ /* enabling memory + master */ if ((error = pci_write_config_word(iadev->pci, PCI_COMMAND, PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER ))) { printk(KERN_ERR DEV_LABEL "(itf %d): can't enable memory+" "master (0x%x)\n",dev->number, error); error = -EIO; goto err_free_irq; } udelay(10); /* Maybe we should reset the front end, initialize Bus Interface Control Registers and see. */ IF_INIT(printk("Bus ctrl reg: %08x\n", readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));) ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG); ctrl_reg = (ctrl_reg & (CTRL_LED | CTRL_FE_RST)) | CTRL_B8 | CTRL_B16 | CTRL_B32 | CTRL_B48 | CTRL_B64 | CTRL_B128 | CTRL_ERRMASK | CTRL_DLETMASK /* shud be removed l8r */ | CTRL_DLERMASK | CTRL_SEGMASK | CTRL_REASSMASK | CTRL_FEMASK | CTRL_CSPREEMPT; writel(ctrl_reg, iadev->reg+IPHASE5575_BUS_CONTROL_REG); IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG)); printk("Bus status reg after init: %08x\n", readl(iadev->reg+IPHASE5575_BUS_STATUS_REG));) ia_hw_type(iadev); error = tx_init(dev); if (error) goto err_free_irq; error = rx_init(dev); if (error) goto err_free_tx; ctrl_reg = readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG); writel(ctrl_reg | CTRL_FE_RST, iadev->reg+IPHASE5575_BUS_CONTROL_REG); IF_INIT(printk("Bus ctrl reg after initializing: %08x\n", readl(iadev->reg+IPHASE5575_BUS_CONTROL_REG));) phy = 0; /* resolve compiler complaint */ IF_INIT ( if ((phy=ia_phy_get(dev,0)) == 0x30) printk("IA: pm5346,rev.%d\n",phy&0x0f); else printk("IA: utopia,rev.%0x\n",phy);) if (iadev->phy_type & FE_25MBIT_PHY) ia_mb25_init(iadev); else if (iadev->phy_type & (FE_DS3_PHY | FE_E3_PHY)) ia_suni_pm7345_init(iadev); else { error = suni_init(dev); if (error) goto err_free_rx; if (dev->phy->start) { error = dev->phy->start(dev); if (error) goto err_free_rx; } /* Get iadev->carrier_detect status */ IaFrontEndIntr(iadev); } return 0; err_free_rx: ia_free_rx(iadev); err_free_tx: ia_free_tx(iadev); err_free_irq: free_irq(iadev->irq, dev); err_out: return error; } static void ia_close(struct atm_vcc *vcc) { DEFINE_WAIT(wait); u16 *vc_table; IADEV *iadev; struct ia_vcc *ia_vcc; struct sk_buff *skb = NULL; struct sk_buff_head tmp_tx_backlog, tmp_vcc_backlog; unsigned long closetime, flags; iadev = INPH_IA_DEV(vcc->dev); ia_vcc = INPH_IA_VCC(vcc); if (!ia_vcc) return; IF_EVENT(printk("ia_close: ia_vcc->vc_desc_cnt = %d vci = %d\n", ia_vcc->vc_desc_cnt,vcc->vci);) clear_bit(ATM_VF_READY,&vcc->flags); skb_queue_head_init (&tmp_tx_backlog); skb_queue_head_init (&tmp_vcc_backlog); if (vcc->qos.txtp.traffic_class != ATM_NONE) { iadev->close_pending++; prepare_to_wait(&iadev->timeout_wait, &wait, TASK_UNINTERRUPTIBLE); schedule_timeout(50); finish_wait(&iadev->timeout_wait, &wait); spin_lock_irqsave(&iadev->tx_lock, flags); while((skb = skb_dequeue(&iadev->tx_backlog))) { if (ATM_SKB(skb)->vcc == vcc){ if (vcc->pop) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); } else skb_queue_tail(&tmp_tx_backlog, skb); } while((skb = skb_dequeue(&tmp_tx_backlog))) skb_queue_tail(&iadev->tx_backlog, skb); IF_EVENT(printk("IA TX Done decs_cnt = %d\n", ia_vcc->vc_desc_cnt);) closetime = 
300000 / ia_vcc->pcr; if (closetime == 0) closetime = 1; spin_unlock_irqrestore(&iadev->tx_lock, flags); wait_event_timeout(iadev->close_wait, (ia_vcc->vc_desc_cnt <= 0), closetime); spin_lock_irqsave(&iadev->tx_lock, flags); iadev->close_pending--; iadev->testTable[vcc->vci]->lastTime = 0; iadev->testTable[vcc->vci]->fract = 0; iadev->testTable[vcc->vci]->vc_status = VC_UBR; if (vcc->qos.txtp.traffic_class == ATM_ABR) { if (vcc->qos.txtp.min_pcr > 0) iadev->sum_mcr -= vcc->qos.txtp.min_pcr; } if (vcc->qos.txtp.traffic_class == ATM_CBR) { ia_vcc = INPH_IA_VCC(vcc); iadev->sum_mcr -= ia_vcc->NumCbrEntry*iadev->Granularity; ia_cbrVc_close (vcc); } spin_unlock_irqrestore(&iadev->tx_lock, flags); } if (vcc->qos.rxtp.traffic_class != ATM_NONE) { // reset reass table vc_table = (u16 *)(iadev->reass_ram+REASS_TABLE*iadev->memSize); vc_table += vcc->vci; *vc_table = NO_AAL5_PKT; // reset vc table vc_table = (u16 *)(iadev->reass_ram+RX_VC_TABLE*iadev->memSize); vc_table += vcc->vci; *vc_table = (vcc->vci << 6) | 15; if (vcc->qos.rxtp.traffic_class == ATM_ABR) { struct abr_vc_table __iomem *abr_vc_table = (iadev->reass_ram+ABR_VC_TABLE*iadev->memSize); abr_vc_table += vcc->vci; abr_vc_table->rdf = 0x0003; abr_vc_table->air = 0x5eb1; } // Drain the packets rx_dle_intr(vcc->dev); iadev->rx_open[vcc->vci] = NULL; } kfree(INPH_IA_VCC(vcc)); ia_vcc = NULL; vcc->dev_data = NULL; clear_bit(ATM_VF_ADDR,&vcc->flags); return; } static int ia_open(struct atm_vcc *vcc) { struct ia_vcc *ia_vcc; int error; if (!test_bit(ATM_VF_PARTIAL,&vcc->flags)) { IF_EVENT(printk("ia: not partially allocated resources\n");) vcc->dev_data = NULL; } if (vcc->vci != ATM_VPI_UNSPEC && vcc->vpi != ATM_VCI_UNSPEC) { IF_EVENT(printk("iphase open: unspec part\n");) set_bit(ATM_VF_ADDR,&vcc->flags); } if (vcc->qos.aal != ATM_AAL5) return -EINVAL; IF_EVENT(printk(DEV_LABEL "(itf %d): open %d.%d\n", vcc->dev->number, vcc->vpi, vcc->vci);) /* Device dependent initialization */ ia_vcc = kmalloc(sizeof(*ia_vcc), GFP_KERNEL); if (!ia_vcc) return -ENOMEM; vcc->dev_data = ia_vcc; if ((error = open_rx(vcc))) { IF_EVENT(printk("iadev: error in open_rx, closing\n");) ia_close(vcc); return error; } if ((error = open_tx(vcc))) { IF_EVENT(printk("iadev: error in open_tx, closing\n");) ia_close(vcc); return error; } set_bit(ATM_VF_READY,&vcc->flags); #if 0 { static u8 first = 1; if (first) { ia_timer.expires = jiffies + 3*HZ; add_timer(&ia_timer); first = 0; } } #endif IF_EVENT(printk("ia open returning\n");) return 0; } static int ia_change_qos(struct atm_vcc *vcc, struct atm_qos *qos, int flags) { IF_EVENT(printk(">ia_change_qos\n");) return 0; } static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg) { IA_CMDBUF ia_cmds; IADEV *iadev; int i, board; u16 __user *tmps; IF_EVENT(printk(">ia_ioctl\n");) if (cmd != IA_CMD) { if (!dev->phy->ioctl) return -EINVAL; return dev->phy->ioctl(dev,cmd,arg); } if (copy_from_user(&ia_cmds, arg, sizeof ia_cmds)) return -EFAULT; board = ia_cmds.status; if ((board < 0) || (board > iadev_count)) board = 0; iadev = ia_dev[board]; switch (ia_cmds.cmd) { case MEMDUMP: { switch (ia_cmds.sub_cmd) { case MEMDUMP_DEV: if (!capable(CAP_NET_ADMIN)) return -EPERM; if (copy_to_user(ia_cmds.buf, iadev, sizeof(IADEV))) return -EFAULT; ia_cmds.status = 0; break; case MEMDUMP_SEGREG: if (!capable(CAP_NET_ADMIN)) return -EPERM; tmps = (u16 __user *)ia_cmds.buf; for(i=0; i<0x80; i+=2, tmps++) if(put_user((u16)(readl(iadev->seg_reg+i) & 0xffff), tmps)) return -EFAULT; ia_cmds.status = 0; ia_cmds.len = 0x80; break; 
case MEMDUMP_REASSREG: if (!capable(CAP_NET_ADMIN)) return -EPERM; tmps = (u16 __user *)ia_cmds.buf; for(i=0; i<0x80; i+=2, tmps++) if(put_user((u16)(readl(iadev->reass_reg+i) & 0xffff), tmps)) return -EFAULT; ia_cmds.status = 0; ia_cmds.len = 0x80; break; case MEMDUMP_FFL: { ia_regs_t *regs_local; ffredn_t *ffL; rfredn_t *rfL; if (!capable(CAP_NET_ADMIN)) return -EPERM; regs_local = kmalloc(sizeof(*regs_local), GFP_KERNEL); if (!regs_local) return -ENOMEM; ffL = &regs_local->ffredn; rfL = &regs_local->rfredn; /* Copy real rfred registers into the local copy */ for (i=0; i<(sizeof (rfredn_t))/4; i++) ((u_int *)rfL)[i] = readl(iadev->reass_reg + i) & 0xffff; /* Copy real ffred registers into the local copy */ for (i=0; i<(sizeof (ffredn_t))/4; i++) ((u_int *)ffL)[i] = readl(iadev->seg_reg + i) & 0xffff; if (copy_to_user(ia_cmds.buf, regs_local,sizeof(ia_regs_t))) { kfree(regs_local); return -EFAULT; } kfree(regs_local); printk("Board %d registers dumped\n", board); ia_cmds.status = 0; } break; case READ_REG: { if (!capable(CAP_NET_ADMIN)) return -EPERM; desc_dbg(iadev); ia_cmds.status = 0; } break; case 0x6: { ia_cmds.status = 0; printk("skb = 0x%lx\n", (long)skb_peek(&iadev->tx_backlog)); printk("rtn_q: 0x%lx\n",(long)ia_deque_rtn_q(&iadev->tx_return_q)); } break; case 0x8: { struct k_sonet_stats *stats; stats = &PRIV(_ia_dev[board])->sonet_stats; printk("section_bip: %d\n", atomic_read(&stats->section_bip)); printk("line_bip : %d\n", atomic_read(&stats->line_bip)); printk("path_bip : %d\n", atomic_read(&stats->path_bip)); printk("line_febe : %d\n", atomic_read(&stats->line_febe)); printk("path_febe : %d\n", atomic_read(&stats->path_febe)); printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs)); printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs)); printk("tx_cells : %d\n", atomic_read(&stats->tx_cells)); printk("rx_cells : %d\n", atomic_read(&stats->rx_cells)); } ia_cmds.status = 0; break; case 0x9: if (!capable(CAP_NET_ADMIN)) return -EPERM; for (i = 1; i <= iadev->num_rx_desc; i++) free_desc(_ia_dev[board], i); writew( ~(RX_FREEQ_EMPT | RX_EXCP_RCVD), iadev->reass_reg+REASS_MASK_REG); iadev->rxing = 1; ia_cmds.status = 0; break; case 0xb: if (!capable(CAP_NET_ADMIN)) return -EPERM; IaFrontEndIntr(iadev); break; case 0xa: if (!capable(CAP_NET_ADMIN)) return -EPERM; { ia_cmds.status = 0; IADebugFlag = ia_cmds.maddr; printk("New debug option loaded\n"); } break; default: ia_cmds.status = 0; break; } } break; default: break; } return 0; } static int ia_getsockopt(struct atm_vcc *vcc, int level, int optname, void __user *optval, int optlen) { IF_EVENT(printk(">ia_getsockopt\n");) return -EINVAL; } static int ia_setsockopt(struct atm_vcc *vcc, int level, int optname, void __user *optval, unsigned int optlen) { IF_EVENT(printk(">ia_setsockopt\n");) return -EINVAL; } static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) { IADEV *iadev; struct dle *wr_ptr; struct tx_buf_desc __iomem *buf_desc_ptr; int desc; int comp_code; int total_len; struct cpcs_trailer *trailer; struct ia_vcc *iavcc; iadev = INPH_IA_DEV(vcc->dev); iavcc = INPH_IA_VCC(vcc); if (!iavcc->txing) { printk("discard packet on closed VC\n"); if (vcc->pop) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); return 0; } if (skb->len > iadev->tx_buf_sz - 8) { printk("Transmit size over tx buffer size\n"); if (vcc->pop) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); return 0; } if ((unsigned long)skb->data & 3) { printk("Misaligned SKB\n"); if (vcc->pop) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); return 0; } /* 
Get a descriptor number from our free descriptor queue We get the descr number from the TCQ now, since I am using the TCQ as a free buffer queue. Initially TCQ will be initialized with all the descriptors and is hence, full. */ desc = get_desc (iadev, iavcc); if (desc == 0xffff) return 1; comp_code = desc >> 13; desc &= 0x1fff; if ((desc == 0) || (desc > iadev->num_tx_desc)) { IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);) atomic_inc(&vcc->stats->tx); if (vcc->pop) vcc->pop(vcc, skb); else dev_kfree_skb_any(skb); return 0; /* return SUCCESS */ } if (comp_code) { IF_ERR(printk(DEV_LABEL "send desc:%d completion code %d error\n", desc, comp_code);) } /* remember the desc and vcc mapping */ iavcc->vc_desc_cnt++; iadev->desc_tbl[desc-1].iavcc = iavcc; iadev->desc_tbl[desc-1].txskb = skb; IA_SKB_STATE(skb) = 0; iadev->ffL.tcq_rd += 2; if (iadev->ffL.tcq_rd > iadev->ffL.tcq_ed) iadev->ffL.tcq_rd = iadev->ffL.tcq_st; writew(iadev->ffL.tcq_rd, iadev->seg_reg+TCQ_RD_PTR); /* Put the descriptor number in the packet ready queue and put the updated write pointer in the DLE field */ *(u16*)(iadev->seg_ram+iadev->ffL.prq_wr) = desc; iadev->ffL.prq_wr += 2; if (iadev->ffL.prq_wr > iadev->ffL.prq_ed) iadev->ffL.prq_wr = iadev->ffL.prq_st; /* Figure out the exact length of the packet and padding required to make it aligned on a 48 byte boundary. */ total_len = skb->len + sizeof(struct cpcs_trailer); total_len = ((total_len + 47) / 48) * 48; IF_TX(printk("ia packet len:%d padding:%d\n", total_len, total_len - skb->len);) /* Put the packet in a tx buffer */ trailer = iadev->tx_buf[desc-1].cpcs; IF_TX(printk("Sent: skb = 0x%p skb->data: 0x%p len: %d, desc: %d\n", skb, skb->data, skb->len, desc);) trailer->control = 0; /*big endian*/ trailer->length = ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8); trailer->crc32 = 0; /* not needed - dummy bytes */ /* Display the packet */ IF_TXPKT(printk("Sent data: len = %d MsgNum = %d\n", skb->len, tcnter++); xdump(skb->data, skb->len, "TX: "); printk("\n");) /* Build the buffer descriptor */ buf_desc_ptr = iadev->seg_ram+TX_DESC_BASE; buf_desc_ptr += desc; /* points to the corresponding entry */ buf_desc_ptr->desc_mode = AAL5 | EOM_EN | APP_CRC32 | CMPL_INT; /* Huh ? p.115 of users guide describes this as a read-only register */ writew(TRANSMIT_DONE, iadev->seg_reg+SEG_INTR_STATUS_REG); buf_desc_ptr->vc_index = vcc->vci; buf_desc_ptr->bytes = total_len; if (vcc->qos.txtp.traffic_class == ATM_ABR) clear_lockup (vcc, iadev); /* Build the DLE structure */ wr_ptr = iadev->tx_dle_q.write; memset((caddr_t)wr_ptr, 0, sizeof(*wr_ptr)); wr_ptr->sys_pkt_addr = pci_map_single(iadev->pci, skb->data, skb->len, PCI_DMA_TODEVICE); wr_ptr->local_pkt_addr = (buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo; /* wr_ptr->bytes = swap_byte_order(total_len); didn't seem to affect?? 
*/ wr_ptr->bytes = skb->len; /* hw bug - DLEs of 0x2d, 0x2e, 0x2f cause DMA lockup */ if ((wr_ptr->bytes >> 2) == 0xb) wr_ptr->bytes = 0x30; wr_ptr->mode = TX_DLE_PSI; wr_ptr->prq_wr_ptr_data = 0; /* end is not to be used for the DLE q */ if (++wr_ptr == iadev->tx_dle_q.end) wr_ptr = iadev->tx_dle_q.start; /* Build trailer dle */ wr_ptr->sys_pkt_addr = iadev->tx_buf[desc-1].dma_addr; wr_ptr->local_pkt_addr = ((buf_desc_ptr->buf_start_hi << 16) | buf_desc_ptr->buf_start_lo) + total_len - sizeof(struct cpcs_trailer); wr_ptr->bytes = sizeof(struct cpcs_trailer); wr_ptr->mode = DMA_INT_ENABLE; wr_ptr->prq_wr_ptr_data = iadev->ffL.prq_wr; /* end is not to be used for the DLE q */ if (++wr_ptr == iadev->tx_dle_q.end) wr_ptr = iadev->tx_dle_q.start; iadev->tx_dle_q.write = wr_ptr; ATM_DESC(skb) = vcc->vci; skb_queue_tail(&iadev->tx_dma_q, skb); atomic_inc(&vcc->stats->tx); iadev->tx_pkt_cnt++; /* Increment transaction counter */ writel(2, iadev->dma+IPHASE5575_TX_COUNTER); #if 0 /* add flow control logic */ if (atomic_read(&vcc->stats->tx) % 20 == 0) { if (iavcc->vc_desc_cnt > 10) { vcc->tx_quota = vcc->tx_quota * 3 / 4; printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); iavcc->flow_inc = -1; iavcc->saved_tx_quota = vcc->tx_quota; } else if ((iavcc->flow_inc < 0) && (iavcc->vc_desc_cnt < 3)) { // vcc->tx_quota = 3 * iavcc->saved_tx_quota / 4; printk("Tx2: vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); iavcc->flow_inc = 0; } } #endif IF_TX(printk("ia send done\n");) return 0; } static int ia_send(struct atm_vcc *vcc, struct sk_buff *skb) { IADEV *iadev; unsigned long flags; iadev = INPH_IA_DEV(vcc->dev); if ((!skb)||(skb->len>(iadev->tx_buf_sz-sizeof(struct cpcs_trailer)))) { if (!skb) printk(KERN_CRIT "null skb in ia_send\n"); else dev_kfree_skb_any(skb); return -EINVAL; } spin_lock_irqsave(&iadev->tx_lock, flags); if (!test_bit(ATM_VF_READY,&vcc->flags)){ dev_kfree_skb_any(skb); spin_unlock_irqrestore(&iadev->tx_lock, flags); return -EINVAL; } ATM_SKB(skb)->vcc = vcc; if (skb_peek(&iadev->tx_backlog)) { skb_queue_tail(&iadev->tx_backlog, skb); } else { if (ia_pkt_tx (vcc, skb)) { skb_queue_tail(&iadev->tx_backlog, skb); } } spin_unlock_irqrestore(&iadev->tx_lock, flags); return 0; } static int ia_proc_read(struct atm_dev *dev,loff_t *pos,char *page) { int left = *pos, n; char *tmpPtr; IADEV *iadev = INPH_IA_DEV(dev); if(!left--) { if (iadev->phy_type == FE_25MBIT_PHY) { n = sprintf(page, " Board Type : Iphase5525-1KVC-128K\n"); return n; } if (iadev->phy_type == FE_DS3_PHY) n = sprintf(page, " Board Type : Iphase-ATM-DS3"); else if (iadev->phy_type == FE_E3_PHY) n = sprintf(page, " Board Type : Iphase-ATM-E3"); else if (iadev->phy_type == FE_UTP_OPTION) n = sprintf(page, " Board Type : Iphase-ATM-UTP155"); else n = sprintf(page, " Board Type : Iphase-ATM-OC3"); tmpPtr = page + n; if (iadev->pci_map_size == 0x40000) n += sprintf(tmpPtr, "-1KVC-"); else n += sprintf(tmpPtr, "-4KVC-"); tmpPtr = page + n; if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_1M) n += sprintf(tmpPtr, "1M \n"); else if ((iadev->memType & MEM_SIZE_MASK) == MEM_SIZE_512K) n += sprintf(tmpPtr, "512K\n"); else n += sprintf(tmpPtr, "128K\n"); return n; } if (!left) { return sprintf(page, " Number of Tx Buffer: %u\n" " Size of Tx Buffer : %u\n" " Number of Rx Buffer: %u\n" " Size of Rx Buffer : %u\n" " Packets Received  : %u\n" " Packets Transmitted: %u\n" " Cells Received : %u\n" " Cells Transmitted : %u\n" " Board Dropped Cells: %u\n" " Board Dropped Pkts : %u\n", iadev->num_tx_desc, iadev->tx_buf_sz,
iadev->num_rx_desc, iadev->rx_buf_sz, iadev->rx_pkt_cnt, iadev->tx_pkt_cnt, iadev->rx_cell_cnt, iadev->tx_cell_cnt, iadev->drop_rxcell, iadev->drop_rxpkt); } return 0; } static const struct atmdev_ops ops = { .open = ia_open, .close = ia_close, .ioctl = ia_ioctl, .getsockopt = ia_getsockopt, .setsockopt = ia_setsockopt, .send = ia_send, .phy_put = ia_phy_put, .phy_get = ia_phy_get, .change_qos = ia_change_qos, .proc_read = ia_proc_read, .owner = THIS_MODULE, }; static int __devinit ia_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) { struct atm_dev *dev; IADEV *iadev; int ret; iadev = kzalloc(sizeof(*iadev), GFP_KERNEL); if (!iadev) { ret = -ENOMEM; goto err_out; } iadev->pci = pdev; IF_INIT(printk("ia detected at bus:%d dev: %d function:%d\n", pdev->bus->number, PCI_SLOT(pdev->devfn), PCI_FUNC(pdev->devfn));) if (pci_enable_device(pdev)) { ret = -ENODEV; goto err_out_free_iadev; } dev = atm_dev_register(DEV_LABEL, &pdev->dev, &ops, -1, NULL); if (!dev) { ret = -ENOMEM; goto err_out_disable_dev; } dev->dev_data = iadev; IF_INIT(printk(DEV_LABEL "registered at (itf :%d)\n", dev->number);) IF_INIT(printk("dev_id = 0x%p iadev->LineRate = %d \n", dev, iadev->LineRate);) pci_set_drvdata(pdev, dev); ia_dev[iadev_count] = iadev; _ia_dev[iadev_count] = dev; iadev_count++; if (ia_init(dev) || ia_start(dev)) { IF_INIT(printk("IA register failed!\n");) iadev_count--; ia_dev[iadev_count] = NULL; _ia_dev[iadev_count] = NULL; ret = -EINVAL; goto err_out_deregister_dev; } IF_EVENT(printk("iadev_count = %d\n", iadev_count);) iadev->next_board = ia_boards; ia_boards = dev; return 0; err_out_deregister_dev: atm_dev_deregister(dev); err_out_disable_dev: pci_disable_device(pdev); err_out_free_iadev: kfree(iadev); err_out: return ret; } static void __devexit ia_remove_one(struct pci_dev *pdev) { struct atm_dev *dev = pci_get_drvdata(pdev); IADEV *iadev = INPH_IA_DEV(dev); /* Disable phy interrupts */ ia_phy_put(dev, ia_phy_get(dev, SUNI_RSOP_CIE) & ~(SUNI_RSOP_CIE_LOSE), SUNI_RSOP_CIE); udelay(1); if (dev->phy && dev->phy->stop) dev->phy->stop(dev); /* De-register device */ free_irq(iadev->irq, dev); iadev_count--; ia_dev[iadev_count] = NULL; _ia_dev[iadev_count] = NULL; IF_EVENT(printk("deregistering iav at (itf:%d)\n", dev->number);) atm_dev_deregister(dev); iounmap(iadev->base); pci_disable_device(pdev); ia_free_rx(iadev); ia_free_tx(iadev); kfree(iadev); } static struct pci_device_id ia_pci_tbl[] = { { PCI_VENDOR_ID_IPHASE, 0x0008, PCI_ANY_ID, PCI_ANY_ID, }, { PCI_VENDOR_ID_IPHASE, 0x0009, PCI_ANY_ID, PCI_ANY_ID, }, { 0,} }; MODULE_DEVICE_TABLE(pci, ia_pci_tbl); static struct pci_driver ia_driver = { .name = DEV_LABEL, .id_table = ia_pci_tbl, .probe = ia_init_one, .remove = __devexit_p(ia_remove_one), }; static int __init ia_module_init(void) { int ret; ret = pci_register_driver(&ia_driver); if (ret >= 0) { ia_timer.expires = jiffies + 3*HZ; add_timer(&ia_timer); } else printk(KERN_ERR DEV_LABEL ": no adapter found\n"); return ret; } static void __exit ia_module_exit(void) { pci_unregister_driver(&ia_driver); del_timer(&ia_timer); } module_init(ia_module_init); module_exit(ia_module_exit);
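/*
 * Editor's note: the stand-alone sketch below is not part of the driver.
 * It illustrates, in plain user-space C, three details of the transmit path
 * above: padding an AAL5 PDU to a whole number of 48-byte cell payloads, the
 * byte-swapped 16-bit length field written into the CPCS trailer, and the
 * wrap-around advance of a PRQ/TCQ pointer. CPCS_TRAILER_LEN and the helper
 * names here are illustrative assumptions, not names from the driver; the
 * trailer is assumed to be 8 bytes on a little-endian host.
 */
#include <stdio.h>
#include <stdint.h>

#define CPCS_TRAILER_LEN 8	/* assumed sizeof(struct cpcs_trailer) */

/* Round payload + trailer up to a multiple of 48 bytes, mirroring
 * total_len = ((total_len + 47) / 48) * 48 in ia_pkt_tx(). */
static unsigned int aal5_padded_len(unsigned int skb_len)
{
	unsigned int total = skb_len + CPCS_TRAILER_LEN;

	return ((total + 47) / 48) * 48;
}

/* Store the 16-bit length big-endian, mirroring trailer->length =
 * ((skb->len & 0xff) << 8) | ((skb->len & 0xff00) >> 8). */
static uint16_t trailer_len_be(uint16_t len)
{
	return (uint16_t)(((len & 0x00ff) << 8) | ((len & 0xff00) >> 8));
}

/* Advance a PRQ/TCQ pointer by one u16 entry with wrap-around, mirroring
 * iadev->ffL.prq_wr += 2; if (prq_wr > prq_ed) prq_wr = prq_st. */
static unsigned short queue_ptr_advance(unsigned short ptr,
					unsigned short st, unsigned short ed)
{
	ptr += 2;
	return (ptr > ed) ? st : ptr;
}

int main(void)
{
	unsigned int skb_len = 100;	/* example payload size */

	printf("payload %u -> padded PDU %u bytes\n",
	       skb_len, aal5_padded_len(skb_len));	/* 100 -> 144 */
	printf("trailer length field: 0x%04x\n",
	       trailer_len_be((uint16_t)skb_len));	/* 0x6400 */
	printf("ptr after wrap: 0x%04x\n",
	       queue_ptr_advance(0x10fe, 0x1000, 0x10fe));	/* 0x1000 */
	return 0;
}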
gpl-2.0
icebluechao/stuttgart_kernel
arch/powerpc/sysdev/qe_lib/qe_io.c
3384
5573
/* * arch/powerpc/sysdev/qe_lib/qe_io.c * * QE Parallel I/O ports configuration routines * * Copyright (C) Freescale Semiconductor, Inc. 2006. All rights reserved. * * Author: Li Yang <LeoLi@freescale.com> * Based on code from Shlomi Gridish <gridish@freescale.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/stddef.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/module.h> #include <linux/ioport.h> #include <asm/io.h> #include <asm/qe.h> #include <asm/prom.h> #include <sysdev/fsl_soc.h> #undef DEBUG static struct qe_pio_regs __iomem *par_io; static int num_par_io_ports = 0; int par_io_init(struct device_node *np) { struct resource res; int ret; const u32 *num_ports; /* Map Parallel I/O ports registers */ ret = of_address_to_resource(np, 0, &res); if (ret) return ret; par_io = ioremap(res.start, res.end - res.start + 1); num_ports = of_get_property(np, "num-ports", NULL); if (num_ports) num_par_io_ports = *num_ports; return 0; } void __par_io_config_pin(struct qe_pio_regs __iomem *par_io, u8 pin, int dir, int open_drain, int assignment, int has_irq) { u32 pin_mask1bit; u32 pin_mask2bits; u32 new_mask2bits; u32 tmp_val; /* calculate pin location for single and 2 bits information */ pin_mask1bit = (u32) (1 << (QE_PIO_PINS - (pin + 1))); /* Set open drain, if required */ tmp_val = in_be32(&par_io->cpodr); if (open_drain) out_be32(&par_io->cpodr, pin_mask1bit | tmp_val); else out_be32(&par_io->cpodr, ~pin_mask1bit & tmp_val); /* define direction */ tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ? in_be32(&par_io->cpdir2) : in_be32(&par_io->cpdir1); /* get all bits mask for 2 bit per port */ pin_mask2bits = (u32) (0x3 << (QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2)); /* Get the final mask we need for the right definition */ new_mask2bits = (u32) (dir << (QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2)); /* clear and set 2 bits mask */ if (pin > (QE_PIO_PINS / 2) - 1) { out_be32(&par_io->cpdir2, ~pin_mask2bits & tmp_val); tmp_val &= ~pin_mask2bits; out_be32(&par_io->cpdir2, new_mask2bits | tmp_val); } else { out_be32(&par_io->cpdir1, ~pin_mask2bits & tmp_val); tmp_val &= ~pin_mask2bits; out_be32(&par_io->cpdir1, new_mask2bits | tmp_val); } /* define pin assignment */ tmp_val = (pin > (QE_PIO_PINS / 2) - 1) ?
in_be32(&par_io->cppar2) : in_be32(&par_io->cppar1); new_mask2bits = (u32) (assignment << (QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2)); /* clear and set 2 bits mask */ if (pin > (QE_PIO_PINS / 2) - 1) { out_be32(&par_io->cppar2, ~pin_mask2bits & tmp_val); tmp_val &= ~pin_mask2bits; out_be32(&par_io->cppar2, new_mask2bits | tmp_val); } else { out_be32(&par_io->cppar1, ~pin_mask2bits & tmp_val); tmp_val &= ~pin_mask2bits; out_be32(&par_io->cppar1, new_mask2bits | tmp_val); } } EXPORT_SYMBOL(__par_io_config_pin); int par_io_config_pin(u8 port, u8 pin, int dir, int open_drain, int assignment, int has_irq) { if (!par_io || port >= num_par_io_ports) return -EINVAL; __par_io_config_pin(&par_io[port], pin, dir, open_drain, assignment, has_irq); return 0; } EXPORT_SYMBOL(par_io_config_pin); int par_io_data_set(u8 port, u8 pin, u8 val) { u32 pin_mask, tmp_val; if (port >= num_par_io_ports) return -EINVAL; if (pin >= QE_PIO_PINS) return -EINVAL; /* calculate pin location */ pin_mask = (u32) (1 << (QE_PIO_PINS - 1 - pin)); tmp_val = in_be32(&par_io[port].cpdata); if (val == 0) /* clear */ out_be32(&par_io[port].cpdata, ~pin_mask & tmp_val); else /* set */ out_be32(&par_io[port].cpdata, pin_mask | tmp_val); return 0; } EXPORT_SYMBOL(par_io_data_set); int par_io_of_config(struct device_node *np) { struct device_node *pio; const phandle *ph; int pio_map_len; const unsigned int *pio_map; if (par_io == NULL) { printk(KERN_ERR "par_io not initialized\n"); return -1; } ph = of_get_property(np, "pio-handle", NULL); if (ph == NULL) { printk(KERN_ERR "pio-handle not available\n"); return -1; } pio = of_find_node_by_phandle(*ph); pio_map = of_get_property(pio, "pio-map", &pio_map_len); if (pio_map == NULL) { printk(KERN_ERR "pio-map is not set!\n"); return -1; } pio_map_len /= sizeof(unsigned int); if ((pio_map_len % 6) != 0) { printk(KERN_ERR "pio-map format wrong!\n"); return -1; } while (pio_map_len > 0) { par_io_config_pin((u8) pio_map[0], (u8) pio_map[1], (int) pio_map[2], (int) pio_map[3], (int) pio_map[4], (int) pio_map[5]); pio_map += 6; pio_map_len -= 6; } of_node_put(pio); return 0; } EXPORT_SYMBOL(par_io_of_config); #ifdef DEBUG static void dump_par_io(void) { unsigned int i; printk(KERN_INFO "%s: par_io=%p\n", __func__, par_io); for (i = 0; i < num_par_io_ports; i++) { printk(KERN_INFO " cpodr[%u]=%08x\n", i, in_be32(&par_io[i].cpodr)); printk(KERN_INFO " cpdata[%u]=%08x\n", i, in_be32(&par_io[i].cpdata)); printk(KERN_INFO " cpdir1[%u]=%08x\n", i, in_be32(&par_io[i].cpdir1)); printk(KERN_INFO " cpdir2[%u]=%08x\n", i, in_be32(&par_io[i].cpdir2)); printk(KERN_INFO " cppar1[%u]=%08x\n", i, in_be32(&par_io[i].cppar1)); printk(KERN_INFO " cppar2[%u]=%08x\n", i, in_be32(&par_io[i].cppar2)); } } EXPORT_SYMBOL(dump_par_io); #endif /* DEBUG */
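/*
 * Editor's note: stand-alone illustration, not part of the driver. It shows
 * how __par_io_config_pin() above derives its one-bit and two-bit masks,
 * assuming QE_PIO_PINS is 32 (pin 0 is the most significant bit, and the
 * two-bit direction/assignment fields are packed 16 pins per register).
 */
#include <stdio.h>
#include <stdint.h>

#define QE_PIO_PINS 32

int main(void)
{
	unsigned int pin = 5;	/* example pin number */
	int dir = 2;		/* example 2-bit direction value */

	/* one bit per pin, MSB-first, as in cpodr/cpdata */
	uint32_t mask1 = 1u << (QE_PIO_PINS - (pin + 1));

	/* two bits per pin within a half-port register (cpdir1/cpdir2) */
	unsigned int shift = QE_PIO_PINS - (pin % (QE_PIO_PINS / 2) + 1) * 2;
	uint32_t mask2 = 0x3u << shift;
	uint32_t val2  = (uint32_t)dir << shift;

	/* pin 5: mask1 0x04000000, mask2 0x00300000, val2 0x00200000 */
	printf("pin %u: 1-bit mask 0x%08x, 2-bit mask 0x%08x, value 0x%08x\n",
	       pin, mask1, mask2, val2);
	return 0;
}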
gpl-2.0
schlund/2.6.35-marvellous-kernel
arch/cris/arch-v32/mach-a3/pinmux.c
3384
9953
/* * Allocator for I/O pins. All pins are allocated to GPIO at bootup. * Unassigned pins and GPIO pins can be allocated to a fixed interface * or the I/O processor instead. * * Copyright (c) 2005-2007 Axis Communications AB. */ #include <linux/init.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/string.h> #include <linux/spinlock.h> #include <hwregs/reg_map.h> #include <hwregs/reg_rdwr.h> #include <pinmux.h> #include <hwregs/pinmux_defs.h> #include <hwregs/clkgen_defs.h> #undef DEBUG #define PINS 80 #define PORT_PINS 32 #define PORTS 3 static char pins[PINS]; static DEFINE_SPINLOCK(pinmux_lock); static void crisv32_pinmux_set(int port); int crisv32_pinmux_init(void) { static int initialized; if (!initialized) { initialized = 1; REG_WR_INT(pinmux, regi_pinmux, rw_hwprot, 0); crisv32_pinmux_alloc(PORT_A, 0, 31, pinmux_gpio); crisv32_pinmux_alloc(PORT_B, 0, 31, pinmux_gpio); crisv32_pinmux_alloc(PORT_C, 0, 15, pinmux_gpio); } return 0; } int crisv32_pinmux_alloc(int port, int first_pin, int last_pin, enum pin_mode mode) { int i; unsigned long flags; crisv32_pinmux_init(); if (port >= PORTS) return -EINVAL; spin_lock_irqsave(&pinmux_lock, flags); for (i = first_pin; i <= last_pin; i++) { if ((pins[port * PORT_PINS + i] != pinmux_none) && (pins[port * PORT_PINS + i] != pinmux_gpio) && (pins[port * PORT_PINS + i] != mode)) { spin_unlock_irqrestore(&pinmux_lock, flags); #ifdef DEBUG panic("Pinmux alloc failed!\n"); #endif return -EPERM; } } for (i = first_pin; i <= last_pin; i++) pins[port * PORT_PINS + i] = mode; crisv32_pinmux_set(port); spin_unlock_irqrestore(&pinmux_lock, flags); return 0; } int crisv32_pinmux_alloc_fixed(enum fixed_function function) { int ret = -EINVAL; char saved[sizeof pins]; unsigned long flags; spin_lock_irqsave(&pinmux_lock, flags); /* Save internal data for recovery */ memcpy(saved, pins, sizeof pins); crisv32_pinmux_init(); /* must be done before we read rw_hwprot */ reg_pinmux_rw_hwprot hwprot = REG_RD(pinmux, regi_pinmux, rw_hwprot); reg_clkgen_rw_clk_ctrl clk_ctrl = REG_RD(clkgen, regi_clkgen, rw_clk_ctrl); switch (function) { case pinmux_eth: clk_ctrl.eth = regk_clkgen_yes; clk_ctrl.dma0_1_eth = regk_clkgen_yes; ret = crisv32_pinmux_alloc(PORT_B, 8, 23, pinmux_fixed); ret |= crisv32_pinmux_alloc(PORT_B, 24, 25, pinmux_fixed); hwprot.eth = hwprot.eth_mdio = regk_pinmux_yes; break; case pinmux_geth: ret = crisv32_pinmux_alloc(PORT_B, 0, 7, pinmux_fixed); hwprot.geth = regk_pinmux_yes; break; case pinmux_tg_cmos: clk_ctrl.ccd_tg_100 = clk_ctrl.ccd_tg_200 = regk_clkgen_yes; ret = crisv32_pinmux_alloc(PORT_B, 27, 29, pinmux_fixed); hwprot.tg_clk = regk_pinmux_yes; break; case pinmux_tg_ccd: clk_ctrl.ccd_tg_100 = clk_ctrl.ccd_tg_200 = regk_clkgen_yes; ret = crisv32_pinmux_alloc(PORT_B, 27, 31, pinmux_fixed); ret |= crisv32_pinmux_alloc(PORT_C, 0, 15, pinmux_fixed); hwprot.tg = hwprot.tg_clk = regk_pinmux_yes; break; case pinmux_vout: clk_ctrl.strdma0_2_video = regk_clkgen_yes; ret = crisv32_pinmux_alloc(PORT_A, 8, 18, pinmux_fixed); hwprot.vout = hwprot.vout_sync = regk_pinmux_yes; break; case pinmux_ser1: clk_ctrl.sser_ser_dma6_7 = regk_clkgen_yes; ret = crisv32_pinmux_alloc(PORT_A, 24, 25, pinmux_fixed); hwprot.ser1 = regk_pinmux_yes; break; case pinmux_ser2: clk_ctrl.sser_ser_dma6_7 = regk_clkgen_yes; ret = crisv32_pinmux_alloc(PORT_A, 26, 27, pinmux_fixed); hwprot.ser2 = regk_pinmux_yes; break; case pinmux_ser3: clk_ctrl.sser_ser_dma6_7 = regk_clkgen_yes; ret = crisv32_pinmux_alloc(PORT_A, 28, 29, pinmux_fixed); hwprot.ser3 = regk_pinmux_yes; 
break; case pinmux_ser4: clk_ctrl.sser_ser_dma6_7 = regk_clkgen_yes; ret = crisv32_pinmux_alloc(PORT_A, 30, 31, pinmux_fixed); hwprot.ser4 = regk_pinmux_yes; break; case pinmux_sser: clk_ctrl.sser_ser_dma6_7 = regk_clkgen_yes; ret = crisv32_pinmux_alloc(PORT_A, 19, 23, pinmux_fixed); hwprot.sser = regk_pinmux_yes; break; case pinmux_pio: hwprot.pio = regk_pinmux_yes; ret = 0; break; case pinmux_pwm0: ret = crisv32_pinmux_alloc(PORT_A, 30, 30, pinmux_fixed); hwprot.pwm0 = regk_pinmux_yes; break; case pinmux_pwm1: ret = crisv32_pinmux_alloc(PORT_A, 31, 31, pinmux_fixed); hwprot.pwm1 = regk_pinmux_yes; break; case pinmux_pwm2: ret = crisv32_pinmux_alloc(PORT_B, 26, 26, pinmux_fixed); hwprot.pwm2 = regk_pinmux_yes; break; case pinmux_i2c0: ret = crisv32_pinmux_alloc(PORT_A, 0, 1, pinmux_fixed); hwprot.i2c0 = regk_pinmux_yes; break; case pinmux_i2c1: ret = crisv32_pinmux_alloc(PORT_A, 2, 3, pinmux_fixed); hwprot.i2c1 = regk_pinmux_yes; break; case pinmux_i2c1_3wire: ret = crisv32_pinmux_alloc(PORT_A, 2, 3, pinmux_fixed); ret |= crisv32_pinmux_alloc(PORT_A, 7, 7, pinmux_fixed); hwprot.i2c1 = hwprot.i2c1_sen = regk_pinmux_yes; break; case pinmux_i2c1_sda1: ret = crisv32_pinmux_alloc(PORT_A, 2, 4, pinmux_fixed); hwprot.i2c1 = hwprot.i2c1_sda1 = regk_pinmux_yes; break; case pinmux_i2c1_sda2: ret = crisv32_pinmux_alloc(PORT_A, 2, 3, pinmux_fixed); ret |= crisv32_pinmux_alloc(PORT_A, 5, 5, pinmux_fixed); hwprot.i2c1 = hwprot.i2c1_sda2 = regk_pinmux_yes; break; case pinmux_i2c1_sda3: ret = crisv32_pinmux_alloc(PORT_A, 2, 3, pinmux_fixed); ret |= crisv32_pinmux_alloc(PORT_A, 6, 6, pinmux_fixed); hwprot.i2c1 = hwprot.i2c1_sda3 = regk_pinmux_yes; break; default: ret = -EINVAL; break; } if (!ret) { REG_WR(pinmux, regi_pinmux, rw_hwprot, hwprot); REG_WR(clkgen, regi_clkgen, rw_clk_ctrl, clk_ctrl); } else memcpy(pins, saved, sizeof pins); spin_unlock_irqrestore(&pinmux_lock, flags); return ret; } void crisv32_pinmux_set(int port) { int i; int gpio_val = 0; int iop_val = 0; int pin = port * PORT_PINS; for (i = 0; (i < PORT_PINS) && (pin < PINS); i++, pin++) { if (pins[pin] == pinmux_gpio) gpio_val |= (1 << i); else if (pins[pin] == pinmux_iop) iop_val |= (1 << i); } REG_WRITE(int, regi_pinmux + REG_RD_ADDR_pinmux_rw_gio_pa + 4 * port, gpio_val); REG_WRITE(int, regi_pinmux + REG_RD_ADDR_pinmux_rw_iop_pa + 4 * port, iop_val); #ifdef DEBUG crisv32_pinmux_dump(); #endif } int crisv32_pinmux_dealloc(int port, int first_pin, int last_pin) { int i; unsigned long flags; crisv32_pinmux_init(); if (port > PORTS || port < 0) return -EINVAL; spin_lock_irqsave(&pinmux_lock, flags); for (i = first_pin; i <= last_pin; i++) pins[port * PORT_PINS + i] = pinmux_none; crisv32_pinmux_set(port); spin_unlock_irqrestore(&pinmux_lock, flags); return 0; } int crisv32_pinmux_dealloc_fixed(enum fixed_function function) { int ret = -EINVAL; char saved[sizeof pins]; unsigned long flags; spin_lock_irqsave(&pinmux_lock, flags); /* Save internal data for recovery */ memcpy(saved, pins, sizeof pins); crisv32_pinmux_init(); /* must be done before we read rw_hwprot */ reg_pinmux_rw_hwprot hwprot = REG_RD(pinmux, regi_pinmux, rw_hwprot); switch (function) { case pinmux_eth: ret = crisv32_pinmux_dealloc(PORT_B, 8, 23); ret |= crisv32_pinmux_dealloc(PORT_B, 24, 25); ret |= crisv32_pinmux_dealloc(PORT_B, 0, 7); hwprot.eth = hwprot.eth_mdio = hwprot.geth = regk_pinmux_no; break; case pinmux_tg_cmos: ret = crisv32_pinmux_dealloc(PORT_B, 27, 29); hwprot.tg_clk = regk_pinmux_no; break; case pinmux_tg_ccd: ret = crisv32_pinmux_dealloc(PORT_B, 27, 31); 
ret |= crisv32_pinmux_dealloc(PORT_C, 0, 15); hwprot.tg = hwprot.tg_clk = regk_pinmux_no; break; case pinmux_vout: ret = crisv32_pinmux_dealloc(PORT_A, 8, 18); hwprot.vout = hwprot.vout_sync = regk_pinmux_no; break; case pinmux_ser1: ret = crisv32_pinmux_dealloc(PORT_A, 24, 25); hwprot.ser1 = regk_pinmux_no; break; case pinmux_ser2: ret = crisv32_pinmux_dealloc(PORT_A, 26, 27); hwprot.ser2 = regk_pinmux_no; break; case pinmux_ser3: ret = crisv32_pinmux_dealloc(PORT_A, 28, 29); hwprot.ser3 = regk_pinmux_no; break; case pinmux_ser4: ret = crisv32_pinmux_dealloc(PORT_A, 30, 31); hwprot.ser4 = regk_pinmux_no; break; case pinmux_sser: ret = crisv32_pinmux_dealloc(PORT_A, 19, 23); hwprot.sser = regk_pinmux_no; break; case pinmux_pwm0: ret = crisv32_pinmux_dealloc(PORT_A, 30, 30); hwprot.pwm0 = regk_pinmux_no; break; case pinmux_pwm1: ret = crisv32_pinmux_dealloc(PORT_A, 31, 31); hwprot.pwm1 = regk_pinmux_no; break; case pinmux_pwm2: ret = crisv32_pinmux_dealloc(PORT_B, 26, 26); hwprot.pwm2 = regk_pinmux_no; break; case pinmux_i2c0: ret = crisv32_pinmux_dealloc(PORT_A, 0, 1); hwprot.i2c0 = regk_pinmux_no; break; case pinmux_i2c1: ret = crisv32_pinmux_dealloc(PORT_A, 2, 3); hwprot.i2c1 = regk_pinmux_no; break; case pinmux_i2c1_3wire: ret = crisv32_pinmux_dealloc(PORT_A, 2, 3); ret |= crisv32_pinmux_dealloc(PORT_A, 7, 7); hwprot.i2c1 = hwprot.i2c1_sen = regk_pinmux_no; break; case pinmux_i2c1_sda1: ret = crisv32_pinmux_dealloc(PORT_A, 2, 4); hwprot.i2c1_sda1 = regk_pinmux_no; break; case pinmux_i2c1_sda2: ret = crisv32_pinmux_dealloc(PORT_A, 2, 3); ret |= crisv32_pinmux_dealloc(PORT_A, 5, 5); hwprot.i2c1_sda2 = regk_pinmux_no; break; case pinmux_i2c1_sda3: ret = crisv32_pinmux_dealloc(PORT_A, 2, 3); ret |= crisv32_pinmux_dealloc(PORT_A, 6, 6); hwprot.i2c1_sda3 = regk_pinmux_no; break; default: ret = -EINVAL; break; } if (!ret) REG_WR(pinmux, regi_pinmux, rw_hwprot, hwprot); else memcpy(pins, saved, sizeof pins); spin_unlock_irqrestore(&pinmux_lock, flags); return ret; } void crisv32_pinmux_dump(void) { int i, j; int pin = 0; crisv32_pinmux_init(); for (i = 0; i < PORTS; i++) { pin++; printk(KERN_DEBUG "Port %c\n", 'A'+i); for (j = 0; (j < PORT_PINS) && (pin < PINS); j++, pin++) printk(KERN_DEBUG " Pin %d = %d\n", j, pins[i * PORT_PINS + j]); } } __initcall(crisv32_pinmux_init);
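/*
 * Editor's note: stand-alone illustration, not part of the driver. It mimics
 * the loop in crisv32_pinmux_set() above: walking one port's pin-mode array
 * and building the per-port GPIO and IOP bitmasks that the driver writes to
 * the rw_gio_p* and rw_iop_p* registers. The enum values here are
 * placeholders; the real ones come from <pinmux.h>.
 */
#include <stdio.h>

#define PORT_PINS 32

enum pin_mode { pinmux_none = 0, pinmux_gpio, pinmux_iop, pinmux_fixed };

int main(void)
{
	enum pin_mode pins[PORT_PINS] = { pinmux_none };
	unsigned int gpio_val = 0, iop_val = 0;
	int i;

	pins[0] = pinmux_gpio;	/* example assignments */
	pins[1] = pinmux_iop;
	pins[7] = pinmux_gpio;

	for (i = 0; i < PORT_PINS; i++) {
		if (pins[i] == pinmux_gpio)
			gpio_val |= 1u << i;
		else if (pins[i] == pinmux_iop)
			iop_val |= 1u << i;
	}
	/* expected: gpio mask 0x00000081, iop mask 0x00000002 */
	printf("gpio mask 0x%08x, iop mask 0x%08x\n", gpio_val, iop_val);
	return 0;
}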
gpl-2.0
SOKP/kernel_samsung_hlte
drivers/bluetooth/hci_smd.c
3384
16607
/* * HCI_SMD (HCI Shared Memory Driver) is Qualcomm's Shared memory driver * for the BT HCI protocol. * * Copyright (c) 2000-2001, 2011-2012 The Linux Foundation. All rights reserved. * Copyright (C) 2002-2003 Maxim Krasnyansky <maxk@qualcomm.com> * Copyright (C) 2004-2006 Marcel Holtmann <marcel@holtmann.org> * * This file is based on drivers/bluetooth/hci_vhci.c * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 * as published by the Free Software Foundation * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/errno.h> #include <linux/semaphore.h> #include <linux/string.h> #include <linux/skbuff.h> #include <linux/wakelock.h> #include <linux/workqueue.h> #include <linux/uaccess.h> #include <linux/interrupt.h> #include <net/bluetooth/bluetooth.h> #include <net/bluetooth/hci_core.h> #include <net/bluetooth/hci.h> #include <mach/msm_smd.h> #define EVENT_CHANNEL "APPS_RIVA_BT_CMD" #define DATA_CHANNEL "APPS_RIVA_BT_ACL" /* release wakelock in 500ms, not immediately, because higher layers * don't always take wakelocks when they should * This is derived from the implementation for UART transport */ #define RX_Q_MONITOR (500) /* 500 milli second */ #define HCI_REGISTER_SET 0 /* SSR state machine to take care of back to back SSR requests * and handling the incomming BT on/off,Airplane mode toggling and * also spuriour SMD open notification while one SSr is in progress */ #define STATE_SSR_ON 0x1 #define STATE_SSR_START 0x02 #define STATE_SSR_CHANNEL_OPEN_PENDING 0x04 #define STATE_SSR_PENDING_INIT 0x08 #define STATE_SSR_COMPLETE 0x00 #define STATE_SSR_OFF STATE_SSR_COMPLETE static int ssr_state = STATE_SSR_OFF; static int hcismd_set; static DEFINE_SEMAPHORE(hci_smd_enable); static int restart_in_progress; static int hcismd_set_enable(const char *val, struct kernel_param *kp); module_param_call(hcismd_set, hcismd_set_enable, NULL, &hcismd_set, 0644); static void hci_dev_smd_open(struct work_struct *worker); static void hci_dev_restart(struct work_struct *worker); struct hci_smd_data { struct hci_dev *hdev; unsigned long flags; struct smd_channel *event_channel; struct smd_channel *data_channel; struct wake_lock wake_lock_tx; struct wake_lock wake_lock_rx; struct timer_list rx_q_timer; struct tasklet_struct rx_task; }; static struct hci_smd_data hs; /* Rx queue monitor timer function */ static int is_rx_q_empty(unsigned long arg) { struct hci_dev *hdev = (struct hci_dev *) arg; struct sk_buff_head *list_ = &hdev->rx_q; struct sk_buff *list = ((struct sk_buff *)list_)->next; BT_DBG("%s Rx timer triggered", hdev->name); if (list == (struct sk_buff *)list_) { BT_DBG("%s RX queue empty", hdev->name); return 1; } else{ BT_DBG("%s RX queue not empty", hdev->name); return 0; } } static void release_lock(void) { struct hci_smd_data *hsmd = &hs; BT_DBG("Releasing Rx Lock"); if (is_rx_q_empty((unsigned long)hsmd->hdev) && wake_lock_active(&hs.wake_lock_rx)) wake_unlock(&hs.wake_lock_rx); } /* Rx timer callback function */ static void schedule_timer(unsigned long arg) { struct hci_dev *hdev = (struct hci_dev *) arg; struct hci_smd_data *hsmd = &hs; BT_DBG("%s Schedule Rx timer", hdev->name); if (is_rx_q_empty(arg) && 
static int ssr_state = STATE_SSR_OFF;

static int hcismd_set;
static DEFINE_SEMAPHORE(hci_smd_enable);
static int restart_in_progress;

static int hcismd_set_enable(const char *val, struct kernel_param *kp);
module_param_call(hcismd_set, hcismd_set_enable, NULL, &hcismd_set, 0644);
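/*
 * Editorial note: with module_param_call() and mode 0644 the parameter is
 * normally exposed through sysfs. Assuming the module is built as hci_smd,
 * writing to /sys/module/hci_smd/parameters/hcismd_set (1 to bring the
 * transport up, 0 to tear it down) invokes hcismd_set_enable() below; the
 * exact path depends on how the kernel names this module.
 */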
static void hci_dev_smd_open(struct work_struct *worker);
static void hci_dev_restart(struct work_struct *worker);

struct hci_smd_data {
	struct hci_dev *hdev;
	unsigned long flags;

	struct smd_channel *event_channel;
	struct smd_channel *data_channel;
	struct wake_lock wake_lock_tx;
	struct wake_lock wake_lock_rx;
	struct timer_list rx_q_timer;
	struct tasklet_struct rx_task;
};
static struct hci_smd_data hs;

/* Rx queue monitor timer function */
static int is_rx_q_empty(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff_head *list_ = &hdev->rx_q;
	struct sk_buff *list = ((struct sk_buff *) list_)->next;

	BT_DBG("%s Rx timer triggered", hdev->name);

	if (list == (struct sk_buff *) list_) {
		BT_DBG("%s RX queue empty", hdev->name);
		return 1;
	} else {
		BT_DBG("%s RX queue not empty", hdev->name);
		return 0;
	}
}

static void release_lock(void)
{
	struct hci_smd_data *hsmd = &hs;

	BT_DBG("Releasing Rx Lock");
	if (is_rx_q_empty((unsigned long)hsmd->hdev) &&
			wake_lock_active(&hs.wake_lock_rx))
		wake_unlock(&hs.wake_lock_rx);
}

/* Rx timer callback function */
static void schedule_timer(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct hci_smd_data *hsmd = &hs;

	BT_DBG("%s Schedule Rx timer", hdev->name);

	if (is_rx_q_empty(arg) && wake_lock_active(&hs.wake_lock_rx)) {
		BT_DBG("%s RX queue empty", hdev->name);
		/*
		 * Since the queue is empty, it is ideal
		 * to release the wake lock on Rx
		 */
		wake_unlock(&hs.wake_lock_rx);
	} else {
		BT_DBG("%s RX queue not empty", hdev->name);
		/*
		 * Restart the timer to monitor whether the Rx queue is
		 * empty for releasing the Rx wake lock
		 */
		mod_timer(&hsmd->rx_q_timer,
			jiffies + msecs_to_jiffies(RX_Q_MONITOR));
	}
}
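/*
 * Editorial note on the wakelock lifecycle (a summary of the code above
 * and the receive paths below): wake_lock_rx is taken whenever an event
 * or data frame arrives and is only dropped once the HCI core has drained
 * hdev->rx_q; rx_q_timer re-checks the queue every RX_Q_MONITOR ms and
 * re-arms itself until the queue is empty.
 */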
static int hci_smd_open(struct hci_dev *hdev)
{
	set_bit(HCI_RUNNING, &hdev->flags);
	return 0;
}

static int hci_smd_close(struct hci_dev *hdev)
{
	if (!test_and_clear_bit(HCI_RUNNING, &hdev->flags))
		return 0;
	else
		return -EPERM;
}

static void hci_smd_destruct(struct hci_dev *hdev)
{
	if (NULL != hdev->driver_data)
		kfree(hdev->driver_data);
}

static void hci_smd_recv_data(void)
{
	int len = 0;
	int rc = 0;
	struct sk_buff *skb = NULL;
	struct hci_smd_data *hsmd = &hs;

	wake_lock(&hs.wake_lock_rx);

	len = smd_read_avail(hsmd->data_channel);
	if (len > HCI_MAX_FRAME_SIZE) {
		BT_ERR("Frame larger than the allowed size, flushing frame");
		smd_read(hsmd->data_channel, NULL, len);
		goto out_data;
	}

	if (len <= 0)
		goto out_data;

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("Error in allocating socket buffer");
		smd_read(hsmd->data_channel, NULL, len);
		goto out_data;
	}

	rc = smd_read(hsmd->data_channel, skb_put(skb, len), len);
	if (rc < len) {
		BT_ERR("Error in reading from the channel");
		goto out_data;
	}

	skb->dev = (void *)hsmd->hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	skb_orphan(skb);

	rc = hci_recv_frame(skb);
	if (rc < 0) {
		BT_ERR("Error in passing the packet to HCI Layer");
		/*
		 * skb is freed inside hci_recv_frame; set it to
		 * NULL to avoid a double free
		 */
		skb = NULL;
		goto out_data;
	}

	/*
	 * Start the timer to monitor whether the Rx queue is
	 * empty for releasing the Rx wake lock
	 */
	BT_DBG("Rx Timer is starting");
	mod_timer(&hsmd->rx_q_timer,
			jiffies + msecs_to_jiffies(RX_Q_MONITOR));

out_data:
	release_lock();
	if (rc)
		kfree_skb(skb);
}

static void hci_smd_recv_event(void)
{
	int len = 0;
	int rc = 0;
	struct sk_buff *skb = NULL;
	struct hci_smd_data *hsmd = &hs;

	wake_lock(&hs.wake_lock_rx);

	len = smd_read_avail(hsmd->event_channel);
	if (len > HCI_MAX_FRAME_SIZE) {
		BT_ERR("Frame larger than the allowed size, flushing frame");
		rc = smd_read(hsmd->event_channel, NULL, len);
		goto out_event;
	}

	while (len > 0) {
		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb) {
			BT_ERR("Error in allocating socket buffer");
			smd_read(hsmd->event_channel, NULL, len);
			goto out_event;
		}

		rc = smd_read(hsmd->event_channel, skb_put(skb, len), len);
		if (rc < len) {
			BT_ERR("Error in reading from the event channel");
			goto out_event;
		}

		skb->dev = (void *)hsmd->hdev;
		bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
		skb_orphan(skb);

		rc = hci_recv_frame(skb);
		if (rc < 0) {
			BT_ERR("Error in passing the packet to HCI Layer");
			/*
			 * skb is freed inside hci_recv_frame; set it to
			 * NULL to avoid a double free
			 */
			skb = NULL;
			goto out_event;
		}

		len = smd_read_avail(hsmd->event_channel);
		/*
		 * Start the timer to monitor whether the Rx queue is
		 * empty for releasing the Rx wake lock
		 */
		BT_DBG("Rx Timer is starting");
		mod_timer(&hsmd->rx_q_timer,
				jiffies + msecs_to_jiffies(RX_Q_MONITOR));
	}
out_event:
	release_lock();
	if (rc)
		kfree_skb(skb);
}

static int hci_smd_send_frame(struct sk_buff *skb)
{
	int len;
	int avail;
	int ret = 0;

	wake_lock(&hs.wake_lock_tx);

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		avail = smd_write_avail(hs.event_channel);
		if (!avail) {
			BT_ERR("No space available for smd frame");
			ret = -ENOSPC;
		}
		len = smd_write(hs.event_channel, skb->data, skb->len);
		if (len < skb->len) {
			BT_ERR("Failed to write Command %d", len);
			ret = -ENODEV;
		}
		break;
	case HCI_ACLDATA_PKT:
	case HCI_SCODATA_PKT:
		avail = smd_write_avail(hs.data_channel);
		if (!avail) {
			BT_ERR("No space available for smd frame");
			ret = -ENOSPC;
		}
		len = smd_write(hs.data_channel, skb->data, skb->len);
		if (len < skb->len) {
			BT_ERR("Failed to write Data %d", len);
			ret = -ENODEV;
		}
		break;
	default:
		BT_ERR("Unknown packet type");
		ret = -ENODEV;
		break;
	}

	kfree_skb(skb);
	wake_unlock(&hs.wake_lock_tx);
	return ret;
}

static void hci_smd_rx(unsigned long arg)
{
	struct hci_smd_data *hsmd = &hs;

	while ((smd_read_avail(hsmd->event_channel) > 0) ||
			(smd_read_avail(hsmd->data_channel) > 0)) {
		hci_smd_recv_event();
		hci_smd_recv_data();
	}
}

static void hci_smd_notify_event(void *data, unsigned int event)
{
	struct hci_dev *hdev = hs.hdev;
	struct hci_smd_data *hsmd = &hs;
	struct work_struct *reset_worker;
	struct work_struct *open_worker;
	int len = 0;

	if (!hdev) {
		BT_ERR("Frame for unknown HCI device (hdev=NULL)");
		return;
	}

	switch (event) {
	case SMD_EVENT_DATA:
		len = smd_read_avail(hsmd->event_channel);
		if (len > 0)
			tasklet_hi_schedule(&hs.rx_task);
		else if (len < 0)
			BT_ERR("Failed to read event from smd %d", len);
		break;
	case SMD_EVENT_OPEN:
		BT_INFO("opening HCI-SMD channel :%s", EVENT_CHANNEL);
		BT_DBG("SSR state is : %x", ssr_state);
		if ((ssr_state == STATE_SSR_OFF) ||
				(ssr_state == STATE_SSR_CHANNEL_OPEN_PENDING)) {
			hci_smd_open(hdev);
			open_worker = kzalloc(sizeof(*open_worker), GFP_ATOMIC);
			if (!open_worker) {
				BT_ERR("Out of memory");
				break;
			}
			if (ssr_state == STATE_SSR_CHANNEL_OPEN_PENDING) {
				ssr_state = STATE_SSR_PENDING_INIT;
				BT_INFO("SSR state is : %x", ssr_state);
			}
			INIT_WORK(open_worker, hci_dev_smd_open);
			schedule_work(open_worker);
		}
		break;
	case SMD_EVENT_CLOSE:
		BT_INFO("Closing HCI-SMD channel :%s", EVENT_CHANNEL);
		BT_DBG("SSR state is : %x", ssr_state);
		if ((ssr_state == STATE_SSR_OFF) ||
				(ssr_state == STATE_SSR_PENDING_INIT)) {
			hci_smd_close(hdev);
			reset_worker = kzalloc(sizeof(*reset_worker),
					GFP_ATOMIC);
			if (!reset_worker) {
				BT_ERR("Out of memory");
				break;
			}
			ssr_state = STATE_SSR_ON;
			BT_INFO("SSR state is : %x", ssr_state);
			INIT_WORK(reset_worker, hci_dev_restart);
			schedule_work(reset_worker);
		} else if (ssr_state & STATE_SSR_ON) {
			BT_ERR("SSR state is : %x", ssr_state);
		}
		break;
	default:
		break;
	}
}

static void hci_smd_notify_data(void *data, unsigned int event)
{
	struct hci_dev *hdev = hs.hdev;
	struct hci_smd_data *hsmd = &hs;
	int len = 0;

	if (!hdev) {
		BT_ERR("Frame for unknown HCI device (hdev=NULL)");
		return;
	}

	switch (event) {
	case SMD_EVENT_DATA:
		len = smd_read_avail(hsmd->data_channel);
		if (len > 0)
			tasklet_hi_schedule(&hs.rx_task);
		else if (len < 0)
			BT_ERR("Failed to read data from smd %d", len);
		break;
	case SMD_EVENT_OPEN:
		BT_INFO("opening HCI-SMD channel :%s", DATA_CHANNEL);
		hci_smd_open(hdev);
		break;
	case SMD_EVENT_CLOSE:
		BT_INFO("Closing HCI-SMD channel :%s", DATA_CHANNEL);
		hci_smd_close(hdev);
		break;
	default:
		break;
	}
}

static int hci_smd_hci_register_dev(struct hci_smd_data *hsmd)
{
	struct hci_dev *hdev;

	if (hsmd->hdev)
		hdev = hsmd->hdev;
	else {
		BT_ERR("hdev is NULL");
		return 0;
	}

	/* Allow an incoming SSR even if the previous one is still at
	 * PENDING INIT state, since cleanup needs to be restarted from
	 * the beginning, ignoring or bypassing the previous one
	 */
	if ((ssr_state == STATE_SSR_OFF) ||
			(ssr_state == STATE_SSR_PENDING_INIT)) {
		if (test_and_set_bit(HCI_REGISTER_SET, &hsmd->flags)) {
			BT_ERR("HCI device registered already");
			return 0;
		} else
			BT_INFO("HCI device registration is starting");
		if (hci_register_dev(hdev) < 0) {
			BT_ERR("Can't register HCI device");
			hci_free_dev(hdev);
			hsmd->hdev = NULL;
			clear_bit(HCI_REGISTER_SET, &hsmd->flags);
			return -ENODEV;
		}
		if (ssr_state == STATE_SSR_PENDING_INIT) {
			ssr_state = STATE_SSR_COMPLETE;
			BT_INFO("SSR state is : %x", ssr_state);
		}
	} else if (ssr_state)
		BT_ERR("Registration called in invalid context");
	return 0;
}

static int hci_smd_register_smd(struct hci_smd_data *hsmd)
{
	struct hci_dev *hdev;
	int rc;

	/* Initialize and register HCI device */
	hdev = hci_alloc_dev();
	if (!hdev) {
		BT_ERR("Can't allocate HCI device");
		return -ENOMEM;
	}

	hsmd->hdev = hdev;
	hdev->bus = HCI_SMD;
	hdev->driver_data = NULL;
	hdev->open = hci_smd_open;
	hdev->close = hci_smd_close;
	hdev->send = hci_smd_send_frame;
	hdev->destruct = hci_smd_destruct;
	hdev->owner = THIS_MODULE;

	tasklet_init(&hsmd->rx_task, hci_smd_rx, (unsigned long) hsmd);
	/*
	 * Setup the timer to monitor whether the Rx queue is empty,
	 * to control the wake lock release
	 */
	setup_timer(&hsmd->rx_q_timer, schedule_timer,
			(unsigned long) hsmd->hdev);
	if (ssr_state == STATE_SSR_START) {
		ssr_state = STATE_SSR_CHANNEL_OPEN_PENDING;
		BT_INFO("SSR state is : %x", ssr_state);
	}

	/* Open the SMD channels and register the callback functions */
	rc = smd_named_open_on_edge(EVENT_CHANNEL, SMD_APPS_WCNSS,
			&hsmd->event_channel, hdev, hci_smd_notify_event);
	if (rc < 0) {
		BT_ERR("Cannot open the command channel");
		hci_free_dev(hdev);
		hsmd->hdev = NULL;
		return -ENODEV;
	}

	rc = smd_named_open_on_edge(DATA_CHANNEL, SMD_APPS_WCNSS,
			&hsmd->data_channel, hdev, hci_smd_notify_data);
	if (rc < 0) {
		BT_ERR("Failed to open the Data channel");
		hci_free_dev(hdev);
		hsmd->hdev = NULL;
		return -ENODEV;
	}

	/* Disable the read interrupts on the channel */
	smd_disable_read_intr(hsmd->event_channel);
	smd_disable_read_intr(hsmd->data_channel);
	return 0;
}

static void hci_smd_deregister_dev(struct hci_smd_data *hsmd)
{
	tasklet_kill(&hs.rx_task);
	if (ssr_state)
		BT_DBG("SSR state is : %x", ssr_state);

	/* Though the hci_smd driver may no longer be registered with the
	 * HCI core, the opened channels still need to be closed as part
	 * of cleanup
	 */
	if (!test_and_clear_bit(HCI_REGISTER_SET, &hsmd->flags)) {
		BT_ERR("HCI device un-registered already");
	} else {
		BT_INFO("HCI device un-registration going on");
		if (hsmd->hdev) {
			if (hci_unregister_dev(hsmd->hdev) < 0)
				BT_ERR("Can't unregister HCI device %s",
					hsmd->hdev->name);
			hci_free_dev(hsmd->hdev);
			hsmd->hdev = NULL;
		}
	}
	smd_close(hs.event_channel);
	smd_close(hs.data_channel);
	if (wake_lock_active(&hs.wake_lock_rx))
		wake_unlock(&hs.wake_lock_rx);
	if (wake_lock_active(&hs.wake_lock_tx))
		wake_unlock(&hs.wake_lock_tx);

	/* Destroy the timer used to monitor the Rx queue for emptiness */
	if (hs.rx_q_timer.function) {
		del_timer_sync(&hs.rx_q_timer);
		hs.rx_q_timer.function = NULL;
		hs.rx_q_timer.data = 0;
	}
}

static void hci_dev_restart(struct work_struct *worker)
{
	down(&hci_smd_enable);
	restart_in_progress = 1;
	BT_DBG("SSR state is : %x", ssr_state);

	if (ssr_state == STATE_SSR_ON) {
		ssr_state = STATE_SSR_START;
		BT_INFO("SSR state is : %x", ssr_state);
	} else {
		BT_ERR("restart triggered in wrong context");
		up(&hci_smd_enable);
		kfree(worker);
		return;
	}
	hci_smd_deregister_dev(&hs);
	hci_smd_register_smd(&hs);
	up(&hci_smd_enable);
	kfree(worker);
}

static void hci_dev_smd_open(struct work_struct *worker)
{
	down(&hci_smd_enable);
	if (ssr_state)
		BT_DBG("SSR state is : %x", ssr_state);

	if ((ssr_state != STATE_SSR_OFF) &&
			(ssr_state != STATE_SSR_PENDING_INIT)) {
		up(&hci_smd_enable);
		kfree(worker);
		return;
	}

	if (restart_in_progress == 1) {
		/* Allow wcnss to initialize */
		restart_in_progress = 0;
		msleep(10000);
	}

	hci_smd_hci_register_dev(&hs);
	up(&hci_smd_enable);
	kfree(worker);
}
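/*
 * Editorial note: all enable/disable and SSR work funnels through the
 * hci_smd_enable semaphore (taken here and in hci_dev_restart and
 * hci_dev_smd_open above), so a user toggle racing with an in-progress
 * SSR is simply rejected below while ssr_state is non-zero.
 */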
static int hcismd_set_enable(const char *val, struct kernel_param *kp)
{
	int ret = 0;

	pr_err("hcismd_set_enable %d", hcismd_set);

	down(&hci_smd_enable);

	ret = param_set_int(val, kp);
	if (ret)
		goto done;

	/* Ignore all incoming register/de-register requests while
	 * an SSR is in progress
	 */
	switch (hcismd_set) {
	case 1:
		if ((hs.hdev == NULL) && (ssr_state == STATE_SSR_OFF))
			hci_smd_register_smd(&hs);
		else if (ssr_state)
			BT_ERR("SSR is in progress, state is : %x", ssr_state);
		break;
	case 0:
		if (ssr_state == STATE_SSR_OFF)
			hci_smd_deregister_dev(&hs);
		else if (ssr_state)
			BT_ERR("SSR is in progress, state is : %x", ssr_state);
		break;
	default:
		ret = -EFAULT;
	}

done:
	up(&hci_smd_enable);
	return ret;
}

static int __init hci_smd_init(void)
{
	wake_lock_init(&hs.wake_lock_rx, WAKE_LOCK_SUSPEND,
			"msm_smd_Rx");
	wake_lock_init(&hs.wake_lock_tx, WAKE_LOCK_SUSPEND,
			"msm_smd_Tx");
	restart_in_progress = 0;
	ssr_state = STATE_SSR_OFF;
	hs.hdev = NULL;
	return 0;
}
module_init(hci_smd_init);

static void __exit hci_smd_exit(void)
{
	wake_lock_destroy(&hs.wake_lock_rx);
	wake_lock_destroy(&hs.wake_lock_tx);
}
module_exit(hci_smd_exit);

MODULE_AUTHOR("Ankur Nandwani <ankurn@codeaurora.org>");
MODULE_DESCRIPTION("Bluetooth SMD driver");
MODULE_LICENSE("GPL v2");
gpl-2.0