repo_name
string
path
string
copies
string
size
string
content
string
license
string
Jovy23/N900TUVUDNF9_Kernel
drivers/i2c/busses/i2c-octeon.c
4942
15426
/* * (C) Copyright 2009-2010 * Nokia Siemens Networks, michael.lawnick.ext@nsn.com * * Portions Copyright (C) 2010 Cavium Networks, Inc. * * This is a driver for the i2c adapter in Cavium Networks' OCTEON processors. * * This file is licensed under the terms of the GNU General Public * License version 2. This program is licensed "as is" without any * warranty of any kind, whether express or implied. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/sched.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/io.h> #include <linux/i2c.h> #include <linux/interrupt.h> #include <linux/delay.h> #include <linux/platform_device.h> #include <asm/octeon/octeon.h> #define DRV_NAME "i2c-octeon" /* The previous out-of-tree version was implicitly version 1.0. */ #define DRV_VERSION "2.0" /* register offsets */ #define SW_TWSI 0x00 #define TWSI_INT 0x10 /* Controller command patterns */ #define SW_TWSI_V 0x8000000000000000ull #define SW_TWSI_EOP_TWSI_DATA 0x0C00000100000000ull #define SW_TWSI_EOP_TWSI_CTL 0x0C00000200000000ull #define SW_TWSI_EOP_TWSI_CLKCTL 0x0C00000300000000ull #define SW_TWSI_EOP_TWSI_STAT 0x0C00000300000000ull #define SW_TWSI_EOP_TWSI_RST 0x0C00000700000000ull #define SW_TWSI_OP_TWSI_CLK 0x0800000000000000ull #define SW_TWSI_R 0x0100000000000000ull /* Controller command and status bits */ #define TWSI_CTL_CE 0x80 #define TWSI_CTL_ENAB 0x40 #define TWSI_CTL_STA 0x20 #define TWSI_CTL_STP 0x10 #define TWSI_CTL_IFLG 0x08 #define TWSI_CTL_AAK 0x04 /* Some status values */ #define STAT_START 0x08 #define STAT_RSTART 0x10 #define STAT_TXADDR_ACK 0x18 #define STAT_TXDATA_ACK 0x28 #define STAT_RXADDR_ACK 0x40 #define STAT_RXDATA_ACK 0x50 #define STAT_IDLE 0xF8 struct octeon_i2c { wait_queue_head_t queue; struct i2c_adapter adap; int irq; int twsi_freq; int sys_freq; resource_size_t twsi_phys; void __iomem *twsi_base; resource_size_t regsize; struct device *dev; }; /** * octeon_i2c_write_sw - write an I2C core register. 
* @i2c: The struct octeon_i2c. * @eop_reg: Register selector. * @data: Value to be written. * * The I2C core registers are accessed indirectly via the SW_TWSI CSR. */ static void octeon_i2c_write_sw(struct octeon_i2c *i2c, u64 eop_reg, u8 data) { u64 tmp; __raw_writeq(SW_TWSI_V | eop_reg | data, i2c->twsi_base + SW_TWSI); do { tmp = __raw_readq(i2c->twsi_base + SW_TWSI); } while ((tmp & SW_TWSI_V) != 0); } /** * octeon_i2c_read_sw - write an I2C core register. * @i2c: The struct octeon_i2c. * @eop_reg: Register selector. * * Returns the data. * * The I2C core registers are accessed indirectly via the SW_TWSI CSR. */ static u8 octeon_i2c_read_sw(struct octeon_i2c *i2c, u64 eop_reg) { u64 tmp; __raw_writeq(SW_TWSI_V | eop_reg | SW_TWSI_R, i2c->twsi_base + SW_TWSI); do { tmp = __raw_readq(i2c->twsi_base + SW_TWSI); } while ((tmp & SW_TWSI_V) != 0); return tmp & 0xFF; } /** * octeon_i2c_write_int - write the TWSI_INT register * @i2c: The struct octeon_i2c. * @data: Value to be written. */ static void octeon_i2c_write_int(struct octeon_i2c *i2c, u64 data) { u64 tmp; __raw_writeq(data, i2c->twsi_base + TWSI_INT); tmp = __raw_readq(i2c->twsi_base + TWSI_INT); } /** * octeon_i2c_int_enable - enable the TS interrupt. * @i2c: The struct octeon_i2c. * * The interrupt will be asserted when there is non-STAT_IDLE state in * the SW_TWSI_EOP_TWSI_STAT register. */ static void octeon_i2c_int_enable(struct octeon_i2c *i2c) { octeon_i2c_write_int(i2c, 0x40); } /** * octeon_i2c_int_disable - disable the TS interrupt. * @i2c: The struct octeon_i2c. */ static void octeon_i2c_int_disable(struct octeon_i2c *i2c) { octeon_i2c_write_int(i2c, 0); } /** * octeon_i2c_unblock - unblock the bus. * @i2c: The struct octeon_i2c. * * If there was a reset while a device was driving 0 to bus, * bus is blocked. We toggle it free manually by some clock * cycles and send a stop. 
*/ static void octeon_i2c_unblock(struct octeon_i2c *i2c) { int i; dev_dbg(i2c->dev, "%s\n", __func__); for (i = 0; i < 9; i++) { octeon_i2c_write_int(i2c, 0x0); udelay(5); octeon_i2c_write_int(i2c, 0x200); udelay(5); } octeon_i2c_write_int(i2c, 0x300); udelay(5); octeon_i2c_write_int(i2c, 0x100); udelay(5); octeon_i2c_write_int(i2c, 0x0); } /** * octeon_i2c_isr - the interrupt service routine. * @int: The irq, unused. * @dev_id: Our struct octeon_i2c. */ static irqreturn_t octeon_i2c_isr(int irq, void *dev_id) { struct octeon_i2c *i2c = dev_id; octeon_i2c_int_disable(i2c); wake_up_interruptible(&i2c->queue); return IRQ_HANDLED; } static int octeon_i2c_test_iflg(struct octeon_i2c *i2c) { return (octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_CTL) & TWSI_CTL_IFLG) != 0; } /** * octeon_i2c_wait - wait for the IFLG to be set. * @i2c: The struct octeon_i2c. * * Returns 0 on success, otherwise a negative errno. */ static int octeon_i2c_wait(struct octeon_i2c *i2c) { int result; octeon_i2c_int_enable(i2c); result = wait_event_interruptible_timeout(i2c->queue, octeon_i2c_test_iflg(i2c), i2c->adap.timeout); octeon_i2c_int_disable(i2c); if (result < 0) { dev_dbg(i2c->dev, "%s: wait interrupted\n", __func__); return result; } else if (result == 0) { dev_dbg(i2c->dev, "%s: timeout\n", __func__); return -ETIMEDOUT; } return 0; } /** * octeon_i2c_start - send START to the bus. * @i2c: The struct octeon_i2c. * * Returns 0 on success, otherwise a negative errno. */ static int octeon_i2c_start(struct octeon_i2c *i2c) { u8 data; int result; octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB | TWSI_CTL_STA); result = octeon_i2c_wait(i2c); if (result) { if (octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT) == STAT_IDLE) { /* * Controller refused to send start flag May * be a client is holding SDA low - let's try * to free it. 
*/ octeon_i2c_unblock(i2c); octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB | TWSI_CTL_STA); result = octeon_i2c_wait(i2c); } if (result) return result; } data = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT); if ((data != STAT_START) && (data != STAT_RSTART)) { dev_err(i2c->dev, "%s: bad status (0x%x)\n", __func__, data); return -EIO; } return 0; } /** * octeon_i2c_stop - send STOP to the bus. * @i2c: The struct octeon_i2c. * * Returns 0 on success, otherwise a negative errno. */ static int octeon_i2c_stop(struct octeon_i2c *i2c) { u8 data; octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB | TWSI_CTL_STP); data = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT); if (data != STAT_IDLE) { dev_err(i2c->dev, "%s: bad status(0x%x)\n", __func__, data); return -EIO; } return 0; } /** * octeon_i2c_write - send data to the bus. * @i2c: The struct octeon_i2c. * @target: Target address. * @data: Pointer to the data to be sent. * @length: Length of the data. * * The address is sent over the bus, then the data. * * Returns 0 on success, otherwise a negative errno. */ static int octeon_i2c_write(struct octeon_i2c *i2c, int target, const u8 *data, int length) { int i, result; u8 tmp; result = octeon_i2c_start(i2c); if (result) return result; octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_DATA, target << 1); octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB); result = octeon_i2c_wait(i2c); if (result) return result; for (i = 0; i < length; i++) { tmp = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT); if ((tmp != STAT_TXADDR_ACK) && (tmp != STAT_TXDATA_ACK)) { dev_err(i2c->dev, "%s: bad status before write (0x%x)\n", __func__, tmp); return -EIO; } octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_DATA, data[i]); octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB); result = octeon_i2c_wait(i2c); if (result) return result; } return 0; } /** * octeon_i2c_read - receive data from the bus. * @i2c: The struct octeon_i2c. * @target: Target address. 
* @data: Pointer to the location to store the datae . * @length: Length of the data. * * The address is sent over the bus, then the data is read. * * Returns 0 on success, otherwise a negative errno. */ static int octeon_i2c_read(struct octeon_i2c *i2c, int target, u8 *data, int length) { int i, result; u8 tmp; if (length < 1) return -EINVAL; result = octeon_i2c_start(i2c); if (result) return result; octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_DATA, (target<<1) | 1); octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB); result = octeon_i2c_wait(i2c); if (result) return result; for (i = 0; i < length; i++) { tmp = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT); if ((tmp != STAT_RXDATA_ACK) && (tmp != STAT_RXADDR_ACK)) { dev_err(i2c->dev, "%s: bad status before read (0x%x)\n", __func__, tmp); return -EIO; } if (i+1 < length) octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB | TWSI_CTL_AAK); else octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB); result = octeon_i2c_wait(i2c); if (result) return result; data[i] = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_DATA); } return 0; } /** * octeon_i2c_xfer - The driver's master_xfer function. * @adap: Pointer to the i2c_adapter structure. * @msgs: Pointer to the messages to be processed. * @num: Length of the MSGS array. * * Returns the number of messages processed, or a negative errno on * failure. */ static int octeon_i2c_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num) { struct i2c_msg *pmsg; int i; int ret = 0; struct octeon_i2c *i2c = i2c_get_adapdata(adap); for (i = 0; ret == 0 && i < num; i++) { pmsg = &msgs[i]; dev_dbg(i2c->dev, "Doing %s %d byte(s) to/from 0x%02x - %d of %d messages\n", pmsg->flags & I2C_M_RD ? "read" : "write", pmsg->len, pmsg->addr, i + 1, num); if (pmsg->flags & I2C_M_RD) ret = octeon_i2c_read(i2c, pmsg->addr, pmsg->buf, pmsg->len); else ret = octeon_i2c_write(i2c, pmsg->addr, pmsg->buf, pmsg->len); } octeon_i2c_stop(i2c); return (ret != 0) ? 
ret : num; } static u32 octeon_i2c_functionality(struct i2c_adapter *adap) { return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; } static const struct i2c_algorithm octeon_i2c_algo = { .master_xfer = octeon_i2c_xfer, .functionality = octeon_i2c_functionality, }; static struct i2c_adapter octeon_i2c_ops = { .owner = THIS_MODULE, .name = "OCTEON adapter", .algo = &octeon_i2c_algo, .timeout = 2, }; /** * octeon_i2c_setclock - Calculate and set clock divisors. */ static int __devinit octeon_i2c_setclock(struct octeon_i2c *i2c) { int tclk, thp_base, inc, thp_idx, mdiv_idx, ndiv_idx, foscl, diff; int thp = 0x18, mdiv = 2, ndiv = 0, delta_hz = 1000000; for (ndiv_idx = 0; ndiv_idx < 8 && delta_hz != 0; ndiv_idx++) { /* * An mdiv value of less than 2 seems to not work well * with ds1337 RTCs, so we constrain it to larger * values. */ for (mdiv_idx = 15; mdiv_idx >= 2 && delta_hz != 0; mdiv_idx--) { /* * For given ndiv and mdiv values check the * two closest thp values. */ tclk = i2c->twsi_freq * (mdiv_idx + 1) * 10; tclk *= (1 << ndiv_idx); thp_base = (i2c->sys_freq / (tclk * 2)) - 1; for (inc = 0; inc <= 1; inc++) { thp_idx = thp_base + inc; if (thp_idx < 5 || thp_idx > 0xff) continue; foscl = i2c->sys_freq / (2 * (thp_idx + 1)); foscl = foscl / (1 << ndiv_idx); foscl = foscl / (mdiv_idx + 1) / 10; diff = abs(foscl - i2c->twsi_freq); if (diff < delta_hz) { delta_hz = diff; thp = thp_idx; mdiv = mdiv_idx; ndiv = ndiv_idx; } } } } octeon_i2c_write_sw(i2c, SW_TWSI_OP_TWSI_CLK, thp); octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CLKCTL, (mdiv << 3) | ndiv); return 0; } static int __devinit octeon_i2c_initlowlevel(struct octeon_i2c *i2c) { u8 status; int tries; /* disable high level controller, enable bus access */ octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_CTL, TWSI_CTL_ENAB); /* reset controller */ octeon_i2c_write_sw(i2c, SW_TWSI_EOP_TWSI_RST, 0); for (tries = 10; tries; tries--) { udelay(1); status = octeon_i2c_read_sw(i2c, SW_TWSI_EOP_TWSI_STAT); if (status == STAT_IDLE) return 0; } 
dev_err(i2c->dev, "%s: TWSI_RST failed! (0x%x)\n", __func__, status); return -EIO; } static int __devinit octeon_i2c_probe(struct platform_device *pdev) { int irq, result = 0; struct octeon_i2c *i2c; struct octeon_i2c_data *i2c_data; struct resource *res_mem; /* All adaptors have an irq. */ irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; i2c = kzalloc(sizeof(*i2c), GFP_KERNEL); if (!i2c) { dev_err(&pdev->dev, "kzalloc failed\n"); result = -ENOMEM; goto out; } i2c->dev = &pdev->dev; i2c_data = pdev->dev.platform_data; res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res_mem == NULL) { dev_err(i2c->dev, "found no memory resource\n"); result = -ENXIO; goto fail_region; } if (i2c_data == NULL) { dev_err(i2c->dev, "no I2C frequency data\n"); result = -ENXIO; goto fail_region; } i2c->twsi_phys = res_mem->start; i2c->regsize = resource_size(res_mem); i2c->twsi_freq = i2c_data->i2c_freq; i2c->sys_freq = i2c_data->sys_freq; if (!request_mem_region(i2c->twsi_phys, i2c->regsize, res_mem->name)) { dev_err(i2c->dev, "request_mem_region failed\n"); goto fail_region; } i2c->twsi_base = ioremap(i2c->twsi_phys, i2c->regsize); init_waitqueue_head(&i2c->queue); i2c->irq = irq; result = request_irq(i2c->irq, octeon_i2c_isr, 0, DRV_NAME, i2c); if (result < 0) { dev_err(i2c->dev, "failed to attach interrupt\n"); goto fail_irq; } result = octeon_i2c_initlowlevel(i2c); if (result) { dev_err(i2c->dev, "init low level failed\n"); goto fail_add; } result = octeon_i2c_setclock(i2c); if (result) { dev_err(i2c->dev, "clock init failed\n"); goto fail_add; } i2c->adap = octeon_i2c_ops; i2c->adap.dev.parent = &pdev->dev; i2c->adap.nr = pdev->id >= 0 ? 
pdev->id : 0; i2c_set_adapdata(&i2c->adap, i2c); platform_set_drvdata(pdev, i2c); result = i2c_add_numbered_adapter(&i2c->adap); if (result < 0) { dev_err(i2c->dev, "failed to add adapter\n"); goto fail_add; } dev_info(i2c->dev, "version %s\n", DRV_VERSION); return result; fail_add: platform_set_drvdata(pdev, NULL); free_irq(i2c->irq, i2c); fail_irq: iounmap(i2c->twsi_base); release_mem_region(i2c->twsi_phys, i2c->regsize); fail_region: kfree(i2c); out: return result; }; static int __devexit octeon_i2c_remove(struct platform_device *pdev) { struct octeon_i2c *i2c = platform_get_drvdata(pdev); i2c_del_adapter(&i2c->adap); platform_set_drvdata(pdev, NULL); free_irq(i2c->irq, i2c); iounmap(i2c->twsi_base); release_mem_region(i2c->twsi_phys, i2c->regsize); kfree(i2c); return 0; }; static struct platform_driver octeon_i2c_driver = { .probe = octeon_i2c_probe, .remove = __devexit_p(octeon_i2c_remove), .driver = { .owner = THIS_MODULE, .name = DRV_NAME, }, }; module_platform_driver(octeon_i2c_driver); MODULE_AUTHOR("Michael Lawnick <michael.lawnick.ext@nsn.com>"); MODULE_DESCRIPTION("I2C-Bus adapter for Cavium OCTEON processors"); MODULE_LICENSE("GPL"); MODULE_VERSION(DRV_VERSION); MODULE_ALIAS("platform:" DRV_NAME);
gpl-2.0
razrqcom-dev-team/android_kernel_motorola_msm8226
drivers/staging/tidspbridge/rmgr/proc.c
4942
49072
/* * proc.c * * DSP-BIOS Bridge driver support functions for TI OMAP processors. * * Processor interface at the driver level. * * Copyright (C) 2005-2006 Texas Instruments, Inc. * * This package is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * THIS PACKAGE IS PROVIDED ``AS IS'' AND WITHOUT ANY EXPRESS OR * IMPLIED WARRANTIES, INCLUDING, WITHOUT LIMITATION, THE IMPLIED * WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR A PARTICULAR PURPOSE. */ #include <linux/types.h> /* ------------------------------------ Host OS */ #include <linux/dma-mapping.h> #include <linux/scatterlist.h> #include <dspbridge/host_os.h> /* ----------------------------------- DSP/BIOS Bridge */ #include <dspbridge/dbdefs.h> /* ----------------------------------- OS Adaptation Layer */ #include <dspbridge/ntfy.h> #include <dspbridge/sync.h> /* ----------------------------------- Bridge Driver */ #include <dspbridge/dspdefs.h> #include <dspbridge/dspdeh.h> /* ----------------------------------- Platform Manager */ #include <dspbridge/cod.h> #include <dspbridge/dev.h> #include <dspbridge/procpriv.h> #include <dspbridge/dmm.h> /* ----------------------------------- Resource Manager */ #include <dspbridge/mgr.h> #include <dspbridge/node.h> #include <dspbridge/nldr.h> #include <dspbridge/rmm.h> /* ----------------------------------- Others */ #include <dspbridge/dbdcd.h> #include <dspbridge/msg.h> #include <dspbridge/dspioctl.h> #include <dspbridge/drv.h> /* ----------------------------------- This */ #include <dspbridge/proc.h> #include <dspbridge/pwr.h> #include <dspbridge/resourcecleanup.h> /* ----------------------------------- Defines, Data Structures, Typedefs */ #define MAXCMDLINELEN 255 #define PROC_ENVPROCID "PROC_ID=%d" #define MAXPROCIDLEN (8 + 5) #define PROC_DFLT_TIMEOUT 10000 /* Time out in milliseconds */ #define PWR_TIMEOUT 500 /* Sleep/wake timout in msec */ 
#define EXTEND "_EXT_END" /* Extmem end addr in DSP binary */ #define DSP_CACHE_LINE 128 #define BUFMODE_MASK (3 << 14) /* Buffer modes from DSP perspective */ #define RBUF 0x4000 /* Input buffer */ #define WBUF 0x8000 /* Output Buffer */ extern struct device *bridge; /* ----------------------------------- Globals */ /* The proc_object structure. */ struct proc_object { struct list_head link; /* Link to next proc_object */ struct dev_object *dev_obj; /* Device this PROC represents */ u32 process; /* Process owning this Processor */ struct mgr_object *mgr_obj; /* Manager Object Handle */ u32 attach_count; /* Processor attach count */ u32 processor_id; /* Processor number */ u32 timeout; /* Time out count */ enum dsp_procstate proc_state; /* Processor state */ u32 unit; /* DDSP unit number */ bool is_already_attached; /* * True if the Device below has * GPP Client attached */ struct ntfy_object *ntfy_obj; /* Manages notifications */ /* Bridge Context Handle */ struct bridge_dev_context *bridge_context; /* Function interface to Bridge driver */ struct bridge_drv_interface *intf_fxns; char *last_coff; struct list_head proc_list; }; DEFINE_MUTEX(proc_lock); /* For critical sections */ /* ----------------------------------- Function Prototypes */ static int proc_monitor(struct proc_object *proc_obj); static s32 get_envp_count(char **envp); static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems, s32 cnew_envp, char *sz_var); /* remember mapping information */ static struct dmm_map_object *add_mapping_info(struct process_context *pr_ctxt, u32 mpu_addr, u32 dsp_addr, u32 size) { struct dmm_map_object *map_obj; u32 num_usr_pgs = size / PG_SIZE4K; pr_debug("%s: adding map info: mpu_addr 0x%x virt 0x%x size 0x%x\n", __func__, mpu_addr, dsp_addr, size); map_obj = kzalloc(sizeof(struct dmm_map_object), GFP_KERNEL); if (!map_obj) { pr_err("%s: kzalloc failed\n", __func__); return NULL; } INIT_LIST_HEAD(&map_obj->link); map_obj->pages = kcalloc(num_usr_pgs, 
sizeof(struct page *), GFP_KERNEL); if (!map_obj->pages) { pr_err("%s: kzalloc failed\n", __func__); kfree(map_obj); return NULL; } map_obj->mpu_addr = mpu_addr; map_obj->dsp_addr = dsp_addr; map_obj->size = size; map_obj->num_usr_pgs = num_usr_pgs; spin_lock(&pr_ctxt->dmm_map_lock); list_add(&map_obj->link, &pr_ctxt->dmm_map_list); spin_unlock(&pr_ctxt->dmm_map_lock); return map_obj; } static int match_exact_map_obj(struct dmm_map_object *map_obj, u32 dsp_addr, u32 size) { if (map_obj->dsp_addr == dsp_addr && map_obj->size != size) pr_err("%s: addr match (0x%x), size don't (0x%x != 0x%x)\n", __func__, dsp_addr, map_obj->size, size); return map_obj->dsp_addr == dsp_addr && map_obj->size == size; } static void remove_mapping_information(struct process_context *pr_ctxt, u32 dsp_addr, u32 size) { struct dmm_map_object *map_obj; pr_debug("%s: looking for virt 0x%x size 0x%x\n", __func__, dsp_addr, size); spin_lock(&pr_ctxt->dmm_map_lock); list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) { pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n", __func__, map_obj->mpu_addr, map_obj->dsp_addr, map_obj->size); if (match_exact_map_obj(map_obj, dsp_addr, size)) { pr_debug("%s: match, deleting map info\n", __func__); list_del(&map_obj->link); kfree(map_obj->dma_info.sg); kfree(map_obj->pages); kfree(map_obj); goto out; } pr_debug("%s: candidate didn't match\n", __func__); } pr_err("%s: failed to find given map info\n", __func__); out: spin_unlock(&pr_ctxt->dmm_map_lock); } static int match_containing_map_obj(struct dmm_map_object *map_obj, u32 mpu_addr, u32 size) { u32 map_obj_end = map_obj->mpu_addr + map_obj->size; return mpu_addr >= map_obj->mpu_addr && mpu_addr + size <= map_obj_end; } static struct dmm_map_object *find_containing_mapping( struct process_context *pr_ctxt, u32 mpu_addr, u32 size) { struct dmm_map_object *map_obj; pr_debug("%s: looking for mpu_addr 0x%x size 0x%x\n", __func__, mpu_addr, size); spin_lock(&pr_ctxt->dmm_map_lock); 
list_for_each_entry(map_obj, &pr_ctxt->dmm_map_list, link) { pr_debug("%s: candidate: mpu_addr 0x%x virt 0x%x size 0x%x\n", __func__, map_obj->mpu_addr, map_obj->dsp_addr, map_obj->size); if (match_containing_map_obj(map_obj, mpu_addr, size)) { pr_debug("%s: match!\n", __func__); goto out; } pr_debug("%s: no match!\n", __func__); } map_obj = NULL; out: spin_unlock(&pr_ctxt->dmm_map_lock); return map_obj; } static int find_first_page_in_cache(struct dmm_map_object *map_obj, unsigned long mpu_addr) { u32 mapped_base_page = map_obj->mpu_addr >> PAGE_SHIFT; u32 requested_base_page = mpu_addr >> PAGE_SHIFT; int pg_index = requested_base_page - mapped_base_page; if (pg_index < 0 || pg_index >= map_obj->num_usr_pgs) { pr_err("%s: failed (got %d)\n", __func__, pg_index); return -1; } pr_debug("%s: first page is %d\n", __func__, pg_index); return pg_index; } static inline struct page *get_mapping_page(struct dmm_map_object *map_obj, int pg_i) { pr_debug("%s: looking for pg_i %d, num_usr_pgs: %d\n", __func__, pg_i, map_obj->num_usr_pgs); if (pg_i < 0 || pg_i >= map_obj->num_usr_pgs) { pr_err("%s: requested pg_i %d is out of mapped range\n", __func__, pg_i); return NULL; } return map_obj->pages[pg_i]; } /* * ======== proc_attach ======== * Purpose: * Prepare for communication with a particular DSP processor, and return * a handle to the processor object. 
*/ int proc_attach(u32 processor_id, const struct dsp_processorattrin *attr_in, void **ph_processor, struct process_context *pr_ctxt) { int status = 0; struct dev_object *hdev_obj; struct proc_object *p_proc_object = NULL; struct mgr_object *hmgr_obj = NULL; struct drv_object *hdrv_obj = NULL; struct drv_data *drv_datap = dev_get_drvdata(bridge); u8 dev_type; if (pr_ctxt->processor) { *ph_processor = pr_ctxt->processor; return status; } /* Get the Driver and Manager Object Handles */ if (!drv_datap || !drv_datap->drv_object || !drv_datap->mgr_object) { status = -ENODATA; pr_err("%s: Failed to get object handles\n", __func__); } else { hdrv_obj = drv_datap->drv_object; hmgr_obj = drv_datap->mgr_object; } if (!status) { /* Get the Device Object */ status = drv_get_dev_object(processor_id, hdrv_obj, &hdev_obj); } if (!status) status = dev_get_dev_type(hdev_obj, &dev_type); if (status) goto func_end; /* If we made it this far, create the Proceesor object: */ p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL); /* Fill out the Processor Object: */ if (p_proc_object == NULL) { status = -ENOMEM; goto func_end; } p_proc_object->dev_obj = hdev_obj; p_proc_object->mgr_obj = hmgr_obj; p_proc_object->processor_id = dev_type; /* Store TGID instead of process handle */ p_proc_object->process = current->tgid; INIT_LIST_HEAD(&p_proc_object->proc_list); if (attr_in) p_proc_object->timeout = attr_in->timeout; else p_proc_object->timeout = PROC_DFLT_TIMEOUT; status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns); if (!status) { status = dev_get_bridge_context(hdev_obj, &p_proc_object->bridge_context); if (status) kfree(p_proc_object); } else kfree(p_proc_object); if (status) goto func_end; /* Create the Notification Object */ /* This is created with no event mask, no notify mask * and no valid handle to the notification. 
They all get * filled up when proc_register_notify is called */ p_proc_object->ntfy_obj = kmalloc(sizeof(struct ntfy_object), GFP_KERNEL); if (p_proc_object->ntfy_obj) ntfy_init(p_proc_object->ntfy_obj); else status = -ENOMEM; if (!status) { /* Insert the Processor Object into the DEV List. * Return handle to this Processor Object: * Find out if the Device is already attached to a * Processor. If so, return AlreadyAttached status */ status = dev_insert_proc_object(p_proc_object->dev_obj, (u32) p_proc_object, &p_proc_object-> is_already_attached); if (!status) { if (p_proc_object->is_already_attached) status = 0; } else { if (p_proc_object->ntfy_obj) { ntfy_delete(p_proc_object->ntfy_obj); kfree(p_proc_object->ntfy_obj); } kfree(p_proc_object); } if (!status) { *ph_processor = (void *)p_proc_object; pr_ctxt->processor = *ph_processor; (void)proc_notify_clients(p_proc_object, DSP_PROCESSORATTACH); } } else { /* Don't leak memory if status is failed */ kfree(p_proc_object); } func_end: return status; } static int get_exec_file(struct cfg_devnode *dev_node_obj, struct dev_object *hdev_obj, u32 size, char *exec_file) { u8 dev_type; s32 len; struct drv_data *drv_datap = dev_get_drvdata(bridge); dev_get_dev_type(hdev_obj, (u8 *) &dev_type); if (!exec_file) return -EFAULT; if (dev_type == DSP_UNIT) { if (!drv_datap || !drv_datap->base_img) return -EFAULT; if (strlen(drv_datap->base_img) > size) return -EINVAL; strcpy(exec_file, drv_datap->base_img); } else if (dev_type == IVA_UNIT && iva_img) { len = strlen(iva_img); strncpy(exec_file, iva_img, len + 1); } else { return -ENOENT; } return 0; } /* * ======== proc_auto_start ======== = * Purpose: * A Particular device gets loaded with the default image * if the AutoStart flag is set. 
* Parameters: * hdev_obj: Handle to the Device * Returns: * 0: On Successful Loading * -EPERM General Failure * Requires: * hdev_obj != NULL * Ensures: */ int proc_auto_start(struct cfg_devnode *dev_node_obj, struct dev_object *hdev_obj) { int status = -EPERM; struct proc_object *p_proc_object; char sz_exec_file[MAXCMDLINELEN]; char *argv[2]; struct mgr_object *hmgr_obj = NULL; struct drv_data *drv_datap = dev_get_drvdata(bridge); u8 dev_type; /* Create a Dummy PROC Object */ if (!drv_datap || !drv_datap->mgr_object) { status = -ENODATA; pr_err("%s: Failed to retrieve the object handle\n", __func__); goto func_end; } else { hmgr_obj = drv_datap->mgr_object; } p_proc_object = kzalloc(sizeof(struct proc_object), GFP_KERNEL); if (p_proc_object == NULL) { status = -ENOMEM; goto func_end; } p_proc_object->dev_obj = hdev_obj; p_proc_object->mgr_obj = hmgr_obj; status = dev_get_intf_fxns(hdev_obj, &p_proc_object->intf_fxns); if (!status) status = dev_get_bridge_context(hdev_obj, &p_proc_object->bridge_context); if (status) goto func_cont; /* Stop the Device, put it into standby mode */ status = proc_stop(p_proc_object); if (status) goto func_cont; /* Get the default executable for this board... */ dev_get_dev_type(hdev_obj, (u8 *) &dev_type); p_proc_object->processor_id = dev_type; status = get_exec_file(dev_node_obj, hdev_obj, sizeof(sz_exec_file), sz_exec_file); if (!status) { argv[0] = sz_exec_file; argv[1] = NULL; /* ...and try to load it: */ status = proc_load(p_proc_object, 1, (const char **)argv, NULL); if (!status) status = proc_start(p_proc_object); } kfree(p_proc_object->last_coff); p_proc_object->last_coff = NULL; func_cont: kfree(p_proc_object); func_end: return status; } /* * ======== proc_ctrl ======== * Purpose: * Pass control information to the GPP device driver managing the * DSP processor. * * This will be an OEM-only function, and not part of the DSP/BIOS Bridge * application developer's API. * Call the bridge_dev_ctrl fxn with the Argument. 
This is a Synchronous * Operation. arg can be null. */ int proc_ctrl(void *hprocessor, u32 dw_cmd, struct dsp_cbdata * arg) { int status = 0; struct proc_object *p_proc_object = hprocessor; u32 timeout = 0; if (p_proc_object) { /* intercept PWR deep sleep command */ if (dw_cmd == BRDIOCTL_DEEPSLEEP) { timeout = arg->cb_data; status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout); } /* intercept PWR emergency sleep command */ else if (dw_cmd == BRDIOCTL_EMERGENCYSLEEP) { timeout = arg->cb_data; status = pwr_sleep_dsp(PWR_EMERGENCYDEEPSLEEP, timeout); } else if (dw_cmd == PWR_DEEPSLEEP) { /* timeout = arg->cb_data; */ status = pwr_sleep_dsp(PWR_DEEPSLEEP, timeout); } /* intercept PWR wake commands */ else if (dw_cmd == BRDIOCTL_WAKEUP) { timeout = arg->cb_data; status = pwr_wake_dsp(timeout); } else if (dw_cmd == PWR_WAKEUP) { /* timeout = arg->cb_data; */ status = pwr_wake_dsp(timeout); } else if (!((*p_proc_object->intf_fxns->dev_cntrl) (p_proc_object->bridge_context, dw_cmd, arg))) { status = 0; } else { status = -EPERM; } } else { status = -EFAULT; } return status; } /* * ======== proc_detach ======== * Purpose: * Destroys the Processor Object. Removes the notification from the Dev * List. 
*/
/*
 *  ======== proc_detach ========
 *  Purpose:
 *      Destroy the Processor Object attached to this process context:
 *      notify the client, release the notification object and the cached
 *      COFF file name, unhook the object from the DEV list and free it.
 */
int proc_detach(struct process_context *pr_ctxt)
{
	int status = 0;
	struct proc_object *p_proc_object = NULL;

	p_proc_object = (struct proc_object *)pr_ctxt->processor;

	if (p_proc_object) {
		/* Notify the Client */
		/* NOTE(review): ntfy_notify() runs before the NULL check on
		 * ntfy_obj below -- presumably it tolerates a NULL handle;
		 * confirm against the ntfy API. */
		ntfy_notify(p_proc_object->ntfy_obj, DSP_PROCESSORDETACH);
		/* Remove the notification memory */
		if (p_proc_object->ntfy_obj) {
			ntfy_delete(p_proc_object->ntfy_obj);
			kfree(p_proc_object->ntfy_obj);
		}

		kfree(p_proc_object->last_coff);
		p_proc_object->last_coff = NULL;
		/* Remove the Proc from the DEV List */
		(void)dev_remove_proc_object(p_proc_object->dev_obj,
					     (u32) p_proc_object);
		/* Free the Processor Object */
		kfree(p_proc_object);
		pr_ctxt->processor = NULL;
	} else {
		status = -EFAULT;
	}
	return status;
}

/*
 *  ======== proc_enum_nodes ========
 *  Purpose:
 *      Enumerate and get configuration information about nodes allocated
 *      on a DSP processor.
 */
int proc_enum_nodes(void *hprocessor, void **node_tab, u32 node_tab_size,
		    u32 *pu_num_nodes, u32 *pu_allocated)
{
	int status = -EPERM;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct node_mgr *hnode_mgr = NULL;

	if (p_proc_object) {
		if (!(dev_get_node_manager(p_proc_object->dev_obj,
					   &hnode_mgr))) {
			if (hnode_mgr) {
				status = node_enum_nodes(hnode_mgr, node_tab,
							 node_tab_size,
							 pu_num_nodes,
							 pu_allocated);
			}
		}
	} else {
		status = -EFAULT;
	}

	return status;
}

/* Cache operation against kernel address instead of users */
/*
 * build_dma_sg: fill the preallocated scatterlist in map_obj->dma_info
 * with one entry per page of [start, start + len), starting at cached
 * page index pg_i.  Returns 0 on success or a negative errno.
 */
static int build_dma_sg(struct dmm_map_object *map_obj, unsigned long start,
			ssize_t len, int pg_i)
{
	struct page *page;
	unsigned long offset;
	ssize_t rest;
	int ret = 0, i = 0;
	struct scatterlist *sg = map_obj->dma_info.sg;

	while (len) {
		page = get_mapping_page(map_obj, pg_i);
		if (!page) {
			pr_err("%s: no page for %08lx\n", __func__, start);
			ret = -EINVAL;
			goto out;
		} else if (IS_ERR(page)) {
			pr_err("%s: err page for %08lx(%lu)\n", __func__,
			       start, PTR_ERR(page));
			ret = PTR_ERR(page);
			goto out;
		}

		offset = start & ~PAGE_MASK;
		/* chunk = remainder of this page, capped by remaining len */
		rest = min_t(ssize_t, PAGE_SIZE - offset, len);
		sg_set_page(&sg[i], page, rest, offset);

		len -= rest;
		start += rest;
		pg_i++, i++;
	}

	/* the walk must consume exactly the page count recorded earlier */
	if (i != map_obj->dma_info.num_pages) {
		pr_err("%s: bad number of sg iterations\n", __func__);
		ret = -EFAULT;
		goto out;
	}

out:
	return ret;
}

/*
 * memory_regain_ownership: unmap the scatterlist built by
 * memory_give_ownership() so the CPU regains ownership of the buffer
 * after a DSP-side DMA.  A missing sg list is a no-op (returns 0).
 */
static int memory_regain_ownership(struct dmm_map_object *map_obj,
		unsigned long start, ssize_t len, enum dma_data_direction dir)
{
	int ret = 0;
	unsigned long first_data_page = start >> PAGE_SHIFT;
	unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
	/* calculating the number of pages this area spans */
	unsigned long num_pages = last_data_page - first_data_page + 1;
	struct bridge_dma_map_info *dma_info = &map_obj->dma_info;

	if (!dma_info->sg)
		goto out;

	/* direction/extent must match the earlier dma_map_sg() call */
	if (dma_info->dir != dir || dma_info->num_pages != num_pages) {
		pr_err("%s: dma info doesn't match given params\n", __func__);
		return -EINVAL;
	}

	dma_unmap_sg(bridge, dma_info->sg, num_pages, dma_info->dir);

	pr_debug("%s: dma_map_sg unmapped\n", __func__);
	kfree(dma_info->sg);

	map_obj->dma_info.sg = NULL;

out:
	return ret;
}

/* Cache operation against kernel address instead of users */
/*
 * memory_give_ownership: build a scatterlist over [start, start + len)
 * and dma_map_sg() it, handing buffer ownership to the device for a DMA
 * in direction dir.  Any previous sg allocation is released first.
 */
static int memory_give_ownership(struct dmm_map_object *map_obj,
		unsigned long start, ssize_t len, enum dma_data_direction dir)
{
	int pg_i, ret, sg_num;
	struct scatterlist *sg;
	unsigned long first_data_page = start >> PAGE_SHIFT;
	unsigned long last_data_page = ((u32)(start + len - 1) >> PAGE_SHIFT);
	/* calculating the number of pages this area spans */
	unsigned long num_pages = last_data_page - first_data_page + 1;

	pg_i = find_first_page_in_cache(map_obj, start);
	if (pg_i < 0) {
		pr_err("%s: failed to find first page in cache\n", __func__);
		ret = -EINVAL;
		goto out;
	}

	sg = kcalloc(num_pages, sizeof(*sg), GFP_KERNEL);
	if (!sg) {
		pr_err("%s: kcalloc failed\n", __func__);
		ret = -ENOMEM;
		goto out;
	}

	sg_init_table(sg, num_pages);

	/* cleanup a previous sg allocation */
	/* this may happen if application doesn't signal for e/o DMA */
	kfree(map_obj->dma_info.sg);

	map_obj->dma_info.sg = sg;
	map_obj->dma_info.dir = dir;
	map_obj->dma_info.num_pages = num_pages;

	ret = build_dma_sg(map_obj, start, len, pg_i);
	if (ret)
		goto kfree_sg;

	sg_num = dma_map_sg(bridge, sg, num_pages, dir);
	if (sg_num < 1) {
		pr_err("%s: dma_map_sg failed: %d\n", __func__, sg_num);
		ret = -EFAULT;
		goto kfree_sg;
	}

	pr_debug("%s: dma_map_sg mapped %d elements\n", __func__, sg_num);
	map_obj->dma_info.sg_num = sg_num;

	return 0;

kfree_sg:
	kfree(sg);
	map_obj->dma_info.sg = NULL;
out:
	return ret;
}

/*
 * proc_begin_dma: hand a previously mapped user buffer over to the DSP
 * for DMA in direction dir (cache maintenance is done on the kernel
 * mapping cached in the process context).
 */
int proc_begin_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
				enum dma_data_direction dir)
{
	/* Keep STATUS here for future additions to this function */
	int status = 0;
	struct process_context *pr_ctxt = (struct process_context *) hprocessor;
	struct dmm_map_object *map_obj;

	if (!pr_ctxt) {
		status = -EFAULT;
		goto err_out;
	}

	pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
							(u32)pmpu_addr,
							ul_size, dir);

	mutex_lock(&proc_lock);

	/* find requested memory are in cached mapping information */
	map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
	if (!map_obj) {
		pr_err("%s: find_containing_mapping failed\n", __func__);
		status = -EFAULT;
		goto no_map;
	}

	if (memory_give_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
		pr_err("%s: InValid address parameters %p %x\n",
		       __func__, pmpu_addr, ul_size);
		status = -EFAULT;
	}

no_map:
	mutex_unlock(&proc_lock);
err_out:
	return status;
}

/*
 * proc_end_dma: counterpart of proc_begin_dma(); returns buffer
 * ownership to the CPU once a DSP-side DMA has completed.
 */
int proc_end_dma(void *hprocessor, void *pmpu_addr, u32 ul_size,
			enum dma_data_direction dir)
{
	/* Keep STATUS here for future additions to this function */
	int status = 0;
	struct process_context *pr_ctxt = (struct process_context *) hprocessor;
	struct dmm_map_object *map_obj;

	if (!pr_ctxt) {
		status = -EFAULT;
		goto err_out;
	}

	pr_debug("%s: addr 0x%x, size 0x%x, type %d\n", __func__,
							(u32)pmpu_addr,
							ul_size, dir);

	mutex_lock(&proc_lock);

	/* find requested memory are in cached mapping information */
	map_obj = find_containing_mapping(pr_ctxt, (u32) pmpu_addr, ul_size);
	if (!map_obj) {
		pr_err("%s: find_containing_mapping failed\n", __func__);
		status = -EFAULT;
		goto no_map;
	}

	if (memory_regain_ownership(map_obj, (u32) pmpu_addr, ul_size, dir)) {
		pr_err("%s: InValid address parameters %p %x\n",
		       __func__, pmpu_addr, ul_size);
		status = -EFAULT;
	}

no_map:
	mutex_unlock(&proc_lock);
err_out:
	return status;
}

/*
 *  ======== proc_flush_memory ========
 *  Purpose:
 *     Flush cache
 */
int proc_flush_memory(void *hprocessor, void *pmpu_addr,
		      u32 ul_size, u32 ul_flags)
{
	enum dma_data_direction dir = DMA_BIDIRECTIONAL;

	return proc_begin_dma(hprocessor, pmpu_addr, ul_size, dir);
}

/*
 *  ======== proc_invalidate_memory ========
 *  Purpose:
 *     Invalidates the memory specified
 */
int proc_invalidate_memory(void *hprocessor, void *pmpu_addr, u32 size)
{
	enum dma_data_direction dir = DMA_FROM_DEVICE;

	return proc_begin_dma(hprocessor, pmpu_addr, size, dir);
}

/*
 *  ======== proc_get_resource_info ========
 *  Purpose:
 *      Enumerate the resources currently available on a processor.
 */
int proc_get_resource_info(void *hprocessor, u32 resource_type,
			   struct dsp_resourceinfo *resource_info,
			   u32 resource_info_size)
{
	int status = -EPERM;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct node_mgr *hnode_mgr = NULL;
	struct nldr_object *nldr_obj = NULL;
	struct rmm_target_obj *rmm = NULL;
	struct io_mgr *hio_mgr = NULL;	/* IO manager handle */

	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	switch (resource_type) {
	case DSP_RESOURCE_DYNDARAM:
	case DSP_RESOURCE_DYNSARAM:
	case DSP_RESOURCE_DYNEXTERNAL:
	case DSP_RESOURCE_DYNSRAM:
		status = dev_get_node_manager(p_proc_object->dev_obj,
					      &hnode_mgr);
		if (!hnode_mgr) {
			status = -EFAULT;
			goto func_end;
		}

		status = node_get_nldr_obj(hnode_mgr, &nldr_obj);
		if (!status) {
			status = nldr_get_rmm_manager(nldr_obj, &rmm);
			if (rmm) {
				/* rmm_stat() returning false means the
				 * requested memory type is unknown */
				if (!rmm_stat(rmm,
					      (enum dsp_memtype)resource_type,
					      (struct dsp_memstat *)
					      &(resource_info->result.
						mem_stat)))
					status = -EINVAL;
			} else {
				status = -EFAULT;
			}
		}
		break;
	case DSP_RESOURCE_PROCLOAD:
		status = dev_get_io_mgr(p_proc_object->dev_obj, &hio_mgr);
		if (hio_mgr)
			status =
			    p_proc_object->intf_fxns->
			    io_get_proc_load(hio_mgr,
					     (struct dsp_procloadstat *)
					     &(resource_info->result.
					       proc_load_stat));
		else
			status = -EFAULT;
		break;
	default:
		status = -EPERM;
		break;
	}
func_end:
	return status;
}

/*
 *  ======== proc_get_dev_object ========
 *  Purpose:
 *      Return the Dev Object handle for a given Processor.
 *
 */
int proc_get_dev_object(void *hprocessor, struct dev_object **device_obj)
{
	int status = -EPERM;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;

	if (p_proc_object) {
		*device_obj = p_proc_object->dev_obj;
		status = 0;
	} else {
		*device_obj = NULL;
		status = -EFAULT;
	}

	return status;
}

/*
 *  ======== proc_get_state ========
 *  Purpose:
 *      Report the state of the specified DSP processor.
 */
int proc_get_state(void *hprocessor,
		   struct dsp_processorstate *proc_state_obj,
		   u32 state_info_size)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	int brd_status;

	if (p_proc_object) {
		/* First, retrieve BRD state information */
		status = (*p_proc_object->intf_fxns->brd_status)
		    (p_proc_object->bridge_context, &brd_status);
		if (!status) {
			/* map board state onto the PROC_* state space */
			switch (brd_status) {
			case BRD_STOPPED:
				proc_state_obj->proc_state = PROC_STOPPED;
				break;
			case BRD_SLEEP_TRANSITION:
			case BRD_DSP_HIBERNATION:
				/* Fall through */
			case BRD_RUNNING:
				proc_state_obj->proc_state = PROC_RUNNING;
				break;
			case BRD_LOADED:
				proc_state_obj->proc_state = PROC_LOADED;
				break;
			case BRD_ERROR:
				proc_state_obj->proc_state = PROC_ERROR;
				break;
			default:
				proc_state_obj->proc_state = 0xFF;
				status = -EPERM;
				break;
			}
		}
	} else {
		status = -EFAULT;
	}
	dev_dbg(bridge, "%s, results: status: 0x%x proc_state_obj: 0x%x\n",
		__func__, status, proc_state_obj->proc_state);
	return status;
}

/*
 *  ======== proc_get_trace ========
 *  Purpose:
 *      Retrieve the current contents of the trace buffer, located on
the
 *      Processor.  Predefined symbols for the trace buffer must have been
 *      configured into the DSP executable.
 *  Details:
 *      We support using the symbols SYS_PUTCBEG and SYS_PUTCEND to define a
 *      trace buffer, only.  Treat it as an undocumented feature.
 *      This call is destructive, meaning the processor is placed in the
 *      monitor state as a result of this function.
 */
int proc_get_trace(void *hprocessor, u8 * pbuf, u32 max_size)
{
	int status;
	/* not implemented in this driver */
	status = -ENOSYS;
	return status;
}

/*
 *  ======== proc_load ========
 *  Purpose:
 *      Reset a processor and load a new base program image.
 *      This will be an OEM-only function, and not part of the DSP/BIOS Bridge
 *      application developer's API.
 */
int proc_load(void *hprocessor, const s32 argc_index, const char **user_args,
	      const char **user_envp)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct io_mgr *hio_mgr;	/* IO manager handle */
	struct msg_mgr *hmsg_mgr;
	struct cod_manager *cod_mgr;	/* Code manager handle */
	char *pargv0;		/* temp argv[0] ptr */
	char **new_envp;	/* Updated envp[] array. */
	char sz_proc_id[MAXPROCIDLEN];	/* Size of "PROC_ID=<n>" */
	s32 envp_elems;		/* Num elements in envp[]. */
	s32 cnew_envp;		/* "  " in new_envp[] */
	s32 nproc_id = 0;	/* Anticipate MP version. */
	struct dcd_manager *hdcd_handle;
	struct dmm_object *dmm_mgr;
	u32 dw_ext_end;
	u32 proc_id;
	int brd_state;
	struct drv_data *drv_datap = dev_get_drvdata(bridge);

#ifdef OPT_LOAD_TIME_INSTRUMENTATION
	struct timeval tv1;
	struct timeval tv2;
#endif

#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
	struct dspbridge_platform_data *pdata =
	    omap_dspbridge_dev->dev.platform_data;
#endif

#ifdef OPT_LOAD_TIME_INSTRUMENTATION
	do_gettimeofday(&tv1);
#endif
	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	dev_get_cod_mgr(p_proc_object->dev_obj, &cod_mgr);
	if (!cod_mgr) {
		status = -EPERM;
		goto func_end;
	}
	/* the board must be stopped, then placed in monitor state,
	 * before a new image can be loaded */
	status = proc_stop(hprocessor);
	if (status)
		goto func_end;

	/* Place the board in the monitor state. */
	status = proc_monitor(hprocessor);
	if (status)
		goto func_end;

	/* Save ptr to original argv[0]. */
	pargv0 = (char *)user_args[0];
	/* Prepend "PROC_ID=<nproc_id>" to envp array for target. */
	envp_elems = get_envp_count((char **)user_envp);
	cnew_envp = (envp_elems ? (envp_elems + 1) : (envp_elems + 2));
	/* NOTE(review): sizeof(char **) equals sizeof(char *) on all
	 * supported targets, but sizeof(char *) is what is meant here */
	new_envp = kzalloc(cnew_envp * sizeof(char **), GFP_KERNEL);
	if (new_envp) {
		status = snprintf(sz_proc_id, MAXPROCIDLEN, PROC_ENVPROCID,
				  nproc_id);
		/* NOTE(review): the kernel's snprintf() does not return -1;
		 * this overflow check looks ineffective -- confirm */
		if (status == -1) {
			dev_dbg(bridge, "%s: Proc ID string overflow\n",
				__func__);
			status = -EPERM;
		} else {
			new_envp =
			    prepend_envp(new_envp, (char **)user_envp,
					 envp_elems, cnew_envp, sz_proc_id);
			/* Get the DCD Handle */
			status = mgr_get_dcd_handle(p_proc_object->mgr_obj,
						    (u32 *) &hdcd_handle);
			if (!status) {
				/*  Before proceeding with new load,
				 *  check if a previously registered COFF
				 *  exists.
				 *  If yes, unregister nodes in previously
				 *  registered COFF.  If any error occurred,
				 *  set previously registered COFF to NULL. */
				if (p_proc_object->last_coff != NULL) {
					status =
					    dcd_auto_unregister(hdcd_handle,
								p_proc_object->
								last_coff);
					/* Regardless of auto unregister status,
					 *  free previously allocated
					 *  memory. */
					kfree(p_proc_object->last_coff);
					p_proc_object->last_coff = NULL;
				}
			}
			/* On success, do cod_open_base() */
			status = cod_open_base(cod_mgr, (char *)user_args[0],
					       COD_SYMB);
		}
	} else {
		status = -ENOMEM;
	}
	if (!status) {
		/* Auto-register data base */
		/* Get the DCD Handle */
		status = mgr_get_dcd_handle(p_proc_object->mgr_obj,
					    (u32 *) &hdcd_handle);
		if (!status) {
			/*  Auto register nodes in specified COFF
			 *  file.  If registration did not fail,
			 *  (status = 0 or -EACCES)
			 *  save the name of the COFF file for
			 *  de-registration in the future. */
			status =
			    dcd_auto_register(hdcd_handle,
					      (char *)user_args[0]);
			if (status == -EACCES)
				status = 0;

			if (status) {
				status = -EPERM;
			} else {
				/* Allocate memory for pszLastCoff */
				p_proc_object->last_coff =
				    kzalloc((strlen(user_args[0]) + 1),
					    GFP_KERNEL);
				/* If memory allocated, save COFF file name */
				if (p_proc_object->last_coff) {
					strncpy(p_proc_object->last_coff,
						(char *)user_args[0],
						(strlen((char *)user_args[0]) +
						 1));
				}
			}
		}
	}
	/* Update shared memory address and size */
	if (!status) {
		/*  Create the message manager. This must be done
		 *  before calling the IOOnLoaded function. */
		dev_get_msg_mgr(p_proc_object->dev_obj, &hmsg_mgr);
		if (!hmsg_mgr) {
			status = msg_create(&hmsg_mgr, p_proc_object->dev_obj,
					    (msg_onexit) node_on_exit);
			dev_set_msg_mgr(p_proc_object->dev_obj, hmsg_mgr);
		}
	}
	if (!status) {
		/* Set the Device object's message manager */
		status = dev_get_io_mgr(p_proc_object->dev_obj, &hio_mgr);
		if (hio_mgr)
			status = (*p_proc_object->intf_fxns->io_on_loaded)
			    (hio_mgr);
		else
			status = -EFAULT;
	}
	if (!status) {
		/* Now, attempt to load an exec: */

		/* Boost the OPP level to Maximum level supported by baseport */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP5]);
#endif
		status = cod_load_base(cod_mgr, argc_index, (char **)user_args,
				       dev_brd_write_fxn,
				       p_proc_object->dev_obj, NULL);
		if (status) {
			if (status == -EBADF) {
				dev_dbg(bridge, "%s: Failure to Load the EXE\n",
					__func__);
			}
			if (status == -ESPIPE) {
				pr_err("%s: Couldn't parse the file\n",
				       __func__);
			}
		}
		/* Requesting the lowest opp supported */
#if defined(CONFIG_TIDSPBRIDGE_DVFS) && !defined(CONFIG_CPU_FREQ)
		if (pdata->cpu_set_freq)
			(*pdata->cpu_set_freq) (pdata->mpu_speed[VDD1_OPP1]);
#endif
	}
	if (!status) {
		/* Update the Processor status to loaded */
		status = (*p_proc_object->intf_fxns->brd_set_state)
		    (p_proc_object->bridge_context, BRD_LOADED);
		if (!status) {
			p_proc_object->proc_state = PROC_LOADED;
			if (p_proc_object->ntfy_obj)
				proc_notify_clients(p_proc_object,
						    DSP_PROCESSORSTATECHANGE);
		}
	}
	if (!status) {
		status = proc_get_processor_id(hprocessor, &proc_id);
		if (proc_id == DSP_UNIT) {
			/* Use all available DSP address space after EXTMEM
			 * for DMM */
			if (!status)
				status = cod_get_sym_value(cod_mgr, EXTEND,
							   &dw_ext_end);

			/* Reset DMM structs and add an initial free chunk */
			if (!status) {
				status =
				    dev_get_dmm_mgr(p_proc_object->dev_obj,
						    &dmm_mgr);
				if (dmm_mgr) {
					/* Set dw_ext_end to DMM START u8
					 * address */
					dw_ext_end =
					    (dw_ext_end + 1) * DSPWORDSIZE;
					/* DMM memory is from EXT_END */
					status = dmm_create_tables(dmm_mgr,
								   dw_ext_end,
								   DMMPOOLSIZE);
				} else {
					status = -EFAULT;
				}
			}
		}
	}
	/* Restore the original argv[0] */
	kfree(new_envp);
	user_args[0] = pargv0;
	if (!status) {
		if (!((*p_proc_object->intf_fxns->brd_status)
		      (p_proc_object->bridge_context, &brd_state))) {
			pr_info("%s: Processor Loaded %s\n", __func__, pargv0);
			kfree(drv_datap->base_img);
			drv_datap->base_img = kmalloc(strlen(pargv0) + 1,
						      GFP_KERNEL);
			if (drv_datap->base_img)
				strncpy(drv_datap->base_img, pargv0,
					strlen(pargv0) + 1);
			else
				status = -ENOMEM;
		}
	}
func_end:
	if (status) {
		pr_err("%s: Processor failed to load\n", __func__);
		proc_stop(p_proc_object);
	}
#ifdef OPT_LOAD_TIME_INSTRUMENTATION
	do_gettimeofday(&tv2);
	if (tv2.tv_usec < tv1.tv_usec) {
		tv2.tv_usec += 1000000;
		tv2.tv_sec--;
	}
	dev_dbg(bridge, "%s: time to load %d sec and %d usec\n", __func__,
		tv2.tv_sec - tv1.tv_sec, tv2.tv_usec - tv1.tv_usec);
#endif
	return status;
}

/*
 *  ======== proc_map ========
 *  Purpose:
 *      Maps a MPU buffer to DSP address space.
*/ int proc_map(void *hprocessor, void *pmpu_addr, u32 ul_size, void *req_addr, void **pp_map_addr, u32 ul_map_attr, struct process_context *pr_ctxt) { u32 va_align; u32 pa_align; struct dmm_object *dmm_mgr; u32 size_align; int status = 0; struct proc_object *p_proc_object = (struct proc_object *)hprocessor; struct dmm_map_object *map_obj; u32 tmp_addr = 0; #ifdef CONFIG_TIDSPBRIDGE_CACHE_LINE_CHECK if ((ul_map_attr & BUFMODE_MASK) != RBUF) { if (!IS_ALIGNED((u32)pmpu_addr, DSP_CACHE_LINE) || !IS_ALIGNED(ul_size, DSP_CACHE_LINE)) { pr_err("%s: not aligned: 0x%x (%d)\n", __func__, (u32)pmpu_addr, ul_size); return -EFAULT; } } #endif /* Calculate the page-aligned PA, VA and size */ va_align = PG_ALIGN_LOW((u32) req_addr, PG_SIZE4K); pa_align = PG_ALIGN_LOW((u32) pmpu_addr, PG_SIZE4K); size_align = PG_ALIGN_HIGH(ul_size + (u32) pmpu_addr - pa_align, PG_SIZE4K); if (!p_proc_object) { status = -EFAULT; goto func_end; } /* Critical section */ mutex_lock(&proc_lock); dmm_get_handle(p_proc_object, &dmm_mgr); if (dmm_mgr) status = dmm_map_memory(dmm_mgr, va_align, size_align); else status = -EFAULT; /* Add mapping to the page tables. 
*/ if (!status) { /* Mapped address = MSB of VA | LSB of PA */ tmp_addr = (va_align | ((u32) pmpu_addr & (PG_SIZE4K - 1))); /* mapped memory resource tracking */ map_obj = add_mapping_info(pr_ctxt, pa_align, tmp_addr, size_align); if (!map_obj) status = -ENOMEM; else status = (*p_proc_object->intf_fxns->brd_mem_map) (p_proc_object->bridge_context, pa_align, va_align, size_align, ul_map_attr, map_obj->pages); } if (!status) { /* Mapped address = MSB of VA | LSB of PA */ *pp_map_addr = (void *) tmp_addr; } else { remove_mapping_information(pr_ctxt, tmp_addr, size_align); dmm_un_map_memory(dmm_mgr, va_align, &size_align); } mutex_unlock(&proc_lock); if (status) goto func_end; func_end: dev_dbg(bridge, "%s: hprocessor %p, pmpu_addr %p, ul_size %x, " "req_addr %p, ul_map_attr %x, pp_map_addr %p, va_align %x, " "pa_align %x, size_align %x status 0x%x\n", __func__, hprocessor, pmpu_addr, ul_size, req_addr, ul_map_attr, pp_map_addr, va_align, pa_align, size_align, status); return status; } /* * ======== proc_register_notify ======== * Purpose: * Register to be notified of specific processor events. */ int proc_register_notify(void *hprocessor, u32 event_mask, u32 notify_type, struct dsp_notification * hnotification) { int status = 0; struct proc_object *p_proc_object = (struct proc_object *)hprocessor; struct deh_mgr *hdeh_mgr; /* Check processor handle */ if (!p_proc_object) { status = -EFAULT; goto func_end; } /* Check if event mask is a valid processor related event */ if (event_mask & ~(DSP_PROCESSORSTATECHANGE | DSP_PROCESSORATTACH | DSP_PROCESSORDETACH | DSP_PROCESSORRESTART | DSP_MMUFAULT | DSP_SYSERROR | DSP_PWRERROR | DSP_WDTOVERFLOW)) status = -EINVAL; /* Check if notify type is valid */ if (notify_type != DSP_SIGNALEVENT) status = -EINVAL; if (!status) { /* If event mask is not DSP_SYSERROR, DSP_MMUFAULT, * or DSP_PWRERROR then register event immediately. 
*/ if (event_mask & ~(DSP_SYSERROR | DSP_MMUFAULT | DSP_PWRERROR | DSP_WDTOVERFLOW)) { status = ntfy_register(p_proc_object->ntfy_obj, hnotification, event_mask, notify_type); /* Special case alert, special case alert! * If we're trying to *deregister* (i.e. event_mask * is 0), a DSP_SYSERROR or DSP_MMUFAULT notification, * we have to deregister with the DEH manager. * There's no way to know, based on event_mask which * manager the notification event was registered with, * so if we're trying to deregister and ntfy_register * failed, we'll give the deh manager a shot. */ if ((event_mask == 0) && status) { status = dev_get_deh_mgr(p_proc_object->dev_obj, &hdeh_mgr); status = bridge_deh_register_notify(hdeh_mgr, event_mask, notify_type, hnotification); } } else { status = dev_get_deh_mgr(p_proc_object->dev_obj, &hdeh_mgr); status = bridge_deh_register_notify(hdeh_mgr, event_mask, notify_type, hnotification); } } func_end: return status; } /* * ======== proc_reserve_memory ======== * Purpose: * Reserve a virtually contiguous region of DSP address space. 
*/
int proc_reserve_memory(void *hprocessor, u32 ul_size,
			void **pp_rsv_addr,
			struct process_context *pr_ctxt)
{
	struct dmm_object *dmm_mgr;
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct dmm_rsv_object *rsv_obj;

	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}

	status = dmm_get_handle(p_proc_object, &dmm_mgr);
	if (!dmm_mgr) {
		status = -EFAULT;
		goto func_end;
	}

	status = dmm_reserve_memory(dmm_mgr, ul_size, (u32 *) pp_rsv_addr);
	if (status != 0)
		goto func_end;

	/*
	 * A successful reserve should be followed by insertion of rsv_obj
	 * into dmm_rsv_list, so that reserved memory resource tracking
	 * remains uptodate
	 */
	/* NOTE(review): a kmalloc failure here is silently ignored -- the
	 * reservation then succeeds but is not tracked; confirm intended */
	rsv_obj = kmalloc(sizeof(struct dmm_rsv_object), GFP_KERNEL);
	if (rsv_obj) {
		rsv_obj->dsp_reserved_addr = (u32) *pp_rsv_addr;
		spin_lock(&pr_ctxt->dmm_rsv_lock);
		list_add(&rsv_obj->link, &pr_ctxt->dmm_rsv_list);
		spin_unlock(&pr_ctxt->dmm_rsv_lock);
	}

func_end:
	dev_dbg(bridge, "%s: hprocessor: 0x%p ul_size: 0x%x pp_rsv_addr: 0x%p "
		"status 0x%x\n", __func__, hprocessor,
		ul_size, pp_rsv_addr, status);
	return status;
}

/*
 *  ======== proc_start ========
 *  Purpose:
 *      Start a processor running.
 */
int proc_start(void *hprocessor)
{
	int status = 0;
	struct proc_object *p_proc_object = (struct proc_object *)hprocessor;
	struct cod_manager *cod_mgr;	/* Code manager handle */
	u32 dw_dsp_addr;	/* Loaded code's entry point. */
	int brd_state;

	if (!p_proc_object) {
		status = -EFAULT;
		goto func_end;
	}
	/* Call the bridge_brd_start */
	/* starting is only legal from the LOADED state */
	if (p_proc_object->proc_state != PROC_LOADED) {
		status = -EBADR;
		goto func_end;
	}
	status = dev_get_cod_mgr(p_proc_object->dev_obj, &cod_mgr);
	if (!cod_mgr) {
		status = -EFAULT;
		goto func_cont;
	}

	status = cod_get_entry(cod_mgr, &dw_dsp_addr);
	if (status)
		goto func_cont;

	status = (*p_proc_object->intf_fxns->brd_start)
	    (p_proc_object->bridge_context, dw_dsp_addr);
	if (status)
		goto func_cont;

	/* Call dev_create2 */
	status = dev_create2(p_proc_object->dev_obj);
	if (!status) {
		p_proc_object->proc_state = PROC_RUNNING;
		/* Deep sleep switces off the peripheral clocks.
		 * we just put the DSP CPU in idle in the idle loop.
		 * so there is no need to send a command to DSP */

		if (p_proc_object->ntfy_obj) {
			proc_notify_clients(p_proc_object,
					    DSP_PROCESSORSTATECHANGE);
		}
	} else {
		/* Failed to Create Node Manager and DISP Object
		 * Stop the Processor from running. Put it in STOPPED State */
		(void)(*p_proc_object->intf_fxns->
		       brd_stop) (p_proc_object->bridge_context);
		p_proc_object->proc_state = PROC_STOPPED;
	}
func_cont:
	if (!status) {
		if (!((*p_proc_object->intf_fxns->brd_status)
		      (p_proc_object->bridge_context, &brd_state))) {
			pr_info("%s: dsp in running state\n", __func__);
		}
	} else {
		pr_err("%s: Failed to start the dsp\n", __func__);
		proc_stop(p_proc_object);
	}

func_end:
	return status;
}

/*
 *  ======== proc_stop ========
 *  Purpose:
 *      Stop a processor running.
*/ int proc_stop(void *hprocessor) { int status = 0; struct proc_object *p_proc_object = (struct proc_object *)hprocessor; struct msg_mgr *hmsg_mgr; struct node_mgr *hnode_mgr; void *hnode; u32 node_tab_size = 1; u32 num_nodes = 0; u32 nodes_allocated = 0; if (!p_proc_object) { status = -EFAULT; goto func_end; } /* check if there are any running nodes */ status = dev_get_node_manager(p_proc_object->dev_obj, &hnode_mgr); if (!status && hnode_mgr) { status = node_enum_nodes(hnode_mgr, &hnode, node_tab_size, &num_nodes, &nodes_allocated); if ((status == -EINVAL) || (nodes_allocated > 0)) { pr_err("%s: Can't stop device, active nodes = %d \n", __func__, nodes_allocated); return -EBADR; } } /* Call the bridge_brd_stop */ /* It is OK to stop a device that does n't have nodes OR not started */ status = (*p_proc_object->intf_fxns-> brd_stop) (p_proc_object->bridge_context); if (!status) { dev_dbg(bridge, "%s: processor in standby mode\n", __func__); p_proc_object->proc_state = PROC_STOPPED; /* Destroy the Node Manager, msg_ctrl Manager */ if (!(dev_destroy2(p_proc_object->dev_obj))) { /* Destroy the msg_ctrl by calling msg_delete */ dev_get_msg_mgr(p_proc_object->dev_obj, &hmsg_mgr); if (hmsg_mgr) { msg_delete(hmsg_mgr); dev_set_msg_mgr(p_proc_object->dev_obj, NULL); } } } else { pr_err("%s: Failed to stop the processor\n", __func__); } func_end: return status; } /* * ======== proc_un_map ======== * Purpose: * Removes a MPU buffer mapping from the DSP address space. */ int proc_un_map(void *hprocessor, void *map_addr, struct process_context *pr_ctxt) { int status = 0; struct proc_object *p_proc_object = (struct proc_object *)hprocessor; struct dmm_object *dmm_mgr; u32 va_align; u32 size_align; va_align = PG_ALIGN_LOW((u32) map_addr, PG_SIZE4K); if (!p_proc_object) { status = -EFAULT; goto func_end; } status = dmm_get_handle(hprocessor, &dmm_mgr); if (!dmm_mgr) { status = -EFAULT; goto func_end; } /* Critical section */ mutex_lock(&proc_lock); /* * Update DMM structures. 
Get the size to unmap. * This function returns error if the VA is not mapped */ status = dmm_un_map_memory(dmm_mgr, (u32) va_align, &size_align); /* Remove mapping from the page tables. */ if (!status) { status = (*p_proc_object->intf_fxns->brd_mem_un_map) (p_proc_object->bridge_context, va_align, size_align); } if (status) goto unmap_failed; /* * A successful unmap should be followed by removal of map_obj * from dmm_map_list, so that mapped memory resource tracking * remains uptodate */ remove_mapping_information(pr_ctxt, (u32) map_addr, size_align); unmap_failed: mutex_unlock(&proc_lock); func_end: dev_dbg(bridge, "%s: hprocessor: 0x%p map_addr: 0x%p status: 0x%x\n", __func__, hprocessor, map_addr, status); return status; } /* * ======== proc_un_reserve_memory ======== * Purpose: * Frees a previously reserved region of DSP address space. */ int proc_un_reserve_memory(void *hprocessor, void *prsv_addr, struct process_context *pr_ctxt) { struct dmm_object *dmm_mgr; int status = 0; struct proc_object *p_proc_object = (struct proc_object *)hprocessor; struct dmm_rsv_object *rsv_obj; if (!p_proc_object) { status = -EFAULT; goto func_end; } status = dmm_get_handle(p_proc_object, &dmm_mgr); if (!dmm_mgr) { status = -EFAULT; goto func_end; } status = dmm_un_reserve_memory(dmm_mgr, (u32) prsv_addr); if (status != 0) goto func_end; /* * A successful unreserve should be followed by removal of rsv_obj * from dmm_rsv_list, so that reserved memory resource tracking * remains uptodate */ spin_lock(&pr_ctxt->dmm_rsv_lock); list_for_each_entry(rsv_obj, &pr_ctxt->dmm_rsv_list, link) { if (rsv_obj->dsp_reserved_addr == (u32) prsv_addr) { list_del(&rsv_obj->link); kfree(rsv_obj); break; } } spin_unlock(&pr_ctxt->dmm_rsv_lock); func_end: dev_dbg(bridge, "%s: hprocessor: 0x%p prsv_addr: 0x%p status: 0x%x\n", __func__, hprocessor, prsv_addr, status); return status; } /* * ======== = proc_monitor ======== == * Purpose: * Place the Processor in Monitor State. 
This is an internal * function and a requirement before Processor is loaded. * This does a bridge_brd_stop, dev_destroy2 and bridge_brd_monitor. * In dev_destroy2 we delete the node manager. * Parameters: * p_proc_object: Pointer to Processor Object * Returns: * 0: Processor placed in monitor mode. * !0: Failed to place processor in monitor mode. * Requires: * Valid Processor Handle * Ensures: * Success: ProcObject state is PROC_IDLE */ static int proc_monitor(struct proc_object *proc_obj) { int status = -EPERM; struct msg_mgr *hmsg_mgr; /* This is needed only when Device is loaded when it is * already 'ACTIVE' */ /* Destroy the Node Manager, msg_ctrl Manager */ if (!dev_destroy2(proc_obj->dev_obj)) { /* Destroy the msg_ctrl by calling msg_delete */ dev_get_msg_mgr(proc_obj->dev_obj, &hmsg_mgr); if (hmsg_mgr) { msg_delete(hmsg_mgr); dev_set_msg_mgr(proc_obj->dev_obj, NULL); } } /* Place the Board in the Monitor State */ if (!((*proc_obj->intf_fxns->brd_monitor) (proc_obj->bridge_context))) { status = 0; } return status; } /* * ======== get_envp_count ======== * Purpose: * Return the number of elements in the envp array, including the * terminating NULL element. */ static s32 get_envp_count(char **envp) { s32 ret = 0; if (envp) { while (*envp++) ret++; ret += 1; /* Include the terminating NULL in the count. */ } return ret; } /* * ======== prepend_envp ======== * Purpose: * Prepend an environment variable=value pair to the new envp array, and * copy in the existing var=value pairs in the old envp array. */ static char **prepend_envp(char **new_envp, char **envp, s32 envp_elems, s32 cnew_envp, char *sz_var) { char **pp_envp = new_envp; /* Prepend new environ var=value string */ *new_envp++ = sz_var; /* Copy user's environment into our own. */ while (envp_elems--) *new_envp++ = *envp++; /* Ensure NULL terminates the new environment strings array. 
*/ if (envp_elems == 0) *new_envp = NULL; return pp_envp; } /* * ======== proc_notify_clients ======== * Purpose: * Notify the processor the events. */ int proc_notify_clients(void *proc, u32 events) { int status = 0; struct proc_object *p_proc_object = (struct proc_object *)proc; if (!p_proc_object) { status = -EFAULT; goto func_end; } ntfy_notify(p_proc_object->ntfy_obj, events); func_end: return status; } /* * ======== proc_notify_all_clients ======== * Purpose: * Notify the processor the events. This includes notifying all clients * attached to a particulat DSP. */ int proc_notify_all_clients(void *proc, u32 events) { int status = 0; struct proc_object *p_proc_object = (struct proc_object *)proc; if (!p_proc_object) { status = -EFAULT; goto func_end; } dev_notify_clients(p_proc_object->dev_obj, events); func_end: return status; } /* * ======== proc_get_processor_id ======== * Purpose: * Retrieves the processor ID. */ int proc_get_processor_id(void *proc, u32 * proc_id) { int status = 0; struct proc_object *p_proc_object = (struct proc_object *)proc; if (p_proc_object) *proc_id = p_proc_object->processor_id; else status = -EFAULT; return status; }
gpl-2.0
Radium-Devices/Radium_hammerhead
drivers/hwmon/smm665.c
4942
21337
/*
 * Driver for SMM665 Power Controller / Monitor
 *
 * Copyright (C) 2010 Ericsson AB.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This driver should also work for SMM465, SMM764, and SMM766, but is untested
 * for those chips. Only monitoring functionality is implemented.
 *
 * Datasheets:
 *	http://www.summitmicro.com/prod_select/summary/SMM665/SMM665B_2089_20.pdf
 *	http://www.summitmicro.com/prod_select/summary/SMM766B/SMM766B_2122.pdf
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/i2c.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
#include <linux/delay.h>

/* Internal reference voltage (VREF, x 1000 */
#define SMM665_VREF_ADC_X1000	1250

/* module parameters */
static int vref = SMM665_VREF_ADC_X1000;
module_param(vref, int, 0);
MODULE_PARM_DESC(vref, "Reference voltage in mV");

enum chips { smm465, smm665, smm665c, smm764, smm766 };

/*
 * ADC channel addresses
 */
#define	SMM665_MISC16_ADC_DATA_A	0x00
#define	SMM665_MISC16_ADC_DATA_B	0x01
#define	SMM665_MISC16_ADC_DATA_C	0x02
#define	SMM665_MISC16_ADC_DATA_D	0x03
#define	SMM665_MISC16_ADC_DATA_E	0x04
#define	SMM665_MISC16_ADC_DATA_F	0x05
#define	SMM665_MISC16_ADC_DATA_VDD	0x06
#define	SMM665_MISC16_ADC_DATA_12V	0x07
#define	SMM665_MISC16_ADC_DATA_INT_TEMP	0x08
#define	SMM665_MISC16_ADC_DATA_AIN1	0x09
#define	SMM665_MISC16_ADC_DATA_AIN2	0x0a

/*
 * Command registers
 */
#define	SMM665_MISC8_CMD_STS		0x80
#define	SMM665_MISC8_STATUS1		0x81
#define	SMM665_MISC8_STATUSS2		0x82
#define	SMM665_MISC8_IO_POLARITY	0x83
#define	SMM665_MISC8_PUP_POLARITY	0x84
#define	SMM665_MISC8_ADOC_STATUS1	0x85
#define	SMM665_MISC8_ADOC_STATUS2	0x86
#define	SMM665_MISC8_WRITE_PROT		0x87
#define	SMM665_MISC8_STS_TRACK		0x88

/*
 * Configuration registers and register groups
 */
#define SMM665_ADOC_ENABLE		0x0d
#define SMM665_LIMIT_BASE		0x80	/* First limit register */

/*
 * Limit register bit masks
 */
#define SMM665_TRIGGER_RST		0x8000
#define SMM665_TRIGGER_HEALTHY		0x4000
#define SMM665_TRIGGER_POWEROFF		0x2000
#define SMM665_TRIGGER_SHUTDOWN		0x1000
#define SMM665_ADC_MASK			0x03ff

#define smm665_is_critical(lim)	((lim) & (SMM665_TRIGGER_RST \
					  | SMM665_TRIGGER_POWEROFF \
					  | SMM665_TRIGGER_SHUTDOWN))
/*
 * Fault register bit definitions
 * Values are merged from status registers 1/2,
 * with status register 1 providing the upper 8 bits.
 */
#define SMM665_FAULT_A		0x0001
#define SMM665_FAULT_B		0x0002
#define SMM665_FAULT_C		0x0004
#define SMM665_FAULT_D		0x0008
#define SMM665_FAULT_E		0x0010
#define SMM665_FAULT_F		0x0020
#define SMM665_FAULT_VDD	0x0040
#define SMM665_FAULT_12V	0x0080
#define SMM665_FAULT_TEMP	0x0100
#define SMM665_FAULT_AIN1	0x0200
#define SMM665_FAULT_AIN2	0x0400

/*
 * I2C Register addresses
 *
 * The configuration register needs to be the configured base register.
 * The command/status register address is derived from it.
 */
#define SMM665_REGMASK		0x78
#define SMM665_CMDREG_BASE	0x48
#define SMM665_CONFREG_BASE	0x50

/*
 * Equations given by chip manufacturer to calculate voltage/temperature values
 * vref = Reference voltage on VREF_ADC pin (module parameter)
 * adc  = 10bit ADC value read back from registers
 */

/* Voltage A-F and VDD */
#define SMM665_VMON_ADC_TO_VOLTS(adc)  ((adc) * vref / 256)

/* Voltage 12VIN */
#define SMM665_12VIN_ADC_TO_VOLTS(adc) ((adc) * vref * 3 / 256)

/* Voltage AIN1, AIN2 */
#define SMM665_AIN_ADC_TO_VOLTS(adc)   ((adc) * vref / 512)

/* Temp Sensor */
#define SMM665_TEMP_ADC_TO_CELSIUS(adc) (((adc) <= 511) ?		\
					 ((int)(adc) * 1000 / 4) :	\
					 (((int)(adc) - 0x400) * 1000 / 4))

#define SMM665_NUM_ADC		11

/*
 * Chip dependent ADC conversion time, in uS
 */
#define SMM665_ADC_WAIT_SMM665	70
#define SMM665_ADC_WAIT_SMM766	185

struct smm665_data {
	enum chips type;
	int conversion_time;	/* ADC conversion time */
	struct device *hwmon_dev;
	struct mutex update_lock;
	bool valid;
	unsigned long last_updated;	/* in jiffies */
	u16 adc[SMM665_NUM_ADC];	/* adc values (raw) */
	u16 faults;			/* fault status */
	/* The following values are in mV */
	int critical_min_limit[SMM665_NUM_ADC];
	int alarm_min_limit[SMM665_NUM_ADC];
	int critical_max_limit[SMM665_NUM_ADC];
	int alarm_max_limit[SMM665_NUM_ADC];
	struct i2c_client *cmdreg;
};

/*
 * smm665_read16()
 *
 * Read 16 bit value from <reg>, <reg+1>. Upper 8 bits are in <reg>.
 */
static int smm665_read16(struct i2c_client *client, int reg)
{
	int rv, val;

	rv = i2c_smbus_read_byte_data(client, reg);
	if (rv < 0)
		return rv;
	val = rv << 8;
	rv = i2c_smbus_read_byte_data(client, reg + 1);
	if (rv < 0)
		return rv;
	val |= rv;
	return val;
}

/*
 * Read adc value.
 */
static int smm665_read_adc(struct smm665_data *data, int adc)
{
	struct i2c_client *client = data->cmdreg;
	int rv;
	int radc;

	/*
	 * Algorithm for reading ADC, per SMM665 datasheet
	 *
	 *  {[S][addr][W][Ack]} {[offset][Ack]} {[S][addr][R][Nack]}
	 * [wait conversion time]
	 *  {[S][addr][R][Ack]} {[datahi][Ack]} {[datalo][Ack][P]}
	 *
	 * To implement the first part of this exchange,
	 * do a full read transaction and expect a failure/Nack.
	 * This sets up the address pointer on the SMM665
	 * and starts the ADC conversion.
	 * Then do a two-byte read transaction.
	 */
	rv = i2c_smbus_read_byte_data(client, adc << 3);
	if (rv != -ENXIO) {
		/*
		 * We expect ENXIO to reflect NACK
		 * (per Documentation/i2c/fault-codes).
		 * Everything else is an error.
		 */
		dev_dbg(&client->dev,
			"Unexpected return code %d when setting ADC index", rv);
		return (rv < 0) ? rv : -EIO;
	}
	udelay(data->conversion_time);
	/*
	 * Now read two bytes.
	 *
	 * Neither i2c_smbus_read_byte() nor
	 * i2c_smbus_read_block_data() worked here,
	 * so use i2c_smbus_read_word_swapped() instead.
	 * We could also try to use i2c_master_recv(),
	 * but that is not always supported.
	 */
	rv = i2c_smbus_read_word_swapped(client, 0);
	if (rv < 0) {
		dev_dbg(&client->dev, "Failed to read ADC value: error %d", rv);
		/* NOTE(review): this discards rv and returns -1 (-EPERM)
		 * to the caller -- presumably intentional; confirm */
		return -1;
	}
	/*
	 * Validate/verify readback adc channel (in bit 11..14).
	 */
	radc = (rv >> 11) & 0x0f;
	if (radc != adc) {
		dev_dbg(&client->dev, "Unexpected RADC: Expected %d got %d",
			adc, radc);
		return -EIO;
	}

	return rv & SMM665_ADC_MASK;
}

/*
 * smm665_update_device: refresh the cached fault and ADC readings at most
 * once per second (HZ jiffies); returns the data pointer or an ERR_PTR.
 */
static struct smm665_data *smm665_update_device(struct device *dev)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct smm665_data *data = i2c_get_clientdata(client);
	struct smm665_data *ret = data;

	mutex_lock(&data->update_lock);

	if (time_after(jiffies, data->last_updated + HZ) || !data->valid) {
		int i, val;

		/*
		 * read status registers
		 */
		val = smm665_read16(client, SMM665_MISC8_STATUS1);
		if (unlikely(val < 0)) {
			ret = ERR_PTR(val);
			goto abort;
		}
		data->faults = val;

		/* Read adc registers */
		for (i = 0; i < SMM665_NUM_ADC; i++) {
			val = smm665_read_adc(data, i);
			if (unlikely(val < 0)) {
				ret = ERR_PTR(val);
				goto abort;
			}
			data->adc[i] = val;
		}
		data->last_updated = jiffies;
		data->valid = 1;
	}
abort:
	mutex_unlock(&data->update_lock);
	return ret;
}

/* Return converted value from given adc */
static int smm665_convert(u16 adcval, int index)
{
	int val = 0;

	switch (index) {
	case SMM665_MISC16_ADC_DATA_12V:
		val = SMM665_12VIN_ADC_TO_VOLTS(adcval & SMM665_ADC_MASK);
		break;
	case SMM665_MISC16_ADC_DATA_VDD:
	case SMM665_MISC16_ADC_DATA_A:
	case SMM665_MISC16_ADC_DATA_B:
	case SMM665_MISC16_ADC_DATA_C:
	case SMM665_MISC16_ADC_DATA_D:
	case SMM665_MISC16_ADC_DATA_E:
	case SMM665_MISC16_ADC_DATA_F:
		val = SMM665_VMON_ADC_TO_VOLTS(adcval & SMM665_ADC_MASK);
		break;
	case SMM665_MISC16_ADC_DATA_AIN1:
	case SMM665_MISC16_ADC_DATA_AIN2:
		val = SMM665_AIN_ADC_TO_VOLTS(adcval & SMM665_ADC_MASK);
		break;
	case SMM665_MISC16_ADC_DATA_INT_TEMP:
		val = SMM665_TEMP_ADC_TO_CELSIUS(adcval & SMM665_ADC_MASK);
		break;
	default:
		/* If we get here, the developer messed up */
		WARN_ON_ONCE(1);
		break;
	}

	return val;
}

/* accessor: sysfs min alarm limit (mV) for the given ADC channel */
static int smm665_get_min(struct device *dev, int index)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct smm665_data *data = i2c_get_clientdata(client);

	return data->alarm_min_limit[index];
}

/* accessor: sysfs max alarm limit (mV) for the given ADC channel */
static int smm665_get_max(struct device *dev, int index)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct smm665_data *data = i2c_get_clientdata(client);

	return data->alarm_max_limit[index];
}

/* accessor: critical minimum limit (mV) for the given ADC channel */
static int smm665_get_lcrit(struct device *dev, int index)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct smm665_data *data = i2c_get_clientdata(client);

	return data->critical_min_limit[index];
}

/* accessor: critical maximum limit (mV) for the given ADC channel */
static int smm665_get_crit(struct device *dev, int index)
{
	struct i2c_client *client = to_i2c_client(dev);
	struct smm665_data *data = i2c_get_clientdata(client);

	return data->critical_max_limit[index];
}

/* sysfs show: critical alarm bit for the channel in attr->index */
static ssize_t smm665_show_crit_alarm(struct device *dev,
				      struct device_attribute *da, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	struct smm665_data *data = smm665_update_device(dev);
	int val = 0;

	if (IS_ERR(data))
		return PTR_ERR(data);

	if (data->faults & (1 << attr->index))
		val = 1;

	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

/* sysfs show: converted input value for the channel in attr->index */
static ssize_t smm665_show_input(struct device *dev,
				 struct device_attribute *da, char *buf)
{
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da);
	struct smm665_data *data = smm665_update_device(dev);
	int adc = attr->index;
	int val;

	if (IS_ERR(data))
		return PTR_ERR(data);

	val = smm665_convert(data->adc[adc], adc);
	return snprintf(buf, PAGE_SIZE, "%d\n", val);
}

/* macro continues past the end of this chunk (truncated at view edge) */
#define SMM665_SHOW(what) \
static ssize_t smm665_show_##what(struct device *dev, \
				  struct device_attribute *da, char *buf) \
{ \
	struct sensor_device_attribute *attr = to_sensor_dev_attr(da); \
	const int val = smm665_get_##what(dev, attr->index); \
return snprintf(buf, PAGE_SIZE, "%d\n", val); \ } SMM665_SHOW(min); SMM665_SHOW(max); SMM665_SHOW(lcrit); SMM665_SHOW(crit); /* * These macros are used below in constructing device attribute objects * for use with sysfs_create_group() to make a sysfs device file * for each register. */ #define SMM665_ATTR(name, type, cmd_idx) \ static SENSOR_DEVICE_ATTR(name##_##type, S_IRUGO, \ smm665_show_##type, NULL, cmd_idx) /* Construct a sensor_device_attribute structure for each register */ /* Input voltages */ SMM665_ATTR(in1, input, SMM665_MISC16_ADC_DATA_12V); SMM665_ATTR(in2, input, SMM665_MISC16_ADC_DATA_VDD); SMM665_ATTR(in3, input, SMM665_MISC16_ADC_DATA_A); SMM665_ATTR(in4, input, SMM665_MISC16_ADC_DATA_B); SMM665_ATTR(in5, input, SMM665_MISC16_ADC_DATA_C); SMM665_ATTR(in6, input, SMM665_MISC16_ADC_DATA_D); SMM665_ATTR(in7, input, SMM665_MISC16_ADC_DATA_E); SMM665_ATTR(in8, input, SMM665_MISC16_ADC_DATA_F); SMM665_ATTR(in9, input, SMM665_MISC16_ADC_DATA_AIN1); SMM665_ATTR(in10, input, SMM665_MISC16_ADC_DATA_AIN2); /* Input voltages min */ SMM665_ATTR(in1, min, SMM665_MISC16_ADC_DATA_12V); SMM665_ATTR(in2, min, SMM665_MISC16_ADC_DATA_VDD); SMM665_ATTR(in3, min, SMM665_MISC16_ADC_DATA_A); SMM665_ATTR(in4, min, SMM665_MISC16_ADC_DATA_B); SMM665_ATTR(in5, min, SMM665_MISC16_ADC_DATA_C); SMM665_ATTR(in6, min, SMM665_MISC16_ADC_DATA_D); SMM665_ATTR(in7, min, SMM665_MISC16_ADC_DATA_E); SMM665_ATTR(in8, min, SMM665_MISC16_ADC_DATA_F); SMM665_ATTR(in9, min, SMM665_MISC16_ADC_DATA_AIN1); SMM665_ATTR(in10, min, SMM665_MISC16_ADC_DATA_AIN2); /* Input voltages max */ SMM665_ATTR(in1, max, SMM665_MISC16_ADC_DATA_12V); SMM665_ATTR(in2, max, SMM665_MISC16_ADC_DATA_VDD); SMM665_ATTR(in3, max, SMM665_MISC16_ADC_DATA_A); SMM665_ATTR(in4, max, SMM665_MISC16_ADC_DATA_B); SMM665_ATTR(in5, max, SMM665_MISC16_ADC_DATA_C); SMM665_ATTR(in6, max, SMM665_MISC16_ADC_DATA_D); SMM665_ATTR(in7, max, SMM665_MISC16_ADC_DATA_E); SMM665_ATTR(in8, max, SMM665_MISC16_ADC_DATA_F); SMM665_ATTR(in9, max, 
SMM665_MISC16_ADC_DATA_AIN1); SMM665_ATTR(in10, max, SMM665_MISC16_ADC_DATA_AIN2); /* Input voltages lcrit */ SMM665_ATTR(in1, lcrit, SMM665_MISC16_ADC_DATA_12V); SMM665_ATTR(in2, lcrit, SMM665_MISC16_ADC_DATA_VDD); SMM665_ATTR(in3, lcrit, SMM665_MISC16_ADC_DATA_A); SMM665_ATTR(in4, lcrit, SMM665_MISC16_ADC_DATA_B); SMM665_ATTR(in5, lcrit, SMM665_MISC16_ADC_DATA_C); SMM665_ATTR(in6, lcrit, SMM665_MISC16_ADC_DATA_D); SMM665_ATTR(in7, lcrit, SMM665_MISC16_ADC_DATA_E); SMM665_ATTR(in8, lcrit, SMM665_MISC16_ADC_DATA_F); SMM665_ATTR(in9, lcrit, SMM665_MISC16_ADC_DATA_AIN1); SMM665_ATTR(in10, lcrit, SMM665_MISC16_ADC_DATA_AIN2); /* Input voltages crit */ SMM665_ATTR(in1, crit, SMM665_MISC16_ADC_DATA_12V); SMM665_ATTR(in2, crit, SMM665_MISC16_ADC_DATA_VDD); SMM665_ATTR(in3, crit, SMM665_MISC16_ADC_DATA_A); SMM665_ATTR(in4, crit, SMM665_MISC16_ADC_DATA_B); SMM665_ATTR(in5, crit, SMM665_MISC16_ADC_DATA_C); SMM665_ATTR(in6, crit, SMM665_MISC16_ADC_DATA_D); SMM665_ATTR(in7, crit, SMM665_MISC16_ADC_DATA_E); SMM665_ATTR(in8, crit, SMM665_MISC16_ADC_DATA_F); SMM665_ATTR(in9, crit, SMM665_MISC16_ADC_DATA_AIN1); SMM665_ATTR(in10, crit, SMM665_MISC16_ADC_DATA_AIN2); /* critical alarms */ SMM665_ATTR(in1, crit_alarm, SMM665_FAULT_12V); SMM665_ATTR(in2, crit_alarm, SMM665_FAULT_VDD); SMM665_ATTR(in3, crit_alarm, SMM665_FAULT_A); SMM665_ATTR(in4, crit_alarm, SMM665_FAULT_B); SMM665_ATTR(in5, crit_alarm, SMM665_FAULT_C); SMM665_ATTR(in6, crit_alarm, SMM665_FAULT_D); SMM665_ATTR(in7, crit_alarm, SMM665_FAULT_E); SMM665_ATTR(in8, crit_alarm, SMM665_FAULT_F); SMM665_ATTR(in9, crit_alarm, SMM665_FAULT_AIN1); SMM665_ATTR(in10, crit_alarm, SMM665_FAULT_AIN2); /* Temperature */ SMM665_ATTR(temp1, input, SMM665_MISC16_ADC_DATA_INT_TEMP); SMM665_ATTR(temp1, min, SMM665_MISC16_ADC_DATA_INT_TEMP); SMM665_ATTR(temp1, max, SMM665_MISC16_ADC_DATA_INT_TEMP); SMM665_ATTR(temp1, lcrit, SMM665_MISC16_ADC_DATA_INT_TEMP); SMM665_ATTR(temp1, crit, SMM665_MISC16_ADC_DATA_INT_TEMP); SMM665_ATTR(temp1, 
crit_alarm, SMM665_FAULT_TEMP); /* * Finally, construct an array of pointers to members of the above objects, * as required for sysfs_create_group() */ static struct attribute *smm665_attributes[] = { &sensor_dev_attr_in1_input.dev_attr.attr, &sensor_dev_attr_in1_min.dev_attr.attr, &sensor_dev_attr_in1_max.dev_attr.attr, &sensor_dev_attr_in1_lcrit.dev_attr.attr, &sensor_dev_attr_in1_crit.dev_attr.attr, &sensor_dev_attr_in1_crit_alarm.dev_attr.attr, &sensor_dev_attr_in2_input.dev_attr.attr, &sensor_dev_attr_in2_min.dev_attr.attr, &sensor_dev_attr_in2_max.dev_attr.attr, &sensor_dev_attr_in2_lcrit.dev_attr.attr, &sensor_dev_attr_in2_crit.dev_attr.attr, &sensor_dev_attr_in2_crit_alarm.dev_attr.attr, &sensor_dev_attr_in3_input.dev_attr.attr, &sensor_dev_attr_in3_min.dev_attr.attr, &sensor_dev_attr_in3_max.dev_attr.attr, &sensor_dev_attr_in3_lcrit.dev_attr.attr, &sensor_dev_attr_in3_crit.dev_attr.attr, &sensor_dev_attr_in3_crit_alarm.dev_attr.attr, &sensor_dev_attr_in4_input.dev_attr.attr, &sensor_dev_attr_in4_min.dev_attr.attr, &sensor_dev_attr_in4_max.dev_attr.attr, &sensor_dev_attr_in4_lcrit.dev_attr.attr, &sensor_dev_attr_in4_crit.dev_attr.attr, &sensor_dev_attr_in4_crit_alarm.dev_attr.attr, &sensor_dev_attr_in5_input.dev_attr.attr, &sensor_dev_attr_in5_min.dev_attr.attr, &sensor_dev_attr_in5_max.dev_attr.attr, &sensor_dev_attr_in5_lcrit.dev_attr.attr, &sensor_dev_attr_in5_crit.dev_attr.attr, &sensor_dev_attr_in5_crit_alarm.dev_attr.attr, &sensor_dev_attr_in6_input.dev_attr.attr, &sensor_dev_attr_in6_min.dev_attr.attr, &sensor_dev_attr_in6_max.dev_attr.attr, &sensor_dev_attr_in6_lcrit.dev_attr.attr, &sensor_dev_attr_in6_crit.dev_attr.attr, &sensor_dev_attr_in6_crit_alarm.dev_attr.attr, &sensor_dev_attr_in7_input.dev_attr.attr, &sensor_dev_attr_in7_min.dev_attr.attr, &sensor_dev_attr_in7_max.dev_attr.attr, &sensor_dev_attr_in7_lcrit.dev_attr.attr, &sensor_dev_attr_in7_crit.dev_attr.attr, &sensor_dev_attr_in7_crit_alarm.dev_attr.attr, 
&sensor_dev_attr_in8_input.dev_attr.attr, &sensor_dev_attr_in8_min.dev_attr.attr, &sensor_dev_attr_in8_max.dev_attr.attr, &sensor_dev_attr_in8_lcrit.dev_attr.attr, &sensor_dev_attr_in8_crit.dev_attr.attr, &sensor_dev_attr_in8_crit_alarm.dev_attr.attr, &sensor_dev_attr_in9_input.dev_attr.attr, &sensor_dev_attr_in9_min.dev_attr.attr, &sensor_dev_attr_in9_max.dev_attr.attr, &sensor_dev_attr_in9_lcrit.dev_attr.attr, &sensor_dev_attr_in9_crit.dev_attr.attr, &sensor_dev_attr_in9_crit_alarm.dev_attr.attr, &sensor_dev_attr_in10_input.dev_attr.attr, &sensor_dev_attr_in10_min.dev_attr.attr, &sensor_dev_attr_in10_max.dev_attr.attr, &sensor_dev_attr_in10_lcrit.dev_attr.attr, &sensor_dev_attr_in10_crit.dev_attr.attr, &sensor_dev_attr_in10_crit_alarm.dev_attr.attr, &sensor_dev_attr_temp1_input.dev_attr.attr, &sensor_dev_attr_temp1_min.dev_attr.attr, &sensor_dev_attr_temp1_max.dev_attr.attr, &sensor_dev_attr_temp1_lcrit.dev_attr.attr, &sensor_dev_attr_temp1_crit.dev_attr.attr, &sensor_dev_attr_temp1_crit_alarm.dev_attr.attr, NULL, }; static const struct attribute_group smm665_group = { .attrs = smm665_attributes, }; static int smm665_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct i2c_adapter *adapter = client->adapter; struct smm665_data *data; int i, ret; if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA | I2C_FUNC_SMBUS_WORD_DATA)) return -ENODEV; if (i2c_smbus_read_byte_data(client, SMM665_ADOC_ENABLE) < 0) return -ENODEV; data = devm_kzalloc(&client->dev, sizeof(*data), GFP_KERNEL); if (!data) return -ENOMEM; i2c_set_clientdata(client, data); mutex_init(&data->update_lock); data->type = id->driver_data; data->cmdreg = i2c_new_dummy(adapter, (client->addr & ~SMM665_REGMASK) | SMM665_CMDREG_BASE); if (!data->cmdreg) return -ENOMEM; switch (data->type) { case smm465: case smm665: data->conversion_time = SMM665_ADC_WAIT_SMM665; break; case smm665c: case smm764: case smm766: data->conversion_time = SMM665_ADC_WAIT_SMM766; break; } ret = 
-ENODEV; if (i2c_smbus_read_byte_data(data->cmdreg, SMM665_MISC8_CMD_STS) < 0) goto out_unregister; /* * Read limits. * * Limit registers start with register SMM665_LIMIT_BASE. * Each channel uses 8 registers, providing four limit values * per channel. Each limit value requires two registers, with the * high byte in the first register and the low byte in the second * register. The first two limits are under limit values, followed * by two over limit values. * * Limit register order matches the ADC register order, so we use * ADC register defines throughout the code to index limit registers. * * We save the first retrieved value both as "critical" and "alarm" * value. The second value overwrites either the critical or the * alarm value, depending on its configuration. This ensures that both * critical and alarm values are initialized, even if both registers are * configured as critical or non-critical. */ for (i = 0; i < SMM665_NUM_ADC; i++) { int val; val = smm665_read16(client, SMM665_LIMIT_BASE + i * 8); if (unlikely(val < 0)) goto out_unregister; data->critical_min_limit[i] = data->alarm_min_limit[i] = smm665_convert(val, i); val = smm665_read16(client, SMM665_LIMIT_BASE + i * 8 + 2); if (unlikely(val < 0)) goto out_unregister; if (smm665_is_critical(val)) data->critical_min_limit[i] = smm665_convert(val, i); else data->alarm_min_limit[i] = smm665_convert(val, i); val = smm665_read16(client, SMM665_LIMIT_BASE + i * 8 + 4); if (unlikely(val < 0)) goto out_unregister; data->critical_max_limit[i] = data->alarm_max_limit[i] = smm665_convert(val, i); val = smm665_read16(client, SMM665_LIMIT_BASE + i * 8 + 6); if (unlikely(val < 0)) goto out_unregister; if (smm665_is_critical(val)) data->critical_max_limit[i] = smm665_convert(val, i); else data->alarm_max_limit[i] = smm665_convert(val, i); } /* Register sysfs hooks */ ret = sysfs_create_group(&client->dev.kobj, &smm665_group); if (ret) goto out_unregister; data->hwmon_dev = hwmon_device_register(&client->dev); if 
(IS_ERR(data->hwmon_dev)) { ret = PTR_ERR(data->hwmon_dev); goto out_remove_group; } return 0; out_remove_group: sysfs_remove_group(&client->dev.kobj, &smm665_group); out_unregister: i2c_unregister_device(data->cmdreg); return ret; } static int smm665_remove(struct i2c_client *client) { struct smm665_data *data = i2c_get_clientdata(client); i2c_unregister_device(data->cmdreg); hwmon_device_unregister(data->hwmon_dev); sysfs_remove_group(&client->dev.kobj, &smm665_group); return 0; } static const struct i2c_device_id smm665_id[] = { {"smm465", smm465}, {"smm665", smm665}, {"smm665c", smm665c}, {"smm764", smm764}, {"smm766", smm766}, {} }; MODULE_DEVICE_TABLE(i2c, smm665_id); /* This is the driver that will be inserted */ static struct i2c_driver smm665_driver = { .driver = { .name = "smm665", }, .probe = smm665_probe, .remove = smm665_remove, .id_table = smm665_id, }; module_i2c_driver(smm665_driver); MODULE_AUTHOR("Guenter Roeck"); MODULE_DESCRIPTION("SMM665 driver"); MODULE_LICENSE("GPL");
gpl-2.0
neobuddy89/falcon_kernel
sound/drivers/mts64.c
4942
27420
/* * ALSA Driver for Ego Systems Inc. (ESI) Miditerminal 4140 * Copyright (c) 2006 by Matthias König <mk@phasorlab.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA * */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/parport.h> #include <linux/spinlock.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/slab.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/rawmidi.h> #include <sound/control.h> #define CARD_NAME "Miditerminal 4140" #define DRIVER_NAME "MTS64" #define PLATFORM_DRIVER "snd_mts64" static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; static struct platform_device *platform_devices[SNDRV_CARDS]; static int device_count; module_param_array(index, int, NULL, S_IRUGO); MODULE_PARM_DESC(index, "Index value for " CARD_NAME " soundcard."); module_param_array(id, charp, NULL, S_IRUGO); MODULE_PARM_DESC(id, "ID string for " CARD_NAME " soundcard."); module_param_array(enable, bool, NULL, S_IRUGO); MODULE_PARM_DESC(enable, "Enable " CARD_NAME " soundcard."); MODULE_AUTHOR("Matthias Koenig <mk@phasorlab.de>"); MODULE_DESCRIPTION("ESI Miditerminal 4140"); MODULE_LICENSE("GPL"); MODULE_SUPPORTED_DEVICE("{{ESI,Miditerminal 4140}}"); 
/********************************************************************* * Chip specific *********************************************************************/ #define MTS64_NUM_INPUT_PORTS 5 #define MTS64_NUM_OUTPUT_PORTS 4 #define MTS64_SMPTE_SUBSTREAM 4 struct mts64 { spinlock_t lock; struct snd_card *card; struct snd_rawmidi *rmidi; struct pardevice *pardev; int pardev_claimed; int open_count; int current_midi_output_port; int current_midi_input_port; u8 mode[MTS64_NUM_INPUT_PORTS]; struct snd_rawmidi_substream *midi_input_substream[MTS64_NUM_INPUT_PORTS]; int smpte_switch; u8 time[4]; /* [0]=hh, [1]=mm, [2]=ss, [3]=ff */ u8 fps; }; static int snd_mts64_free(struct mts64 *mts) { kfree(mts); return 0; } static int __devinit snd_mts64_create(struct snd_card *card, struct pardevice *pardev, struct mts64 **rchip) { struct mts64 *mts; *rchip = NULL; mts = kzalloc(sizeof(struct mts64), GFP_KERNEL); if (mts == NULL) return -ENOMEM; /* Init chip specific data */ spin_lock_init(&mts->lock); mts->card = card; mts->pardev = pardev; mts->current_midi_output_port = -1; mts->current_midi_input_port = -1; *rchip = mts; return 0; } /********************************************************************* * HW register related constants *********************************************************************/ /* Status Bits */ #define MTS64_STAT_BSY 0x80 #define MTS64_STAT_BIT_SET 0x20 /* readout process, bit is set */ #define MTS64_STAT_PORT 0x10 /* read byte is a port number */ /* Control Bits */ #define MTS64_CTL_READOUT 0x08 /* enable readout */ #define MTS64_CTL_WRITE_CMD 0x06 #define MTS64_CTL_WRITE_DATA 0x02 #define MTS64_CTL_STROBE 0x01 /* Command */ #define MTS64_CMD_RESET 0xfe #define MTS64_CMD_PROBE 0x8f /* Used in probing procedure */ #define MTS64_CMD_SMPTE_SET_TIME 0xe8 #define MTS64_CMD_SMPTE_SET_FPS 0xee #define MTS64_CMD_SMPTE_STOP 0xef #define MTS64_CMD_SMPTE_FPS_24 0xe3 #define MTS64_CMD_SMPTE_FPS_25 0xe2 #define MTS64_CMD_SMPTE_FPS_2997 0xe4 #define 
MTS64_CMD_SMPTE_FPS_30D 0xe1 #define MTS64_CMD_SMPTE_FPS_30 0xe0 #define MTS64_CMD_COM_OPEN 0xf8 /* setting the communication mode */ #define MTS64_CMD_COM_CLOSE1 0xff /* clearing communication mode */ #define MTS64_CMD_COM_CLOSE2 0xf5 /********************************************************************* * Hardware specific functions *********************************************************************/ static void mts64_enable_readout(struct parport *p); static void mts64_disable_readout(struct parport *p); static int mts64_device_ready(struct parport *p); static int mts64_device_init(struct parport *p); static int mts64_device_open(struct mts64 *mts); static int mts64_device_close(struct mts64 *mts); static u8 mts64_map_midi_input(u8 c); static int mts64_probe(struct parport *p); static u16 mts64_read(struct parport *p); static u8 mts64_read_char(struct parport *p); static void mts64_smpte_start(struct parport *p, u8 hours, u8 minutes, u8 seconds, u8 frames, u8 idx); static void mts64_smpte_stop(struct parport *p); static void mts64_write_command(struct parport *p, u8 c); static void mts64_write_data(struct parport *p, u8 c); static void mts64_write_midi(struct mts64 *mts, u8 c, int midiport); /* Enables the readout procedure * * Before we can read a midi byte from the device, we have to set * bit 3 of control port. 
*/ static void mts64_enable_readout(struct parport *p) { u8 c; c = parport_read_control(p); c |= MTS64_CTL_READOUT; parport_write_control(p, c); } /* Disables readout * * Readout is disabled by clearing bit 3 of control */ static void mts64_disable_readout(struct parport *p) { u8 c; c = parport_read_control(p); c &= ~MTS64_CTL_READOUT; parport_write_control(p, c); } /* waits for device ready * * Checks if BUSY (Bit 7 of status) is clear * 1 device ready * 0 failure */ static int mts64_device_ready(struct parport *p) { int i; u8 c; for (i = 0; i < 0xffff; ++i) { c = parport_read_status(p); c &= MTS64_STAT_BSY; if (c != 0) return 1; } return 0; } /* Init device (LED blinking startup magic) * * Returns: * 0 init ok * -EIO failure */ static int __devinit mts64_device_init(struct parport *p) { int i; mts64_write_command(p, MTS64_CMD_RESET); for (i = 0; i < 64; ++i) { msleep(100); if (mts64_probe(p) == 0) { /* success */ mts64_disable_readout(p); return 0; } } mts64_disable_readout(p); return -EIO; } /* * Opens the device (set communication mode) */ static int mts64_device_open(struct mts64 *mts) { int i; struct parport *p = mts->pardev->port; for (i = 0; i < 5; ++i) mts64_write_command(p, MTS64_CMD_COM_OPEN); return 0; } /* * Close device (clear communication mode) */ static int mts64_device_close(struct mts64 *mts) { int i; struct parport *p = mts->pardev->port; for (i = 0; i < 5; ++i) { mts64_write_command(p, MTS64_CMD_COM_CLOSE1); mts64_write_command(p, MTS64_CMD_COM_CLOSE2); } return 0; } /* map hardware port to substream number * * When reading a byte from the device, the device tells us * on what port the byte is. This HW port has to be mapped to * the midiport (substream number). 
* substream 0-3 are Midiports 1-4 * substream 4 is SMPTE Timecode * The mapping is done by the table: * HW | 0 | 1 | 2 | 3 | 4 * SW | 0 | 1 | 4 | 2 | 3 */ static u8 mts64_map_midi_input(u8 c) { static u8 map[] = { 0, 1, 4, 2, 3 }; return map[c]; } /* Probe parport for device * * Do we have a Miditerminal 4140 on parport? * Returns: * 0 device found * -ENODEV no device */ static int __devinit mts64_probe(struct parport *p) { u8 c; mts64_smpte_stop(p); mts64_write_command(p, MTS64_CMD_PROBE); msleep(50); c = mts64_read(p); c &= 0x00ff; if (c != MTS64_CMD_PROBE) return -ENODEV; else return 0; } /* Read byte incl. status from device * * Returns: * data in lower 8 bits and status in upper 8 bits */ static u16 mts64_read(struct parport *p) { u8 data, status; mts64_device_ready(p); mts64_enable_readout(p); status = parport_read_status(p); data = mts64_read_char(p); mts64_disable_readout(p); return (status << 8) | data; } /* Read a byte from device * * Note, that readout mode has to be enabled. * readout procedure is as follows: * - Write number of the Bit to read to DATA * - Read STATUS * - Bit 5 of STATUS indicates if Bit is set * * Returns: * Byte read from device */ static u8 mts64_read_char(struct parport *p) { u8 c = 0; u8 status; u8 i; for (i = 0; i < 8; ++i) { parport_write_data(p, i); c >>= 1; status = parport_read_status(p); if (status & MTS64_STAT_BIT_SET) c |= 0x80; } return c; } /* Starts SMPTE Timecode generation * * The device creates SMPTE Timecode by hardware. 
* 0 24 fps * 1 25 fps * 2 29.97 fps * 3 30 fps (Drop-frame) * 4 30 fps */ static void mts64_smpte_start(struct parport *p, u8 hours, u8 minutes, u8 seconds, u8 frames, u8 idx) { static u8 fps[5] = { MTS64_CMD_SMPTE_FPS_24, MTS64_CMD_SMPTE_FPS_25, MTS64_CMD_SMPTE_FPS_2997, MTS64_CMD_SMPTE_FPS_30D, MTS64_CMD_SMPTE_FPS_30 }; mts64_write_command(p, MTS64_CMD_SMPTE_SET_TIME); mts64_write_command(p, frames); mts64_write_command(p, seconds); mts64_write_command(p, minutes); mts64_write_command(p, hours); mts64_write_command(p, MTS64_CMD_SMPTE_SET_FPS); mts64_write_command(p, fps[idx]); } /* Stops SMPTE Timecode generation */ static void mts64_smpte_stop(struct parport *p) { mts64_write_command(p, MTS64_CMD_SMPTE_STOP); } /* Write a command byte to device */ static void mts64_write_command(struct parport *p, u8 c) { mts64_device_ready(p); parport_write_data(p, c); parport_write_control(p, MTS64_CTL_WRITE_CMD); parport_write_control(p, MTS64_CTL_WRITE_CMD | MTS64_CTL_STROBE); parport_write_control(p, MTS64_CTL_WRITE_CMD); } /* Write a data byte to device */ static void mts64_write_data(struct parport *p, u8 c) { mts64_device_ready(p); parport_write_data(p, c); parport_write_control(p, MTS64_CTL_WRITE_DATA); parport_write_control(p, MTS64_CTL_WRITE_DATA | MTS64_CTL_STROBE); parport_write_control(p, MTS64_CTL_WRITE_DATA); } /* Write a MIDI byte to midiport * * midiport ranges from 0-3 and maps to Ports 1-4 * assumptions: communication mode is on */ static void mts64_write_midi(struct mts64 *mts, u8 c, int midiport) { struct parport *p = mts->pardev->port; /* check current midiport */ if (mts->current_midi_output_port != midiport) mts64_write_command(p, midiport); /* write midi byte */ mts64_write_data(p, c); } /********************************************************************* * Control elements *********************************************************************/ /* SMPTE Switch */ #define snd_mts64_ctl_smpte_switch_info snd_ctl_boolean_mono_info static int 
snd_mts64_ctl_smpte_switch_get(struct snd_kcontrol* kctl, struct snd_ctl_elem_value *uctl) { struct mts64 *mts = snd_kcontrol_chip(kctl); spin_lock_irq(&mts->lock); uctl->value.integer.value[0] = mts->smpte_switch; spin_unlock_irq(&mts->lock); return 0; } /* smpte_switch is not accessed from IRQ handler, so we just need to protect the HW access */ static int snd_mts64_ctl_smpte_switch_put(struct snd_kcontrol* kctl, struct snd_ctl_elem_value *uctl) { struct mts64 *mts = snd_kcontrol_chip(kctl); int changed = 0; int val = !!uctl->value.integer.value[0]; spin_lock_irq(&mts->lock); if (mts->smpte_switch == val) goto __out; changed = 1; mts->smpte_switch = val; if (mts->smpte_switch) { mts64_smpte_start(mts->pardev->port, mts->time[0], mts->time[1], mts->time[2], mts->time[3], mts->fps); } else { mts64_smpte_stop(mts->pardev->port); } __out: spin_unlock_irq(&mts->lock); return changed; } static struct snd_kcontrol_new mts64_ctl_smpte_switch __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_RAWMIDI, .name = "SMPTE Playback Switch", .index = 0, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .private_value = 0, .info = snd_mts64_ctl_smpte_switch_info, .get = snd_mts64_ctl_smpte_switch_get, .put = snd_mts64_ctl_smpte_switch_put }; /* Time */ static int snd_mts64_ctl_smpte_time_h_info(struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 23; return 0; } static int snd_mts64_ctl_smpte_time_f_info(struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 99; return 0; } static int snd_mts64_ctl_smpte_time_info(struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo) { uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER; uinfo->count = 1; uinfo->value.integer.min = 0; uinfo->value.integer.max = 59; return 0; } static int 
snd_mts64_ctl_smpte_time_get(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *uctl) { struct mts64 *mts = snd_kcontrol_chip(kctl); int idx = kctl->private_value; spin_lock_irq(&mts->lock); uctl->value.integer.value[0] = mts->time[idx]; spin_unlock_irq(&mts->lock); return 0; } static int snd_mts64_ctl_smpte_time_put(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *uctl) { struct mts64 *mts = snd_kcontrol_chip(kctl); int idx = kctl->private_value; unsigned int time = uctl->value.integer.value[0] % 60; int changed = 0; spin_lock_irq(&mts->lock); if (mts->time[idx] != time) { changed = 1; mts->time[idx] = time; } spin_unlock_irq(&mts->lock); return changed; } static struct snd_kcontrol_new mts64_ctl_smpte_time_hours __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_RAWMIDI, .name = "SMPTE Time Hours", .index = 0, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .private_value = 0, .info = snd_mts64_ctl_smpte_time_h_info, .get = snd_mts64_ctl_smpte_time_get, .put = snd_mts64_ctl_smpte_time_put }; static struct snd_kcontrol_new mts64_ctl_smpte_time_minutes __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_RAWMIDI, .name = "SMPTE Time Minutes", .index = 0, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .private_value = 1, .info = snd_mts64_ctl_smpte_time_info, .get = snd_mts64_ctl_smpte_time_get, .put = snd_mts64_ctl_smpte_time_put }; static struct snd_kcontrol_new mts64_ctl_smpte_time_seconds __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_RAWMIDI, .name = "SMPTE Time Seconds", .index = 0, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .private_value = 2, .info = snd_mts64_ctl_smpte_time_info, .get = snd_mts64_ctl_smpte_time_get, .put = snd_mts64_ctl_smpte_time_put }; static struct snd_kcontrol_new mts64_ctl_smpte_time_frames __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_RAWMIDI, .name = "SMPTE Time Frames", .index = 0, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .private_value = 3, .info = snd_mts64_ctl_smpte_time_f_info, .get = snd_mts64_ctl_smpte_time_get, .put = 
snd_mts64_ctl_smpte_time_put }; /* FPS */ static int snd_mts64_ctl_smpte_fps_info(struct snd_kcontrol *kctl, struct snd_ctl_elem_info *uinfo) { static char *texts[5] = { "24", "25", "29.97", "30D", "30" }; uinfo->type = SNDRV_CTL_ELEM_TYPE_ENUMERATED; uinfo->count = 1; uinfo->value.enumerated.items = 5; if (uinfo->value.enumerated.item > 4) uinfo->value.enumerated.item = 4; strcpy(uinfo->value.enumerated.name, texts[uinfo->value.enumerated.item]); return 0; } static int snd_mts64_ctl_smpte_fps_get(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *uctl) { struct mts64 *mts = snd_kcontrol_chip(kctl); spin_lock_irq(&mts->lock); uctl->value.enumerated.item[0] = mts->fps; spin_unlock_irq(&mts->lock); return 0; } static int snd_mts64_ctl_smpte_fps_put(struct snd_kcontrol *kctl, struct snd_ctl_elem_value *uctl) { struct mts64 *mts = snd_kcontrol_chip(kctl); int changed = 0; if (uctl->value.enumerated.item[0] >= 5) return -EINVAL; spin_lock_irq(&mts->lock); if (mts->fps != uctl->value.enumerated.item[0]) { changed = 1; mts->fps = uctl->value.enumerated.item[0]; } spin_unlock_irq(&mts->lock); return changed; } static struct snd_kcontrol_new mts64_ctl_smpte_fps __devinitdata = { .iface = SNDRV_CTL_ELEM_IFACE_RAWMIDI, .name = "SMPTE Fps", .index = 0, .access = SNDRV_CTL_ELEM_ACCESS_READWRITE, .private_value = 0, .info = snd_mts64_ctl_smpte_fps_info, .get = snd_mts64_ctl_smpte_fps_get, .put = snd_mts64_ctl_smpte_fps_put }; static int __devinit snd_mts64_ctl_create(struct snd_card *card, struct mts64 *mts) { int err, i; static struct snd_kcontrol_new *control[] __devinitdata = { &mts64_ctl_smpte_switch, &mts64_ctl_smpte_time_hours, &mts64_ctl_smpte_time_minutes, &mts64_ctl_smpte_time_seconds, &mts64_ctl_smpte_time_frames, &mts64_ctl_smpte_fps, NULL }; for (i = 0; control[i]; ++i) { err = snd_ctl_add(card, snd_ctl_new1(control[i], mts)); if (err < 0) { snd_printd("Cannot create control: %s\n", control[i]->name); return err; } } return 0; } 
/********************************************************************* * Rawmidi *********************************************************************/ #define MTS64_MODE_INPUT_TRIGGERED 0x01 static int snd_mts64_rawmidi_open(struct snd_rawmidi_substream *substream) { struct mts64 *mts = substream->rmidi->private_data; if (mts->open_count == 0) { /* We don't need a spinlock here, because this is just called if the device has not been opened before. So there aren't any IRQs from the device */ mts64_device_open(mts); msleep(50); } ++(mts->open_count); return 0; } static int snd_mts64_rawmidi_close(struct snd_rawmidi_substream *substream) { struct mts64 *mts = substream->rmidi->private_data; unsigned long flags; --(mts->open_count); if (mts->open_count == 0) { /* We need the spinlock_irqsave here because we can still have IRQs at this point */ spin_lock_irqsave(&mts->lock, flags); mts64_device_close(mts); spin_unlock_irqrestore(&mts->lock, flags); msleep(500); } else if (mts->open_count < 0) mts->open_count = 0; return 0; } static void snd_mts64_rawmidi_output_trigger(struct snd_rawmidi_substream *substream, int up) { struct mts64 *mts = substream->rmidi->private_data; u8 data; unsigned long flags; spin_lock_irqsave(&mts->lock, flags); while (snd_rawmidi_transmit_peek(substream, &data, 1) == 1) { mts64_write_midi(mts, data, substream->number+1); snd_rawmidi_transmit_ack(substream, 1); } spin_unlock_irqrestore(&mts->lock, flags); } static void snd_mts64_rawmidi_input_trigger(struct snd_rawmidi_substream *substream, int up) { struct mts64 *mts = substream->rmidi->private_data; unsigned long flags; spin_lock_irqsave(&mts->lock, flags); if (up) mts->mode[substream->number] |= MTS64_MODE_INPUT_TRIGGERED; else mts->mode[substream->number] &= ~MTS64_MODE_INPUT_TRIGGERED; spin_unlock_irqrestore(&mts->lock, flags); } static struct snd_rawmidi_ops snd_mts64_rawmidi_output_ops = { .open = snd_mts64_rawmidi_open, .close = snd_mts64_rawmidi_close, .trigger = 
snd_mts64_rawmidi_output_trigger }; static struct snd_rawmidi_ops snd_mts64_rawmidi_input_ops = { .open = snd_mts64_rawmidi_open, .close = snd_mts64_rawmidi_close, .trigger = snd_mts64_rawmidi_input_trigger }; /* Create and initialize the rawmidi component */ static int __devinit snd_mts64_rawmidi_create(struct snd_card *card) { struct mts64 *mts = card->private_data; struct snd_rawmidi *rmidi; struct snd_rawmidi_substream *substream; struct list_head *list; int err; err = snd_rawmidi_new(card, CARD_NAME, 0, MTS64_NUM_OUTPUT_PORTS, MTS64_NUM_INPUT_PORTS, &rmidi); if (err < 0) return err; rmidi->private_data = mts; strcpy(rmidi->name, CARD_NAME); rmidi->info_flags = SNDRV_RAWMIDI_INFO_OUTPUT | SNDRV_RAWMIDI_INFO_INPUT | SNDRV_RAWMIDI_INFO_DUPLEX; mts->rmidi = rmidi; /* register rawmidi ops */ snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_OUTPUT, &snd_mts64_rawmidi_output_ops); snd_rawmidi_set_ops(rmidi, SNDRV_RAWMIDI_STREAM_INPUT, &snd_mts64_rawmidi_input_ops); /* name substreams */ /* output */ list_for_each(list, &rmidi->streams[SNDRV_RAWMIDI_STREAM_OUTPUT].substreams) { substream = list_entry(list, struct snd_rawmidi_substream, list); sprintf(substream->name, "Miditerminal %d", substream->number+1); } /* input */ list_for_each(list, &rmidi->streams[SNDRV_RAWMIDI_STREAM_INPUT].substreams) { substream = list_entry(list, struct snd_rawmidi_substream, list); mts->midi_input_substream[substream->number] = substream; switch(substream->number) { case MTS64_SMPTE_SUBSTREAM: strcpy(substream->name, "Miditerminal SMPTE"); break; default: sprintf(substream->name, "Miditerminal %d", substream->number+1); } } /* controls */ err = snd_mts64_ctl_create(card, mts); return err; } /********************************************************************* * parport stuff *********************************************************************/ static void snd_mts64_interrupt(void *private) { struct mts64 *mts = ((struct snd_card*)private)->private_data; u16 ret; u8 status, data; struct 
snd_rawmidi_substream *substream; spin_lock(&mts->lock); ret = mts64_read(mts->pardev->port); data = ret & 0x00ff; status = ret >> 8; if (status & MTS64_STAT_PORT) { mts->current_midi_input_port = mts64_map_midi_input(data); } else { if (mts->current_midi_input_port == -1) goto __out; substream = mts->midi_input_substream[mts->current_midi_input_port]; if (mts->mode[substream->number] & MTS64_MODE_INPUT_TRIGGERED) snd_rawmidi_receive(substream, &data, 1); } __out: spin_unlock(&mts->lock); } static int __devinit snd_mts64_probe_port(struct parport *p) { struct pardevice *pardev; int res; pardev = parport_register_device(p, DRIVER_NAME, NULL, NULL, NULL, 0, NULL); if (!pardev) return -EIO; if (parport_claim(pardev)) { parport_unregister_device(pardev); return -EIO; } res = mts64_probe(p); parport_release(pardev); parport_unregister_device(pardev); return res; } static void __devinit snd_mts64_attach(struct parport *p) { struct platform_device *device; device = platform_device_alloc(PLATFORM_DRIVER, device_count); if (!device) return; /* Temporary assignment to forward the parport */ platform_set_drvdata(device, p); if (platform_device_add(device) < 0) { platform_device_put(device); return; } /* Since we dont get the return value of probe * We need to check if device probing succeeded or not */ if (!platform_get_drvdata(device)) { platform_device_unregister(device); return; } /* register device in global table */ platform_devices[device_count] = device; device_count++; } static void snd_mts64_detach(struct parport *p) { /* nothing to do here */ } static struct parport_driver mts64_parport_driver = { .name = "mts64", .attach = snd_mts64_attach, .detach = snd_mts64_detach }; /********************************************************************* * platform stuff *********************************************************************/ static void snd_mts64_card_private_free(struct snd_card *card) { struct mts64 *mts = card->private_data; struct pardevice *pardev = 
mts->pardev; if (pardev) { if (mts->pardev_claimed) parport_release(pardev); parport_unregister_device(pardev); } snd_mts64_free(mts); } static int __devinit snd_mts64_probe(struct platform_device *pdev) { struct pardevice *pardev; struct parport *p; int dev = pdev->id; struct snd_card *card = NULL; struct mts64 *mts = NULL; int err; p = platform_get_drvdata(pdev); platform_set_drvdata(pdev, NULL); if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) return -ENOENT; if ((err = snd_mts64_probe_port(p)) < 0) return err; err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) { snd_printd("Cannot create card\n"); return err; } strcpy(card->driver, DRIVER_NAME); strcpy(card->shortname, "ESI " CARD_NAME); sprintf(card->longname, "%s at 0x%lx, irq %i", card->shortname, p->base, p->irq); pardev = parport_register_device(p, /* port */ DRIVER_NAME, /* name */ NULL, /* preempt */ NULL, /* wakeup */ snd_mts64_interrupt, /* ISR */ PARPORT_DEV_EXCL, /* flags */ (void *)card); /* private */ if (pardev == NULL) { snd_printd("Cannot register pardevice\n"); err = -EIO; goto __err; } if ((err = snd_mts64_create(card, pardev, &mts)) < 0) { snd_printd("Cannot create main component\n"); parport_unregister_device(pardev); goto __err; } card->private_data = mts; card->private_free = snd_mts64_card_private_free; if ((err = snd_mts64_rawmidi_create(card)) < 0) { snd_printd("Creating Rawmidi component failed\n"); goto __err; } /* claim parport */ if (parport_claim(pardev)) { snd_printd("Cannot claim parport 0x%lx\n", pardev->port->base); err = -EIO; goto __err; } mts->pardev_claimed = 1; /* init device */ if ((err = mts64_device_init(p)) < 0) goto __err; platform_set_drvdata(pdev, card); snd_card_set_dev(card, &pdev->dev); /* At this point card will be usable */ if ((err = snd_card_register(card)) < 0) { snd_printd("Cannot register card\n"); goto __err; } snd_printk(KERN_INFO "ESI Miditerminal 4140 on 0x%lx\n", p->base); return 0; __err: snd_card_free(card); 
return err; } static int __devexit snd_mts64_remove(struct platform_device *pdev) { struct snd_card *card = platform_get_drvdata(pdev); if (card) snd_card_free(card); return 0; } static struct platform_driver snd_mts64_driver = { .probe = snd_mts64_probe, .remove = __devexit_p(snd_mts64_remove), .driver = { .name = PLATFORM_DRIVER } }; /********************************************************************* * module init stuff *********************************************************************/ static void snd_mts64_unregister_all(void) { int i; for (i = 0; i < SNDRV_CARDS; ++i) { if (platform_devices[i]) { platform_device_unregister(platform_devices[i]); platform_devices[i] = NULL; } } platform_driver_unregister(&snd_mts64_driver); parport_unregister_driver(&mts64_parport_driver); } static int __init snd_mts64_module_init(void) { int err; if ((err = platform_driver_register(&snd_mts64_driver)) < 0) return err; if (parport_register_driver(&mts64_parport_driver) != 0) { platform_driver_unregister(&snd_mts64_driver); return -EIO; } if (device_count == 0) { snd_mts64_unregister_all(); return -ENODEV; } return 0; } static void __exit snd_mts64_module_exit(void) { snd_mts64_unregister_all(); } module_init(snd_mts64_module_init); module_exit(snd_mts64_module_exit);
gpl-2.0
gentu/android_kernel_zte_nx503a
arch/x86/mm/amdtopology.c
8014
4381
/* * AMD NUMA support. * Discover the memory map and associated nodes. * * This version reads it directly from the AMD northbridge. * * Copyright 2002,2003 Andi Kleen, SuSE Labs. */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/string.h> #include <linux/module.h> #include <linux/nodemask.h> #include <linux/memblock.h> #include <linux/bootmem.h> #include <asm/io.h> #include <linux/pci_ids.h> #include <linux/acpi.h> #include <asm/types.h> #include <asm/mmzone.h> #include <asm/proto.h> #include <asm/e820.h> #include <asm/pci-direct.h> #include <asm/numa.h> #include <asm/mpspec.h> #include <asm/apic.h> #include <asm/amd_nb.h> static unsigned char __initdata nodeids[8]; static __init int find_northbridge(void) { int num; for (num = 0; num < 32; num++) { u32 header; header = read_pci_config(0, num, 0, 0x00); if (header != (PCI_VENDOR_ID_AMD | (0x1100<<16)) && header != (PCI_VENDOR_ID_AMD | (0x1200<<16)) && header != (PCI_VENDOR_ID_AMD | (0x1300<<16))) continue; header = read_pci_config(0, num, 1, 0x00); if (header != (PCI_VENDOR_ID_AMD | (0x1101<<16)) && header != (PCI_VENDOR_ID_AMD | (0x1201<<16)) && header != (PCI_VENDOR_ID_AMD | (0x1301<<16))) continue; return num; } return -ENOENT; } static __init void early_get_boot_cpu_id(void) { /* * need to get the APIC ID of the BSP so can use that to * create apicid_to_node in amd_scan_nodes() */ #ifdef CONFIG_X86_MPPARSE /* * get boot-time SMP configuration: */ if (smp_found_config) early_get_smp_config(); #endif } int __init amd_numa_init(void) { u64 start = PFN_PHYS(0); u64 end = PFN_PHYS(max_pfn); unsigned numnodes; u64 prevbase; int i, j, nb; u32 nodeid, reg; unsigned int bits, cores, apicid_base; if (!early_pci_allowed()) return -EINVAL; nb = find_northbridge(); if (nb < 0) return nb; pr_info("Scanning NUMA topology in Northbridge %d\n", nb); reg = read_pci_config(0, nb, 0, 0x60); numnodes = ((reg >> 4) & 0xF) + 1; if (numnodes <= 1) return -ENOENT; pr_info("Number of physical nodes %d\n", numnodes); 
prevbase = 0; for (i = 0; i < 8; i++) { u64 base, limit; base = read_pci_config(0, nb, 1, 0x40 + i*8); limit = read_pci_config(0, nb, 1, 0x44 + i*8); nodeids[i] = nodeid = limit & 7; if ((base & 3) == 0) { if (i < numnodes) pr_info("Skipping disabled node %d\n", i); continue; } if (nodeid >= numnodes) { pr_info("Ignoring excess node %d (%Lx:%Lx)\n", nodeid, base, limit); continue; } if (!limit) { pr_info("Skipping node entry %d (base %Lx)\n", i, base); continue; } if ((base >> 8) & 3 || (limit >> 8) & 3) { pr_err("Node %d using interleaving mode %Lx/%Lx\n", nodeid, (base >> 8) & 3, (limit >> 8) & 3); return -EINVAL; } if (node_isset(nodeid, numa_nodes_parsed)) { pr_info("Node %d already present, skipping\n", nodeid); continue; } limit >>= 16; limit <<= 24; limit |= (1<<24)-1; limit++; if (limit > end) limit = end; if (limit <= base) continue; base >>= 16; base <<= 24; if (base < start) base = start; if (limit > end) limit = end; if (limit == base) { pr_err("Empty node %d\n", nodeid); continue; } if (limit < base) { pr_err("Node %d bogus settings %Lx-%Lx.\n", nodeid, base, limit); continue; } /* Could sort here, but pun for now. Should not happen anyroads. */ if (prevbase > base) { pr_err("Node map not sorted %Lx,%Lx\n", prevbase, base); return -EINVAL; } pr_info("Node %d MemBase %016Lx Limit %016Lx\n", nodeid, base, limit); prevbase = base; numa_add_memblk(nodeid, base, limit); node_set(nodeid, numa_nodes_parsed); } if (!nodes_weight(numa_nodes_parsed)) return -ENOENT; /* * We seem to have valid NUMA configuration. Map apicids to nodes * using the coreid bits from early_identify_cpu. 
*/ bits = boot_cpu_data.x86_coreid_bits; cores = 1 << bits; apicid_base = 0; /* get the APIC ID of the BSP early for systems with apicid lifting */ early_get_boot_cpu_id(); if (boot_cpu_physical_apicid > 0) { pr_info("BSP APIC ID: %02x\n", boot_cpu_physical_apicid); apicid_base = boot_cpu_physical_apicid; } for_each_node_mask(i, numa_nodes_parsed) for (j = apicid_base; j < cores + apicid_base; j++) set_apicid_to_node((i << bits) + j, i); return 0; }
gpl-2.0
vl197602/htc_msm8960
drivers/staging/prima/CORE/BAP/src/bapApiStatus.c
79
33497
/* * Copyright (c) 2012, Code Aurora Forum. All rights reserved. * * Previously licensed under the ISC license by Qualcomm Atheros, Inc. * * * Permission to use, copy, modify, and/or distribute this software for * any purpose with or without fee is hereby granted, provided that the * above copyright notice and this permission notice appear in all * copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL * WARRANTIES WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE * AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL * DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR * PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR * PERFORMANCE OF THIS SOFTWARE. */ /*=========================================================================== b a p A p i S t a t u s . C OVERVIEW: This software unit holds the implementation of the WLAN BAP modules Status functions. The functions externalized by this module are to be called ONLY by other WLAN modules (HDD) that properly register with the BAP Layer initially. DEPENDENCIES: Are listed for each API below. Copyright (c) 2008 QUALCOMM Incorporated. All Rights Reserved. Qualcomm Confidential and Proprietary ===========================================================================*/ /*=========================================================================== EDIT HISTORY FOR FILE This section contains comments describing changes made to the module. Notice that changes are listed in reverse chronological order. 
$Header: /cygdrive/d/Builds/M7201JSDCAAPAD52240B/WM/platform/msm7200/Src/Drivers/SD/ClientDrivers/WLAN/QCT_BTAMP_RSN/CORE/BAP/src/bapApiStatus.c,v 1.7 2009/03/09 08:45:04 jzmuda Exp jzmuda $$DateTime$$Author: jzmuda $ when who what, where, why ---------- --- -------------------------------------------------------- 2008-09-15 jez Created module ===========================================================================*/ /*---------------------------------------------------------------------------- * Include Files * -------------------------------------------------------------------------*/ //#include "wlan_qct_tl.h" #include "vos_trace.h" /* BT-AMP PAL API header file */ #include "bapApi.h" #include "bapInternal.h" //#define BAP_DEBUG /*---------------------------------------------------------------------------- * Preprocessor Definitions and Constants * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Type Declarations * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Global Data Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Static Variable Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Static Function Declarations and Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Externalized Function Definitions * -------------------------------------------------------------------------*/ /*---------------------------------------------------------------------------- * Function Declarations and 
Documentation * -------------------------------------------------------------------------*/ /* Status Parameters */ /*---------------------------------------------------------------------------- FUNCTION WLAN_BAPReadFailedContactCounter() DESCRIPTION Implements the actual HCI Read Failed Contact Counter command. There is no need for a callback because when this call returns the action has been completed. DEPENDENCIES NA. PARAMETERS IN btampHandle: pointer to the BAP handle. Returned from WLANBAP_GetNewHndl. pBapHCIReadFailedContactCounter: pointer to the "HCI Read Failed Contact Counter" structure. pFailedContactCounter: pointer to return value for the "Failed Contact Counter" IN/OUT pBapHCIEvent: Return event value for the command complete event. (The caller of this routine is responsible for sending the Command Complete event up the HCI interface.) RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to pBapHCIReadFailedContactCounter or pFailedContactCounter is NULL VOS_STATUS_SUCCESS: Success SIDE EFFECTS ----------------------------------------------------------------------------*/ VOS_STATUS WLAN_BAPReadFailedContactCounter ( ptBtampHandle btampHandle, tBtampTLVHCI_Read_Failed_Contact_Counter_Cmd *pBapHCIReadFailedContactCounter, tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */ /* Including "Read" Command Complete*/ ) { return VOS_STATUS_SUCCESS; } /* WLAN_BAPReadFailedContactCounter */ /*---------------------------------------------------------------------------- FUNCTION WLAN_BAPResetFailedContactCounter() DESCRIPTION Implements the actual HCI Reset Failed Contact Counter command. There is no need for a callback because when this call returns the action has been completed. DEPENDENCIES NA. PARAMETERS IN btampHandle: pointer to the BAP handle. Returned from WLANBAP_GetNewHndl. pBapHCIResetFailedContactCounter: pointer to the "HCI Reset Failed Contact Counter" structure. 
IN/OUT pBapHCIEvent: Return event value for the command complete event. (The caller of this routine is responsible for sending the Command Complete event up the HCI interface.) RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to pBapHCIResetFailedContactCounter is NULL VOS_STATUS_SUCCESS: Success SIDE EFFECTS ----------------------------------------------------------------------------*/ VOS_STATUS WLAN_BAPResetFailedContactCounter ( ptBtampHandle btampHandle, tBtampTLVHCI_Reset_Failed_Contact_Counter_Cmd *pBapHCIResetFailedContactCounter, tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */ /* Including Command Complete and Command Status*/ ) { return VOS_STATUS_SUCCESS; } /* WLAN_BAPResetFailedContactCounter */ /*---------------------------------------------------------------------------- FUNCTION WLAN_BAPReadLinkQuality() DESCRIPTION Implements the actual HCI Read Link Quality command. There is no need for a callback because when this call returns the action has been completed. DEPENDENCIES NA. PARAMETERS IN btampHandle: pointer to the BAP handle. Returned from WLANBAP_GetNewHndl. pBapHCIReadLinkQuality: pointer to the "HCI Read Link Quality" structure. pBapHCILinkQuality: pointer to return value for the "Link Quality" IN/OUT pBapHCIEvent: Return event value for the command complete event. (The caller of this routine is responsible for sending the Command Complete event up the HCI interface.) 
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to pBapHCIReadLinkQuality or pBapHCILinkQuality is NULL VOS_STATUS_SUCCESS: Success SIDE EFFECTS ----------------------------------------------------------------------------*/ VOS_STATUS WLAN_BAPReadLinkQuality ( ptBtampHandle btampHandle, tBtampTLVHCI_Read_Link_Quality_Cmd *pBapHCIReadLinkQuality, tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */ /* Including Command Complete and Command Status*/ ) { ptBtampContext btampContext = (ptBtampContext) btampHandle; v_U8_t phyLinkHandle; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: btampHandle value: %x", __FUNCTION__, btampHandle); /* Validate params */ if ((NULL == btampHandle) || (NULL == pBapHCIReadLinkQuality) || (NULL == pBapHCIEvent)) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "Invalid input parameters in %s", __FUNCTION__); return VOS_STATUS_E_FAULT; } /* Validate the physical link handle extracted from input parameter. This parameter has 2 bytes for physical handle (only lower byte valid) */ phyLinkHandle = (v_U8_t) pBapHCIReadLinkQuality->log_link_handle; pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_Link_Quality.log_link_handle = phyLinkHandle; pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_Link_Quality.link_quality = 0; if (phyLinkHandle != btampContext->phy_link_handle) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "Invalid Physical link handle in %s", __FUNCTION__); pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_Link_Quality.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; } else { /* Get the Link quality indication status from control block. 
Link quality value is being updated on the SME callback */ pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_Link_Quality.link_quality = btampContext->link_quality; pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_Link_Quality.status = WLANBAP_STATUS_SUCCESS; } /* Fill in the parameters for command complete event... */ pBapHCIEvent->bapHCIEventCode = BTAMP_TLV_HCI_COMMAND_COMPLETE_EVENT; pBapHCIEvent->u.btampCommandCompleteEvent.present = TRUE; pBapHCIEvent->u.btampCommandCompleteEvent.num_hci_command_packets = 1; pBapHCIEvent->u.btampCommandCompleteEvent.command_opcode = BTAMP_TLV_HCI_READ_LINK_QUALITY_CMD; return VOS_STATUS_SUCCESS; } /* WLAN_BAPReadLinkQuality */ /*---------------------------------------------------------------------------- FUNCTION WLAN_BAPReadRSSI() DESCRIPTION Implements the actual HCI Read RSSI command. There is no need for a callback because when this call returns the action has been completed. DEPENDENCIES NA. PARAMETERS IN btampHandle: pointer to the BAP handle. Returned from WLANBAP_GetNewHndl. pBapHCIReadRSSI: pointer to the "HCI Read RSSI" structure. pBapHCIRSSI: pointer to return value for the "RSSI". IN/OUT pBapHCIEvent: Return event value for the command complete event. (The caller of this routine is responsible for sending the Command Complete event up the HCI interface.) 
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to pBapHCIReadRSSI or pBapHCIRSSI is NULL VOS_STATUS_SUCCESS: Success SIDE EFFECTS ----------------------------------------------------------------------------*/ VOS_STATUS WLAN_BAPReadRSSI ( ptBtampHandle btampHandle, tBtampTLVHCI_Read_RSSI_Cmd *pBapHCIReadRSSI, tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */ /* Including Command Complete and Command Status*/ ) { VOS_STATUS vosStatus; ptBtampContext btampContext = (ptBtampContext) btampHandle; v_U8_t phyLinkHandle; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: btampHandle value: %x", __FUNCTION__, btampHandle); /* Validate params */ if ((NULL == btampHandle) || (NULL == pBapHCIReadRSSI) || (NULL == pBapHCIEvent)) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "Invalid input parameters in %s", __FUNCTION__); return VOS_STATUS_E_FAULT; } /* Validate the physical link handle extracted from input parameter. 
This parameter has 2 bytes for physical handle (only lower byte valid) */ phyLinkHandle = (v_U8_t) pBapHCIReadRSSI->log_link_handle; pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_RSSI.phy_link_handle = phyLinkHandle; pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_RSSI.rssi = 0; if (phyLinkHandle != btampContext->phy_link_handle) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "Invalid Physical link handle in %s", __FUNCTION__); pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_RSSI.status = WLANBAP_ERROR_INVALID_HCI_CMND_PARAM; } else { /* Get the RSSI value for this station (physical link) */ vosStatus = WLANTL_GetRssi(btampContext->pvosGCtx, btampContext->ucSTAId, &pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_RSSI.rssi); if (VOS_STATUS_SUCCESS == vosStatus) { /* GetRssi success, indicate the to upper layer */ pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_RSSI.status = WLANBAP_STATUS_SUCCESS; } else { /* API failed, indicate unspecified error to upper layer */ pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_RSSI.status = WLANBAP_ERROR_UNSPECIFIED_ERROR; } } /* Fill in the parameters for command complete event... */ pBapHCIEvent->bapHCIEventCode = BTAMP_TLV_HCI_COMMAND_COMPLETE_EVENT; pBapHCIEvent->u.btampCommandCompleteEvent.present = TRUE; pBapHCIEvent->u.btampCommandCompleteEvent.num_hci_command_packets = 1; pBapHCIEvent->u.btampCommandCompleteEvent.command_opcode = BTAMP_TLV_HCI_READ_RSSI_CMD; return VOS_STATUS_SUCCESS; } /* WLAN_BAPReadRSSI */ /*---------------------------------------------------------------------------- FUNCTION WLAN_BAPReadLocalAMPInfo() DESCRIPTION Implements the actual HCI Read Local AMP Information command. There is no need for a callback because when this call returns the action has been completed. DEPENDENCIES NA. PARAMETERS IN btampHandle: pointer to the BAP handle. Returned from WLANBAP_GetNewHndl. 
pBapHCIReadLocalAMPInfo: pointer to the "HCI Read Local AMP Info" Structure. IN/OUT pBapHCIEvent: Return event value for the command complete event. (The caller of this routine is responsible for sending the Command Complete event up the HCI interface.) RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to pBapHCIReadLocalAMPInfo or pBapHCILocalAMPInfo is NULL VOS_STATUS_SUCCESS: Success SIDE EFFECTS ----------------------------------------------------------------------------*/ VOS_STATUS WLAN_BAPReadLocalAMPInfo ( ptBtampHandle btampHandle, tBtampTLVHCI_Read_Local_AMP_Information_Cmd *pBapHCIReadLocalAMPInfo, tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */ /* Including Command Complete and Command Status*/ ) { /* Validate params */ if (btampHandle == NULL) { return VOS_STATUS_E_FAULT; } /* Validate params */ if (pBapHCIReadLocalAMPInfo == NULL) { return VOS_STATUS_E_FAULT; } VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: btampHandle value: %x", __FUNCTION__, btampHandle); /* Format the command complete event to return... 
*/ pBapHCIEvent->bapHCIEventCode = BTAMP_TLV_HCI_COMMAND_COMPLETE_EVENT; pBapHCIEvent->u.btampCommandCompleteEvent.present = 1; pBapHCIEvent->u.btampCommandCompleteEvent.num_hci_command_packets = 1; pBapHCIEvent->u.btampCommandCompleteEvent.command_opcode = BTAMP_TLV_HCI_READ_LOCAL_AMP_INFORMATION_CMD; pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_Local_AMP_Info.status = WLANBAP_STATUS_SUCCESS; pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_Local_AMP_Info.HC_AMP_Status = WLANBAP_HCI_AMP_STATUS_NOT_SHARED; pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_Local_AMP_Info.HC_Total_BW = 24000; pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_Local_AMP_Info.HC_Max_Guaranteed_BW = 12000; pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_Local_AMP_Info.HC_Min_Latency = 100; pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_Local_AMP_Info.HC_Max_PDU_Size = WLANBAP_MAX_80211_PAL_PDU_SIZE; pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_Local_AMP_Info.HC_Controller_Type = 1; #if 0 AMP Info PAL_Capabilities: Size: 2 Octets Value Parameter Description 0xXXXX Bit 0: "Service Type = Guaranteed" is not supported by PAL = 0 "Service Type = Guaranteed" is supported by PAL = 1 Bits 15-1: Reserved (shall be set to 0) (See EFS in Generic AMP FIPD [1]) #endif //0 pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_Local_AMP_Info.HC_PAL_Capabilities = 0x00; // was 0x03. Completely wrong. 
pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_Local_AMP_Info.HC_AMP_Assoc_Length = 248; //= 40; pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_Local_AMP_Info.HC_Max_Flush_Timeout = 10000; //10; pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_Local_AMP_Info.HC_BE_Flush_Timeout = 10000; //8; return VOS_STATUS_SUCCESS; } /* WLAN_BAPReadLocalAMPInfo */ /*---------------------------------------------------------------------------- FUNCTION WLAN_BAPReadLocalAMPAssoc() DESCRIPTION Implements the actual HCI Read Local AMP Assoc command. There is no need for a callback because when this call returns the action has been completed. DEPENDENCIES NA. PARAMETERS IN btampHandle: pointer to the BAP handle. Returned from WLANBAP_GetNewHndl. pBapHCIReadLocalAMPAssoc: pointer to the "HCI Read Local AMP Assoc" Structure. IN/OUT pBapHCIEvent: Return event value for the command complete event. (The caller of this routine is responsible for sending the Command Complete event up the HCI interface.) 
RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to pBapHCIReadLocalAMPAssoc (or pBapHCILocalAMPAssoc) is NULL VOS_STATUS_SUCCESS: Success SIDE EFFECTS ----------------------------------------------------------------------------*/ VOS_STATUS WLAN_BAPReadLocalAMPAssoc ( ptBtampHandle btampHandle, tBtampTLVHCI_Read_Local_AMP_Assoc_Cmd *pBapHCIReadLocalAMPAssoc, tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */ /* Including Command Complete and Command Status*/ ) { VOS_STATUS vosStatus; ptBtampContext btampContext = (ptBtampContext) btampHandle; /* btampContext value */ tHalHandle hHal; tBtampAMP_ASSOC btamp_ASSOC; v_U32_t nConsumed = 0; /*- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -*/ /* Validate params */ if ((pBapHCIReadLocalAMPAssoc == NULL) || (NULL == btampHandle)) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "param is NULL in %s", __FUNCTION__); return VOS_STATUS_E_FAULT; } hHal = VOS_GET_HAL_CB(btampContext->pvosGCtx); if (NULL == hHal) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_ERROR, "hHal is NULL in %s", __FUNCTION__); return VOS_STATUS_E_FAULT; } VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: btampHandle value: %x", __FUNCTION__, btampHandle); VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "In %s, phy_link_handle = %d", __FUNCTION__, pBapHCIReadLocalAMPAssoc->phy_link_handle); /* Update the MAC address and SSID if in case the Read Local AMP Assoc * Request is made before Create Physical Link creation. 
*/ WLANBAP_ReadMacConfig (btampContext); /* Fill in the contents of an AMP_Assoc structure in preparation * for Packing it into the AMP_assoc_fragment field of the Read * Local AMP Assoc Command Complete Event */ /* Return the local MAC address */ btamp_ASSOC.AMP_Assoc_MAC_Addr.present = 1; vos_mem_copy( btamp_ASSOC.AMP_Assoc_MAC_Addr.mac_addr, btampContext->self_mac_addr, sizeof(btampContext->self_mac_addr)); /*Save the local AMP assoc info*/ vos_mem_copy(btampContext->btamp_AMP_Assoc.HC_mac_addr, btampContext->self_mac_addr, sizeof(btampContext->self_mac_addr)); /* JEZ090303: This logic should return a single channel list with the */ /* selected channel, if we have one. */ //if (btampContext->channel) if (1) { /* Return the local Preferred Channel List */ /* Return both the Regulatory Info and one channel list */ btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.present = 1; memcpy (btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.country, "XXX", 3); /*Save the local AMP assoc info*/ vos_mem_copy(btampContext->btamp_AMP_Assoc.HC_pref_country, "XXX", 3); btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.num_triplets = 2; btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.triplets[0][0] = 201; btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.triplets[0][1] = 254; btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.triplets[0][2] = 0; if (( BT_INITIATOR == btampContext->BAPDeviceRole ) && ( 0 != btampContext->channel )) { btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.triplets[1][0] = btampContext->channel; btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.triplets[1][1] = 0x01; //we are AP - we start on their 1st preferred channel btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.triplets[1][2] = 0x11; } else { if (btampContext->config.ucPreferredChannel) { btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.triplets[1][0] = btampContext->config.ucPreferredChannel; btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.triplets[1][1] = 0x0B - btampContext->config.ucPreferredChannel + 1; } else { 
btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.triplets[1][0] = 0x01; btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.triplets[1][1] = 0x0B; //all channels for 1 to 11 } btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.triplets[1][2] = 0x11; } } else { /* Return the local Preferred Channel List */ /* Return only the Regulatory Info */ btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.present = 1; memcpy (btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.country, "XXX", 3); btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.num_triplets = 1; btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.triplets[0][0] = 201; btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.triplets[0][1] = 254; btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.triplets[0][2] = 0; } /*Save the local AMP assoc info*/ btampContext->btamp_AMP_Assoc.HC_pref_num_triplets = btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.num_triplets; btampContext->btamp_AMP_Assoc.HC_pref_triplets[0][0] = btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.triplets[0][0]; btampContext->btamp_AMP_Assoc.HC_pref_triplets[0][1] = btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.triplets[0][1]; btampContext->btamp_AMP_Assoc.HC_pref_triplets[0][2] = btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.triplets[0][2]; btampContext->btamp_AMP_Assoc.HC_pref_triplets[1][0] = btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.triplets[1][0]; btampContext->btamp_AMP_Assoc.HC_pref_triplets[1][1] = btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.triplets[1][1]; btampContext->btamp_AMP_Assoc.HC_pref_triplets[1][2] = btamp_ASSOC.AMP_Assoc_Preferred_Channel_List.triplets[1][2]; /* Also, at this point, lie and tell the other side we are connected on */ /* the one channel we support. I hope this convinces the peer as BT-AMP AP */ /* We really want him to use our channel. 
Since we only support one.*/ /* Return the local Connected Channel */ btamp_ASSOC.AMP_Assoc_Connected_Channel.present = 1; memcpy (btamp_ASSOC.AMP_Assoc_Connected_Channel.country, "XXX", 3); btamp_ASSOC.AMP_Assoc_Connected_Channel.num_triplets = 2; btamp_ASSOC.AMP_Assoc_Connected_Channel.triplets[0][0] = 201; btamp_ASSOC.AMP_Assoc_Connected_Channel.triplets[0][1] = 254; btamp_ASSOC.AMP_Assoc_Connected_Channel.triplets[0][2] = 0; //btamp_ASSOC.AMP_Assoc_Connected_Channel.triplets[1][0] = 0x01; btamp_ASSOC.AMP_Assoc_Connected_Channel.triplets[1][0] = (0 != btampContext->channel)?btampContext->channel:0x01; btamp_ASSOC.AMP_Assoc_Connected_Channel.triplets[1][1] = 0x01; btamp_ASSOC.AMP_Assoc_Connected_Channel.triplets[1][2] = 0x11; /* Return the local PAL Capabilities */ btamp_ASSOC.AMP_Assoc_PAL_Capabilities.present = 1; #if 0 AMP ASSOC Pal Capabilities: Size: 4 Octets Value Description 4 TypeID for 802.11 PAL Capabilities 4 Length 0xXXXXXXXX Bit 0: 0 signifies the PAL is not capable of utilizing received Activity Reports 1 signifies the PAL is capable of utilizing received Activity Reports Bit 1: 0 signifies the PAL is not capable of utilizing scheduling information sent in an Activity Report 1 signifies the PAL is capable of utilizing scheduling information sent in an Activity Report Bits 2..31 Reserved #endif //0 btamp_ASSOC.AMP_Assoc_PAL_Capabilities.pal_capabilities // = btampContext->btamp_Remote_AMP_Assoc.HC_pal_capabilities; //= 0x03; = 0x00; /* Return the local PAL Version */ btamp_ASSOC.AMP_Assoc_PAL_Version.present = 1; /* Return the version and company ID data */ btamp_ASSOC.AMP_Assoc_PAL_Version.pal_version = WLANBAP_PAL_VERSION; btamp_ASSOC.AMP_Assoc_PAL_Version.pal_CompanyID = WLANBAP_QUALCOMM_COMPANY_ID; // Qualcomm Company ID btamp_ASSOC.AMP_Assoc_PAL_Version.pal_subversion = WLANBAP_PAL_SUBVERSION; //Pack the AMP Assoc structure vosStatus = btampPackAMP_ASSOC( hHal, &btamp_ASSOC, 
pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_Read_Local_AMP_Assoc.AMP_assoc_fragment, 248, &nConsumed); VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: nConsumed value: %d", __FUNCTION__, nConsumed); /* Format the command complete event to return... */ pBapHCIEvent->bapHCIEventCode = BTAMP_TLV_HCI_COMMAND_COMPLETE_EVENT; pBapHCIEvent->u.btampCommandCompleteEvent.present = 1; pBapHCIEvent->u.btampCommandCompleteEvent.num_hci_command_packets = 1; pBapHCIEvent->u.btampCommandCompleteEvent.command_opcode = BTAMP_TLV_HCI_READ_LOCAL_AMP_ASSOC_CMD; /*Validate the Physical handle*/ if(pBapHCIReadLocalAMPAssoc->phy_link_handle != btampContext->phy_link_handle) { VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: Wrong Physical Link handle in Read Local AMP Assoc cmd: current: %x, new: %x", __FUNCTION__, btampContext->phy_link_handle, pBapHCIReadLocalAMPAssoc->phy_link_handle); pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_Read_Local_AMP_Assoc.status = WLANBAP_ERROR_NO_CNCT; } else pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_Read_Local_AMP_Assoc.status = WLANBAP_STATUS_SUCCESS; pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_Read_Local_AMP_Assoc.phy_link_handle = pBapHCIReadLocalAMPAssoc->phy_link_handle; /* We will fit in one fragment, so remaining is exactly equal to encoded size*/ pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Read_Read_Local_AMP_Assoc.remaining_length = nConsumed; return VOS_STATUS_SUCCESS; } /* WLAN_BAPReadLocalAMPAssoc */ /*---------------------------------------------------------------------------- FUNCTION WLAN_BAPWriteRemoteAMPAssoc() DESCRIPTION Implements the actual HCI Write Remote AMP Assoc command. There is no need for a callback because when this call returns the action has been completed. DEPENDENCIES NA. PARAMETERS IN btampHandle: pointer to the BAP handle. Returned from WLANBAP_GetNewHndl. pBapHCIWriteRemoteAMPAssoc: pointer to the "HCI Write Remote AMP Assoc" Structure. 
IN/OUT pBapHCIEvent: Return event value for the command complete event. (The caller of this routine is responsible for sending the Command Complete event up the HCI interface.) RETURN VALUE The result code associated with performing the operation VOS_STATUS_E_FAULT: pointer to pBapHCIWriteRemoteAMPAssoc is NULL VOS_STATUS_SUCCESS: Success SIDE EFFECTS ----------------------------------------------------------------------------*/ VOS_STATUS WLAN_BAPWriteRemoteAMPAssoc ( ptBtampHandle btampHandle, tBtampTLVHCI_Write_Remote_AMP_ASSOC_Cmd *pBapHCIWriteRemoteAMPAssoc, tpBtampHCI_Event pBapHCIEvent /* This now encodes ALL event types */ /* Including Command Complete and Command Status*/ ) { tWLAN_BAPEvent bapEvent; /* State machine event */ VOS_STATUS vosStatus; tBtampHCI_Event bapHCIEvent; /* I am using btampContext, instead of pBapPhysLinkMachine */ //tWLAN_BAPbapPhysLinkMachine *pBapPhysLinkMachine; ptBtampContext btampContext = (ptBtampContext) btampHandle; /* btampContext value */ v_U8_t status; /* return the BT-AMP status here */ /* Validate params */ if (pBapHCIWriteRemoteAMPAssoc == NULL) { return VOS_STATUS_E_FAULT; } VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: btampHandle value: %x", __FUNCTION__, btampHandle); /* Fill in the event structure */ bapEvent.event = eWLAN_BAP_HCI_WRITE_REMOTE_AMP_ASSOC; bapEvent.params = pBapHCIWriteRemoteAMPAssoc; VOS_TRACE( VOS_MODULE_ID_BAP, VOS_TRACE_LEVEL_INFO_HIGH, "%s: btampContext value: %x", __FUNCTION__, btampContext); /* Handle event */ vosStatus = btampFsm(btampContext, &bapEvent, &status); /* Format the command complete event to return... 
*/ pBapHCIEvent->bapHCIEventCode = BTAMP_TLV_HCI_COMMAND_COMPLETE_EVENT; pBapHCIEvent->u.btampCommandCompleteEvent.present = 1; pBapHCIEvent->u.btampCommandCompleteEvent.num_hci_command_packets = 1; pBapHCIEvent->u.btampCommandCompleteEvent.command_opcode = BTAMP_TLV_HCI_WRITE_REMOTE_AMP_ASSOC_CMD; pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Write_Remote_AMP_Assoc.status = status; pBapHCIEvent->u.btampCommandCompleteEvent.cc_event.Write_Remote_AMP_Assoc.phy_link_handle = pBapHCIWriteRemoteAMPAssoc->phy_link_handle; if(WLANBAP_ERROR_NO_SUITABLE_CHANNEL == status) { /* Format the Physical Link Complete event to return... */ bapHCIEvent.bapHCIEventCode = BTAMP_TLV_HCI_PHYSICAL_LINK_COMPLETE_EVENT; bapHCIEvent.u.btampPhysicalLinkCompleteEvent.present = 1; bapHCIEvent.u.btampPhysicalLinkCompleteEvent.status = status; bapHCIEvent.u.btampPhysicalLinkCompleteEvent.phy_link_handle = btampContext->phy_link_handle; bapHCIEvent.u.btampPhysicalLinkCompleteEvent.ch_number = 0; vosStatus = (*btampContext->pBapHCIEventCB) ( btampContext->pHddHdl, /* this refers the BSL per application context */ &bapHCIEvent, /* This now encodes ALL event types */ VOS_TRUE /* Flag to indicate assoc-specific event */ ); } /* ... */ return VOS_STATUS_SUCCESS; } /* WLAN_BAPWriteRemoteAMPAssoc */
gpl-2.0
cminyard/linux-live-app-coredump
drivers/gpio/gpio-uniphier.c
335
13586
// SPDX-License-Identifier: GPL-2.0
//
// Copyright (C) 2017 Socionext Inc.
//   Author: Masahiro Yamada <yamada.masahiro@socionext.com>

#include <linux/bits.h>
#include <linux/gpio/driver.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>

#include <dt-bindings/gpio/uniphier-gpio.h>

#define UNIPHIER_GPIO_IRQ_MAX_NUM	24

#define UNIPHIER_GPIO_PORT_DATA		0x0	/* data */
#define UNIPHIER_GPIO_PORT_DIR		0x4	/* direction (1:in, 0:out) */
#define UNIPHIER_GPIO_IRQ_EN		0x90	/* irq enable */
#define UNIPHIER_GPIO_IRQ_MODE		0x94	/* irq mode (1: both edge) */
#define UNIPHIER_GPIO_IRQ_FLT_EN	0x98	/* noise filter enable */
#define UNIPHIER_GPIO_IRQ_FLT_CYC	0x9c	/* noise filter clock cycle */

struct uniphier_gpio_priv {
	struct gpio_chip chip;
	struct irq_chip irq_chip;
	struct irq_domain *domain;
	void __iomem *regs;
	spinlock_t lock;	/* protects read-modify-write of the registers */
	u32 saved_vals[];	/* register snapshot for suspend/resume */
};

/* Map a bank index to its register block base offset. */
static unsigned int uniphier_gpio_bank_to_reg(unsigned int bank)
{
	unsigned int reg;

	reg = (bank + 1) * 8;

	/*
	 * Unfortunately, the GPIO port registers are not contiguous because
	 * offset 0x90-0x9f is used for IRQ.  Add 0x10 when crossing the region.
	 */
	if (reg >= UNIPHIER_GPIO_IRQ_EN)
		reg += 0x10;

	return reg;
}

/* Split a flat GPIO offset into its bank index and in-bank bit mask. */
static void uniphier_gpio_get_bank_and_mask(unsigned int offset,
					    unsigned int *bank, u32 *mask)
{
	*bank = offset / UNIPHIER_GPIO_LINES_PER_BANK;
	*mask = BIT(offset % UNIPHIER_GPIO_LINES_PER_BANK);
}

/* Read-modify-write of a register, serialized by the driver spinlock. */
static void uniphier_gpio_reg_update(struct uniphier_gpio_priv *priv,
				     unsigned int reg, u32 mask, u32 val)
{
	unsigned long flags;
	u32 tmp;

	spin_lock_irqsave(&priv->lock, flags);
	tmp = readl(priv->regs + reg);
	tmp &= ~mask;
	tmp |= mask & val;
	writel(tmp, priv->regs + reg);
	spin_unlock_irqrestore(&priv->lock, flags);
}

/* Update the masked bits of one per-bank register (no-op for empty mask). */
static void uniphier_gpio_bank_write(struct gpio_chip *chip, unsigned int bank,
				     unsigned int reg, u32 mask, u32 val)
{
	struct uniphier_gpio_priv *priv = gpiochip_get_data(chip);

	if (!mask)
		return;

	uniphier_gpio_reg_update(priv, uniphier_gpio_bank_to_reg(bank) + reg,
				 mask, val);
}

/* Write a single line's bit in the given per-bank register. */
static void uniphier_gpio_offset_write(struct gpio_chip *chip,
				       unsigned int offset, unsigned int reg,
				       int val)
{
	unsigned int bank;
	u32 mask;

	uniphier_gpio_get_bank_and_mask(offset, &bank, &mask);

	uniphier_gpio_bank_write(chip, bank, reg, mask, val ? mask : 0);
}

/* Read a single line's bit from the given per-bank register (0 or 1). */
static int uniphier_gpio_offset_read(struct gpio_chip *chip,
				     unsigned int offset, unsigned int reg)
{
	struct uniphier_gpio_priv *priv = gpiochip_get_data(chip);
	unsigned int bank, reg_offset;
	u32 mask;

	uniphier_gpio_get_bank_and_mask(offset, &bank, &mask);
	reg_offset = uniphier_gpio_bank_to_reg(bank) + reg;

	return !!(readl(priv->regs + reg_offset) & mask);
}

/* gpio_chip::get_direction: DIR register bit 1 means input, 0 means output. */
static int uniphier_gpio_get_direction(struct gpio_chip *chip,
				       unsigned int offset)
{
	if (uniphier_gpio_offset_read(chip, offset, UNIPHIER_GPIO_PORT_DIR))
		return GPIO_LINE_DIRECTION_IN;

	return GPIO_LINE_DIRECTION_OUT;
}

static int uniphier_gpio_direction_input(struct gpio_chip *chip,
					 unsigned int offset)
{
	uniphier_gpio_offset_write(chip, offset, UNIPHIER_GPIO_PORT_DIR, 1);

	return 0;
}

/* Set the output value first, then switch the line to output mode. */
static int uniphier_gpio_direction_output(struct gpio_chip *chip,
					  unsigned int offset, int val)
{
	uniphier_gpio_offset_write(chip, offset, UNIPHIER_GPIO_PORT_DATA, val);
	uniphier_gpio_offset_write(chip, offset, UNIPHIER_GPIO_PORT_DIR, 0);

	return 0;
}

static int uniphier_gpio_get(struct gpio_chip *chip, unsigned int offset)
{
	return uniphier_gpio_offset_read(chip, offset, UNIPHIER_GPIO_PORT_DATA);
}

static void uniphier_gpio_set(struct gpio_chip *chip,
			      unsigned int offset, int val)
{
	uniphier_gpio_offset_write(chip, offset, UNIPHIER_GPIO_PORT_DATA, val);
}

/* gpio_chip::set_multiple: apply the bitmaps bank by bank (8 lines each). */
static void uniphier_gpio_set_multiple(struct gpio_chip *chip,
				       unsigned long *mask,
				       unsigned long *bits)
{
	unsigned long i, bank, bank_mask, bank_bits;

	for_each_set_clump8(i, bank_mask, mask, chip->ngpio) {
		bank = i / UNIPHIER_GPIO_LINES_PER_BANK;
		bank_bits = bitmap_get_value8(bits, i);

		uniphier_gpio_bank_write(chip, bank, UNIPHIER_GPIO_PORT_DATA,
					 bank_mask, bank_bits);
	}
}

/*
 * gpio_chip::to_irq: only lines at or above UNIPHIER_GPIO_IRQ_OFFSET are
 * irq-capable; map them through this driver's hierarchical irq domain.
 */
static int uniphier_gpio_to_irq(struct gpio_chip *chip, unsigned int offset)
{
	struct irq_fwspec fwspec;

	if (offset < UNIPHIER_GPIO_IRQ_OFFSET)
		return -ENXIO;

	fwspec.fwnode = of_node_to_fwnode(chip->parent->of_node);
	fwspec.param_count = 2;
	fwspec.param[0] = offset - UNIPHIER_GPIO_IRQ_OFFSET;
	/*
	 * IRQ_TYPE_NONE is rejected by the parent irq domain. Set LEVEL_HIGH
	 * temporarily. Anyway, ->irq_set_type() will override it later.
	 */
	fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;

	return irq_create_fwspec_mapping(&fwspec);
}

/* Clear the local enable bit, then mask at the parent irqchip. */
static void uniphier_gpio_irq_mask(struct irq_data *data)
{
	struct uniphier_gpio_priv *priv = irq_data_get_irq_chip_data(data);
	u32 mask = BIT(irqd_to_hwirq(data));

	uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, 0);

	irq_chip_mask_parent(data);
}

/* Set the local enable bit, then unmask at the parent irqchip. */
static void uniphier_gpio_irq_unmask(struct irq_data *data)
{
	struct uniphier_gpio_priv *priv = irq_data_get_irq_chip_data(data);
	u32 mask = BIT(irqd_to_hwirq(data));

	uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_EN, mask, mask);

	irq_chip_unmask_parent(data);
}

/*
 * Both-edge detection is implemented locally (IRQ_MODE bit = 1); the parent
 * is then told to use plain falling-edge.
 */
static int uniphier_gpio_irq_set_type(struct irq_data *data, unsigned int type)
{
	struct uniphier_gpio_priv *priv = irq_data_get_irq_chip_data(data);
	u32 mask = BIT(irqd_to_hwirq(data));
	u32 val = 0;

	if (type == IRQ_TYPE_EDGE_BOTH) {
		val = mask;
		type = IRQ_TYPE_EDGE_FALLING;
	}

	uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_MODE, mask, val);

	/* To enable both edge detection, the noise filter must be enabled. */
	uniphier_gpio_reg_update(priv, UNIPHIER_GPIO_IRQ_FLT_EN, mask, val);

	return irq_chip_set_type_parent(data, type);
}

/*
 * Translate a local hwirq to the parent's hwirq number using the
 * "socionext,interrupt-ranges" DT property (triplets of
 * <local-base parent-base size>).
 */
static int uniphier_gpio_irq_get_parent_hwirq(struct uniphier_gpio_priv *priv,
					      unsigned int hwirq)
{
	struct device_node *np = priv->chip.parent->of_node;
	const __be32 *range;
	u32 base, parent_base, size;
	int len;

	range = of_get_property(np, "socionext,interrupt-ranges", &len);
	if (!range)
		return -EINVAL;

	len /= sizeof(*range);

	for (; len >= 3; len -= 3) {
		base = be32_to_cpu(*range++);
		parent_base = be32_to_cpu(*range++);
		size = be32_to_cpu(*range++);

		if (base <= hwirq && hwirq < base + size)
			return hwirq - base + parent_base;
	}

	return -ENOENT;
}

/* irq_domain_ops::translate for a two-cell fwspec: <hwirq type>. */
static int uniphier_gpio_irq_domain_translate(struct irq_domain *domain,
					      struct irq_fwspec *fwspec,
					      unsigned long *out_hwirq,
					      unsigned int *out_type)
{
	if (WARN_ON(fwspec->param_count < 2))
		return -EINVAL;

	*out_hwirq = fwspec->param[0];
	*out_type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK;

	return 0;
}

/* irq_domain_ops::alloc: install this irq_chip, then allocate in the parent. */
static int uniphier_gpio_irq_domain_alloc(struct irq_domain *domain,
					  unsigned int virq,
					  unsigned int nr_irqs, void *arg)
{
	struct uniphier_gpio_priv *priv = domain->host_data;
	struct irq_fwspec parent_fwspec;
	irq_hw_number_t hwirq;
	unsigned int type;
	int ret;

	if (WARN_ON(nr_irqs != 1))
		return -EINVAL;

	ret = uniphier_gpio_irq_domain_translate(domain, arg, &hwirq, &type);
	if (ret)
		return ret;

	ret = uniphier_gpio_irq_get_parent_hwirq(priv, hwirq);
	if (ret < 0)
		return ret;

	/* parent is UniPhier AIDET */
	parent_fwspec.fwnode = domain->parent->fwnode;
	parent_fwspec.param_count = 2;
	parent_fwspec.param[0] = ret;
	/* both-edge is emulated locally; the parent only sees falling-edge */
	parent_fwspec.param[1] = (type == IRQ_TYPE_EDGE_BOTH) ?
						IRQ_TYPE_EDGE_FALLING : type;

	ret = irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
					    &priv->irq_chip, priv);
	if (ret)
		return ret;

	return irq_domain_alloc_irqs_parent(domain, virq, 1, &parent_fwspec);
}

/* Reserve the corresponding GPIO line while it is in use as an irq. */
static int uniphier_gpio_irq_domain_activate(struct irq_domain *domain,
					     struct irq_data *data, bool early)
{
	struct uniphier_gpio_priv *priv = domain->host_data;
	struct gpio_chip *chip = &priv->chip;

	return gpiochip_lock_as_irq(chip,
			irqd_to_hwirq(data) + UNIPHIER_GPIO_IRQ_OFFSET);
}

static void uniphier_gpio_irq_domain_deactivate(struct irq_domain *domain,
						struct irq_data *data)
{
	struct uniphier_gpio_priv *priv = domain->host_data;
	struct gpio_chip *chip = &priv->chip;

	gpiochip_unlock_as_irq(chip,
			irqd_to_hwirq(data) + UNIPHIER_GPIO_IRQ_OFFSET);
}

static const struct irq_domain_ops uniphier_gpio_irq_domain_ops = {
	.alloc = uniphier_gpio_irq_domain_alloc,
	.free = irq_domain_free_irqs_common,
	.activate = uniphier_gpio_irq_domain_activate,
	.deactivate = uniphier_gpio_irq_domain_deactivate,
	.translate = uniphier_gpio_irq_domain_translate,
};

/* One-time hardware setup; also re-run on resume. */
static void uniphier_gpio_hw_init(struct uniphier_gpio_priv *priv)
{
	/*
	 * Due to the hardware design, the noise filter must be enabled to
	 * detect both edge interrupts.  This filter is intended to remove the
	 * noise from the irq lines.  It does not work for GPIO input, so GPIO
	 * debounce is not supported.  Unfortunately, the filter period is
	 * shared among all irq lines.  Just choose a sensible period here.
	 */
	writel(0xff, priv->regs + UNIPHIER_GPIO_IRQ_FLT_CYC);
}

static unsigned int uniphier_gpio_get_nbanks(unsigned int ngpio)
{
	return DIV_ROUND_UP(ngpio, UNIPHIER_GPIO_LINES_PER_BANK);
}

static int uniphier_gpio_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct device_node *parent_np;
	struct irq_domain *parent_domain;
	struct uniphier_gpio_priv *priv;
	struct gpio_chip *chip;
	struct irq_chip *irq_chip;
	unsigned int nregs;
	u32 ngpios;
	int ret;

	parent_np = of_irq_find_parent(dev->of_node);
	if (!parent_np)
		return -ENXIO;

	parent_domain = irq_find_host(parent_np);
	of_node_put(parent_np);
	if (!parent_domain)
		return -EPROBE_DEFER;

	ret = of_property_read_u32(dev->of_node, "ngpios", &ngpios);
	if (ret)
		return ret;

	/* 2 saved regs (DATA/DIR) per bank + 3 irq regs (EN/MODE/FLT_EN) */
	nregs = uniphier_gpio_get_nbanks(ngpios) * 2 + 3;
	priv = devm_kzalloc(dev, struct_size(priv, saved_vals, nregs),
			    GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	spin_lock_init(&priv->lock);

	chip = &priv->chip;
	chip->label = dev_name(dev);
	chip->parent = dev;
	chip->request = gpiochip_generic_request;
	chip->free = gpiochip_generic_free;
	chip->get_direction = uniphier_gpio_get_direction;
	chip->direction_input = uniphier_gpio_direction_input;
	chip->direction_output = uniphier_gpio_direction_output;
	chip->get = uniphier_gpio_get;
	chip->set = uniphier_gpio_set;
	chip->set_multiple = uniphier_gpio_set_multiple;
	chip->to_irq = uniphier_gpio_to_irq;
	chip->base = -1;
	chip->ngpio = ngpios;

	irq_chip = &priv->irq_chip;
	irq_chip->name = dev_name(dev);
	irq_chip->irq_mask = uniphier_gpio_irq_mask;
	irq_chip->irq_unmask = uniphier_gpio_irq_unmask;
	irq_chip->irq_eoi = irq_chip_eoi_parent;
	irq_chip->irq_set_affinity = irq_chip_set_affinity_parent;
	irq_chip->irq_set_type = uniphier_gpio_irq_set_type;

	uniphier_gpio_hw_init(priv);

	ret = devm_gpiochip_add_data(dev, chip, priv);
	if (ret)
		return ret;

	priv->domain = irq_domain_create_hierarchy(
					parent_domain, 0,
					UNIPHIER_GPIO_IRQ_MAX_NUM,
					of_node_to_fwnode(dev->of_node),
					&uniphier_gpio_irq_domain_ops, priv);
	if (!priv->domain)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);

	return 0;
}

static int uniphier_gpio_remove(struct platform_device *pdev)
{
	struct uniphier_gpio_priv *priv = platform_get_drvdata(pdev);

	irq_domain_remove(priv->domain);

	return 0;
}

/* Save DATA/DIR for every bank plus the three irq registers into saved_vals. */
static int __maybe_unused uniphier_gpio_suspend(struct device *dev)
{
	struct uniphier_gpio_priv *priv = dev_get_drvdata(dev);
	unsigned int nbanks = uniphier_gpio_get_nbanks(priv->chip.ngpio);
	u32 *val = priv->saved_vals;
	unsigned int reg;
	int i;

	for (i = 0; i < nbanks; i++) {
		reg = uniphier_gpio_bank_to_reg(i);

		*val++ = readl(priv->regs + reg + UNIPHIER_GPIO_PORT_DATA);
		*val++ = readl(priv->regs + reg + UNIPHIER_GPIO_PORT_DIR);
	}

	*val++ = readl(priv->regs + UNIPHIER_GPIO_IRQ_EN);
	*val++ = readl(priv->regs + UNIPHIER_GPIO_IRQ_MODE);
	*val++ = readl(priv->regs + UNIPHIER_GPIO_IRQ_FLT_EN);

	return 0;
}

/* Restore registers in the same order suspend saved them, then re-init hw. */
static int __maybe_unused uniphier_gpio_resume(struct device *dev)
{
	struct uniphier_gpio_priv *priv = dev_get_drvdata(dev);
	unsigned int nbanks = uniphier_gpio_get_nbanks(priv->chip.ngpio);
	const u32 *val = priv->saved_vals;
	unsigned int reg;
	int i;

	for (i = 0; i < nbanks; i++) {
		reg = uniphier_gpio_bank_to_reg(i);

		writel(*val++, priv->regs + reg + UNIPHIER_GPIO_PORT_DATA);
		writel(*val++, priv->regs + reg + UNIPHIER_GPIO_PORT_DIR);
	}

	writel(*val++, priv->regs + UNIPHIER_GPIO_IRQ_EN);
	writel(*val++, priv->regs + UNIPHIER_GPIO_IRQ_MODE);
	writel(*val++, priv->regs + UNIPHIER_GPIO_IRQ_FLT_EN);

	uniphier_gpio_hw_init(priv);

	return 0;
}

static const struct dev_pm_ops uniphier_gpio_pm_ops = {
	SET_LATE_SYSTEM_SLEEP_PM_OPS(uniphier_gpio_suspend,
				     uniphier_gpio_resume)
};

static const struct of_device_id uniphier_gpio_match[] = {
	{ .compatible = "socionext,uniphier-gpio" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, uniphier_gpio_match);

static struct platform_driver uniphier_gpio_driver = {
	.probe = uniphier_gpio_probe,
	.remove = uniphier_gpio_remove,
	.driver = {
		.name = "uniphier-gpio",
		.of_match_table = uniphier_gpio_match,
		.pm = &uniphier_gpio_pm_ops,
	},
};
module_platform_driver(uniphier_gpio_driver);

MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>");
MODULE_DESCRIPTION("UniPhier GPIO driver");
MODULE_LICENSE("GPL v2");
gpl-2.0
TheTypoMaster/linux_kernel_2.6.32.67
arch/ia64/sn/kernel/sn2/sn_hwperf.c
591
23257
/* * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. * * Copyright (C) 2004-2006 Silicon Graphics, Inc. All rights reserved. * * SGI Altix topology and hardware performance monitoring API. * Mark Goodwin <markgw@sgi.com>. * * Creates /proc/sgi_sn/sn_topology (read-only) to export * info about Altix nodes, routers, CPUs and NumaLink * interconnection/topology. * * Also creates a dynamic misc device named "sn_hwperf" * that supports an ioctl interface to call down into SAL * to discover hw objects, topology and to read/write * memory mapped registers, e.g. for performance monitoring. * The "sn_hwperf" device is registered only after the procfs * file is first opened, i.e. only if/when it's needed. * * This API is used by SGI Performance Co-Pilot and other * tools, see http://oss.sgi.com/projects/pcp */ #include <linux/fs.h> #include <linux/slab.h> #include <linux/vmalloc.h> #include <linux/seq_file.h> #include <linux/miscdevice.h> #include <linux/utsname.h> #include <linux/cpumask.h> #include <linux/smp_lock.h> #include <linux/nodemask.h> #include <linux/smp.h> #include <linux/mutex.h> #include <asm/processor.h> #include <asm/topology.h> #include <asm/uaccess.h> #include <asm/sal.h> #include <asm/sn/io.h> #include <asm/sn/sn_sal.h> #include <asm/sn/module.h> #include <asm/sn/geo.h> #include <asm/sn/sn2/sn_hwperf.h> #include <asm/sn/addrs.h> static void *sn_hwperf_salheap = NULL; static int sn_hwperf_obj_cnt = 0; static nasid_t sn_hwperf_master_nasid = INVALID_NASID; static int sn_hwperf_init(void); static DEFINE_MUTEX(sn_hwperf_init_mutex); #define cnode_possible(n) ((n) < num_cnodes) static int sn_hwperf_enum_objects(int *nobj, struct sn_hwperf_object_info **ret) { int e; u64 sz; struct sn_hwperf_object_info *objbuf = NULL; if ((e = sn_hwperf_init()) < 0) { printk(KERN_ERR "sn_hwperf_init failed: err %d\n", e); goto out; } sz = sn_hwperf_obj_cnt * 
sizeof(struct sn_hwperf_object_info); objbuf = vmalloc(sz); if (objbuf == NULL) { printk("sn_hwperf_enum_objects: vmalloc(%d) failed\n", (int)sz); e = -ENOMEM; goto out; } e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, SN_HWPERF_ENUM_OBJECTS, 0, sz, (u64) objbuf, 0, 0, NULL); if (e != SN_HWPERF_OP_OK) { e = -EINVAL; vfree(objbuf); } out: *nobj = sn_hwperf_obj_cnt; *ret = objbuf; return e; } static int sn_hwperf_location_to_bpos(char *location, int *rack, int *bay, int *slot, int *slab) { char type; /* first scan for an old style geoid string */ if (sscanf(location, "%03d%c%02d#%d", rack, &type, bay, slab) == 4) *slot = 0; else /* scan for a new bladed geoid string */ if (sscanf(location, "%03d%c%02d^%02d#%d", rack, &type, bay, slot, slab) != 5) return -1; /* success */ return 0; } static int sn_hwperf_geoid_to_cnode(char *location) { int cnode; geoid_t geoid; moduleid_t module_id; int rack, bay, slot, slab; int this_rack, this_bay, this_slot, this_slab; if (sn_hwperf_location_to_bpos(location, &rack, &bay, &slot, &slab)) return -1; /* * FIXME: replace with cleaner for_each_XXX macro which addresses * both compute and IO nodes once ACPI3.0 is available. */ for (cnode = 0; cnode < num_cnodes; cnode++) { geoid = cnodeid_get_geoid(cnode); module_id = geo_module(geoid); this_rack = MODULE_GET_RACK(module_id); this_bay = MODULE_GET_BPOS(module_id); this_slot = geo_slot(geoid); this_slab = geo_slab(geoid); if (rack == this_rack && bay == this_bay && slot == this_slot && slab == this_slab) { break; } } return cnode_possible(cnode) ? 
cnode : -1; } static int sn_hwperf_obj_to_cnode(struct sn_hwperf_object_info * obj) { if (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj)) BUG(); if (SN_HWPERF_FOREIGN(obj)) return -1; return sn_hwperf_geoid_to_cnode(obj->location); } static int sn_hwperf_generic_ordinal(struct sn_hwperf_object_info *obj, struct sn_hwperf_object_info *objs) { int ordinal; struct sn_hwperf_object_info *p; for (ordinal=0, p=objs; p != obj; p++) { if (SN_HWPERF_FOREIGN(p)) continue; if (SN_HWPERF_SAME_OBJTYPE(p, obj)) ordinal++; } return ordinal; } static const char *slabname_node = "node"; /* SHub asic */ static const char *slabname_ionode = "ionode"; /* TIO asic */ static const char *slabname_router = "router"; /* NL3R or NL4R */ static const char *slabname_other = "other"; /* unknown asic */ static const char *sn_hwperf_get_slabname(struct sn_hwperf_object_info *obj, struct sn_hwperf_object_info *objs, int *ordinal) { int isnode; const char *slabname = slabname_other; if ((isnode = SN_HWPERF_IS_NODE(obj)) || SN_HWPERF_IS_IONODE(obj)) { slabname = isnode ? 
slabname_node : slabname_ionode; *ordinal = sn_hwperf_obj_to_cnode(obj); } else { *ordinal = sn_hwperf_generic_ordinal(obj, objs); if (SN_HWPERF_IS_ROUTER(obj)) slabname = slabname_router; } return slabname; } static void print_pci_topology(struct seq_file *s) { char *p; size_t sz; int e; for (sz = PAGE_SIZE; sz < 16 * PAGE_SIZE; sz += PAGE_SIZE) { if (!(p = kmalloc(sz, GFP_KERNEL))) break; e = ia64_sn_ioif_get_pci_topology(__pa(p), sz); if (e == SALRET_OK) seq_puts(s, p); kfree(p); if (e == SALRET_OK || e == SALRET_NOT_IMPLEMENTED) break; } } static inline int sn_hwperf_has_cpus(cnodeid_t node) { return node < MAX_NUMNODES && node_online(node) && nr_cpus_node(node); } static inline int sn_hwperf_has_mem(cnodeid_t node) { return node < MAX_NUMNODES && node_online(node) && NODE_DATA(node)->node_present_pages; } static struct sn_hwperf_object_info * sn_hwperf_findobj_id(struct sn_hwperf_object_info *objbuf, int nobj, int id) { int i; struct sn_hwperf_object_info *p = objbuf; for (i=0; i < nobj; i++, p++) { if (p->id == id) return p; } return NULL; } static int sn_hwperf_get_nearest_node_objdata(struct sn_hwperf_object_info *objbuf, int nobj, cnodeid_t node, cnodeid_t *near_mem_node, cnodeid_t *near_cpu_node) { int e; struct sn_hwperf_object_info *nodeobj = NULL; struct sn_hwperf_object_info *op; struct sn_hwperf_object_info *dest; struct sn_hwperf_object_info *router; struct sn_hwperf_port_info ptdata[16]; int sz, i, j; cnodeid_t c; int found_mem = 0; int found_cpu = 0; if (!cnode_possible(node)) return -EINVAL; if (sn_hwperf_has_cpus(node)) { if (near_cpu_node) *near_cpu_node = node; found_cpu++; } if (sn_hwperf_has_mem(node)) { if (near_mem_node) *near_mem_node = node; found_mem++; } if (found_cpu && found_mem) return 0; /* trivially successful */ /* find the argument node object */ for (i=0, op=objbuf; i < nobj; i++, op++) { if (!SN_HWPERF_IS_NODE(op) && !SN_HWPERF_IS_IONODE(op)) continue; if (node == sn_hwperf_obj_to_cnode(op)) { nodeobj = op; break; } } if 
(!nodeobj) { e = -ENOENT; goto err; } /* get it's interconnect topology */ sz = op->ports * sizeof(struct sn_hwperf_port_info); BUG_ON(sz > sizeof(ptdata)); e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, SN_HWPERF_ENUM_PORTS, nodeobj->id, sz, (u64)&ptdata, 0, 0, NULL); if (e != SN_HWPERF_OP_OK) { e = -EINVAL; goto err; } /* find nearest node with cpus and nearest memory */ for (router=NULL, j=0; j < op->ports; j++) { dest = sn_hwperf_findobj_id(objbuf, nobj, ptdata[j].conn_id); if (dest && SN_HWPERF_IS_ROUTER(dest)) router = dest; if (!dest || SN_HWPERF_FOREIGN(dest) || !SN_HWPERF_IS_NODE(dest) || SN_HWPERF_IS_IONODE(dest)) { continue; } c = sn_hwperf_obj_to_cnode(dest); if (!found_cpu && sn_hwperf_has_cpus(c)) { if (near_cpu_node) *near_cpu_node = c; found_cpu++; } if (!found_mem && sn_hwperf_has_mem(c)) { if (near_mem_node) *near_mem_node = c; found_mem++; } } if (router && (!found_cpu || !found_mem)) { /* search for a node connected to the same router */ sz = router->ports * sizeof(struct sn_hwperf_port_info); BUG_ON(sz > sizeof(ptdata)); e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, SN_HWPERF_ENUM_PORTS, router->id, sz, (u64)&ptdata, 0, 0, NULL); if (e != SN_HWPERF_OP_OK) { e = -EINVAL; goto err; } for (j=0; j < router->ports; j++) { dest = sn_hwperf_findobj_id(objbuf, nobj, ptdata[j].conn_id); if (!dest || dest->id == node || SN_HWPERF_FOREIGN(dest) || !SN_HWPERF_IS_NODE(dest) || SN_HWPERF_IS_IONODE(dest)) { continue; } c = sn_hwperf_obj_to_cnode(dest); if (!found_cpu && sn_hwperf_has_cpus(c)) { if (near_cpu_node) *near_cpu_node = c; found_cpu++; } if (!found_mem && sn_hwperf_has_mem(c)) { if (near_mem_node) *near_mem_node = c; found_mem++; } if (found_cpu && found_mem) break; } } if (!found_cpu || !found_mem) { /* resort to _any_ node with CPUs and memory */ for (i=0, op=objbuf; i < nobj; i++, op++) { if (SN_HWPERF_FOREIGN(op) || SN_HWPERF_IS_IONODE(op) || !SN_HWPERF_IS_NODE(op)) { continue; } c = sn_hwperf_obj_to_cnode(op); if (!found_cpu && 
sn_hwperf_has_cpus(c)) { if (near_cpu_node) *near_cpu_node = c; found_cpu++; } if (!found_mem && sn_hwperf_has_mem(c)) { if (near_mem_node) *near_mem_node = c; found_mem++; } if (found_cpu && found_mem) break; } } if (!found_cpu || !found_mem) e = -ENODATA; err: return e; } static int sn_topology_show(struct seq_file *s, void *d) { int sz; int pt; int e = 0; int i; int j; const char *slabname; int ordinal; char slice; struct cpuinfo_ia64 *c; struct sn_hwperf_port_info *ptdata; struct sn_hwperf_object_info *p; struct sn_hwperf_object_info *obj = d; /* this object */ struct sn_hwperf_object_info *objs = s->private; /* all objects */ u8 shubtype; u8 system_size; u8 sharing_size; u8 partid; u8 coher; u8 nasid_shift; u8 region_size; u16 nasid_mask; int nasid_msb; if (obj == objs) { seq_printf(s, "# sn_topology version 2\n"); seq_printf(s, "# objtype ordinal location partition" " [attribute value [, ...]]\n"); if (ia64_sn_get_sn_info(0, &shubtype, &nasid_mask, &nasid_shift, &system_size, &sharing_size, &partid, &coher, &region_size)) BUG(); for (nasid_msb=63; nasid_msb > 0; nasid_msb--) { if (((u64)nasid_mask << nasid_shift) & (1ULL << nasid_msb)) break; } seq_printf(s, "partition %u %s local " "shubtype %s, " "nasid_mask 0x%016llx, " "nasid_bits %d:%d, " "system_size %d, " "sharing_size %d, " "coherency_domain %d, " "region_size %d\n", partid, utsname()->nodename, shubtype ? "shub2" : "shub1", (u64)nasid_mask << nasid_shift, nasid_msb, nasid_shift, system_size, sharing_size, coher, region_size); print_pci_topology(s); } if (SN_HWPERF_FOREIGN(obj)) { /* private in another partition: not interesting */ return 0; } for (i = 0; i < SN_HWPERF_MAXSTRING && obj->name[i]; i++) { if (obj->name[i] == ' ') obj->name[i] = '_'; } slabname = sn_hwperf_get_slabname(obj, objs, &ordinal); seq_printf(s, "%s %d %s %s asic %s", slabname, ordinal, obj->location, obj->sn_hwp_this_part ? 
"local" : "shared", obj->name); if (ordinal < 0 || (!SN_HWPERF_IS_NODE(obj) && !SN_HWPERF_IS_IONODE(obj))) seq_putc(s, '\n'); else { cnodeid_t near_mem = -1; cnodeid_t near_cpu = -1; seq_printf(s, ", nasid 0x%x", cnodeid_to_nasid(ordinal)); if (sn_hwperf_get_nearest_node_objdata(objs, sn_hwperf_obj_cnt, ordinal, &near_mem, &near_cpu) == 0) { seq_printf(s, ", near_mem_nodeid %d, near_cpu_nodeid %d", near_mem, near_cpu); } if (!SN_HWPERF_IS_IONODE(obj)) { for_each_online_node(i) { seq_printf(s, i ? ":%d" : ", dist %d", node_distance(ordinal, i)); } } seq_putc(s, '\n'); /* * CPUs on this node, if any */ if (!SN_HWPERF_IS_IONODE(obj)) { for_each_cpu_and(i, cpu_online_mask, cpumask_of_node(ordinal)) { slice = 'a' + cpuid_to_slice(i); c = cpu_data(i); seq_printf(s, "cpu %d %s%c local" " freq %luMHz, arch ia64", i, obj->location, slice, c->proc_freq / 1000000); for_each_online_cpu(j) { seq_printf(s, j ? ":%d" : ", dist %d", node_distance( cpu_to_node(i), cpu_to_node(j))); } seq_putc(s, '\n'); } } } if (obj->ports) { /* * numalink ports */ sz = obj->ports * sizeof(struct sn_hwperf_port_info); if ((ptdata = kmalloc(sz, GFP_KERNEL)) == NULL) return -ENOMEM; e = ia64_sn_hwperf_op(sn_hwperf_master_nasid, SN_HWPERF_ENUM_PORTS, obj->id, sz, (u64) ptdata, 0, 0, NULL); if (e != SN_HWPERF_OP_OK) return -EINVAL; for (ordinal=0, p=objs; p != obj; p++) { if (!SN_HWPERF_FOREIGN(p)) ordinal += p->ports; } for (pt = 0; pt < obj->ports; pt++) { for (p = objs, i = 0; i < sn_hwperf_obj_cnt; i++, p++) { if (ptdata[pt].conn_id == p->id) { break; } } seq_printf(s, "numalink %d %s-%d", ordinal+pt, obj->location, ptdata[pt].port); if (i >= sn_hwperf_obj_cnt) { /* no connection */ seq_puts(s, " local endpoint disconnected" ", protocol unknown\n"); continue; } if (obj->sn_hwp_this_part && p->sn_hwp_this_part) /* both ends local to this partition */ seq_puts(s, " local"); else if (SN_HWPERF_FOREIGN(p)) /* both ends of the link in foreign partiton */ seq_puts(s, " foreign"); else /* link straddles a 
partition */ seq_puts(s, " shared"); /* * Unlikely, but strictly should query the LLP config * registers because an NL4R can be configured to run * NL3 protocol, even when not talking to an NL3 router. * Ditto for node-node. */ seq_printf(s, " endpoint %s-%d, protocol %s\n", p->location, ptdata[pt].conn_port, (SN_HWPERF_IS_NL3ROUTER(obj) || SN_HWPERF_IS_NL3ROUTER(p)) ? "LLP3" : "LLP4"); } kfree(ptdata); } return 0; } static void *sn_topology_start(struct seq_file *s, loff_t * pos) { struct sn_hwperf_object_info *objs = s->private; if (*pos < sn_hwperf_obj_cnt) return (void *)(objs + *pos); return NULL; } static void *sn_topology_next(struct seq_file *s, void *v, loff_t * pos) { ++*pos; return sn_topology_start(s, pos); } static void sn_topology_stop(struct seq_file *m, void *v) { return; } /* * /proc/sgi_sn/sn_topology, read-only using seq_file */ static const struct seq_operations sn_topology_seq_ops = { .start = sn_topology_start, .next = sn_topology_next, .stop = sn_topology_stop, .show = sn_topology_show }; struct sn_hwperf_op_info { u64 op; struct sn_hwperf_ioctl_args *a; void *p; int *v0; int ret; }; static void sn_hwperf_call_sal(void *info) { struct sn_hwperf_op_info *op_info = info; int r; r = ia64_sn_hwperf_op(sn_hwperf_master_nasid, op_info->op, op_info->a->arg, op_info->a->sz, (u64) op_info->p, 0, 0, op_info->v0); op_info->ret = r; } static int sn_hwperf_op_cpu(struct sn_hwperf_op_info *op_info) { u32 cpu; u32 use_ipi; int r = 0; cpumask_t save_allowed; cpu = (op_info->a->arg & SN_HWPERF_ARG_CPU_MASK) >> 32; use_ipi = op_info->a->arg & SN_HWPERF_ARG_USE_IPI_MASK; op_info->a->arg &= SN_HWPERF_ARG_OBJID_MASK; if (cpu != SN_HWPERF_ARG_ANY_CPU) { if (cpu >= nr_cpu_ids || !cpu_online(cpu)) { r = -EINVAL; goto out; } } if (cpu == SN_HWPERF_ARG_ANY_CPU || cpu == get_cpu()) { /* don't care, or already on correct cpu */ sn_hwperf_call_sal(op_info); } else { if (use_ipi) { /* use an interprocessor interrupt to call SAL */ smp_call_function_single(cpu, 
sn_hwperf_call_sal, op_info, 1); } else { /* migrate the task before calling SAL */ save_allowed = current->cpus_allowed; set_cpus_allowed(current, cpumask_of_cpu(cpu)); sn_hwperf_call_sal(op_info); set_cpus_allowed(current, save_allowed); } } r = op_info->ret; out: return r; } /* map SAL hwperf error code to system error code */ static int sn_hwperf_map_err(int hwperf_err) { int e; switch(hwperf_err) { case SN_HWPERF_OP_OK: e = 0; break; case SN_HWPERF_OP_NOMEM: e = -ENOMEM; break; case SN_HWPERF_OP_NO_PERM: e = -EPERM; break; case SN_HWPERF_OP_IO_ERROR: e = -EIO; break; case SN_HWPERF_OP_BUSY: e = -EBUSY; break; case SN_HWPERF_OP_RECONFIGURE: e = -EAGAIN; break; case SN_HWPERF_OP_INVAL: default: e = -EINVAL; break; } return e; } /* * ioctl for "sn_hwperf" misc device */ static int sn_hwperf_ioctl(struct inode *in, struct file *fp, u32 op, unsigned long arg) { struct sn_hwperf_ioctl_args a; struct cpuinfo_ia64 *cdata; struct sn_hwperf_object_info *objs; struct sn_hwperf_object_info *cpuobj; struct sn_hwperf_op_info op_info; void *p = NULL; int nobj; char slice; int node; int r; int v0; int i; int j; unlock_kernel(); /* only user requests are allowed here */ if ((op & SN_HWPERF_OP_MASK) < 10) { r = -EINVAL; goto error; } r = copy_from_user(&a, (const void __user *)arg, sizeof(struct sn_hwperf_ioctl_args)); if (r != 0) { r = -EFAULT; goto error; } /* * Allocate memory to hold a kernel copy of the user buffer. The * buffer contents are either copied in or out (or both) of user * space depending on the flags encoded in the requested operation. 
*/ if (a.ptr) { p = vmalloc(a.sz); if (!p) { r = -ENOMEM; goto error; } } if (op & SN_HWPERF_OP_MEM_COPYIN) { r = copy_from_user(p, (const void __user *)a.ptr, a.sz); if (r != 0) { r = -EFAULT; goto error; } } switch (op) { case SN_HWPERF_GET_CPU_INFO: if (a.sz == sizeof(u64)) { /* special case to get size needed */ *(u64 *) p = (u64) num_online_cpus() * sizeof(struct sn_hwperf_object_info); } else if (a.sz < num_online_cpus() * sizeof(struct sn_hwperf_object_info)) { r = -ENOMEM; goto error; } else if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) { int cpuobj_index = 0; memset(p, 0, a.sz); for (i = 0; i < nobj; i++) { if (!SN_HWPERF_IS_NODE(objs + i)) continue; node = sn_hwperf_obj_to_cnode(objs + i); for_each_online_cpu(j) { if (node != cpu_to_node(j)) continue; cpuobj = (struct sn_hwperf_object_info *) p + cpuobj_index++; slice = 'a' + cpuid_to_slice(j); cdata = cpu_data(j); cpuobj->id = j; snprintf(cpuobj->name, sizeof(cpuobj->name), "CPU %luMHz %s", cdata->proc_freq / 1000000, cdata->vendor); snprintf(cpuobj->location, sizeof(cpuobj->location), "%s%c", objs[i].location, slice); } } vfree(objs); } break; case SN_HWPERF_GET_NODE_NASID: if (a.sz != sizeof(u64) || (node = a.arg) < 0 || !cnode_possible(node)) { r = -EINVAL; goto error; } *(u64 *)p = (u64)cnodeid_to_nasid(node); break; case SN_HWPERF_GET_OBJ_NODE: i = a.arg; if (a.sz != sizeof(u64) || i < 0) { r = -EINVAL; goto error; } if ((r = sn_hwperf_enum_objects(&nobj, &objs)) == 0) { if (i >= nobj) { r = -EINVAL; vfree(objs); goto error; } if (objs[i].id != a.arg) { for (i = 0; i < nobj; i++) { if (objs[i].id == a.arg) break; } } if (i == nobj) { r = -EINVAL; vfree(objs); goto error; } if (!SN_HWPERF_IS_NODE(objs + i) && !SN_HWPERF_IS_IONODE(objs + i)) { r = -ENOENT; vfree(objs); goto error; } *(u64 *)p = (u64)sn_hwperf_obj_to_cnode(objs + i); vfree(objs); } break; case SN_HWPERF_GET_MMRS: case SN_HWPERF_SET_MMRS: case SN_HWPERF_OBJECT_DISTANCE: op_info.p = p; op_info.a = &a; op_info.v0 = &v0; op_info.op 
= op; r = sn_hwperf_op_cpu(&op_info); if (r) { r = sn_hwperf_map_err(r); a.v0 = v0; goto error; } break; default: /* all other ops are a direct SAL call */ r = ia64_sn_hwperf_op(sn_hwperf_master_nasid, op, a.arg, a.sz, (u64) p, 0, 0, &v0); if (r) { r = sn_hwperf_map_err(r); goto error; } a.v0 = v0; break; } if (op & SN_HWPERF_OP_MEM_COPYOUT) { r = copy_to_user((void __user *)a.ptr, p, a.sz); if (r != 0) { r = -EFAULT; goto error; } } error: vfree(p); lock_kernel(); return r; } static const struct file_operations sn_hwperf_fops = { .ioctl = sn_hwperf_ioctl, }; static struct miscdevice sn_hwperf_dev = { MISC_DYNAMIC_MINOR, "sn_hwperf", &sn_hwperf_fops }; static int sn_hwperf_init(void) { u64 v; int salr; int e = 0; /* single threaded, once-only initialization */ mutex_lock(&sn_hwperf_init_mutex); if (sn_hwperf_salheap) { mutex_unlock(&sn_hwperf_init_mutex); return e; } /* * The PROM code needs a fixed reference node. For convenience the * same node as the console I/O is used. */ sn_hwperf_master_nasid = (nasid_t) ia64_sn_get_console_nasid(); /* * Request the needed size and install the PROM scratch area. * The PROM keeps various tracking bits in this memory area. 
*/ salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid, (u64) SN_HWPERF_GET_HEAPSIZE, 0, (u64) sizeof(u64), (u64) &v, 0, 0, NULL); if (salr != SN_HWPERF_OP_OK) { e = -EINVAL; goto out; } if ((sn_hwperf_salheap = vmalloc(v)) == NULL) { e = -ENOMEM; goto out; } salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid, SN_HWPERF_INSTALL_HEAP, 0, v, (u64) sn_hwperf_salheap, 0, 0, NULL); if (salr != SN_HWPERF_OP_OK) { e = -EINVAL; goto out; } salr = ia64_sn_hwperf_op(sn_hwperf_master_nasid, SN_HWPERF_OBJECT_COUNT, 0, sizeof(u64), (u64) &v, 0, 0, NULL); if (salr != SN_HWPERF_OP_OK) { e = -EINVAL; goto out; } sn_hwperf_obj_cnt = (int)v; out: if (e < 0 && sn_hwperf_salheap) { vfree(sn_hwperf_salheap); sn_hwperf_salheap = NULL; sn_hwperf_obj_cnt = 0; } mutex_unlock(&sn_hwperf_init_mutex); return e; } int sn_topology_open(struct inode *inode, struct file *file) { int e; struct seq_file *seq; struct sn_hwperf_object_info *objbuf; int nobj; if ((e = sn_hwperf_enum_objects(&nobj, &objbuf)) == 0) { e = seq_open(file, &sn_topology_seq_ops); seq = file->private_data; seq->private = objbuf; } return e; } int sn_topology_release(struct inode *inode, struct file *file) { struct seq_file *seq = file->private_data; vfree(seq->private); return seq_release(inode, file); } int sn_hwperf_get_nearest_node(cnodeid_t node, cnodeid_t *near_mem_node, cnodeid_t *near_cpu_node) { int e; int nobj; struct sn_hwperf_object_info *objbuf; if ((e = sn_hwperf_enum_objects(&nobj, &objbuf)) == 0) { e = sn_hwperf_get_nearest_node_objdata(objbuf, nobj, node, near_mem_node, near_cpu_node); vfree(objbuf); } return e; } static int __devinit sn_hwperf_misc_register_init(void) { int e; if (!ia64_platform_is("sn2")) return 0; sn_hwperf_init(); /* * Register a dynamic misc device for hwperf ioctls. Platforms * supporting hotplug will create /dev/sn_hwperf, else user * can to look up the minor number in /proc/misc. 
*/ if ((e = misc_register(&sn_hwperf_dev)) != 0) { printk(KERN_ERR "sn_hwperf_misc_register_init: failed to " "register misc device for \"%s\"\n", sn_hwperf_dev.name); } return e; } device_initcall(sn_hwperf_misc_register_init); /* after misc_init() */ EXPORT_SYMBOL(sn_hwperf_get_nearest_node);
gpl-2.0
michabs/linux-imx6-3.14
drivers/net/wireless/cw1200/queue.c
1103
14925
/* * O(1) TX queue with built-in allocator for ST-Ericsson CW1200 drivers * * Copyright (c) 2010, ST-Ericsson * Author: Dmitry Tarnyagin <dmitry.tarnyagin@lockless.no> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <net/mac80211.h> #include <linux/sched.h> #include "queue.h" #include "cw1200.h" #include "debug.h" /* private */ struct cw1200_queue_item { struct list_head head; struct sk_buff *skb; u32 packet_id; unsigned long queue_timestamp; unsigned long xmit_timestamp; struct cw1200_txpriv txpriv; u8 generation; }; static inline void __cw1200_queue_lock(struct cw1200_queue *queue) { struct cw1200_queue_stats *stats = queue->stats; if (queue->tx_locked_cnt++ == 0) { pr_debug("[TX] Queue %d is locked.\n", queue->queue_id); ieee80211_stop_queue(stats->priv->hw, queue->queue_id); } } static inline void __cw1200_queue_unlock(struct cw1200_queue *queue) { struct cw1200_queue_stats *stats = queue->stats; BUG_ON(!queue->tx_locked_cnt); if (--queue->tx_locked_cnt == 0) { pr_debug("[TX] Queue %d is unlocked.\n", queue->queue_id); ieee80211_wake_queue(stats->priv->hw, queue->queue_id); } } static inline void cw1200_queue_parse_id(u32 packet_id, u8 *queue_generation, u8 *queue_id, u8 *item_generation, u8 *item_id) { *item_id = (packet_id >> 0) & 0xFF; *item_generation = (packet_id >> 8) & 0xFF; *queue_id = (packet_id >> 16) & 0xFF; *queue_generation = (packet_id >> 24) & 0xFF; } static inline u32 cw1200_queue_mk_packet_id(u8 queue_generation, u8 queue_id, u8 item_generation, u8 item_id) { return ((u32)item_id << 0) | ((u32)item_generation << 8) | ((u32)queue_id << 16) | ((u32)queue_generation << 24); } static void cw1200_queue_post_gc(struct cw1200_queue_stats *stats, struct list_head *gc_list) { struct cw1200_queue_item *item, *tmp; list_for_each_entry_safe(item, tmp, gc_list, head) { list_del(&item->head); 
stats->skb_dtor(stats->priv, item->skb, &item->txpriv); kfree(item); } } static void cw1200_queue_register_post_gc(struct list_head *gc_list, struct cw1200_queue_item *item) { struct cw1200_queue_item *gc_item; gc_item = kmalloc(sizeof(struct cw1200_queue_item), GFP_ATOMIC); BUG_ON(!gc_item); memcpy(gc_item, item, sizeof(struct cw1200_queue_item)); list_add_tail(&gc_item->head, gc_list); } static void __cw1200_queue_gc(struct cw1200_queue *queue, struct list_head *head, bool unlock) { struct cw1200_queue_stats *stats = queue->stats; struct cw1200_queue_item *item = NULL, *tmp; bool wakeup_stats = false; list_for_each_entry_safe(item, tmp, &queue->queue, head) { if (jiffies - item->queue_timestamp < queue->ttl) break; --queue->num_queued; --queue->link_map_cache[item->txpriv.link_id]; spin_lock_bh(&stats->lock); --stats->num_queued; if (!--stats->link_map_cache[item->txpriv.link_id]) wakeup_stats = true; spin_unlock_bh(&stats->lock); cw1200_debug_tx_ttl(stats->priv); cw1200_queue_register_post_gc(head, item); item->skb = NULL; list_move_tail(&item->head, &queue->free_pool); } if (wakeup_stats) wake_up(&stats->wait_link_id_empty); if (queue->overfull) { if (queue->num_queued <= (queue->capacity >> 1)) { queue->overfull = false; if (unlock) __cw1200_queue_unlock(queue); } else if (item) { unsigned long tmo = item->queue_timestamp + queue->ttl; mod_timer(&queue->gc, tmo); cw1200_pm_stay_awake(&stats->priv->pm_state, tmo - jiffies); } } } static void cw1200_queue_gc(unsigned long arg) { LIST_HEAD(list); struct cw1200_queue *queue = (struct cw1200_queue *)arg; spin_lock_bh(&queue->lock); __cw1200_queue_gc(queue, &list, true); spin_unlock_bh(&queue->lock); cw1200_queue_post_gc(queue->stats, &list); } int cw1200_queue_stats_init(struct cw1200_queue_stats *stats, size_t map_capacity, cw1200_queue_skb_dtor_t skb_dtor, struct cw1200_common *priv) { memset(stats, 0, sizeof(*stats)); stats->map_capacity = map_capacity; stats->skb_dtor = skb_dtor; stats->priv = priv; 
spin_lock_init(&stats->lock); init_waitqueue_head(&stats->wait_link_id_empty); stats->link_map_cache = kzalloc(sizeof(int) * map_capacity, GFP_KERNEL); if (!stats->link_map_cache) return -ENOMEM; return 0; } int cw1200_queue_init(struct cw1200_queue *queue, struct cw1200_queue_stats *stats, u8 queue_id, size_t capacity, unsigned long ttl) { size_t i; memset(queue, 0, sizeof(*queue)); queue->stats = stats; queue->capacity = capacity; queue->queue_id = queue_id; queue->ttl = ttl; INIT_LIST_HEAD(&queue->queue); INIT_LIST_HEAD(&queue->pending); INIT_LIST_HEAD(&queue->free_pool); spin_lock_init(&queue->lock); init_timer(&queue->gc); queue->gc.data = (unsigned long)queue; queue->gc.function = cw1200_queue_gc; queue->pool = kzalloc(sizeof(struct cw1200_queue_item) * capacity, GFP_KERNEL); if (!queue->pool) return -ENOMEM; queue->link_map_cache = kzalloc(sizeof(int) * stats->map_capacity, GFP_KERNEL); if (!queue->link_map_cache) { kfree(queue->pool); queue->pool = NULL; return -ENOMEM; } for (i = 0; i < capacity; ++i) list_add_tail(&queue->pool[i].head, &queue->free_pool); return 0; } int cw1200_queue_clear(struct cw1200_queue *queue) { int i; LIST_HEAD(gc_list); struct cw1200_queue_stats *stats = queue->stats; struct cw1200_queue_item *item, *tmp; spin_lock_bh(&queue->lock); queue->generation++; list_splice_tail_init(&queue->queue, &queue->pending); list_for_each_entry_safe(item, tmp, &queue->pending, head) { WARN_ON(!item->skb); cw1200_queue_register_post_gc(&gc_list, item); item->skb = NULL; list_move_tail(&item->head, &queue->free_pool); } queue->num_queued = 0; queue->num_pending = 0; spin_lock_bh(&stats->lock); for (i = 0; i < stats->map_capacity; ++i) { stats->num_queued -= queue->link_map_cache[i]; stats->link_map_cache[i] -= queue->link_map_cache[i]; queue->link_map_cache[i] = 0; } spin_unlock_bh(&stats->lock); if (queue->overfull) { queue->overfull = false; __cw1200_queue_unlock(queue); } spin_unlock_bh(&queue->lock); wake_up(&stats->wait_link_id_empty); 
cw1200_queue_post_gc(stats, &gc_list); return 0; } void cw1200_queue_stats_deinit(struct cw1200_queue_stats *stats) { kfree(stats->link_map_cache); stats->link_map_cache = NULL; } void cw1200_queue_deinit(struct cw1200_queue *queue) { cw1200_queue_clear(queue); del_timer_sync(&queue->gc); INIT_LIST_HEAD(&queue->free_pool); kfree(queue->pool); kfree(queue->link_map_cache); queue->pool = NULL; queue->link_map_cache = NULL; queue->capacity = 0; } size_t cw1200_queue_get_num_queued(struct cw1200_queue *queue, u32 link_id_map) { size_t ret; int i, bit; size_t map_capacity = queue->stats->map_capacity; if (!link_id_map) return 0; spin_lock_bh(&queue->lock); if (link_id_map == (u32)-1) { ret = queue->num_queued - queue->num_pending; } else { ret = 0; for (i = 0, bit = 1; i < map_capacity; ++i, bit <<= 1) { if (link_id_map & bit) ret += queue->link_map_cache[i]; } } spin_unlock_bh(&queue->lock); return ret; } int cw1200_queue_put(struct cw1200_queue *queue, struct sk_buff *skb, struct cw1200_txpriv *txpriv) { int ret = 0; LIST_HEAD(gc_list); struct cw1200_queue_stats *stats = queue->stats; if (txpriv->link_id >= queue->stats->map_capacity) return -EINVAL; spin_lock_bh(&queue->lock); if (!WARN_ON(list_empty(&queue->free_pool))) { struct cw1200_queue_item *item = list_first_entry( &queue->free_pool, struct cw1200_queue_item, head); BUG_ON(item->skb); list_move_tail(&item->head, &queue->queue); item->skb = skb; item->txpriv = *txpriv; item->generation = 0; item->packet_id = cw1200_queue_mk_packet_id(queue->generation, queue->queue_id, item->generation, item - queue->pool); item->queue_timestamp = jiffies; ++queue->num_queued; ++queue->link_map_cache[txpriv->link_id]; spin_lock_bh(&stats->lock); ++stats->num_queued; ++stats->link_map_cache[txpriv->link_id]; spin_unlock_bh(&stats->lock); /* TX may happen in parallel sometimes. * Leave extra queue slots so we don't overflow. 
*/ if (queue->overfull == false && queue->num_queued >= (queue->capacity - (num_present_cpus() - 1))) { queue->overfull = true; __cw1200_queue_lock(queue); mod_timer(&queue->gc, jiffies); } } else { ret = -ENOENT; } spin_unlock_bh(&queue->lock); return ret; } int cw1200_queue_get(struct cw1200_queue *queue, u32 link_id_map, struct wsm_tx **tx, struct ieee80211_tx_info **tx_info, const struct cw1200_txpriv **txpriv) { int ret = -ENOENT; struct cw1200_queue_item *item; struct cw1200_queue_stats *stats = queue->stats; bool wakeup_stats = false; spin_lock_bh(&queue->lock); list_for_each_entry(item, &queue->queue, head) { if (link_id_map & BIT(item->txpriv.link_id)) { ret = 0; break; } } if (!WARN_ON(ret)) { *tx = (struct wsm_tx *)item->skb->data; *tx_info = IEEE80211_SKB_CB(item->skb); *txpriv = &item->txpriv; (*tx)->packet_id = item->packet_id; list_move_tail(&item->head, &queue->pending); ++queue->num_pending; --queue->link_map_cache[item->txpriv.link_id]; item->xmit_timestamp = jiffies; spin_lock_bh(&stats->lock); --stats->num_queued; if (!--stats->link_map_cache[item->txpriv.link_id]) wakeup_stats = true; spin_unlock_bh(&stats->lock); } spin_unlock_bh(&queue->lock); if (wakeup_stats) wake_up(&stats->wait_link_id_empty); return ret; } int cw1200_queue_requeue(struct cw1200_queue *queue, u32 packet_id) { int ret = 0; u8 queue_generation, queue_id, item_generation, item_id; struct cw1200_queue_item *item; struct cw1200_queue_stats *stats = queue->stats; cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id, &item_generation, &item_id); item = &queue->pool[item_id]; spin_lock_bh(&queue->lock); BUG_ON(queue_id != queue->queue_id); if (queue_generation != queue->generation) { ret = -ENOENT; } else if (item_id >= (unsigned) queue->capacity) { WARN_ON(1); ret = -EINVAL; } else if (item->generation != item_generation) { WARN_ON(1); ret = -ENOENT; } else { --queue->num_pending; ++queue->link_map_cache[item->txpriv.link_id]; spin_lock_bh(&stats->lock); 
++stats->num_queued; ++stats->link_map_cache[item->txpriv.link_id]; spin_unlock_bh(&stats->lock); item->generation = ++item_generation; item->packet_id = cw1200_queue_mk_packet_id(queue_generation, queue_id, item_generation, item_id); list_move(&item->head, &queue->queue); } spin_unlock_bh(&queue->lock); return ret; } int cw1200_queue_requeue_all(struct cw1200_queue *queue) { struct cw1200_queue_item *item, *tmp; struct cw1200_queue_stats *stats = queue->stats; spin_lock_bh(&queue->lock); list_for_each_entry_safe_reverse(item, tmp, &queue->pending, head) { --queue->num_pending; ++queue->link_map_cache[item->txpriv.link_id]; spin_lock_bh(&stats->lock); ++stats->num_queued; ++stats->link_map_cache[item->txpriv.link_id]; spin_unlock_bh(&stats->lock); ++item->generation; item->packet_id = cw1200_queue_mk_packet_id(queue->generation, queue->queue_id, item->generation, item - queue->pool); list_move(&item->head, &queue->queue); } spin_unlock_bh(&queue->lock); return 0; } int cw1200_queue_remove(struct cw1200_queue *queue, u32 packet_id) { int ret = 0; u8 queue_generation, queue_id, item_generation, item_id; struct cw1200_queue_item *item; struct cw1200_queue_stats *stats = queue->stats; struct sk_buff *gc_skb = NULL; struct cw1200_txpriv gc_txpriv; cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id, &item_generation, &item_id); item = &queue->pool[item_id]; spin_lock_bh(&queue->lock); BUG_ON(queue_id != queue->queue_id); if (queue_generation != queue->generation) { ret = -ENOENT; } else if (item_id >= (unsigned) queue->capacity) { WARN_ON(1); ret = -EINVAL; } else if (item->generation != item_generation) { WARN_ON(1); ret = -ENOENT; } else { gc_txpriv = item->txpriv; gc_skb = item->skb; item->skb = NULL; --queue->num_pending; --queue->num_queued; ++queue->num_sent; ++item->generation; /* Do not use list_move_tail here, but list_move: * try to utilize cache row. 
*/ list_move(&item->head, &queue->free_pool); if (queue->overfull && (queue->num_queued <= (queue->capacity >> 1))) { queue->overfull = false; __cw1200_queue_unlock(queue); } } spin_unlock_bh(&queue->lock); if (gc_skb) stats->skb_dtor(stats->priv, gc_skb, &gc_txpriv); return ret; } int cw1200_queue_get_skb(struct cw1200_queue *queue, u32 packet_id, struct sk_buff **skb, const struct cw1200_txpriv **txpriv) { int ret = 0; u8 queue_generation, queue_id, item_generation, item_id; struct cw1200_queue_item *item; cw1200_queue_parse_id(packet_id, &queue_generation, &queue_id, &item_generation, &item_id); item = &queue->pool[item_id]; spin_lock_bh(&queue->lock); BUG_ON(queue_id != queue->queue_id); if (queue_generation != queue->generation) { ret = -ENOENT; } else if (item_id >= (unsigned) queue->capacity) { WARN_ON(1); ret = -EINVAL; } else if (item->generation != item_generation) { WARN_ON(1); ret = -ENOENT; } else { *skb = item->skb; *txpriv = &item->txpriv; } spin_unlock_bh(&queue->lock); return ret; } void cw1200_queue_lock(struct cw1200_queue *queue) { spin_lock_bh(&queue->lock); __cw1200_queue_lock(queue); spin_unlock_bh(&queue->lock); } void cw1200_queue_unlock(struct cw1200_queue *queue) { spin_lock_bh(&queue->lock); __cw1200_queue_unlock(queue); spin_unlock_bh(&queue->lock); } bool cw1200_queue_get_xmit_timestamp(struct cw1200_queue *queue, unsigned long *timestamp, u32 pending_frame_id) { struct cw1200_queue_item *item; bool ret; spin_lock_bh(&queue->lock); ret = !list_empty(&queue->pending); if (ret) { list_for_each_entry(item, &queue->pending, head) { if (item->packet_id != pending_frame_id) if (time_before(item->xmit_timestamp, *timestamp)) *timestamp = item->xmit_timestamp; } } spin_unlock_bh(&queue->lock); return ret; } bool cw1200_queue_stats_is_empty(struct cw1200_queue_stats *stats, u32 link_id_map) { bool empty = true; spin_lock_bh(&stats->lock); if (link_id_map == (u32)-1) { empty = stats->num_queued == 0; } else { int i; for (i = 0; i < 
stats->map_capacity; ++i) { if (link_id_map & BIT(i)) { if (stats->link_map_cache[i]) { empty = false; break; } } } } spin_unlock_bh(&stats->lock); return empty; }
gpl-2.0
AndroidOpenSourceXperia/android_kernel_sony_u8500
drivers/media/rc/redrat3.c
2383
35620
/* * USB RedRat3 IR Transceiver rc-core driver * * Copyright (c) 2011 by Jarod Wilson <jarod@redhat.com> * based heavily on the work of Stephen Cox, with additional * help from RedRat Ltd. * * This driver began life based an an old version of the first-generation * lirc_mceusb driver from the lirc 0.7.2 distribution. It was then * significantly rewritten by Stephen Cox with the aid of RedRat Ltd's * Chris Dodge. * * The driver was then ported to rc-core and significantly rewritten again, * by Jarod, using the in-kernel mceusb driver as a guide, after an initial * port effort was started by Stephen. * * TODO LIST: * - fix lirc not showing repeats properly * -- * * The RedRat3 is a USB transceiver with both send & receive, * with 2 separate sensors available for receive to enable * both good long range reception for general use, and good * short range reception when required for learning a signal. * * http://www.redrat.co.uk/ * * It uses its own little protocol to communicate, the required * parts of which are embedded within this driver. * -- * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. 
* * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/device.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/usb.h> #include <linux/usb/input.h> #include <media/rc-core.h> /* Driver Information */ #define DRIVER_VERSION "0.70" #define DRIVER_AUTHOR "Jarod Wilson <jarod@redhat.com>" #define DRIVER_AUTHOR2 "The Dweller, Stephen Cox" #define DRIVER_DESC "RedRat3 USB IR Transceiver Driver" #define DRIVER_NAME "redrat3" /* module parameters */ #ifdef CONFIG_USB_DEBUG static int debug = 1; #else static int debug; #endif #define RR3_DEBUG_STANDARD 0x1 #define RR3_DEBUG_FUNCTION_TRACE 0x2 #define rr3_dbg(dev, fmt, ...) \ do { \ if (debug & RR3_DEBUG_STANDARD) \ dev_info(dev, fmt, ## __VA_ARGS__); \ } while (0) #define rr3_ftr(dev, fmt, ...) \ do { \ if (debug & RR3_DEBUG_FUNCTION_TRACE) \ dev_info(dev, fmt, ## __VA_ARGS__); \ } while (0) /* bulk data transfer types */ #define RR3_ERROR 0x01 #define RR3_MOD_SIGNAL_IN 0x20 #define RR3_MOD_SIGNAL_OUT 0x21 /* Get the RR firmware version */ #define RR3_FW_VERSION 0xb1 #define RR3_FW_VERSION_LEN 64 /* Send encoded signal bulk-sent earlier*/ #define RR3_TX_SEND_SIGNAL 0xb3 #define RR3_SET_IR_PARAM 0xb7 #define RR3_GET_IR_PARAM 0xb8 /* Blink the red LED on the device */ #define RR3_BLINK_LED 0xb9 /* Read serial number of device */ #define RR3_READ_SER_NO 0xba #define RR3_SER_NO_LEN 4 /* Start capture with the RC receiver */ #define RR3_RC_DET_ENABLE 0xbb /* Stop capture with the RC receiver */ #define RR3_RC_DET_DISABLE 0xbc /* Return the status of RC detector capture */ #define RR3_RC_DET_STATUS 0xbd /* Reset redrat */ #define RR3_RESET 0xa0 /* Max number of lengths in the signal. */ #define RR3_IR_IO_MAX_LENGTHS 0x01 /* Periods to measure mod. freq. 
*/ #define RR3_IR_IO_PERIODS_MF 0x02 /* Size of memory for main signal data */ #define RR3_IR_IO_SIG_MEM_SIZE 0x03 /* Delta value when measuring lengths */ #define RR3_IR_IO_LENGTH_FUZZ 0x04 /* Timeout for end of signal detection */ #define RR3_IR_IO_SIG_TIMEOUT 0x05 /* Minumum value for pause recognition. */ #define RR3_IR_IO_MIN_PAUSE 0x06 /* Clock freq. of EZ-USB chip */ #define RR3_CLK 24000000 /* Clock periods per timer count */ #define RR3_CLK_PER_COUNT 12 /* (RR3_CLK / RR3_CLK_PER_COUNT) */ #define RR3_CLK_CONV_FACTOR 2000000 /* USB bulk-in IR data endpoint address */ #define RR3_BULK_IN_EP_ADDR 0x82 /* Raw Modulated signal data value offsets */ #define RR3_PAUSE_OFFSET 0 #define RR3_FREQ_COUNT_OFFSET 4 #define RR3_NUM_PERIOD_OFFSET 6 #define RR3_MAX_LENGTHS_OFFSET 8 #define RR3_NUM_LENGTHS_OFFSET 9 #define RR3_MAX_SIGS_OFFSET 10 #define RR3_NUM_SIGS_OFFSET 12 #define RR3_REPEATS_OFFSET 14 /* Size of the fixed-length portion of the signal */ #define RR3_HEADER_LENGTH 15 #define RR3_DRIVER_MAXLENS 128 #define RR3_MAX_SIG_SIZE 512 #define RR3_MAX_BUF_SIZE \ ((2 * RR3_HEADER_LENGTH) + RR3_DRIVER_MAXLENS + RR3_MAX_SIG_SIZE) #define RR3_TIME_UNIT 50 #define RR3_END_OF_SIGNAL 0x7f #define RR3_TX_HEADER_OFFSET 4 #define RR3_TX_TRAILER_LEN 2 #define RR3_RX_MIN_TIMEOUT 5 #define RR3_RX_MAX_TIMEOUT 2000 /* The 8051's CPUCS Register address */ #define RR3_CPUCS_REG_ADDR 0x7f92 #define USB_RR3USB_VENDOR_ID 0x112a #define USB_RR3USB_PRODUCT_ID 0x0001 #define USB_RR3IIUSB_PRODUCT_ID 0x0005 /* table of devices that work with this driver */ static struct usb_device_id redrat3_dev_table[] = { /* Original version of the RedRat3 */ {USB_DEVICE(USB_RR3USB_VENDOR_ID, USB_RR3USB_PRODUCT_ID)}, /* Second Version/release of the RedRat3 - RetRat3-II */ {USB_DEVICE(USB_RR3USB_VENDOR_ID, USB_RR3IIUSB_PRODUCT_ID)}, {} /* Terminating entry */ }; /* Structure to hold all of our device specific stuff */ struct redrat3_dev { /* core device bits */ struct rc_dev *rc; struct device *dev; /* 
save off the usb device pointer */ struct usb_device *udev; /* the receive endpoint */ struct usb_endpoint_descriptor *ep_in; /* the buffer to receive data */ unsigned char *bulk_in_buf; /* urb used to read ir data */ struct urb *read_urb; /* the send endpoint */ struct usb_endpoint_descriptor *ep_out; /* the buffer to send data */ unsigned char *bulk_out_buf; /* the urb used to send data */ struct urb *write_urb; /* usb dma */ dma_addr_t dma_in; dma_addr_t dma_out; /* true if write urb is busy */ bool write_busy; /* wait for the write to finish */ struct completion write_finished; /* locks this structure */ struct mutex lock; /* rx signal timeout timer */ struct timer_list rx_timeout; /* Is the device currently receiving? */ bool recv_in_progress; /* is the detector enabled*/ bool det_enabled; /* Is the device currently transmitting?*/ bool transmitting; /* store for current packet */ char pbuf[RR3_MAX_BUF_SIZE]; u16 pktlen; u16 pkttype; u16 bytes_read; /* indicate whether we are going to reprocess * the USB callback with a bigger buffer */ int buftoosmall; char *datap; u32 carrier; char name[128]; char phys[64]; }; /* All incoming data buffers adhere to a very specific data format */ struct redrat3_signal_header { u16 length; /* Length of data being transferred */ u16 transfer_type; /* Type of data transferred */ u32 pause; /* Pause between main and repeat signals */ u16 mod_freq_count; /* Value of timer on mod. freq. measurement */ u16 no_periods; /* No. of periods over which mod. freq. is measured */ u8 max_lengths; /* Max no. of lengths (i.e. size of array) */ u8 no_lengths; /* Actual no. of elements in lengths array */ u16 max_sig_size; /* Max no. of values in signal data array */ u16 sig_size; /* Acuto no. of values in signal data array */ u8 no_repeats; /* No. 
of repeats of repeat signal section */ /* Here forward is the lengths and signal data */ }; static void redrat3_dump_signal_header(struct redrat3_signal_header *header) { pr_info("%s:\n", __func__); pr_info(" * length: %u, transfer_type: 0x%02x\n", header->length, header->transfer_type); pr_info(" * pause: %u, freq_count: %u, no_periods: %u\n", header->pause, header->mod_freq_count, header->no_periods); pr_info(" * lengths: %u (max: %u)\n", header->no_lengths, header->max_lengths); pr_info(" * sig_size: %u (max: %u)\n", header->sig_size, header->max_sig_size); pr_info(" * repeats: %u\n", header->no_repeats); } static void redrat3_dump_signal_data(char *buffer, u16 len) { int offset, i; char *data_vals; pr_info("%s:", __func__); offset = RR3_TX_HEADER_OFFSET + RR3_HEADER_LENGTH + (RR3_DRIVER_MAXLENS * sizeof(u16)); /* read RR3_DRIVER_MAXLENS from ctrl msg */ data_vals = buffer + offset; for (i = 0; i < len; i++) { if (i % 10 == 0) pr_cont("\n * "); pr_cont("%02x ", *data_vals++); } pr_cont("\n"); } /* * redrat3_issue_async * * Issues an async read to the ir data in port.. * sets the callback to be redrat3_handle_async */ static void redrat3_issue_async(struct redrat3_dev *rr3) { int res; rr3_ftr(rr3->dev, "Entering %s\n", __func__); if (!rr3->det_enabled) { dev_warn(rr3->dev, "not issuing async read, " "detector not enabled\n"); return; } memset(rr3->bulk_in_buf, 0, rr3->ep_in->wMaxPacketSize); res = usb_submit_urb(rr3->read_urb, GFP_ATOMIC); if (res) rr3_dbg(rr3->dev, "%s: receive request FAILED! 
" "(res %d, len %d)\n", __func__, res, rr3->read_urb->transfer_buffer_length); } static void redrat3_dump_fw_error(struct redrat3_dev *rr3, int code) { if (!rr3->transmitting && (code != 0x40)) dev_info(rr3->dev, "fw error code 0x%02x: ", code); switch (code) { case 0x00: pr_cont("No Error\n"); break; /* Codes 0x20 through 0x2f are IR Firmware Errors */ case 0x20: pr_cont("Initial signal pulse not long enough " "to measure carrier frequency\n"); break; case 0x21: pr_cont("Not enough length values allocated for signal\n"); break; case 0x22: pr_cont("Not enough memory allocated for signal data\n"); break; case 0x23: pr_cont("Too many signal repeats\n"); break; case 0x28: pr_cont("Insufficient memory available for IR signal " "data memory allocation\n"); break; case 0x29: pr_cont("Insufficient memory available " "for IrDa signal data memory allocation\n"); break; /* Codes 0x30 through 0x3f are USB Firmware Errors */ case 0x30: pr_cont("Insufficient memory available for bulk " "transfer structure\n"); break; /* * Other error codes... These are primarily errors that can occur in * the control messages sent to the redrat */ case 0x40: if (!rr3->transmitting) pr_cont("Signal capture has been terminated\n"); break; case 0x41: pr_cont("Attempt to set/get and unknown signal I/O " "algorithm parameter\n"); break; case 0x42: pr_cont("Signal capture already started\n"); break; default: pr_cont("Unknown Error\n"); break; } } static u32 redrat3_val_to_mod_freq(struct redrat3_signal_header *ph) { u32 mod_freq = 0; if (ph->mod_freq_count != 0) mod_freq = (RR3_CLK * ph->no_periods) / (ph->mod_freq_count * RR3_CLK_PER_COUNT); return mod_freq; } /* this function scales down the figures for the same result... */ static u32 redrat3_len_to_us(u32 length) { u32 biglen = length * 1000; u32 divisor = (RR3_CLK_CONV_FACTOR) / 1000; u32 result = (u32) (biglen / divisor); /* don't allow zero lengths to go back, breaks lirc */ return result ? 
result : 1; } /* * convert us back into redrat3 lengths * * length * 1000 length * 1000000 * ------------- = ---------------- = micro * rr3clk / 1000 rr3clk * 6 * 2 4 * 3 micro * rr3clk micro * rr3clk / 1000 * ----- = 4 ----- = 6 -------------- = len --------------------- * 3 2 1000000 1000 */ static u32 redrat3_us_to_len(u32 microsec) { u32 result; u32 divisor; microsec &= IR_MAX_DURATION; divisor = (RR3_CLK_CONV_FACTOR / 1000); result = (u32)(microsec * divisor) / 1000; /* don't allow zero lengths to go back, breaks lirc */ return result ? result : 1; } /* timer callback to send long trailing space on receive timeout */ static void redrat3_rx_timeout(unsigned long data) { struct redrat3_dev *rr3 = (struct redrat3_dev *)data; DEFINE_IR_RAW_EVENT(rawir); rawir.pulse = false; rawir.duration = rr3->rc->timeout; rr3_dbg(rr3->dev, "storing trailing space with duration %d\n", rawir.duration); ir_raw_event_store_with_filter(rr3->rc, &rawir); rr3_dbg(rr3->dev, "calling ir_raw_event_handle\n"); ir_raw_event_handle(rr3->rc); rr3_dbg(rr3->dev, "calling ir_raw_event_reset\n"); ir_raw_event_reset(rr3->rc); } static void redrat3_process_ir_data(struct redrat3_dev *rr3) { DEFINE_IR_RAW_EVENT(rawir); struct redrat3_signal_header header; struct device *dev; int i; unsigned long delay; u32 mod_freq, single_len; u16 *len_vals; u8 *data_vals; u32 tmp32; u16 tmp16; char *sig_data; if (!rr3) { pr_err("%s called with no context!\n", __func__); return; } rr3_ftr(rr3->dev, "Entered %s\n", __func__); dev = rr3->dev; sig_data = rr3->pbuf; header.length = rr3->pktlen; header.transfer_type = rr3->pkttype; /* Sanity check */ if (!(header.length >= RR3_HEADER_LENGTH)) dev_warn(dev, "read returned less than rr3 header len\n"); delay = usecs_to_jiffies(rr3->rc->timeout / 1000); mod_timer(&rr3->rx_timeout, jiffies + delay); memcpy(&tmp32, sig_data + RR3_PAUSE_OFFSET, sizeof(tmp32)); header.pause = be32_to_cpu(tmp32); memcpy(&tmp16, sig_data + RR3_FREQ_COUNT_OFFSET, sizeof(tmp16)); 
header.mod_freq_count = be16_to_cpu(tmp16); memcpy(&tmp16, sig_data + RR3_NUM_PERIOD_OFFSET, sizeof(tmp16)); header.no_periods = be16_to_cpu(tmp16); header.max_lengths = sig_data[RR3_MAX_LENGTHS_OFFSET]; header.no_lengths = sig_data[RR3_NUM_LENGTHS_OFFSET]; memcpy(&tmp16, sig_data + RR3_MAX_SIGS_OFFSET, sizeof(tmp16)); header.max_sig_size = be16_to_cpu(tmp16); memcpy(&tmp16, sig_data + RR3_NUM_SIGS_OFFSET, sizeof(tmp16)); header.sig_size = be16_to_cpu(tmp16); header.no_repeats= sig_data[RR3_REPEATS_OFFSET]; if (debug) { redrat3_dump_signal_header(&header); redrat3_dump_signal_data(sig_data, header.sig_size); } mod_freq = redrat3_val_to_mod_freq(&header); rr3_dbg(dev, "Got mod_freq of %u\n", mod_freq); /* Here we pull out the 'length' values from the signal */ len_vals = (u16 *)(sig_data + RR3_HEADER_LENGTH); data_vals = sig_data + RR3_HEADER_LENGTH + (header.max_lengths * sizeof(u16)); /* process each rr3 encoded byte into an int */ for (i = 0; i < header.sig_size; i++) { u16 val = len_vals[data_vals[i]]; single_len = redrat3_len_to_us((u32)be16_to_cpu(val)); /* cap the value to IR_MAX_DURATION */ single_len &= IR_MAX_DURATION; /* we should always get pulse/space/pulse/space samples */ if (i % 2) rawir.pulse = false; else rawir.pulse = true; rawir.duration = US_TO_NS(single_len); rr3_dbg(dev, "storing %s with duration %d (i: %d)\n", rawir.pulse ? "pulse" : "space", rawir.duration, i); ir_raw_event_store_with_filter(rr3->rc, &rawir); } /* add a trailing space, if need be */ if (i % 2) { rawir.pulse = false; /* this duration is made up, and may not be ideal... 
*/ rawir.duration = rr3->rc->timeout / 2; rr3_dbg(dev, "storing trailing space with duration %d\n", rawir.duration); ir_raw_event_store_with_filter(rr3->rc, &rawir); } rr3_dbg(dev, "calling ir_raw_event_handle\n"); ir_raw_event_handle(rr3->rc); return; } /* Util fn to send rr3 cmds */ static u8 redrat3_send_cmd(int cmd, struct redrat3_dev *rr3) { struct usb_device *udev; u8 *data; int res; data = kzalloc(sizeof(u8), GFP_KERNEL); if (!data) return -ENOMEM; udev = rr3->udev; res = usb_control_msg(udev, usb_rcvctrlpipe(udev, 0), cmd, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0x0000, 0x0000, data, sizeof(u8), HZ * 10); if (res < 0) { dev_err(rr3->dev, "%s: Error sending rr3 cmd res %d, data %d", __func__, res, *data); res = -EIO; } else res = (u8)data[0]; kfree(data); return res; } /* Enables the long range detector and starts async receive */ static int redrat3_enable_detector(struct redrat3_dev *rr3) { struct device *dev = rr3->dev; u8 ret; rr3_ftr(dev, "Entering %s\n", __func__); ret = redrat3_send_cmd(RR3_RC_DET_ENABLE, rr3); if (ret != 0) dev_dbg(dev, "%s: unexpected ret of %d\n", __func__, ret); ret = redrat3_send_cmd(RR3_RC_DET_STATUS, rr3); if (ret != 1) { dev_err(dev, "%s: detector status: %d, should be 1\n", __func__, ret); return -EIO; } rr3->det_enabled = true; redrat3_issue_async(rr3); return 0; } /* Disables the rr3 long range detector */ static void redrat3_disable_detector(struct redrat3_dev *rr3) { struct device *dev = rr3->dev; u8 ret; rr3_ftr(dev, "Entering %s\n", __func__); ret = redrat3_send_cmd(RR3_RC_DET_DISABLE, rr3); if (ret != 0) dev_err(dev, "%s: failure!\n", __func__); ret = redrat3_send_cmd(RR3_RC_DET_STATUS, rr3); if (ret != 0) dev_warn(dev, "%s: detector status: %d, should be 0\n", __func__, ret); rr3->det_enabled = false; } static inline void redrat3_delete(struct redrat3_dev *rr3, struct usb_device *udev) { rr3_ftr(rr3->dev, "%s cleaning up\n", __func__); usb_kill_urb(rr3->read_urb); usb_kill_urb(rr3->write_urb); 
usb_free_urb(rr3->read_urb); usb_free_urb(rr3->write_urb); usb_free_coherent(udev, rr3->ep_in->wMaxPacketSize, rr3->bulk_in_buf, rr3->dma_in); usb_free_coherent(udev, rr3->ep_out->wMaxPacketSize, rr3->bulk_out_buf, rr3->dma_out); kfree(rr3); } static u32 redrat3_get_timeout(struct device *dev, struct rc_dev *rc, struct usb_device *udev) { u32 *tmp; u32 timeout = MS_TO_NS(150); /* a sane default, if things go haywire */ int len, ret, pipe; len = sizeof(*tmp); tmp = kzalloc(len, GFP_KERNEL); if (!tmp) { dev_warn(dev, "Memory allocation faillure\n"); return timeout; } pipe = usb_rcvctrlpipe(udev, 0); ret = usb_control_msg(udev, pipe, RR3_GET_IR_PARAM, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, RR3_IR_IO_SIG_TIMEOUT, 0, tmp, len, HZ * 5); if (ret != len) { dev_warn(dev, "Failed to read timeout from hardware\n"); return timeout; } timeout = US_TO_NS(redrat3_len_to_us(be32_to_cpu(*tmp))); if (timeout < rc->min_timeout) timeout = rc->min_timeout; else if (timeout > rc->max_timeout) timeout = rc->max_timeout; rr3_dbg(dev, "Got timeout of %d ms\n", timeout / (1000 * 1000)); return timeout; } static void redrat3_reset(struct redrat3_dev *rr3) { struct usb_device *udev = rr3->udev; struct device *dev = rr3->dev; int rc, rxpipe, txpipe; u8 *val; int len = sizeof(u8); rr3_ftr(dev, "Entering %s\n", __func__); rxpipe = usb_rcvctrlpipe(udev, 0); txpipe = usb_sndctrlpipe(udev, 0); val = kzalloc(len, GFP_KERNEL); if (!val) { dev_err(dev, "Memory allocation failure\n"); return; } *val = 0x01; rc = usb_control_msg(udev, rxpipe, RR3_RESET, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, RR3_CPUCS_REG_ADDR, 0, val, len, HZ * 25); rr3_dbg(dev, "reset returned 0x%02x\n", rc); *val = 5; rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, RR3_IR_IO_LENGTH_FUZZ, 0, val, len, HZ * 25); rr3_dbg(dev, "set ir parm len fuzz %d rc 0x%02x\n", *val, rc); *val = RR3_DRIVER_MAXLENS; rc = usb_control_msg(udev, txpipe, RR3_SET_IR_PARAM, 
USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, RR3_IR_IO_MAX_LENGTHS, 0, val, len, HZ * 25); rr3_dbg(dev, "set ir parm max lens %d rc 0x%02x\n", *val, rc); kfree(val); } static void redrat3_get_firmware_rev(struct redrat3_dev *rr3) { int rc = 0; char *buffer; rr3_ftr(rr3->dev, "Entering %s\n", __func__); buffer = kzalloc(sizeof(char) * (RR3_FW_VERSION_LEN + 1), GFP_KERNEL); if (!buffer) { dev_err(rr3->dev, "Memory allocation failure\n"); return; } rc = usb_control_msg(rr3->udev, usb_rcvctrlpipe(rr3->udev, 0), RR3_FW_VERSION, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0, 0, buffer, RR3_FW_VERSION_LEN, HZ * 5); if (rc >= 0) dev_info(rr3->dev, "Firmware rev: %s", buffer); else dev_err(rr3->dev, "Problem fetching firmware ID\n"); kfree(buffer); rr3_ftr(rr3->dev, "Exiting %s\n", __func__); } static void redrat3_read_packet_start(struct redrat3_dev *rr3, int len) { u16 tx_error; u16 hdrlen; rr3_ftr(rr3->dev, "Entering %s\n", __func__); /* grab the Length and type of transfer */ memcpy(&(rr3->pktlen), (unsigned char *) rr3->bulk_in_buf, sizeof(rr3->pktlen)); memcpy(&(rr3->pkttype), ((unsigned char *) rr3->bulk_in_buf + sizeof(rr3->pktlen)), sizeof(rr3->pkttype)); /*data needs conversion to know what its real values are*/ rr3->pktlen = be16_to_cpu(rr3->pktlen); rr3->pkttype = be16_to_cpu(rr3->pkttype); switch (rr3->pkttype) { case RR3_ERROR: memcpy(&tx_error, ((unsigned char *)rr3->bulk_in_buf + (sizeof(rr3->pktlen) + sizeof(rr3->pkttype))), sizeof(tx_error)); tx_error = be16_to_cpu(tx_error); redrat3_dump_fw_error(rr3, tx_error); break; case RR3_MOD_SIGNAL_IN: hdrlen = sizeof(rr3->pktlen) + sizeof(rr3->pkttype); rr3->bytes_read = len; rr3->bytes_read -= hdrlen; rr3->datap = &(rr3->pbuf[0]); memcpy(rr3->datap, ((unsigned char *)rr3->bulk_in_buf + hdrlen), rr3->bytes_read); rr3->datap += rr3->bytes_read; rr3_dbg(rr3->dev, "bytes_read %d, pktlen %d\n", rr3->bytes_read, rr3->pktlen); break; default: rr3_dbg(rr3->dev, "ignoring packet with type 0x%02x, " "len of %d, 
0x%02x\n", rr3->pkttype, len, rr3->pktlen); break; } } static void redrat3_read_packet_continue(struct redrat3_dev *rr3, int len) { rr3_ftr(rr3->dev, "Entering %s\n", __func__); memcpy(rr3->datap, (unsigned char *)rr3->bulk_in_buf, len); rr3->datap += len; rr3->bytes_read += len; rr3_dbg(rr3->dev, "bytes_read %d, pktlen %d\n", rr3->bytes_read, rr3->pktlen); } /* gather IR data from incoming urb, process it when we have enough */ static int redrat3_get_ir_data(struct redrat3_dev *rr3, int len) { struct device *dev = rr3->dev; int ret = 0; rr3_ftr(dev, "Entering %s\n", __func__); if (rr3->pktlen > RR3_MAX_BUF_SIZE) { dev_err(rr3->dev, "error: packet larger than buffer\n"); ret = -EINVAL; goto out; } if ((rr3->bytes_read == 0) && (len >= (sizeof(rr3->pkttype) + sizeof(rr3->pktlen)))) { redrat3_read_packet_start(rr3, len); } else if (rr3->bytes_read != 0) { redrat3_read_packet_continue(rr3, len); } else if (rr3->bytes_read == 0) { dev_err(dev, "error: no packet data read\n"); ret = -ENODATA; goto out; } if (rr3->bytes_read > rr3->pktlen) { dev_err(dev, "bytes_read (%d) greater than pktlen (%d)\n", rr3->bytes_read, rr3->pktlen); ret = -EINVAL; goto out; } else if (rr3->bytes_read < rr3->pktlen) /* we're still accumulating data */ return 0; /* if we get here, we've got IR data to decode */ if (rr3->pkttype == RR3_MOD_SIGNAL_IN) redrat3_process_ir_data(rr3); else rr3_dbg(dev, "discarding non-signal data packet " "(type 0x%02x)\n", rr3->pkttype); out: rr3->bytes_read = 0; rr3->pktlen = 0; rr3->pkttype = 0; return ret; } /* callback function from USB when async USB request has completed */ static void redrat3_handle_async(struct urb *urb, struct pt_regs *regs) { struct redrat3_dev *rr3; if (!urb) return; rr3 = urb->context; if (!rr3) { pr_err("%s called with invalid context!\n", __func__); usb_unlink_urb(urb); return; } rr3_ftr(rr3->dev, "Entering %s\n", __func__); if (!rr3->det_enabled) { rr3_dbg(rr3->dev, "received a read callback but detector " "disabled - ignoring\n"); 
return; } switch (urb->status) { case 0: redrat3_get_ir_data(rr3, urb->actual_length); break; case -ECONNRESET: case -ENOENT: case -ESHUTDOWN: usb_unlink_urb(urb); return; case -EPIPE: default: dev_warn(rr3->dev, "Error: urb status = %d\n", urb->status); rr3->bytes_read = 0; rr3->pktlen = 0; rr3->pkttype = 0; break; } if (!rr3->transmitting) redrat3_issue_async(rr3); else rr3_dbg(rr3->dev, "IR transmit in progress\n"); } static void redrat3_write_bulk_callback(struct urb *urb, struct pt_regs *regs) { struct redrat3_dev *rr3; int len; if (!urb) return; rr3 = urb->context; if (rr3) { len = urb->actual_length; rr3_ftr(rr3->dev, "%s: called (status=%d len=%d)\n", __func__, urb->status, len); } } static u16 mod_freq_to_val(unsigned int mod_freq) { int mult = 6000000; /* Clk used in mod. freq. generation is CLK24/4. */ return (u16)(65536 - (mult / mod_freq)); } static int redrat3_set_tx_carrier(struct rc_dev *dev, u32 carrier) { struct redrat3_dev *rr3 = dev->priv; rr3->carrier = carrier; return carrier; } static int redrat3_transmit_ir(struct rc_dev *rcdev, int *txbuf, u32 n) { struct redrat3_dev *rr3 = rcdev->priv; struct device *dev = rr3->dev; struct redrat3_signal_header header; int i, j, count, ret, ret_len, offset; int lencheck, cur_sample_len, pipe; char *buffer = NULL, *sigdata = NULL; int *sample_lens = NULL; u32 tmpi; u16 tmps; u8 *datap; u8 curlencheck = 0; u16 *lengths_ptr; int sendbuf_len; rr3_ftr(dev, "Entering %s\n", __func__); if (rr3->transmitting) { dev_warn(dev, "%s: transmitter already in use\n", __func__); return -EAGAIN; } count = n / sizeof(int); if (count > (RR3_DRIVER_MAXLENS * 2)) return -EINVAL; rr3->transmitting = true; redrat3_disable_detector(rr3); if (rr3->det_enabled) { dev_err(dev, "%s: cannot tx while rx is enabled\n", __func__); ret = -EIO; goto out; } sample_lens = kzalloc(sizeof(int) * RR3_DRIVER_MAXLENS, GFP_KERNEL); if (!sample_lens) { ret = -ENOMEM; goto out; } for (i = 0; i < count; i++) { for (lencheck = 0; lencheck < 
curlencheck; lencheck++) { cur_sample_len = redrat3_us_to_len(txbuf[i]); if (sample_lens[lencheck] == cur_sample_len) break; } if (lencheck == curlencheck) { cur_sample_len = redrat3_us_to_len(txbuf[i]); rr3_dbg(dev, "txbuf[%d]=%u, pos %d, enc %u\n", i, txbuf[i], curlencheck, cur_sample_len); if (curlencheck < 255) { /* now convert the value to a proper * rr3 value.. */ sample_lens[curlencheck] = cur_sample_len; curlencheck++; } else { dev_err(dev, "signal too long\n"); ret = -EINVAL; goto out; } } } sigdata = kzalloc((count + RR3_TX_TRAILER_LEN), GFP_KERNEL); if (!sigdata) { ret = -ENOMEM; goto out; } sigdata[count] = RR3_END_OF_SIGNAL; sigdata[count + 1] = RR3_END_OF_SIGNAL; for (i = 0; i < count; i++) { for (j = 0; j < curlencheck; j++) { if (sample_lens[j] == redrat3_us_to_len(txbuf[i])) sigdata[i] = j; } } offset = RR3_TX_HEADER_OFFSET; sendbuf_len = RR3_HEADER_LENGTH + (sizeof(u16) * RR3_DRIVER_MAXLENS) + count + RR3_TX_TRAILER_LEN + offset; buffer = kzalloc(sendbuf_len, GFP_KERNEL); if (!buffer) { ret = -ENOMEM; goto out; } /* fill in our packet header */ header.length = sendbuf_len - offset; header.transfer_type = RR3_MOD_SIGNAL_OUT; header.pause = redrat3_len_to_us(100); header.mod_freq_count = mod_freq_to_val(rr3->carrier); header.no_periods = 0; /* n/a to transmit */ header.max_lengths = RR3_DRIVER_MAXLENS; header.no_lengths = curlencheck; header.max_sig_size = RR3_MAX_SIG_SIZE; header.sig_size = count + RR3_TX_TRAILER_LEN; /* we currently rely on repeat handling in the IR encoding source */ header.no_repeats = 0; tmps = cpu_to_be16(header.length); memcpy(buffer, &tmps, 2); tmps = cpu_to_be16(header.transfer_type); memcpy(buffer + 2, &tmps, 2); tmpi = cpu_to_be32(header.pause); memcpy(buffer + offset, &tmpi, sizeof(tmpi)); tmps = cpu_to_be16(header.mod_freq_count); memcpy(buffer + offset + RR3_FREQ_COUNT_OFFSET, &tmps, 2); buffer[offset + RR3_NUM_LENGTHS_OFFSET] = header.no_lengths; tmps = cpu_to_be16(header.sig_size); memcpy(buffer + offset + 
RR3_NUM_SIGS_OFFSET, &tmps, 2); buffer[offset + RR3_REPEATS_OFFSET] = header.no_repeats; lengths_ptr = (u16 *)(buffer + offset + RR3_HEADER_LENGTH); for (i = 0; i < curlencheck; ++i) lengths_ptr[i] = cpu_to_be16(sample_lens[i]); datap = (u8 *)(buffer + offset + RR3_HEADER_LENGTH + (sizeof(u16) * RR3_DRIVER_MAXLENS)); memcpy(datap, sigdata, (count + RR3_TX_TRAILER_LEN)); if (debug) { redrat3_dump_signal_header(&header); redrat3_dump_signal_data(buffer, header.sig_size); } pipe = usb_sndbulkpipe(rr3->udev, rr3->ep_out->bEndpointAddress); tmps = usb_bulk_msg(rr3->udev, pipe, buffer, sendbuf_len, &ret_len, 10 * HZ); rr3_dbg(dev, "sent %d bytes, (ret %d)\n", ret_len, tmps); /* now tell the hardware to transmit what we sent it */ pipe = usb_rcvctrlpipe(rr3->udev, 0); ret = usb_control_msg(rr3->udev, pipe, RR3_TX_SEND_SIGNAL, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, 0, 0, buffer, 2, HZ * 10); if (ret < 0) dev_err(dev, "Error: control msg send failed, rc %d\n", ret); else ret = n; out: kfree(sample_lens); kfree(buffer); kfree(sigdata); rr3->transmitting = false; redrat3_enable_detector(rr3); return ret; } static struct rc_dev *redrat3_init_rc_dev(struct redrat3_dev *rr3) { struct device *dev = rr3->dev; struct rc_dev *rc; int ret = -ENODEV; u16 prod = le16_to_cpu(rr3->udev->descriptor.idProduct); rc = rc_allocate_device(); if (!rc) { dev_err(dev, "remote input dev allocation failed\n"); goto out; } snprintf(rr3->name, sizeof(rr3->name), "RedRat3%s " "Infrared Remote Transceiver (%04x:%04x)", prod == USB_RR3IIUSB_PRODUCT_ID ? 
"-II" : "", le16_to_cpu(rr3->udev->descriptor.idVendor), prod); usb_make_path(rr3->udev, rr3->phys, sizeof(rr3->phys)); rc->input_name = rr3->name; rc->input_phys = rr3->phys; usb_to_input_id(rr3->udev, &rc->input_id); rc->dev.parent = dev; rc->priv = rr3; rc->driver_type = RC_DRIVER_IR_RAW; rc->allowed_protos = RC_TYPE_ALL; rc->min_timeout = MS_TO_NS(RR3_RX_MIN_TIMEOUT); rc->max_timeout = MS_TO_NS(RR3_RX_MAX_TIMEOUT); rc->timeout = redrat3_get_timeout(dev, rc, rr3->udev); rc->tx_ir = redrat3_transmit_ir; rc->s_tx_carrier = redrat3_set_tx_carrier; rc->driver_name = DRIVER_NAME; rc->map_name = RC_MAP_HAUPPAUGE; ret = rc_register_device(rc); if (ret < 0) { dev_err(dev, "remote dev registration failed\n"); goto out; } return rc; out: rc_free_device(rc); return NULL; } static int __devinit redrat3_dev_probe(struct usb_interface *intf, const struct usb_device_id *id) { struct usb_device *udev = interface_to_usbdev(intf); struct device *dev = &intf->dev; struct usb_host_interface *uhi; struct redrat3_dev *rr3; struct usb_endpoint_descriptor *ep; struct usb_endpoint_descriptor *ep_in = NULL; struct usb_endpoint_descriptor *ep_out = NULL; u8 addr, attrs; int pipe, i; int retval = -ENOMEM; rr3_ftr(dev, "%s called\n", __func__); uhi = intf->cur_altsetting; /* find our bulk-in and bulk-out endpoints */ for (i = 0; i < uhi->desc.bNumEndpoints; ++i) { ep = &uhi->endpoint[i].desc; addr = ep->bEndpointAddress; attrs = ep->bmAttributes; if ((ep_in == NULL) && ((addr & USB_ENDPOINT_DIR_MASK) == USB_DIR_IN) && ((attrs & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK)) { rr3_dbg(dev, "found bulk-in endpoint at 0x%02x\n", ep->bEndpointAddress); /* data comes in on 0x82, 0x81 is for other data... 
*/ if (ep->bEndpointAddress == RR3_BULK_IN_EP_ADDR) ep_in = ep; } if ((ep_out == NULL) && ((addr & USB_ENDPOINT_DIR_MASK) == USB_DIR_OUT) && ((attrs & USB_ENDPOINT_XFERTYPE_MASK) == USB_ENDPOINT_XFER_BULK)) { rr3_dbg(dev, "found bulk-out endpoint at 0x%02x\n", ep->bEndpointAddress); ep_out = ep; } } if (!ep_in || !ep_out) { dev_err(dev, "Couldn't find both in and out endpoints\n"); retval = -ENODEV; goto no_endpoints; } /* allocate memory for our device state and initialize it */ rr3 = kzalloc(sizeof(*rr3), GFP_KERNEL); if (rr3 == NULL) { dev_err(dev, "Memory allocation failure\n"); goto error; } rr3->dev = &intf->dev; /* set up bulk-in endpoint */ rr3->read_urb = usb_alloc_urb(0, GFP_KERNEL); if (!rr3->read_urb) { dev_err(dev, "Read urb allocation failure\n"); goto error; } rr3->ep_in = ep_in; rr3->bulk_in_buf = usb_alloc_coherent(udev, ep_in->wMaxPacketSize, GFP_ATOMIC, &rr3->dma_in); if (!rr3->bulk_in_buf) { dev_err(dev, "Read buffer allocation failure\n"); goto error; } pipe = usb_rcvbulkpipe(udev, ep_in->bEndpointAddress); usb_fill_bulk_urb(rr3->read_urb, udev, pipe, rr3->bulk_in_buf, ep_in->wMaxPacketSize, (usb_complete_t)redrat3_handle_async, rr3); /* set up bulk-out endpoint*/ rr3->write_urb = usb_alloc_urb(0, GFP_KERNEL); if (!rr3->write_urb) { dev_err(dev, "Write urb allocation failure\n"); goto error; } rr3->ep_out = ep_out; rr3->bulk_out_buf = usb_alloc_coherent(udev, ep_out->wMaxPacketSize, GFP_ATOMIC, &rr3->dma_out); if (!rr3->bulk_out_buf) { dev_err(dev, "Write buffer allocation failure\n"); goto error; } pipe = usb_sndbulkpipe(udev, ep_out->bEndpointAddress); usb_fill_bulk_urb(rr3->write_urb, udev, pipe, rr3->bulk_out_buf, ep_out->wMaxPacketSize, (usb_complete_t)redrat3_write_bulk_callback, rr3); mutex_init(&rr3->lock); rr3->udev = udev; redrat3_reset(rr3); redrat3_get_firmware_rev(rr3); /* might be all we need to do? */ retval = redrat3_enable_detector(rr3); if (retval < 0) goto error; /* default.. 
will get overridden by any sends with a freq defined */ rr3->carrier = 38000; rr3->rc = redrat3_init_rc_dev(rr3); if (!rr3->rc) goto error; setup_timer(&rr3->rx_timeout, redrat3_rx_timeout, (unsigned long)rr3); /* we can register the device now, as it is ready */ usb_set_intfdata(intf, rr3); rr3_ftr(dev, "Exiting %s\n", __func__); return 0; error: redrat3_delete(rr3, rr3->udev); no_endpoints: dev_err(dev, "%s: retval = %x", __func__, retval); return retval; } static void __devexit redrat3_dev_disconnect(struct usb_interface *intf) { struct usb_device *udev = interface_to_usbdev(intf); struct redrat3_dev *rr3 = usb_get_intfdata(intf); rr3_ftr(&intf->dev, "Entering %s\n", __func__); if (!rr3) return; redrat3_disable_detector(rr3); usb_set_intfdata(intf, NULL); rc_unregister_device(rr3->rc); redrat3_delete(rr3, udev); rr3_ftr(&intf->dev, "RedRat3 IR Transceiver now disconnected\n"); } static int redrat3_dev_suspend(struct usb_interface *intf, pm_message_t message) { struct redrat3_dev *rr3 = usb_get_intfdata(intf); rr3_ftr(rr3->dev, "suspend\n"); usb_kill_urb(rr3->read_urb); return 0; } static int redrat3_dev_resume(struct usb_interface *intf) { struct redrat3_dev *rr3 = usb_get_intfdata(intf); rr3_ftr(rr3->dev, "resume\n"); if (usb_submit_urb(rr3->read_urb, GFP_ATOMIC)) return -EIO; return 0; } static struct usb_driver redrat3_dev_driver = { .name = DRIVER_NAME, .probe = redrat3_dev_probe, .disconnect = redrat3_dev_disconnect, .suspend = redrat3_dev_suspend, .resume = redrat3_dev_resume, .reset_resume = redrat3_dev_resume, .id_table = redrat3_dev_table }; static int __init redrat3_dev_init(void) { int ret; ret = usb_register(&redrat3_dev_driver); if (ret < 0) pr_err(DRIVER_NAME ": usb register failed, result = %d\n", ret); return ret; } static void __exit redrat3_dev_exit(void) { usb_deregister(&redrat3_dev_driver); } module_init(redrat3_dev_init); module_exit(redrat3_dev_exit); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_AUTHOR(DRIVER_AUTHOR); 
MODULE_AUTHOR(DRIVER_AUTHOR2); MODULE_LICENSE("GPL"); MODULE_DEVICE_TABLE(usb, redrat3_dev_table); module_param(debug, int, S_IRUGO | S_IWUSR); MODULE_PARM_DESC(debug, "Enable module debug spew. 0 = no debugging (default) " "0x1 = standard debug messages, 0x2 = function tracing debug. " "Flag bits are addative (i.e., 0x3 for both debug types).");
gpl-2.0
htc-mirror/ville-u-ics-3.0.8-e2a40ab
drivers/tty/serial/bfin_5xx.c
2639
41193
/* * Blackfin On-Chip Serial Driver * * Copyright 2006-2010 Analog Devices Inc. * * Enter bugs at http://blackfin.uclinux.org/ * * Licensed under the GPL-2 or later. */ #if defined(CONFIG_SERIAL_BFIN_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ) #define SUPPORT_SYSRQ #endif #define DRIVER_NAME "bfin-uart" #define pr_fmt(fmt) DRIVER_NAME ": " fmt #include <linux/module.h> #include <linux/ioport.h> #include <linux/gfp.h> #include <linux/io.h> #include <linux/init.h> #include <linux/console.h> #include <linux/sysrq.h> #include <linux/platform_device.h> #include <linux/tty.h> #include <linux/tty_flip.h> #include <linux/serial_core.h> #include <linux/gpio.h> #include <linux/irq.h> #include <linux/kgdb.h> #include <linux/slab.h> #include <linux/dma-mapping.h> #include <asm/portmux.h> #include <asm/cacheflush.h> #include <asm/dma.h> #define port_membase(uart) (((struct bfin_serial_port *)(uart))->port.membase) #define get_lsr_cache(uart) (((struct bfin_serial_port *)(uart))->lsr) #define put_lsr_cache(uart, v) (((struct bfin_serial_port *)(uart))->lsr = (v)) #include <asm/bfin_serial.h> #ifdef CONFIG_SERIAL_BFIN_MODULE # undef CONFIG_EARLY_PRINTK #endif #ifdef CONFIG_SERIAL_BFIN_MODULE # undef CONFIG_EARLY_PRINTK #endif /* UART name and device definitions */ #define BFIN_SERIAL_DEV_NAME "ttyBF" #define BFIN_SERIAL_MAJOR 204 #define BFIN_SERIAL_MINOR 64 static struct bfin_serial_port *bfin_serial_ports[BFIN_UART_NR_PORTS]; #if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \ defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE) # ifndef CONFIG_SERIAL_BFIN_PIO # error KGDB only support UART in PIO mode. # endif static int kgdboc_port_line; static int kgdboc_break_enabled; #endif /* * Setup for console. 
Argument comes from the menuconfig */ #define DMA_RX_XCOUNT 512 #define DMA_RX_YCOUNT (PAGE_SIZE / DMA_RX_XCOUNT) #define DMA_RX_FLUSH_JIFFIES (HZ / 50) #ifdef CONFIG_SERIAL_BFIN_DMA static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart); #else static void bfin_serial_tx_chars(struct bfin_serial_port *uart); #endif static void bfin_serial_reset_irda(struct uart_port *port); #if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \ defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS) static unsigned int bfin_serial_get_mctrl(struct uart_port *port) { struct bfin_serial_port *uart = (struct bfin_serial_port *)port; if (uart->cts_pin < 0) return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; /* CTS PIN is negative assertive. */ if (UART_GET_CTS(uart)) return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; else return TIOCM_DSR | TIOCM_CAR; } static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl) { struct bfin_serial_port *uart = (struct bfin_serial_port *)port; if (uart->rts_pin < 0) return; /* RTS PIN is negative assertive. */ if (mctrl & TIOCM_RTS) UART_ENABLE_RTS(uart); else UART_DISABLE_RTS(uart); } /* * Handle any change of modem status signal. 
*/ static irqreturn_t bfin_serial_mctrl_cts_int(int irq, void *dev_id) { struct bfin_serial_port *uart = dev_id; unsigned int status; status = bfin_serial_get_mctrl(&uart->port); uart_handle_cts_change(&uart->port, status & TIOCM_CTS); #ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS uart->scts = 1; UART_CLEAR_SCTS(uart); UART_CLEAR_IER(uart, EDSSI); #endif return IRQ_HANDLED; } #else static unsigned int bfin_serial_get_mctrl(struct uart_port *port) { return TIOCM_CTS | TIOCM_DSR | TIOCM_CAR; } static void bfin_serial_set_mctrl(struct uart_port *port, unsigned int mctrl) { } #endif /* * interrupts are disabled on entry */ static void bfin_serial_stop_tx(struct uart_port *port) { struct bfin_serial_port *uart = (struct bfin_serial_port *)port; #ifdef CONFIG_SERIAL_BFIN_DMA struct circ_buf *xmit = &uart->port.state->xmit; #endif while (!(UART_GET_LSR(uart) & TEMT)) cpu_relax(); #ifdef CONFIG_SERIAL_BFIN_DMA disable_dma(uart->tx_dma_channel); xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1); uart->port.icount.tx += uart->tx_count; uart->tx_count = 0; uart->tx_done = 1; #else #ifdef CONFIG_BF54x /* Clear TFI bit */ UART_PUT_LSR(uart, TFI); #endif UART_CLEAR_IER(uart, ETBEI); #endif } /* * port is locked and interrupts are disabled */ static void bfin_serial_start_tx(struct uart_port *port) { struct bfin_serial_port *uart = (struct bfin_serial_port *)port; struct tty_struct *tty = uart->port.state->port.tty; #ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) { uart->scts = 0; uart_handle_cts_change(&uart->port, uart->scts); } #endif /* * To avoid losting RX interrupt, we reset IR function * before sending data. 
*/ if (tty->termios->c_line == N_IRDA) bfin_serial_reset_irda(port); #ifdef CONFIG_SERIAL_BFIN_DMA if (uart->tx_done) bfin_serial_dma_tx_chars(uart); #else UART_SET_IER(uart, ETBEI); bfin_serial_tx_chars(uart); #endif } /* * Interrupts are enabled */ static void bfin_serial_stop_rx(struct uart_port *port) { struct bfin_serial_port *uart = (struct bfin_serial_port *)port; UART_CLEAR_IER(uart, ERBFI); } /* * Set the modem control timer to fire immediately. */ static void bfin_serial_enable_ms(struct uart_port *port) { } #if ANOMALY_05000363 && defined(CONFIG_SERIAL_BFIN_PIO) # define UART_GET_ANOMALY_THRESHOLD(uart) ((uart)->anomaly_threshold) # define UART_SET_ANOMALY_THRESHOLD(uart, v) ((uart)->anomaly_threshold = (v)) #else # define UART_GET_ANOMALY_THRESHOLD(uart) 0 # define UART_SET_ANOMALY_THRESHOLD(uart, v) #endif #ifdef CONFIG_SERIAL_BFIN_PIO static void bfin_serial_rx_chars(struct bfin_serial_port *uart) { struct tty_struct *tty = NULL; unsigned int status, ch, flg; static struct timeval anomaly_start = { .tv_sec = 0 }; status = UART_GET_LSR(uart); UART_CLEAR_LSR(uart); ch = UART_GET_CHAR(uart); uart->port.icount.rx++; #if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \ defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE) if (kgdb_connected && kgdboc_port_line == uart->port.line && kgdboc_break_enabled) if (ch == 0x3) {/* Ctrl + C */ kgdb_breakpoint(); return; } if (!uart->port.state || !uart->port.state->port.tty) return; #endif tty = uart->port.state->port.tty; if (ANOMALY_05000363) { /* The BF533 (and BF561) family of processors have a nice anomaly * where they continuously generate characters for a "single" break. * We have to basically ignore this flood until the "next" valid * character comes across. Due to the nature of the flood, it is * not possible to reliably catch bytes that are sent too quickly * after this break. 
So application code talking to the Blackfin * which sends a break signal must allow at least 1.5 character * times after the end of the break for things to stabilize. This * timeout was picked as it must absolutely be larger than 1 * character time +/- some percent. So 1.5 sounds good. All other * Blackfin families operate properly. Woo. */ if (anomaly_start.tv_sec) { struct timeval curr; suseconds_t usecs; if ((~ch & (~ch + 1)) & 0xff) goto known_good_char; do_gettimeofday(&curr); if (curr.tv_sec - anomaly_start.tv_sec > 1) goto known_good_char; usecs = 0; if (curr.tv_sec != anomaly_start.tv_sec) usecs += USEC_PER_SEC; usecs += curr.tv_usec - anomaly_start.tv_usec; if (usecs > UART_GET_ANOMALY_THRESHOLD(uart)) goto known_good_char; if (ch) anomaly_start.tv_sec = 0; else anomaly_start = curr; return; known_good_char: status &= ~BI; anomaly_start.tv_sec = 0; } } if (status & BI) { if (ANOMALY_05000363) if (bfin_revid() < 5) do_gettimeofday(&anomaly_start); uart->port.icount.brk++; if (uart_handle_break(&uart->port)) goto ignore_char; status &= ~(PE | FE); } if (status & PE) uart->port.icount.parity++; if (status & OE) uart->port.icount.overrun++; if (status & FE) uart->port.icount.frame++; status &= uart->port.read_status_mask; if (status & BI) flg = TTY_BREAK; else if (status & PE) flg = TTY_PARITY; else if (status & FE) flg = TTY_FRAME; else flg = TTY_NORMAL; if (uart_handle_sysrq_char(&uart->port, ch)) goto ignore_char; uart_insert_char(&uart->port, status, OE, ch, flg); ignore_char: tty_flip_buffer_push(tty); } static void bfin_serial_tx_chars(struct bfin_serial_port *uart) { struct circ_buf *xmit = &uart->port.state->xmit; if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) { #ifdef CONFIG_BF54x /* Clear TFI bit */ UART_PUT_LSR(uart, TFI); #endif /* Anomaly notes: * 05000215 - we always clear ETBEI within last UART TX * interrupt to end a string. It is always set * when start a new tx. 
*/ UART_CLEAR_IER(uart, ETBEI); return; } if (uart->port.x_char) { UART_PUT_CHAR(uart, uart->port.x_char); uart->port.icount.tx++; uart->port.x_char = 0; } while ((UART_GET_LSR(uart) & THRE) && xmit->tail != xmit->head) { UART_PUT_CHAR(uart, xmit->buf[xmit->tail]); xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1); uart->port.icount.tx++; } if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&uart->port); } static irqreturn_t bfin_serial_rx_int(int irq, void *dev_id) { struct bfin_serial_port *uart = dev_id; while (UART_GET_LSR(uart) & DR) bfin_serial_rx_chars(uart); return IRQ_HANDLED; } static irqreturn_t bfin_serial_tx_int(int irq, void *dev_id) { struct bfin_serial_port *uart = dev_id; #ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS if (uart->scts && !(bfin_serial_get_mctrl(&uart->port) & TIOCM_CTS)) { uart->scts = 0; uart_handle_cts_change(&uart->port, uart->scts); } #endif spin_lock(&uart->port.lock); if (UART_GET_LSR(uart) & THRE) bfin_serial_tx_chars(uart); spin_unlock(&uart->port.lock); return IRQ_HANDLED; } #endif #ifdef CONFIG_SERIAL_BFIN_DMA static void bfin_serial_dma_tx_chars(struct bfin_serial_port *uart) { struct circ_buf *xmit = &uart->port.state->xmit; uart->tx_done = 0; if (uart_circ_empty(xmit) || uart_tx_stopped(&uart->port)) { uart->tx_count = 0; uart->tx_done = 1; return; } if (uart->port.x_char) { UART_PUT_CHAR(uart, uart->port.x_char); uart->port.icount.tx++; uart->port.x_char = 0; } uart->tx_count = CIRC_CNT(xmit->head, xmit->tail, UART_XMIT_SIZE); if (uart->tx_count > (UART_XMIT_SIZE - xmit->tail)) uart->tx_count = UART_XMIT_SIZE - xmit->tail; blackfin_dcache_flush_range((unsigned long)(xmit->buf+xmit->tail), (unsigned long)(xmit->buf+xmit->tail+uart->tx_count)); set_dma_config(uart->tx_dma_channel, set_bfin_dma_config(DIR_READ, DMA_FLOW_STOP, INTR_ON_BUF, DIMENSION_LINEAR, DATA_SIZE_8, DMA_SYNC_RESTART)); set_dma_start_addr(uart->tx_dma_channel, (unsigned long)(xmit->buf+xmit->tail)); set_dma_x_count(uart->tx_dma_channel, 
uart->tx_count); set_dma_x_modify(uart->tx_dma_channel, 1); SSYNC(); enable_dma(uart->tx_dma_channel); UART_SET_IER(uart, ETBEI); } static void bfin_serial_dma_rx_chars(struct bfin_serial_port *uart) { struct tty_struct *tty = uart->port.state->port.tty; int i, flg, status; status = UART_GET_LSR(uart); UART_CLEAR_LSR(uart); uart->port.icount.rx += CIRC_CNT(uart->rx_dma_buf.head, uart->rx_dma_buf.tail, UART_XMIT_SIZE); if (status & BI) { uart->port.icount.brk++; if (uart_handle_break(&uart->port)) goto dma_ignore_char; status &= ~(PE | FE); } if (status & PE) uart->port.icount.parity++; if (status & OE) uart->port.icount.overrun++; if (status & FE) uart->port.icount.frame++; status &= uart->port.read_status_mask; if (status & BI) flg = TTY_BREAK; else if (status & PE) flg = TTY_PARITY; else if (status & FE) flg = TTY_FRAME; else flg = TTY_NORMAL; for (i = uart->rx_dma_buf.tail; ; i++) { if (i >= UART_XMIT_SIZE) i = 0; if (i == uart->rx_dma_buf.head) break; if (!uart_handle_sysrq_char(&uart->port, uart->rx_dma_buf.buf[i])) uart_insert_char(&uart->port, status, OE, uart->rx_dma_buf.buf[i], flg); } dma_ignore_char: tty_flip_buffer_push(tty); } void bfin_serial_rx_dma_timeout(struct bfin_serial_port *uart) { int x_pos, pos; dma_disable_irq_nosync(uart->rx_dma_channel); spin_lock_bh(&uart->rx_lock); /* 2D DMA RX buffer ring is used. Because curr_y_count and * curr_x_count can't be read as an atomic operation, * curr_y_count should be read before curr_x_count. When * curr_x_count is read, curr_y_count may already indicate * next buffer line. But, the position calculated here is * still indicate the old line. The wrong position data may * be smaller than current buffer tail, which cause garbages * are received if it is not prohibit. 
*/ uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel); x_pos = get_dma_curr_xcount(uart->rx_dma_channel); uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows; if (uart->rx_dma_nrows == DMA_RX_YCOUNT || x_pos == 0) uart->rx_dma_nrows = 0; x_pos = DMA_RX_XCOUNT - x_pos; if (x_pos == DMA_RX_XCOUNT) x_pos = 0; pos = uart->rx_dma_nrows * DMA_RX_XCOUNT + x_pos; /* Ignore receiving data if new position is in the same line of * current buffer tail and small. */ if (pos > uart->rx_dma_buf.tail || uart->rx_dma_nrows < (uart->rx_dma_buf.tail/DMA_RX_XCOUNT)) { uart->rx_dma_buf.head = pos; bfin_serial_dma_rx_chars(uart); uart->rx_dma_buf.tail = uart->rx_dma_buf.head; } spin_unlock_bh(&uart->rx_lock); dma_enable_irq(uart->rx_dma_channel); mod_timer(&(uart->rx_dma_timer), jiffies + DMA_RX_FLUSH_JIFFIES); } static irqreturn_t bfin_serial_dma_tx_int(int irq, void *dev_id) { struct bfin_serial_port *uart = dev_id; struct circ_buf *xmit = &uart->port.state->xmit; #ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS if (uart->scts && !(bfin_serial_get_mctrl(&uart->port)&TIOCM_CTS)) { uart->scts = 0; uart_handle_cts_change(&uart->port, uart->scts); } #endif spin_lock(&uart->port.lock); if (!(get_dma_curr_irqstat(uart->tx_dma_channel)&DMA_RUN)) { disable_dma(uart->tx_dma_channel); clear_dma_irqstat(uart->tx_dma_channel); /* Anomaly notes: * 05000215 - we always clear ETBEI within last UART TX * interrupt to end a string. It is always set * when start a new tx. 
*/ UART_CLEAR_IER(uart, ETBEI); xmit->tail = (xmit->tail + uart->tx_count) & (UART_XMIT_SIZE - 1); uart->port.icount.tx += uart->tx_count; if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS) uart_write_wakeup(&uart->port); bfin_serial_dma_tx_chars(uart); } spin_unlock(&uart->port.lock); return IRQ_HANDLED; } static irqreturn_t bfin_serial_dma_rx_int(int irq, void *dev_id) { struct bfin_serial_port *uart = dev_id; unsigned short irqstat; int x_pos, pos; spin_lock(&uart->rx_lock); irqstat = get_dma_curr_irqstat(uart->rx_dma_channel); clear_dma_irqstat(uart->rx_dma_channel); uart->rx_dma_nrows = get_dma_curr_ycount(uart->rx_dma_channel); x_pos = get_dma_curr_xcount(uart->rx_dma_channel); uart->rx_dma_nrows = DMA_RX_YCOUNT - uart->rx_dma_nrows; if (uart->rx_dma_nrows == DMA_RX_YCOUNT || x_pos == 0) uart->rx_dma_nrows = 0; pos = uart->rx_dma_nrows * DMA_RX_XCOUNT; if (pos > uart->rx_dma_buf.tail || uart->rx_dma_nrows < (uart->rx_dma_buf.tail/DMA_RX_XCOUNT)) { uart->rx_dma_buf.head = pos; bfin_serial_dma_rx_chars(uart); uart->rx_dma_buf.tail = uart->rx_dma_buf.head; } spin_unlock(&uart->rx_lock); return IRQ_HANDLED; } #endif /* * Return TIOCSER_TEMT when transmitter is not busy. 
*/ static unsigned int bfin_serial_tx_empty(struct uart_port *port) { struct bfin_serial_port *uart = (struct bfin_serial_port *)port; unsigned short lsr; lsr = UART_GET_LSR(uart); if (lsr & TEMT) return TIOCSER_TEMT; else return 0; } static void bfin_serial_break_ctl(struct uart_port *port, int break_state) { struct bfin_serial_port *uart = (struct bfin_serial_port *)port; u16 lcr = UART_GET_LCR(uart); if (break_state) lcr |= SB; else lcr &= ~SB; UART_PUT_LCR(uart, lcr); SSYNC(); } static int bfin_serial_startup(struct uart_port *port) { struct bfin_serial_port *uart = (struct bfin_serial_port *)port; #ifdef CONFIG_SERIAL_BFIN_DMA dma_addr_t dma_handle; if (request_dma(uart->rx_dma_channel, "BFIN_UART_RX") < 0) { printk(KERN_NOTICE "Unable to attach Blackfin UART RX DMA channel\n"); return -EBUSY; } if (request_dma(uart->tx_dma_channel, "BFIN_UART_TX") < 0) { printk(KERN_NOTICE "Unable to attach Blackfin UART TX DMA channel\n"); free_dma(uart->rx_dma_channel); return -EBUSY; } set_dma_callback(uart->rx_dma_channel, bfin_serial_dma_rx_int, uart); set_dma_callback(uart->tx_dma_channel, bfin_serial_dma_tx_int, uart); uart->rx_dma_buf.buf = (unsigned char *)dma_alloc_coherent(NULL, PAGE_SIZE, &dma_handle, GFP_DMA); uart->rx_dma_buf.head = 0; uart->rx_dma_buf.tail = 0; uart->rx_dma_nrows = 0; set_dma_config(uart->rx_dma_channel, set_bfin_dma_config(DIR_WRITE, DMA_FLOW_AUTO, INTR_ON_ROW, DIMENSION_2D, DATA_SIZE_8, DMA_SYNC_RESTART)); set_dma_x_count(uart->rx_dma_channel, DMA_RX_XCOUNT); set_dma_x_modify(uart->rx_dma_channel, 1); set_dma_y_count(uart->rx_dma_channel, DMA_RX_YCOUNT); set_dma_y_modify(uart->rx_dma_channel, 1); set_dma_start_addr(uart->rx_dma_channel, (unsigned long)uart->rx_dma_buf.buf); enable_dma(uart->rx_dma_channel); uart->rx_dma_timer.data = (unsigned long)(uart); uart->rx_dma_timer.function = (void *)bfin_serial_rx_dma_timeout; uart->rx_dma_timer.expires = jiffies + DMA_RX_FLUSH_JIFFIES; add_timer(&(uart->rx_dma_timer)); #else # if 
defined(CONFIG_KGDB_SERIAL_CONSOLE) || \ defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE) if (kgdboc_port_line == uart->port.line && kgdboc_break_enabled) kgdboc_break_enabled = 0; else { # endif if (request_irq(uart->port.irq, bfin_serial_rx_int, IRQF_DISABLED, "BFIN_UART_RX", uart)) { printk(KERN_NOTICE "Unable to attach BlackFin UART RX interrupt\n"); return -EBUSY; } if (request_irq (uart->port.irq+1, bfin_serial_tx_int, IRQF_DISABLED, "BFIN_UART_TX", uart)) { printk(KERN_NOTICE "Unable to attach BlackFin UART TX interrupt\n"); free_irq(uart->port.irq, uart); return -EBUSY; } # ifdef CONFIG_BF54x { /* * UART2 and UART3 on BF548 share interrupt PINs and DMA * controllers with SPORT2 and SPORT3. UART rx and tx * interrupts are generated in PIO mode only when configure * their peripheral mapping registers properly, which means * request corresponding DMA channels in PIO mode as well. */ unsigned uart_dma_ch_rx, uart_dma_ch_tx; switch (uart->port.irq) { case IRQ_UART3_RX: uart_dma_ch_rx = CH_UART3_RX; uart_dma_ch_tx = CH_UART3_TX; break; case IRQ_UART2_RX: uart_dma_ch_rx = CH_UART2_RX; uart_dma_ch_tx = CH_UART2_TX; break; default: uart_dma_ch_rx = uart_dma_ch_tx = 0; break; }; if (uart_dma_ch_rx && request_dma(uart_dma_ch_rx, "BFIN_UART_RX") < 0) { printk(KERN_NOTICE"Fail to attach UART interrupt\n"); free_irq(uart->port.irq, uart); free_irq(uart->port.irq + 1, uart); return -EBUSY; } if (uart_dma_ch_tx && request_dma(uart_dma_ch_tx, "BFIN_UART_TX") < 0) { printk(KERN_NOTICE "Fail to attach UART interrupt\n"); free_dma(uart_dma_ch_rx); free_irq(uart->port.irq, uart); free_irq(uart->port.irq + 1, uart); return -EBUSY; } } # endif # if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \ defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE) } # endif #endif #ifdef CONFIG_SERIAL_BFIN_CTSRTS if (uart->cts_pin >= 0) { if (request_irq(gpio_to_irq(uart->cts_pin), bfin_serial_mctrl_cts_int, IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING | IRQF_DISABLED, "BFIN_UART_CTS", uart)) { uart->cts_pin = -1; 
pr_info("Unable to attach BlackFin UART CTS interrupt. So, disable it.\n"); } } if (uart->rts_pin >= 0) { gpio_direction_output(uart->rts_pin, 0); } #endif #ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS if (uart->cts_pin >= 0 && request_irq(uart->status_irq, bfin_serial_mctrl_cts_int, IRQF_DISABLED, "BFIN_UART_MODEM_STATUS", uart)) { uart->cts_pin = -1; pr_info("Unable to attach BlackFin UART Modem Status interrupt.\n"); } /* CTS RTS PINs are negative assertive. */ UART_PUT_MCR(uart, ACTS); UART_SET_IER(uart, EDSSI); #endif UART_SET_IER(uart, ERBFI); return 0; } static void bfin_serial_shutdown(struct uart_port *port) { struct bfin_serial_port *uart = (struct bfin_serial_port *)port; #ifdef CONFIG_SERIAL_BFIN_DMA disable_dma(uart->tx_dma_channel); free_dma(uart->tx_dma_channel); disable_dma(uart->rx_dma_channel); free_dma(uart->rx_dma_channel); del_timer(&(uart->rx_dma_timer)); dma_free_coherent(NULL, PAGE_SIZE, uart->rx_dma_buf.buf, 0); #else #ifdef CONFIG_BF54x switch (uart->port.irq) { case IRQ_UART3_RX: free_dma(CH_UART3_RX); free_dma(CH_UART3_TX); break; case IRQ_UART2_RX: free_dma(CH_UART2_RX); free_dma(CH_UART2_TX); break; default: break; }; #endif free_irq(uart->port.irq, uart); free_irq(uart->port.irq+1, uart); #endif #ifdef CONFIG_SERIAL_BFIN_CTSRTS if (uart->cts_pin >= 0) free_irq(gpio_to_irq(uart->cts_pin), uart); #endif #ifdef CONFIG_SERIAL_BFIN_HARD_CTSRTS if (uart->cts_pin >= 0) free_irq(uart->status_irq, uart); #endif } static void bfin_serial_set_termios(struct uart_port *port, struct ktermios *termios, struct ktermios *old) { struct bfin_serial_port *uart = (struct bfin_serial_port *)port; unsigned long flags; unsigned int baud, quot; unsigned short val, ier, lcr = 0; switch (termios->c_cflag & CSIZE) { case CS8: lcr = WLS(8); break; case CS7: lcr = WLS(7); break; case CS6: lcr = WLS(6); break; case CS5: lcr = WLS(5); break; default: printk(KERN_ERR "%s: word lengh not supported\n", __func__); } /* Anomaly notes: * 05000231 - STOP bit is always set to 1 
whatever the user is set. */ if (termios->c_cflag & CSTOPB) { if (ANOMALY_05000231) printk(KERN_WARNING "STOP bits other than 1 is not " "supported in case of anomaly 05000231.\n"); else lcr |= STB; } if (termios->c_cflag & PARENB) lcr |= PEN; if (!(termios->c_cflag & PARODD)) lcr |= EPS; if (termios->c_cflag & CMSPAR) lcr |= STP; spin_lock_irqsave(&uart->port.lock, flags); port->read_status_mask = OE; if (termios->c_iflag & INPCK) port->read_status_mask |= (FE | PE); if (termios->c_iflag & (BRKINT | PARMRK)) port->read_status_mask |= BI; /* * Characters to ignore */ port->ignore_status_mask = 0; if (termios->c_iflag & IGNPAR) port->ignore_status_mask |= FE | PE; if (termios->c_iflag & IGNBRK) { port->ignore_status_mask |= BI; /* * If we're ignoring parity and break indicators, * ignore overruns too (for real raw support). */ if (termios->c_iflag & IGNPAR) port->ignore_status_mask |= OE; } baud = uart_get_baud_rate(port, termios, old, 0, port->uartclk/16); quot = uart_get_divisor(port, baud); /* If discipline is not IRDA, apply ANOMALY_05000230 */ if (termios->c_line != N_IRDA) quot -= ANOMALY_05000230; UART_SET_ANOMALY_THRESHOLD(uart, USEC_PER_SEC / baud * 15); /* Disable UART */ ier = UART_GET_IER(uart); UART_DISABLE_INTS(uart); /* Set DLAB in LCR to Access DLL and DLH */ UART_SET_DLAB(uart); UART_PUT_DLL(uart, quot & 0xFF); UART_PUT_DLH(uart, (quot >> 8) & 0xFF); SSYNC(); /* Clear DLAB in LCR to Access THR RBR IER */ UART_CLEAR_DLAB(uart); UART_PUT_LCR(uart, lcr); /* Enable UART */ UART_ENABLE_INTS(uart, ier); val = UART_GET_GCTL(uart); val |= UCEN; UART_PUT_GCTL(uart, val); /* Port speed changed, update the per-port timeout. */ uart_update_timeout(port, termios->c_cflag, baud); spin_unlock_irqrestore(&uart->port.lock, flags); } static const char *bfin_serial_type(struct uart_port *port) { struct bfin_serial_port *uart = (struct bfin_serial_port *)port; return uart->port.type == PORT_BFIN ? 
"BFIN-UART" : NULL; } /* * Release the memory region(s) being used by 'port'. */ static void bfin_serial_release_port(struct uart_port *port) { } /* * Request the memory region(s) being used by 'port'. */ static int bfin_serial_request_port(struct uart_port *port) { return 0; } /* * Configure/autoconfigure the port. */ static void bfin_serial_config_port(struct uart_port *port, int flags) { struct bfin_serial_port *uart = (struct bfin_serial_port *)port; if (flags & UART_CONFIG_TYPE && bfin_serial_request_port(&uart->port) == 0) uart->port.type = PORT_BFIN; } /* * Verify the new serial_struct (for TIOCSSERIAL). * The only change we allow are to the flags and type, and * even then only between PORT_BFIN and PORT_UNKNOWN */ static int bfin_serial_verify_port(struct uart_port *port, struct serial_struct *ser) { return 0; } /* * Enable the IrDA function if tty->ldisc.num is N_IRDA. * In other cases, disable IrDA function. */ static void bfin_serial_set_ldisc(struct uart_port *port, int ld) { struct bfin_serial_port *uart = (struct bfin_serial_port *)port; unsigned short val; switch (ld) { case N_IRDA: val = UART_GET_GCTL(uart); val |= (IREN | RPOLC); UART_PUT_GCTL(uart, val); break; default: val = UART_GET_GCTL(uart); val &= ~(IREN | RPOLC); UART_PUT_GCTL(uart, val); } } static void bfin_serial_reset_irda(struct uart_port *port) { struct bfin_serial_port *uart = (struct bfin_serial_port *)port; unsigned short val; val = UART_GET_GCTL(uart); val &= ~(IREN | RPOLC); UART_PUT_GCTL(uart, val); SSYNC(); val |= (IREN | RPOLC); UART_PUT_GCTL(uart, val); SSYNC(); } #ifdef CONFIG_CONSOLE_POLL /* Anomaly notes: * 05000099 - Because we only use THRE in poll_put and DR in poll_get, * losing other bits of UART_LSR is not a problem here. 
 */
/* Busy-wait until the TX holding register is empty, then emit one byte.
 * Used by the kgdb/console polling path, so it must not sleep or take locks.
 */
static void bfin_serial_poll_put_char(struct uart_port *port, unsigned char chr)
{
	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;

	/* Spin until the transmitter can accept a character (THRE set). */
	while (!(UART_GET_LSR(uart) & THRE))
		cpu_relax();

	/* DLAB must be clear so the write hits THR, not the divisor latch. */
	UART_CLEAR_DLAB(uart);
	UART_PUT_CHAR(uart, (unsigned char)chr);
}

/* Busy-wait for a received byte and return it (polling console path). */
static int bfin_serial_poll_get_char(struct uart_port *port)
{
	struct bfin_serial_port *uart = (struct bfin_serial_port *)port;
	unsigned char chr;

	/* Spin until Data Ready is flagged in the line status register. */
	while (!(UART_GET_LSR(uart) & DR))
		cpu_relax();

	/* DLAB must be clear so the read hits RBR, not the divisor latch. */
	UART_CLEAR_DLAB(uart);
	chr = UART_GET_CHAR(uart);

	return chr;
}
#endif

#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
/* Tear the port down only if kgdboc had actually claimed it. */
static void bfin_kgdboc_port_shutdown(struct uart_port *port)
{
	if (kgdboc_break_enabled) {
		kgdboc_break_enabled = 0;
		bfin_serial_shutdown(port);
	}
}

/* Record which line kgdboc owns; break handling is enabled only when
 * the normal startup path succeeds (bfin_serial_startup returns 0).
 */
static int bfin_kgdboc_port_startup(struct uart_port *port)
{
	kgdboc_port_line = port->line;
	kgdboc_break_enabled = !bfin_serial_startup(port);
	return 0;
}
#endif

/* uart_ops vtable wiring this driver into the serial core. */
static struct uart_ops bfin_serial_pops = {
	.tx_empty	= bfin_serial_tx_empty,
	.set_mctrl	= bfin_serial_set_mctrl,
	.get_mctrl	= bfin_serial_get_mctrl,
	.stop_tx	= bfin_serial_stop_tx,
	.start_tx	= bfin_serial_start_tx,
	.stop_rx	= bfin_serial_stop_rx,
	.enable_ms	= bfin_serial_enable_ms,
	.break_ctl	= bfin_serial_break_ctl,
	.startup	= bfin_serial_startup,
	.shutdown	= bfin_serial_shutdown,
	.set_termios	= bfin_serial_set_termios,
	.set_ldisc	= bfin_serial_set_ldisc,
	.type		= bfin_serial_type,
	.release_port	= bfin_serial_release_port,
	.request_port	= bfin_serial_request_port,
	.config_port	= bfin_serial_config_port,
	.verify_port	= bfin_serial_verify_port,
#if defined(CONFIG_KGDB_SERIAL_CONSOLE) || \
	defined(CONFIG_KGDB_SERIAL_CONSOLE_MODULE)
	.kgdboc_port_startup	= bfin_kgdboc_port_startup,
	.kgdboc_port_shutdown	= bfin_kgdboc_port_shutdown,
#endif
#ifdef CONFIG_CONSOLE_POLL
	.poll_put_char	= bfin_serial_poll_put_char,
	.poll_get_char	= bfin_serial_poll_get_char,
#endif
};

#if defined(CONFIG_SERIAL_BFIN_CONSOLE) || defined(CONFIG_EARLY_PRINTK)
/*
 * If the port was
already initialised (eg, by a boot loader), * try to determine the current setup. */ static void __init bfin_serial_console_get_options(struct bfin_serial_port *uart, int *baud, int *parity, int *bits) { unsigned short status; status = UART_GET_IER(uart) & (ERBFI | ETBEI); if (status == (ERBFI | ETBEI)) { /* ok, the port was enabled */ u16 lcr, dlh, dll; lcr = UART_GET_LCR(uart); *parity = 'n'; if (lcr & PEN) { if (lcr & EPS) *parity = 'e'; else *parity = 'o'; } switch (lcr & 0x03) { case 0: *bits = 5; break; case 1: *bits = 6; break; case 2: *bits = 7; break; case 3: *bits = 8; break; } /* Set DLAB in LCR to Access DLL and DLH */ UART_SET_DLAB(uart); dll = UART_GET_DLL(uart); dlh = UART_GET_DLH(uart); /* Clear DLAB in LCR to Access THR RBR IER */ UART_CLEAR_DLAB(uart); *baud = get_sclk() / (16*(dll | dlh << 8)); } pr_debug("%s:baud = %d, parity = %c, bits= %d\n", __func__, *baud, *parity, *bits); } static struct uart_driver bfin_serial_reg; static void bfin_serial_console_putchar(struct uart_port *port, int ch) { struct bfin_serial_port *uart = (struct bfin_serial_port *)port; while (!(UART_GET_LSR(uart) & THRE)) barrier(); UART_PUT_CHAR(uart, ch); } #endif /* defined (CONFIG_SERIAL_BFIN_CONSOLE) || defined (CONFIG_EARLY_PRINTK) */ #ifdef CONFIG_SERIAL_BFIN_CONSOLE #define CLASS_BFIN_CONSOLE "bfin-console" /* * Interrupts are disabled on entering */ static void bfin_serial_console_write(struct console *co, const char *s, unsigned int count) { struct bfin_serial_port *uart = bfin_serial_ports[co->index]; unsigned long flags; spin_lock_irqsave(&uart->port.lock, flags); uart_console_write(&uart->port, s, count, bfin_serial_console_putchar); spin_unlock_irqrestore(&uart->port.lock, flags); } static int __init bfin_serial_console_setup(struct console *co, char *options) { struct bfin_serial_port *uart; int baud = 57600; int bits = 8; int parity = 'n'; # if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \ defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS) int flow = 'r'; # else int flow = 
'n'; # endif /* * Check whether an invalid uart number has been specified, and * if so, search for the first available port that does have * console support. */ if (co->index < 0 || co->index >= BFIN_UART_NR_PORTS) return -ENODEV; uart = bfin_serial_ports[co->index]; if (!uart) return -ENODEV; if (options) uart_parse_options(options, &baud, &parity, &bits, &flow); else bfin_serial_console_get_options(uart, &baud, &parity, &bits); return uart_set_options(&uart->port, co, baud, parity, bits, flow); } static struct console bfin_serial_console = { .name = BFIN_SERIAL_DEV_NAME, .write = bfin_serial_console_write, .device = uart_console_device, .setup = bfin_serial_console_setup, .flags = CON_PRINTBUFFER, .index = -1, .data = &bfin_serial_reg, }; #define BFIN_SERIAL_CONSOLE &bfin_serial_console #else #define BFIN_SERIAL_CONSOLE NULL #endif /* CONFIG_SERIAL_BFIN_CONSOLE */ #ifdef CONFIG_EARLY_PRINTK static struct bfin_serial_port bfin_earlyprintk_port; #define CLASS_BFIN_EARLYPRINTK "bfin-earlyprintk" /* * Interrupts are disabled on entering */ static void bfin_earlyprintk_console_write(struct console *co, const char *s, unsigned int count) { unsigned long flags; if (bfin_earlyprintk_port.port.line != co->index) return; spin_lock_irqsave(&bfin_earlyprintk_port.port.lock, flags); uart_console_write(&bfin_earlyprintk_port.port, s, count, bfin_serial_console_putchar); spin_unlock_irqrestore(&bfin_earlyprintk_port.port.lock, flags); } /* * This should have a .setup or .early_setup in it, but then things get called * without the command line options, and the baud rate gets messed up - so * don't let the common infrastructure play with things. 
(see calls to setup
 * & earlysetup in ./kernel/printk.c:register_console()
 */
static struct __initdata console bfin_early_serial_console = {
	.name = "early_BFuart",
	.write = bfin_earlyprintk_console_write,
	.device = uart_console_device,
	.flags = CON_PRINTBUFFER,
	.index = -1,
	.data = &bfin_serial_reg,
};
#endif

/* Driver registration record shared by all ports and the console. */
static struct uart_driver bfin_serial_reg = {
	.owner			= THIS_MODULE,
	.driver_name		= DRIVER_NAME,
	.dev_name		= BFIN_SERIAL_DEV_NAME,
	.major			= BFIN_SERIAL_MAJOR,
	.minor			= BFIN_SERIAL_MINOR,
	.nr			= BFIN_UART_NR_PORTS,
	.cons			= BFIN_SERIAL_CONSOLE,
};

/* PM suspend hook: delegate to the serial core for this port. */
static int bfin_serial_suspend(struct platform_device *pdev, pm_message_t state)
{
	struct bfin_serial_port *uart = platform_get_drvdata(pdev);

	return uart_suspend_port(&bfin_serial_reg, &uart->port);
}

/* PM resume hook: delegate to the serial core for this port. */
static int bfin_serial_resume(struct platform_device *pdev)
{
	struct bfin_serial_port *uart = platform_get_drvdata(pdev);

	return uart_resume_port(&bfin_serial_reg, &uart->port);
}

/*
 * Allocate and wire up one UART port described by a platform device.
 * The per-port struct is cached in bfin_serial_ports[] so a re-probe
 * (e.g. after the early console already set the port up) skips
 * re-allocation and re-requests only what is still missing.
 */
static int bfin_serial_probe(struct platform_device *pdev)
{
	struct resource *res;
	struct bfin_serial_port *uart = NULL;
	int ret = 0;

	if (pdev->id < 0 || pdev->id >= BFIN_UART_NR_PORTS) {
		dev_err(&pdev->dev, "Wrong bfin uart platform device id.\n");
		return -ENOENT;
	}

	if (bfin_serial_ports[pdev->id] == NULL) {

		uart = kzalloc(sizeof(*uart), GFP_KERNEL);
		if (!uart) {
			dev_err(&pdev->dev,
				"fail to malloc bfin_serial_port\n");
			return -ENOMEM;
		}
		bfin_serial_ports[pdev->id] = uart;

#ifdef CONFIG_EARLY_PRINTK
		if (!(bfin_earlyprintk_port.port.membase
			&& bfin_earlyprintk_port.port.line == pdev->id)) {
			/*
			 * If the peripheral PINs of current port is allocated
			 * in earlyprintk probe stage, don't do it again.
*/ #endif ret = peripheral_request_list( (unsigned short *)pdev->dev.platform_data, DRIVER_NAME); if (ret) { dev_err(&pdev->dev, "fail to request bfin serial peripherals\n"); goto out_error_free_mem; } #ifdef CONFIG_EARLY_PRINTK } #endif spin_lock_init(&uart->port.lock); uart->port.uartclk = get_sclk(); uart->port.fifosize = BFIN_UART_TX_FIFO_SIZE; uart->port.ops = &bfin_serial_pops; uart->port.line = pdev->id; uart->port.iotype = UPIO_MEM; uart->port.flags = UPF_BOOT_AUTOCONF; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n"); ret = -ENOENT; goto out_error_free_peripherals; } uart->port.membase = ioremap(res->start, res->end - res->start); if (!uart->port.membase) { dev_err(&pdev->dev, "Cannot map uart IO\n"); ret = -ENXIO; goto out_error_free_peripherals; } uart->port.mapbase = res->start; uart->port.irq = platform_get_irq(pdev, 0); if (uart->port.irq < 0) { dev_err(&pdev->dev, "No uart RX/TX IRQ specified\n"); ret = -ENOENT; goto out_error_unmap; } uart->status_irq = platform_get_irq(pdev, 1); if (uart->status_irq < 0) { dev_err(&pdev->dev, "No uart status IRQ specified\n"); ret = -ENOENT; goto out_error_unmap; } #ifdef CONFIG_SERIAL_BFIN_DMA spin_lock_init(&uart->rx_lock); uart->tx_done = 1; uart->tx_count = 0; res = platform_get_resource(pdev, IORESOURCE_DMA, 0); if (res == NULL) { dev_err(&pdev->dev, "No uart TX DMA channel specified\n"); ret = -ENOENT; goto out_error_unmap; } uart->tx_dma_channel = res->start; res = platform_get_resource(pdev, IORESOURCE_DMA, 1); if (res == NULL) { dev_err(&pdev->dev, "No uart RX DMA channel specified\n"); ret = -ENOENT; goto out_error_unmap; } uart->rx_dma_channel = res->start; init_timer(&(uart->rx_dma_timer)); #endif #if defined(CONFIG_SERIAL_BFIN_CTSRTS) || \ defined(CONFIG_SERIAL_BFIN_HARD_CTSRTS) res = platform_get_resource(pdev, IORESOURCE_IO, 0); if (res == NULL) uart->cts_pin = -1; else uart->cts_pin = res->start; res = 
platform_get_resource(pdev, IORESOURCE_IO, 1); if (res == NULL) uart->rts_pin = -1; else uart->rts_pin = res->start; # if defined(CONFIG_SERIAL_BFIN_CTSRTS) if (uart->rts_pin >= 0) gpio_request(uart->rts_pin, DRIVER_NAME); # endif #endif } #ifdef CONFIG_SERIAL_BFIN_CONSOLE if (!is_early_platform_device(pdev)) { #endif uart = bfin_serial_ports[pdev->id]; uart->port.dev = &pdev->dev; dev_set_drvdata(&pdev->dev, uart); ret = uart_add_one_port(&bfin_serial_reg, &uart->port); #ifdef CONFIG_SERIAL_BFIN_CONSOLE } #endif if (!ret) return 0; if (uart) { out_error_unmap: iounmap(uart->port.membase); out_error_free_peripherals: peripheral_free_list( (unsigned short *)pdev->dev.platform_data); out_error_free_mem: kfree(uart); bfin_serial_ports[pdev->id] = NULL; } return ret; } static int __devexit bfin_serial_remove(struct platform_device *pdev) { struct bfin_serial_port *uart = platform_get_drvdata(pdev); dev_set_drvdata(&pdev->dev, NULL); if (uart) { uart_remove_one_port(&bfin_serial_reg, &uart->port); #ifdef CONFIG_SERIAL_BFIN_CTSRTS if (uart->rts_pin >= 0) gpio_free(uart->rts_pin); #endif iounmap(uart->port.membase); peripheral_free_list( (unsigned short *)pdev->dev.platform_data); kfree(uart); bfin_serial_ports[pdev->id] = NULL; } return 0; } static struct platform_driver bfin_serial_driver = { .probe = bfin_serial_probe, .remove = __devexit_p(bfin_serial_remove), .suspend = bfin_serial_suspend, .resume = bfin_serial_resume, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, }; #if defined(CONFIG_SERIAL_BFIN_CONSOLE) static __initdata struct early_platform_driver early_bfin_serial_driver = { .class_str = CLASS_BFIN_CONSOLE, .pdrv = &bfin_serial_driver, .requested_id = EARLY_PLATFORM_ID_UNSET, }; static int __init bfin_serial_rs_console_init(void) { early_platform_driver_register(&early_bfin_serial_driver, DRIVER_NAME); early_platform_driver_probe(CLASS_BFIN_CONSOLE, BFIN_UART_NR_PORTS, 0); register_console(&bfin_serial_console); return 0; } 
console_initcall(bfin_serial_rs_console_init); #endif #ifdef CONFIG_EARLY_PRINTK /* * Memory can't be allocated dynamically during earlyprink init stage. * So, do individual probe for earlyprink with a static uart port variable. */ static int bfin_earlyprintk_probe(struct platform_device *pdev) { struct resource *res; int ret; if (pdev->id < 0 || pdev->id >= BFIN_UART_NR_PORTS) { dev_err(&pdev->dev, "Wrong earlyprintk platform device id.\n"); return -ENOENT; } ret = peripheral_request_list( (unsigned short *)pdev->dev.platform_data, DRIVER_NAME); if (ret) { dev_err(&pdev->dev, "fail to request bfin serial peripherals\n"); return ret; } res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (res == NULL) { dev_err(&pdev->dev, "Cannot get IORESOURCE_MEM\n"); ret = -ENOENT; goto out_error_free_peripherals; } bfin_earlyprintk_port.port.membase = ioremap(res->start, res->end - res->start); if (!bfin_earlyprintk_port.port.membase) { dev_err(&pdev->dev, "Cannot map uart IO\n"); ret = -ENXIO; goto out_error_free_peripherals; } bfin_earlyprintk_port.port.mapbase = res->start; bfin_earlyprintk_port.port.line = pdev->id; bfin_earlyprintk_port.port.uartclk = get_sclk(); bfin_earlyprintk_port.port.fifosize = BFIN_UART_TX_FIFO_SIZE; spin_lock_init(&bfin_earlyprintk_port.port.lock); return 0; out_error_free_peripherals: peripheral_free_list( (unsigned short *)pdev->dev.platform_data); return ret; } static struct platform_driver bfin_earlyprintk_driver = { .probe = bfin_earlyprintk_probe, .driver = { .name = DRIVER_NAME, .owner = THIS_MODULE, }, }; static __initdata struct early_platform_driver early_bfin_earlyprintk_driver = { .class_str = CLASS_BFIN_EARLYPRINTK, .pdrv = &bfin_earlyprintk_driver, .requested_id = EARLY_PLATFORM_ID_UNSET, }; struct console __init *bfin_earlyserial_init(unsigned int port, unsigned int cflag) { struct ktermios t; char port_name[20]; if (port < 0 || port >= BFIN_UART_NR_PORTS) return NULL; /* * Only probe resource of the given port in earlyprintk 
boot arg. * The expected port id should be indicated in port name string. */ snprintf(port_name, 20, DRIVER_NAME ".%d", port); early_platform_driver_register(&early_bfin_earlyprintk_driver, port_name); early_platform_driver_probe(CLASS_BFIN_EARLYPRINTK, 1, 0); if (!bfin_earlyprintk_port.port.membase) return NULL; #ifdef CONFIG_SERIAL_BFIN_CONSOLE /* * If we are using early serial, don't let the normal console rewind * log buffer, since that causes things to be printed multiple times */ bfin_serial_console.flags &= ~CON_PRINTBUFFER; #endif bfin_early_serial_console.index = port; t.c_cflag = cflag; t.c_iflag = 0; t.c_oflag = 0; t.c_lflag = ICANON; t.c_line = port; bfin_serial_set_termios(&bfin_earlyprintk_port.port, &t, &t); return &bfin_early_serial_console; } #endif /* CONFIG_EARLY_PRINTK */ static int __init bfin_serial_init(void) { int ret; pr_info("Blackfin serial driver\n"); ret = uart_register_driver(&bfin_serial_reg); if (ret) { pr_err("failed to register %s:%d\n", bfin_serial_reg.driver_name, ret); } ret = platform_driver_register(&bfin_serial_driver); if (ret) { pr_err("fail to register bfin uart\n"); uart_unregister_driver(&bfin_serial_reg); } return ret; } static void __exit bfin_serial_exit(void) { platform_driver_unregister(&bfin_serial_driver); uart_unregister_driver(&bfin_serial_reg); } module_init(bfin_serial_init); module_exit(bfin_serial_exit); MODULE_AUTHOR("Sonic Zhang, Aubrey Li"); MODULE_DESCRIPTION("Blackfin generic serial port driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS_CHARDEV_MAJOR(BFIN_SERIAL_MAJOR); MODULE_ALIAS("platform:bfin-uart");
gpl-2.0
ztemt/A465_5.1_kernel
arch/arm/mach-imx/mach-mx21ads.c
2639
8238
/* * Copyright (C) 2000 Deep Blue Solutions Ltd * Copyright (C) 2002 Shane Nay (shane@minirl.com) * Copyright 2006-2007 Freescale Semiconductor, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #include <linux/platform_device.h> #include <linux/mtd/mtd.h> #include <linux/mtd/physmap.h> #include <linux/gpio.h> #include <asm/mach-types.h> #include <asm/mach/arch.h> #include <asm/mach/time.h> #include <asm/mach/map.h> #include "common.h" #include "devices-imx21.h" #include "hardware.h" #include "iomux-mx21.h" /* * Memory-mapped I/O on MX21ADS base board */ #define MX21ADS_MMIO_BASE_ADDR 0xf5000000 #define MX21ADS_MMIO_SIZE 0xc00000 #define MX21ADS_REG_ADDR(offset) (void __force __iomem *) \ (MX21ADS_MMIO_BASE_ADDR + (offset)) #define MX21ADS_CS8900A_MMIO_SIZE 0x200000 #define MX21ADS_CS8900A_IRQ_GPIO IMX_GPIO_NR(5, 11) #define MX21ADS_ST16C255_IOBASE_REG MX21ADS_REG_ADDR(0x200000) #define MX21ADS_VERSION_REG MX21ADS_REG_ADDR(0x400000) #define MX21ADS_IO_REG MX21ADS_REG_ADDR(0x800000) /* MX21ADS_IO_REG bit definitions */ #define MX21ADS_IO_SD_WP 0x0001 /* read */ #define MX21ADS_IO_TP6 0x0001 /* write */ #define MX21ADS_IO_SW_SEL 0x0002 /* read */ #define MX21ADS_IO_TP7 0x0002 /* write */ #define MX21ADS_IO_RESET_E_UART 0x0004 #define MX21ADS_IO_RESET_BASE 0x0008 #define MX21ADS_IO_CSI_CTL2 0x0010 #define MX21ADS_IO_CSI_CTL1 0x0020 #define MX21ADS_IO_CSI_CTL0 0x0040 #define MX21ADS_IO_UART1_EN 0x0080 #define MX21ADS_IO_UART4_EN 0x0100 #define MX21ADS_IO_LCDON 0x0200 #define MX21ADS_IO_IRDA_EN 0x0400 
/* MX21ADS_IO_REG bit definitions (continued) */
#define MX21ADS_IO_IRDA_FIR_SEL 0x0800
#define MX21ADS_IO_IRDA_MD0_B   0x1000
#define MX21ADS_IO_IRDA_MD1     0x2000
#define MX21ADS_IO_LED4_ON      0x4000
#define MX21ADS_IO_LED3_ON      0x8000

/*
 * Pad-mux table for every on-board function.  Applied once at board
 * init via mxc_gpio_setup_multiple_pins(); entries are SoC pin-function
 * encodings from iomux-mx21.h.
 */
static const int mx21ads_pins[] __initconst = {

	/* CS8900A: plain GPIO input used as the Ethernet IRQ line (port E, bit 11) */
	(GPIO_PORTE | GPIO_GPIO | GPIO_IN | 11),

	/* UART1 */
	PE12_PF_UART1_TXD,
	PE13_PF_UART1_RXD,
	PE14_PF_UART1_CTS,
	PE15_PF_UART1_RTS,

	/* UART3 (IrDA) - only TXD and RXD */
	PE8_PF_UART3_TXD,
	PE9_PF_UART3_RXD,

	/* UART4 */
	PB26_AF_UART4_RTS,
	PB28_AF_UART4_TXD,
	PB29_AF_UART4_CTS,
	PB31_AF_UART4_RXD,

	/* LCDC */
	PA5_PF_LSCLK,
	PA6_PF_LD0,
	PA7_PF_LD1,
	PA8_PF_LD2,
	PA9_PF_LD3,
	PA10_PF_LD4,
	PA11_PF_LD5,
	PA12_PF_LD6,
	PA13_PF_LD7,
	PA14_PF_LD8,
	PA15_PF_LD9,
	PA16_PF_LD10,
	PA17_PF_LD11,
	PA18_PF_LD12,
	PA19_PF_LD13,
	PA20_PF_LD14,
	PA21_PF_LD15,
	PA22_PF_LD16,
	PA24_PF_REV,		/* Sharp panel dedicated signal */
	PA25_PF_CLS,		/* Sharp panel dedicated signal */
	PA26_PF_PS,		/* Sharp panel dedicated signal */
	PA27_PF_SPL_SPR,	/* Sharp panel dedicated signal */
	PA28_PF_HSYNC,
	PA29_PF_VSYNC,
	PA30_PF_CONTRAST,
	PA31_PF_OE_ACD,

	/* MMC/SDHC */
	PE18_PF_SD1_D0,
	PE19_PF_SD1_D1,
	PE20_PF_SD1_D2,
	PE21_PF_SD1_D3,
	PE22_PF_SD1_CMD,
	PE23_PF_SD1_CLK,

	/* NFC */
	PF0_PF_NRFB,
	PF1_PF_NFCE,
	PF2_PF_NFWP,
	PF3_PF_NFCLE,
	PF4_PF_NFALE,
	PF5_PF_NFRE,
	PF6_PF_NFWE,
	PF7_PF_NFIO0,
	PF8_PF_NFIO1,
	PF9_PF_NFIO2,
	PF10_PF_NFIO3,
	PF11_PF_NFIO4,
	PF12_PF_NFIO5,
	PF13_PF_NFIO6,
	PF14_PF_NFIO7,
};

/* ADS's NOR flash: 2x AM29BDS128HE9VKI on 32-bit bus */
static struct physmap_flash_data mx21ads_flash_data = {
	.width = 4,
};

/* NOR flash lives on chip-select 0; 32 MiB window */
static struct resource mx21ads_flash_resource = {
	.start = MX21_CS0_BASE_ADDR,
	.end = MX21_CS0_BASE_ADDR + 0x02000000 - 1,
	.flags = IORESOURCE_MEM,
};

static struct platform_device mx21ads_nor_mtd_device = {
	.name = "physmap-flash",
	.id = 0,
	.dev = {
		.platform_data = &mx21ads_flash_data,
	},
	.num_resources = 1,
	.resource = &mx21ads_flash_resource,
};

/*
 * CS8900A Ethernet on chip-select 1.  Marked __initdata because
 * platform_device_register_full() copies the resource table.
 */
static struct resource mx21ads_cs8900_resources[] __initdata = {
	DEFINE_RES_MEM(MX21_CS1_BASE_ADDR, MX21ADS_CS8900A_MMIO_SIZE),
	/* irq number is run-time assigned */
	DEFINE_RES_IRQ(-1),
};

static const struct platform_device_info mx21ads_cs8900_devinfo __initconst = {
	.name = "cs89x0",
	.id = 0,
	.res = mx21ads_cs8900_resources,
	.num_res = ARRAY_SIZE(mx21ads_cs8900_resources),
};

/* UARTs 1 and 4 have full handshake lines wired; UART3 (IrDA) does not */
static const struct imxuart_platform_data uart_pdata_rts __initconst = {
	.flags = IMXUART_HAVE_RTSCTS,
};

static const struct imxuart_platform_data uart_pdata_norts __initconst = {
};

/* Framebuffer init hook: turn the LCD on via the base-board I/O register */
static int mx21ads_fb_init(struct platform_device *pdev)
{
	u16 tmp;

	tmp = __raw_readw(MX21ADS_IO_REG);
	tmp |= MX21ADS_IO_LCDON;
	__raw_writew(tmp, MX21ADS_IO_REG);
	return 0;
}

/* Framebuffer exit hook: turn the LCD back off */
static void mx21ads_fb_exit(struct platform_device *pdev)
{
	u16 tmp;

	tmp = __raw_readw(MX21ADS_IO_REG);
	tmp &= ~MX21ADS_IO_LCDON;
	__raw_writew(tmp, MX21ADS_IO_REG);
}

/*
 * Connected is a portrait Sharp-QVGA display
 * of type: LQ035Q7DB02
 */
static struct imx_fb_videomode mx21ads_modes[] = {
	{
		.mode = {
			.name		= "Sharp-LQ035Q7",
			.refresh	= 60,
			.xres		= 240,
			.yres		= 320,
			.pixclock	= 188679, /* in ps (5.3MHz) */
			.hsync_len	= 2,
			.left_margin	= 6,
			.right_margin	= 16,
			.vsync_len	= 1,
			.upper_margin	= 8,
			.lower_margin	= 10,
		},
		.pcr		= 0xfb108bc7,
		.bpp		= 16,
	},
};

static const struct imx_fb_platform_data mx21ads_fb_data __initconst = {
	.mode = mx21ads_modes,
	.num_modes = ARRAY_SIZE(mx21ads_modes),

	.pwmr		= 0x00a903ff,
	.lscr1		= 0x00120300,
	.dmacr		= 0x00020008,

	.init = mx21ads_fb_init,
	.exit = mx21ads_fb_exit,
};

/* SD card write-protect state is read back through the base-board I/O register */
static int mx21ads_sdhc_get_ro(struct device *dev)
{
	return (__raw_readw(MX21ADS_IO_REG) & MX21ADS_IO_SD_WP) ? 1 : 0;
}

/* Hook the card-detect GPIO (port 4, bit 25) up to the MMC core's handler */
static int mx21ads_sdhc_init(struct device *dev, irq_handler_t detect_irq,
			     void *data)
{
	return request_irq(gpio_to_irq(IMX_GPIO_NR(4, 25)), detect_irq,
			   IRQF_TRIGGER_FALLING, "mmc-detect", data);
}

static void mx21ads_sdhc_exit(struct device *dev, void *data)
{
	free_irq(gpio_to_irq(IMX_GPIO_NR(4, 25)), data);
}

static const struct imxmmc_platform_data mx21ads_sdhc_pdata __initconst = {
	.ocr_avail = MMC_VDD_29_30 | MMC_VDD_30_31, /* 3.0V */
	.get_ro = mx21ads_sdhc_get_ro,
	.init = mx21ads_sdhc_init,
	.exit = mx21ads_sdhc_exit,
};

static const struct mxc_nand_platform_data
mx21ads_nand_board_info __initconst = {
	.width = 1,	/* 8-bit NAND bus */
	.hw_ecc = 1,	/* use the controller's hardware ECC */
};

static struct map_desc mx21ads_io_desc[] __initdata = {
	/*
	 * Memory-mapped I/O on MX21ADS Base board:
	 *   - CS8900A Ethernet controller
	 *   - ST16C2552CJ UART
	 *   - CPU and Base board version
	 *   - Base board I/O register
	 * One static mapping of CS1 to MX21ADS_MMIO_BASE_ADDR covers all of
	 * the MX21ADS_REG_ADDR() offsets defined above.
	 */
	{
		.virtual = MX21ADS_MMIO_BASE_ADDR,
		.pfn = __phys_to_pfn(MX21_CS1_BASE_ADDR),
		.length = MX21ADS_MMIO_SIZE,
		.type = MT_DEVICE,
	},
};

static void __init mx21ads_map_io(void)
{
	mx21_map_io();
	iotable_init(mx21ads_io_desc, ARRAY_SIZE(mx21ads_io_desc));
}

static struct platform_device *platform_devices[] __initdata = {
	&mx21ads_nor_mtd_device,
};

/* Board init: mux the pads, then register every on-board device */
static void __init mx21ads_board_init(void)
{
	imx21_soc_init();

	mxc_gpio_setup_multiple_pins(mx21ads_pins, ARRAY_SIZE(mx21ads_pins),
			"mx21ads");

	imx21_add_imx_uart0(&uart_pdata_rts);
	imx21_add_imx_uart2(&uart_pdata_norts);
	imx21_add_imx_uart3(&uart_pdata_rts);
	imx21_add_imx_fb(&mx21ads_fb_data);
	imx21_add_mxc_mmc(0, &mx21ads_sdhc_pdata);
	imx21_add_mxc_nand(&mx21ads_nand_board_info);

	platform_add_devices(platform_devices, ARRAY_SIZE(platform_devices));

	/* the CS8900A IRQ resource is only known once gpio_to_irq() works */
	mx21ads_cs8900_resources[1].start =
			gpio_to_irq(MX21ADS_CS8900A_IRQ_GPIO);
	mx21ads_cs8900_resources[1].end =
			gpio_to_irq(MX21ADS_CS8900A_IRQ_GPIO);
	platform_device_register_full(&mx21ads_cs8900_devinfo);
}

/* 32.768 kHz low-frequency and 26 MHz high-frequency reference oscillators */
static void __init mx21ads_timer_init(void)
{
	mx21_clocks_init(32768, 26000000);
}

MACHINE_START(MX21ADS, "Freescale i.MX21ADS")
	/* maintainer: Freescale Semiconductor, Inc. */
	.atag_offset = 0x100,
	.map_io = mx21ads_map_io,
	.init_early = imx21_init_early,
	.init_irq = mx21_init_irq,
	.handle_irq = imx21_handle_irq,
	.init_time	= mx21ads_timer_init,
	.init_machine = mx21ads_board_init,
	.restart	= mxc_restart,
MACHINE_END
gpl-2.0
AstroProfundis/android_kernel_samsung_sc03e
sound/soc/pxa/raumfeld.c
2895
7997
/* * raumfeld_audio.c -- SoC audio for Raumfeld audio devices * * Copyright (c) 2009 Daniel Mack <daniel@caiaq.de> * * based on code from: * * Wolfson Microelectronics PLC. * Openedhand Ltd. * Liam Girdwood <lrg@slimlogic.co.uk> * Richard Purdie <richard@openedhand.com> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/module.h> #include <linux/i2c.h> #include <linux/delay.h> #include <linux/gpio.h> #include <sound/pcm.h> #include <sound/soc.h> #include <asm/mach-types.h> #include "pxa-ssp.h" #define GPIO_SPDIF_RESET (38) #define GPIO_MCLK_RESET (111) #define GPIO_CODEC_RESET (120) static struct i2c_client *max9486_client; static struct i2c_board_info max9486_hwmon_info = { I2C_BOARD_INFO("max9485", 0x63), }; #define MAX9485_MCLK_FREQ_112896 0x22 #define MAX9485_MCLK_FREQ_122880 0x23 #define MAX9485_MCLK_FREQ_225792 0x32 #define MAX9485_MCLK_FREQ_245760 0x33 static void set_max9485_clk(char clk) { i2c_master_send(max9486_client, &clk, 1); } static void raumfeld_enable_audio(bool en) { if (en) { gpio_set_value(GPIO_MCLK_RESET, 1); /* wait some time to let the clocks become stable */ msleep(100); gpio_set_value(GPIO_SPDIF_RESET, 1); gpio_set_value(GPIO_CODEC_RESET, 1); } else { gpio_set_value(GPIO_MCLK_RESET, 0); gpio_set_value(GPIO_SPDIF_RESET, 0); gpio_set_value(GPIO_CODEC_RESET, 0); } } /* CS4270 */ static int raumfeld_cs4270_startup(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; /* set freq to 0 to enable all possible codec sample rates */ return snd_soc_dai_set_sysclk(codec_dai, 0, 0, 0); } static void raumfeld_cs4270_shutdown(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai 
*codec_dai = rtd->codec_dai; /* set freq to 0 to enable all possible codec sample rates */ snd_soc_dai_set_sysclk(codec_dai, 0, 0, 0); } static int raumfeld_cs4270_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = rtd->codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; unsigned int fmt, clk = 0; int ret = 0; switch (params_rate(params)) { case 44100: set_max9485_clk(MAX9485_MCLK_FREQ_112896); clk = 11289600; break; case 48000: set_max9485_clk(MAX9485_MCLK_FREQ_122880); clk = 12288000; break; case 88200: set_max9485_clk(MAX9485_MCLK_FREQ_225792); clk = 22579200; break; case 96000: set_max9485_clk(MAX9485_MCLK_FREQ_245760); clk = 24576000; break; default: return -EINVAL; } fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF | SND_SOC_DAIFMT_CBS_CFS; /* setup the CODEC DAI */ ret = snd_soc_dai_set_fmt(codec_dai, fmt); if (ret < 0) return ret; ret = snd_soc_dai_set_sysclk(codec_dai, 0, clk, 0); if (ret < 0) return ret; /* setup the CPU DAI */ ret = snd_soc_dai_set_pll(cpu_dai, 0, 0, 0, clk); if (ret < 0) return ret; ret = snd_soc_dai_set_fmt(cpu_dai, fmt); if (ret < 0) return ret; ret = snd_soc_dai_set_clkdiv(cpu_dai, PXA_SSP_DIV_SCR, 4); if (ret < 0) return ret; ret = snd_soc_dai_set_sysclk(cpu_dai, PXA_SSP_CLK_EXT, clk, 1); if (ret < 0) return ret; return 0; } static struct snd_soc_ops raumfeld_cs4270_ops = { .startup = raumfeld_cs4270_startup, .shutdown = raumfeld_cs4270_shutdown, .hw_params = raumfeld_cs4270_hw_params, }; static int raumfeld_analog_suspend(struct snd_soc_card *card) { raumfeld_enable_audio(false); return 0; } static int raumfeld_analog_resume(struct snd_soc_card *card) { raumfeld_enable_audio(true); return 0; } /* AK4104 */ static int raumfeld_ak4104_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct snd_soc_dai *codec_dai = 
rtd->codec_dai; struct snd_soc_dai *cpu_dai = rtd->cpu_dai; int fmt, ret = 0, clk = 0; switch (params_rate(params)) { case 44100: set_max9485_clk(MAX9485_MCLK_FREQ_112896); clk = 11289600; break; case 48000: set_max9485_clk(MAX9485_MCLK_FREQ_122880); clk = 12288000; break; case 88200: set_max9485_clk(MAX9485_MCLK_FREQ_225792); clk = 22579200; break; case 96000: set_max9485_clk(MAX9485_MCLK_FREQ_245760); clk = 24576000; break; default: return -EINVAL; } fmt = SND_SOC_DAIFMT_I2S | SND_SOC_DAIFMT_NB_NF; /* setup the CODEC DAI */ ret = snd_soc_dai_set_fmt(codec_dai, fmt | SND_SOC_DAIFMT_CBS_CFS); if (ret < 0) return ret; /* setup the CPU DAI */ ret = snd_soc_dai_set_pll(cpu_dai, 0, 0, 0, clk); if (ret < 0) return ret; ret = snd_soc_dai_set_fmt(cpu_dai, fmt | SND_SOC_DAIFMT_CBS_CFS); if (ret < 0) return ret; ret = snd_soc_dai_set_clkdiv(cpu_dai, PXA_SSP_DIV_SCR, 4); if (ret < 0) return ret; ret = snd_soc_dai_set_sysclk(cpu_dai, PXA_SSP_CLK_EXT, clk, 1); if (ret < 0) return ret; return 0; } static struct snd_soc_ops raumfeld_ak4104_ops = { .hw_params = raumfeld_ak4104_hw_params, }; #define DAI_LINK_CS4270 \ { \ .name = "CS4270", \ .stream_name = "CS4270", \ .cpu_dai_name = "pxa-ssp-dai.0", \ .platform_name = "pxa-pcm-audio", \ .codec_dai_name = "cs4270-hifi", \ .codec_name = "cs4270-codec.0-0048", \ .ops = &raumfeld_cs4270_ops, \ } #define DAI_LINK_AK4104 \ { \ .name = "ak4104", \ .stream_name = "Playback", \ .cpu_dai_name = "pxa-ssp-dai.1", \ .codec_dai_name = "ak4104-hifi", \ .platform_name = "pxa-pcm-audio", \ .ops = &raumfeld_ak4104_ops, \ .codec_name = "spi0.0", \ } static struct snd_soc_dai_link snd_soc_raumfeld_connector_dai[] = { DAI_LINK_CS4270, DAI_LINK_AK4104, }; static struct snd_soc_dai_link snd_soc_raumfeld_speaker_dai[] = { DAI_LINK_CS4270, }; static struct snd_soc_card snd_soc_raumfeld_connector = { .name = "Raumfeld Connector", .dai_link = snd_soc_raumfeld_connector_dai, .num_links = ARRAY_SIZE(snd_soc_raumfeld_connector_dai), .suspend_post = 
raumfeld_analog_suspend, .resume_pre = raumfeld_analog_resume, }; static struct snd_soc_card snd_soc_raumfeld_speaker = { .name = "Raumfeld Speaker", .dai_link = snd_soc_raumfeld_speaker_dai, .num_links = ARRAY_SIZE(snd_soc_raumfeld_speaker_dai), .suspend_post = raumfeld_analog_suspend, .resume_pre = raumfeld_analog_resume, }; static struct platform_device *raumfeld_audio_device; static int __init raumfeld_audio_init(void) { int ret; if (!machine_is_raumfeld_speaker() && !machine_is_raumfeld_connector()) return 0; max9486_client = i2c_new_device(i2c_get_adapter(0), &max9486_hwmon_info); if (!max9486_client) return -ENOMEM; set_max9485_clk(MAX9485_MCLK_FREQ_122880); /* Register analog device */ raumfeld_audio_device = platform_device_alloc("soc-audio", 0); if (!raumfeld_audio_device) return -ENOMEM; if (machine_is_raumfeld_speaker()) platform_set_drvdata(raumfeld_audio_device, &snd_soc_raumfeld_speaker); if (machine_is_raumfeld_connector()) platform_set_drvdata(raumfeld_audio_device, &snd_soc_raumfeld_connector); ret = platform_device_add(raumfeld_audio_device); if (ret < 0) return ret; raumfeld_enable_audio(true); return 0; } static void __exit raumfeld_audio_exit(void) { raumfeld_enable_audio(false); platform_device_unregister(raumfeld_audio_device); i2c_unregister_device(max9486_client); gpio_free(GPIO_MCLK_RESET); gpio_free(GPIO_CODEC_RESET); gpio_free(GPIO_SPDIF_RESET); } module_init(raumfeld_audio_init); module_exit(raumfeld_audio_exit); /* Module information */ MODULE_AUTHOR("Daniel Mack <daniel@caiaq.de>"); MODULE_DESCRIPTION("Raumfeld audio SoC"); MODULE_LICENSE("GPL");
gpl-2.0
m0zes/linux
drivers/isdn/hardware/avm/t1pci.c
4687
6759
/* $Id: t1pci.c,v 1.1.2.2 2004/01/16 21:09:27 keil Exp $
 *
 * Module for AVM T1 PCI-card.
 *
 * Copyright 1999 by Carsten Paeth <calle@calle.de>
 *
 * This software may be used and distributed according to the terms
 * of the GNU General Public License, incorporated herein by reference.
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <linux/delay.h>
#include <linux/mm.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/capi.h>
#include <linux/init.h>
#include <asm/io.h>
#include <linux/isdn/capicmd.h>
#include <linux/isdn/capiutil.h>
#include <linux/isdn/capilli.h>
#include "avmcard.h"

#undef CONFIG_T1PCI_DEBUG
#undef CONFIG_T1PCI_POLLDEBUG

/* ------------------------------------------------------------- */

/* CVS revision string; parsed at module init to report the driver version */
static char *revision = "$Revision: 1.1.2.2 $";

/* ------------------------------------------------------------- */

static struct pci_device_id t1pci_pci_tbl[] = {
	{ PCI_VENDOR_ID_AVM, PCI_DEVICE_ID_AVM_T1, PCI_ANY_ID, PCI_ANY_ID },
	{ }				/* Terminating entry */
};

MODULE_DEVICE_TABLE(pci, t1pci_pci_tbl);
MODULE_DESCRIPTION("CAPI4Linux: Driver for AVM T1 PCI card");
MODULE_AUTHOR("Carsten Paeth");
MODULE_LICENSE("GPL");

/* ------------------------------------------------------------- */

static char *t1pci_procinfo(struct capi_ctr *ctrl);

/*
 * Set up one T1 PCI card: allocate the avmcard state and DMA buffers,
 * claim the I/O region, map the card memory, probe/reset the hardware,
 * hook the shared interrupt and register the CAPI controller.
 * Resources are released in reverse order on any failure (goto chain).
 */
static int t1pci_add_card(struct capicardparams *p, struct pci_dev *pdev)
{
	avmcard *card;
	avmctrl_info *cinfo;
	int retval;

	card = b1_alloc_card(1);
	if (!card) {
		printk(KERN_WARNING "t1pci: no memory.\n");
		retval = -ENOMEM;
		goto err;
	}

	card->dma = avmcard_dma_alloc("t1pci", pdev, 2048 + 128, 2048 + 128);
	if (!card->dma) {
		printk(KERN_WARNING "t1pci: no memory.\n");
		retval = -ENOMEM;
		goto err_free;
	}

	cinfo = card->ctrlinfo;
	sprintf(card->name, "t1pci-%x", p->port);
	card->port = p->port;
	card->irq = p->irq;
	card->membase = p->membase;
	card->cardtype = avm_t1pci;

	if (!request_region(card->port, AVMB1_PORTLEN, card->name)) {
		printk(KERN_WARNING "t1pci: ports 0x%03x-0x%03x in use.\n",
		       card->port, card->port + AVMB1_PORTLEN);
		retval = -EBUSY;
		goto err_free_dma;
	}

	card->mbase = ioremap(card->membase, 64);
	if (!card->mbase) {
		printk(KERN_NOTICE "t1pci: can't remap memory at 0x%lx\n",
		       card->membase);
		retval = -EIO;
		goto err_release_region;
	}

	b1dma_reset(card);

	/*
	 * t1pci_detect() returns a non-zero step number on failure;
	 * steps >= 6 indicate the card answered but the T1 itself looks
	 * unpowered/disconnected, so a different message is printed.
	 */
	retval = t1pci_detect(card);
	if (retval != 0) {
		if (retval < 6)
			printk(KERN_NOTICE "t1pci: NO card at 0x%x (%d)\n",
			       card->port, retval);
		else
			printk(KERN_NOTICE "t1pci: card at 0x%x, but cable not connected or T1 has no power (%d)\n",
			       card->port, retval);
		retval = -EIO;
		goto err_unmap;
	}
	b1dma_reset(card);

	retval = request_irq(card->irq, b1dma_interrupt, IRQF_SHARED, card->name, card);
	if (retval) {
		printk(KERN_ERR "t1pci: unable to get IRQ %d.\n", card->irq);
		retval = -EBUSY;
		goto err_unmap;
	}

	/* all CAPI operations are delegated to the shared b1dma helpers */
	cinfo->capi_ctrl.owner         = THIS_MODULE;
	cinfo->capi_ctrl.driver_name   = "t1pci";
	cinfo->capi_ctrl.driverdata    = cinfo;
	cinfo->capi_ctrl.register_appl = b1dma_register_appl;
	cinfo->capi_ctrl.release_appl  = b1dma_release_appl;
	cinfo->capi_ctrl.send_message  = b1dma_send_message;
	cinfo->capi_ctrl.load_firmware = b1dma_load_firmware;
	cinfo->capi_ctrl.reset_ctr     = b1dma_reset_ctr;
	cinfo->capi_ctrl.procinfo      = t1pci_procinfo;
	cinfo->capi_ctrl.proc_fops = &b1dmactl_proc_fops;
	strcpy(cinfo->capi_ctrl.name, card->name);

	retval = attach_capi_ctr(&cinfo->capi_ctrl);
	if (retval) {
		printk(KERN_ERR "t1pci: attach controller failed.\n");
		retval = -EBUSY;
		goto err_free_irq;
	}
	card->cardnr = cinfo->capi_ctrl.cnr;

	printk(KERN_INFO "t1pci: AVM T1 PCI at i/o %#x, irq %d, mem %#lx\n",
	       card->port, card->irq, card->membase);

	pci_set_drvdata(pdev, card);
	return 0;

err_free_irq:
	free_irq(card->irq, card);
err_unmap:
	iounmap(card->mbase);
err_release_region:
	release_region(card->port, AVMB1_PORTLEN);
err_free_dma:
	avmcard_dma_free(card->dma);
err_free:
	b1_free_card(card);
err:
	return retval;
}

/* ------------------------------------------------------------- */

/* Tear down a card in the reverse order of t1pci_add_card() */
static void t1pci_remove(struct pci_dev *pdev)
{
	avmcard *card = pci_get_drvdata(pdev);
	avmctrl_info *cinfo = card->ctrlinfo;

	b1dma_reset(card);

	detach_capi_ctr(&cinfo->capi_ctrl);
	free_irq(card->irq, card);
	iounmap(card->mbase);
	release_region(card->port, AVMB1_PORTLEN);
	avmcard_dma_free(card->dma);
	b1_free_card(card);
}

/* ------------------------------------------------------------- */

/* /proc one-line summary: "<cardname> <driver version> <port> <irq> <membase>" */
static char *t1pci_procinfo(struct capi_ctr *ctrl)
{
	avmctrl_info *cinfo = (avmctrl_info *)(ctrl->driverdata);

	if (!cinfo)
		return "";
	sprintf(cinfo->infobuf, "%s %s 0x%x %d 0x%lx",
		cinfo->cardname[0] ? cinfo->cardname : "-",
		cinfo->version[VER_DRIVER] ? cinfo->version[VER_DRIVER] : "-",
		cinfo->card ? cinfo->card->port : 0x0,
		cinfo->card ? cinfo->card->irq : 0,
		cinfo->card ? cinfo->card->membase : 0
		);
	return cinfo->infobuf;
}

/* ------------------------------------------------------------- */

/* PCI probe: BAR1 is the I/O port range, BAR0 the memory window */
static int t1pci_probe(struct pci_dev *dev, const struct pci_device_id *ent)
{
	struct capicardparams param;
	int retval;

	if (pci_enable_device(dev) < 0) {
		printk(KERN_ERR "t1pci: failed to enable AVM-T1-PCI\n");
		return -ENODEV;
	}
	pci_set_master(dev);

	param.port = pci_resource_start(dev, 1);
	param.irq = dev->irq;
	param.membase = pci_resource_start(dev, 0);

	printk(KERN_INFO "t1pci: PCI BIOS reports AVM-T1-PCI at i/o %#x, irq %d, mem %#x\n",
	       param.port, param.irq, param.membase);

	retval = t1pci_add_card(&param, dev);
	if (retval != 0) {
		printk(KERN_ERR "t1pci: no AVM-T1-PCI at i/o %#x, irq %d detected, mem %#x\n",
		       param.port, param.irq, param.membase);
		pci_disable_device(dev);
		return -ENODEV;
	}
	return 0;
}

static struct pci_driver t1pci_pci_driver = {
	.name           = "t1pci",
	.id_table       = t1pci_pci_tbl,
	.probe          = t1pci_probe,
	.remove         = t1pci_remove,
};

/* revision is filled in from the CVS keyword string at init time */
static struct capi_driver capi_driver_t1pci = {
	.name		= "t1pci",
	.revision	= "1.0",
};

/*
 * Module init: extract "x.y.z" from the "$Revision: ... $" keyword
 * (falling back to "1.0"), register the PCI driver, and on success
 * publish the CAPI driver entry with that revision.
 */
static int __init t1pci_init(void)
{
	char *p;
	char rev[32];
	int err;

	if ((p = strchr(revision, ':')) != NULL && p[1]) {
		strlcpy(rev, p + 2, 32);
		if ((p = strchr(rev, '$')) != NULL && p > rev)
			*(p - 1) = 0;
	} else
		strcpy(rev, "1.0");

	err = pci_register_driver(&t1pci_pci_driver);
	if (!err) {
		strlcpy(capi_driver_t1pci.revision, rev, 32);
		register_capi_driver(&capi_driver_t1pci);
		printk(KERN_INFO "t1pci: revision %s\n", rev);
	}
	return err;
}

static void __exit t1pci_exit(void)
{
	unregister_capi_driver(&capi_driver_t1pci);
	pci_unregister_driver(&t1pci_pci_driver);
}

module_init(t1pci_init);
module_exit(t1pci_exit);
gpl-2.0
omega-roms/I9500_Stock_Kernel_KK_4.4.2
drivers/hid/hid-prodikeys.c
4943
21536
/* * HID driver for the Prodikeys PC-MIDI Keyboard * providing midi & extra multimedia keys functionality * * Copyright (c) 2009 Don Prince <dhprince.devel@yahoo.co.uk> * * Controls for Octave Shift Up/Down, Channel, and * Sustain Duration available via sysfs. * */ /* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/device.h> #include <linux/module.h> #include <linux/usb.h> #include <linux/mutex.h> #include <linux/hid.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/rawmidi.h> #include "usbhid/usbhid.h" #include "hid-ids.h" #define pk_debug(format, arg...) \ pr_debug("hid-prodikeys: " format "\n" , ## arg) #define pk_error(format, arg...) \ pr_err("hid-prodikeys: " format "\n" , ## arg) struct pcmidi_snd; struct pk_device { unsigned long quirks; struct hid_device *hdev; struct pcmidi_snd *pm; /* pcmidi device context */ }; struct pcmidi_sustain { unsigned long in_use; struct pcmidi_snd *pm; struct timer_list timer; unsigned char status; unsigned char note; unsigned char velocity; }; #define PCMIDI_SUSTAINED_MAX 32 struct pcmidi_snd { struct pk_device *pk; unsigned short ifnum; struct hid_report *pcmidi_report6; struct input_dev *input_ep82; unsigned short midi_mode; unsigned short midi_sustain_mode; unsigned short midi_sustain; unsigned short midi_channel; short midi_octave; struct pcmidi_sustain sustained_notes[PCMIDI_SUSTAINED_MAX]; unsigned short fn_state; unsigned short last_key[24]; spinlock_t rawmidi_in_lock; struct snd_card *card; struct snd_rawmidi *rwmidi; struct snd_rawmidi_substream *in_substream; struct snd_rawmidi_substream *out_substream; unsigned long in_triggered; unsigned long out_active; }; #define PK_QUIRK_NOGET 0x00010000 #define PCMIDI_MIDDLE_C 60 #define 
PCMIDI_CHANNEL_MIN 0 #define PCMIDI_CHANNEL_MAX 15 #define PCMIDI_OCTAVE_MIN (-2) #define PCMIDI_OCTAVE_MAX 2 #define PCMIDI_SUSTAIN_MIN 0 #define PCMIDI_SUSTAIN_MAX 5000 static const char shortname[] = "PC-MIDI"; static const char longname[] = "Prodikeys PC-MIDI Keyboard"; static int index[SNDRV_CARDS] = SNDRV_DEFAULT_IDX; static char *id[SNDRV_CARDS] = SNDRV_DEFAULT_STR; static bool enable[SNDRV_CARDS] = SNDRV_DEFAULT_ENABLE_PNP; module_param_array(index, int, NULL, 0444); module_param_array(id, charp, NULL, 0444); module_param_array(enable, bool, NULL, 0444); MODULE_PARM_DESC(index, "Index value for the PC-MIDI virtual audio driver"); MODULE_PARM_DESC(id, "ID string for the PC-MIDI virtual audio driver"); MODULE_PARM_DESC(enable, "Enable for the PC-MIDI virtual audio driver"); /* Output routine for the sysfs channel file */ static ssize_t show_channel(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = container_of(dev, struct hid_device, dev); struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); dbg_hid("pcmidi sysfs read channel=%u\n", pk->pm->midi_channel); return sprintf(buf, "%u (min:%u, max:%u)\n", pk->pm->midi_channel, PCMIDI_CHANNEL_MIN, PCMIDI_CHANNEL_MAX); } /* Input routine for the sysfs channel file */ static ssize_t store_channel(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = container_of(dev, struct hid_device, dev); struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); unsigned channel = 0; if (sscanf(buf, "%u", &channel) > 0 && channel <= PCMIDI_CHANNEL_MAX) { dbg_hid("pcmidi sysfs write channel=%u\n", channel); pk->pm->midi_channel = channel; return strlen(buf); } return -EINVAL; } static DEVICE_ATTR(channel, S_IRUGO | S_IWUSR | S_IWGRP , show_channel, store_channel); static struct device_attribute *sysfs_device_attr_channel = { &dev_attr_channel, }; /* Output routine for the sysfs sustain file */ static ssize_t 
show_sustain(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = container_of(dev, struct hid_device, dev); struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); dbg_hid("pcmidi sysfs read sustain=%u\n", pk->pm->midi_sustain); return sprintf(buf, "%u (off:%u, max:%u (ms))\n", pk->pm->midi_sustain, PCMIDI_SUSTAIN_MIN, PCMIDI_SUSTAIN_MAX); } /* Input routine for the sysfs sustain file */ static ssize_t store_sustain(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = container_of(dev, struct hid_device, dev); struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); unsigned sustain = 0; if (sscanf(buf, "%u", &sustain) > 0 && sustain <= PCMIDI_SUSTAIN_MAX) { dbg_hid("pcmidi sysfs write sustain=%u\n", sustain); pk->pm->midi_sustain = sustain; pk->pm->midi_sustain_mode = (0 == sustain || !pk->pm->midi_mode) ? 0 : 1; return strlen(buf); } return -EINVAL; } static DEVICE_ATTR(sustain, S_IRUGO | S_IWUSR | S_IWGRP, show_sustain, store_sustain); static struct device_attribute *sysfs_device_attr_sustain = { &dev_attr_sustain, }; /* Output routine for the sysfs octave file */ static ssize_t show_octave(struct device *dev, struct device_attribute *attr, char *buf) { struct hid_device *hdev = container_of(dev, struct hid_device, dev); struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); dbg_hid("pcmidi sysfs read octave=%d\n", pk->pm->midi_octave); return sprintf(buf, "%d (min:%d, max:%d)\n", pk->pm->midi_octave, PCMIDI_OCTAVE_MIN, PCMIDI_OCTAVE_MAX); } /* Input routine for the sysfs octave file */ static ssize_t store_octave(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct hid_device *hdev = container_of(dev, struct hid_device, dev); struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); int octave = 0; if (sscanf(buf, "%d", &octave) > 0 && octave >= PCMIDI_OCTAVE_MIN && octave <= 
PCMIDI_OCTAVE_MAX) { dbg_hid("pcmidi sysfs write octave=%d\n", octave); pk->pm->midi_octave = octave; return strlen(buf); } return -EINVAL; } static DEVICE_ATTR(octave, S_IRUGO | S_IWUSR | S_IWGRP, show_octave, store_octave); static struct device_attribute *sysfs_device_attr_octave = { &dev_attr_octave, }; static void pcmidi_send_note(struct pcmidi_snd *pm, unsigned char status, unsigned char note, unsigned char velocity) { unsigned long flags; unsigned char buffer[3]; buffer[0] = status; buffer[1] = note; buffer[2] = velocity; spin_lock_irqsave(&pm->rawmidi_in_lock, flags); if (!pm->in_substream) goto drop_note; if (!test_bit(pm->in_substream->number, &pm->in_triggered)) goto drop_note; snd_rawmidi_receive(pm->in_substream, buffer, 3); drop_note: spin_unlock_irqrestore(&pm->rawmidi_in_lock, flags); return; } static void pcmidi_sustained_note_release(unsigned long data) { struct pcmidi_sustain *pms = (struct pcmidi_sustain *)data; pcmidi_send_note(pms->pm, pms->status, pms->note, pms->velocity); pms->in_use = 0; } static void init_sustain_timers(struct pcmidi_snd *pm) { struct pcmidi_sustain *pms; unsigned i; for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) { pms = &pm->sustained_notes[i]; pms->in_use = 0; pms->pm = pm; setup_timer(&pms->timer, pcmidi_sustained_note_release, (unsigned long)pms); } } static void stop_sustain_timers(struct pcmidi_snd *pm) { struct pcmidi_sustain *pms; unsigned i; for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) { pms = &pm->sustained_notes[i]; pms->in_use = 1; del_timer_sync(&pms->timer); } } static int pcmidi_get_output_report(struct pcmidi_snd *pm) { struct hid_device *hdev = pm->pk->hdev; struct hid_report *report; list_for_each_entry(report, &hdev->report_enum[HID_OUTPUT_REPORT].report_list, list) { if (!(6 == report->id)) continue; if (report->maxfield < 1) { hid_err(hdev, "output report is empty\n"); break; } if (report->field[0]->report_count != 2) { hid_err(hdev, "field count too low\n"); break; } pm->pcmidi_report6 = report; return 0; } 
/* should never get here */ return -ENODEV; } static void pcmidi_submit_output_report(struct pcmidi_snd *pm, int state) { struct hid_device *hdev = pm->pk->hdev; struct hid_report *report = pm->pcmidi_report6; report->field[0]->value[0] = 0x01; report->field[0]->value[1] = state; usbhid_submit_report(hdev, report, USB_DIR_OUT); } static int pcmidi_handle_report1(struct pcmidi_snd *pm, u8 *data) { u32 bit_mask; bit_mask = data[1]; bit_mask = (bit_mask << 8) | data[2]; bit_mask = (bit_mask << 8) | data[3]; dbg_hid("pcmidi mode: %d\n", pm->midi_mode); /*KEY_MAIL or octave down*/ if (pm->midi_mode && bit_mask == 0x004000) { /* octave down */ pm->midi_octave--; if (pm->midi_octave < -2) pm->midi_octave = -2; dbg_hid("pcmidi mode: %d octave: %d\n", pm->midi_mode, pm->midi_octave); return 1; } /*KEY_WWW or sustain*/ else if (pm->midi_mode && bit_mask == 0x000004) { /* sustain on/off*/ pm->midi_sustain_mode ^= 0x1; return 1; } return 0; /* continue key processing */ } static int pcmidi_handle_report3(struct pcmidi_snd *pm, u8 *data, int size) { struct pcmidi_sustain *pms; unsigned i, j; unsigned char status, note, velocity; unsigned num_notes = (size-1)/2; for (j = 0; j < num_notes; j++) { note = data[j*2+1]; velocity = data[j*2+2]; if (note < 0x81) { /* note on */ status = 128 + 16 + pm->midi_channel; /* 1001nnnn */ note = note - 0x54 + PCMIDI_MIDDLE_C + (pm->midi_octave * 12); if (0 == velocity) velocity = 1; /* force note on */ } else { /* note off */ status = 128 + pm->midi_channel; /* 1000nnnn */ note = note - 0x94 + PCMIDI_MIDDLE_C + (pm->midi_octave*12); if (pm->midi_sustain_mode) { for (i = 0; i < PCMIDI_SUSTAINED_MAX; i++) { pms = &pm->sustained_notes[i]; if (!pms->in_use) { pms->status = status; pms->note = note; pms->velocity = velocity; pms->in_use = 1; mod_timer(&pms->timer, jiffies + msecs_to_jiffies(pm->midi_sustain)); return 1; } } } } pcmidi_send_note(pm, status, note, velocity); } return 1; } static int pcmidi_handle_report4(struct pcmidi_snd *pm, u8 
*data) { unsigned key; u32 bit_mask; u32 bit_index; bit_mask = data[1]; bit_mask = (bit_mask << 8) | data[2]; bit_mask = (bit_mask << 8) | data[3]; /* break keys */ for (bit_index = 0; bit_index < 24; bit_index++) { key = pm->last_key[bit_index]; if (!((0x01 << bit_index) & bit_mask)) { input_event(pm->input_ep82, EV_KEY, pm->last_key[bit_index], 0); pm->last_key[bit_index] = 0; } } /* make keys */ for (bit_index = 0; bit_index < 24; bit_index++) { key = 0; switch ((0x01 << bit_index) & bit_mask) { case 0x000010: /* Fn lock*/ pm->fn_state ^= 0x000010; if (pm->fn_state) pcmidi_submit_output_report(pm, 0xc5); else pcmidi_submit_output_report(pm, 0xc6); continue; case 0x020000: /* midi launcher..send a key (qwerty) or not? */ pcmidi_submit_output_report(pm, 0xc1); pm->midi_mode ^= 0x01; dbg_hid("pcmidi mode: %d\n", pm->midi_mode); continue; case 0x100000: /* KEY_MESSENGER or octave up */ dbg_hid("pcmidi mode: %d\n", pm->midi_mode); if (pm->midi_mode) { pm->midi_octave++; if (pm->midi_octave > 2) pm->midi_octave = 2; dbg_hid("pcmidi mode: %d octave: %d\n", pm->midi_mode, pm->midi_octave); continue; } else key = KEY_MESSENGER; break; case 0x400000: key = KEY_CALENDAR; break; case 0x080000: key = KEY_ADDRESSBOOK; break; case 0x040000: key = KEY_DOCUMENTS; break; case 0x800000: key = KEY_WORDPROCESSOR; break; case 0x200000: key = KEY_SPREADSHEET; break; case 0x010000: key = KEY_COFFEE; break; case 0x000100: key = KEY_HELP; break; case 0x000200: key = KEY_SEND; break; case 0x000400: key = KEY_REPLY; break; case 0x000800: key = KEY_FORWARDMAIL; break; case 0x001000: key = KEY_NEW; break; case 0x002000: key = KEY_OPEN; break; case 0x004000: key = KEY_CLOSE; break; case 0x008000: key = KEY_SAVE; break; case 0x000001: key = KEY_UNDO; break; case 0x000002: key = KEY_REDO; break; case 0x000004: key = KEY_SPELLCHECK; break; case 0x000008: key = KEY_PRINT; break; } if (key) { input_event(pm->input_ep82, EV_KEY, key, 1); pm->last_key[bit_index] = key; } } return 1; } static int 
pcmidi_handle_report( struct pcmidi_snd *pm, unsigned report_id, u8 *data, int size) { int ret = 0; switch (report_id) { case 0x01: /* midi keys (qwerty)*/ ret = pcmidi_handle_report1(pm, data); break; case 0x03: /* midi keyboard (musical)*/ ret = pcmidi_handle_report3(pm, data, size); break; case 0x04: /* multimedia/midi keys (qwerty)*/ ret = pcmidi_handle_report4(pm, data); break; } return ret; } static void pcmidi_setup_extra_keys( struct pcmidi_snd *pm, struct input_dev *input) { /* reassigned functionality for N/A keys MY PICTURES => KEY_WORDPROCESSOR MY MUSIC=> KEY_SPREADSHEET */ unsigned int keys[] = { KEY_FN, KEY_MESSENGER, KEY_CALENDAR, KEY_ADDRESSBOOK, KEY_DOCUMENTS, KEY_WORDPROCESSOR, KEY_SPREADSHEET, KEY_COFFEE, KEY_HELP, KEY_SEND, KEY_REPLY, KEY_FORWARDMAIL, KEY_NEW, KEY_OPEN, KEY_CLOSE, KEY_SAVE, KEY_UNDO, KEY_REDO, KEY_SPELLCHECK, KEY_PRINT, 0 }; unsigned int *pkeys = &keys[0]; unsigned short i; if (pm->ifnum != 1) /* only set up ONCE for interace 1 */ return; pm->input_ep82 = input; for (i = 0; i < 24; i++) pm->last_key[i] = 0; while (*pkeys != 0) { set_bit(*pkeys, pm->input_ep82->keybit); ++pkeys; } } static int pcmidi_set_operational(struct pcmidi_snd *pm) { if (pm->ifnum != 1) return 0; /* only set up ONCE for interace 1 */ pcmidi_get_output_report(pm); pcmidi_submit_output_report(pm, 0xc1); return 0; } static int pcmidi_snd_free(struct snd_device *dev) { return 0; } static int pcmidi_in_open(struct snd_rawmidi_substream *substream) { struct pcmidi_snd *pm = substream->rmidi->private_data; dbg_hid("pcmidi in open\n"); pm->in_substream = substream; return 0; } static int pcmidi_in_close(struct snd_rawmidi_substream *substream) { dbg_hid("pcmidi in close\n"); return 0; } static void pcmidi_in_trigger(struct snd_rawmidi_substream *substream, int up) { struct pcmidi_snd *pm = substream->rmidi->private_data; dbg_hid("pcmidi in trigger %d\n", up); pm->in_triggered = up; } static struct snd_rawmidi_ops pcmidi_in_ops = { .open = pcmidi_in_open, .close = 
pcmidi_in_close, .trigger = pcmidi_in_trigger }; static int pcmidi_snd_initialise(struct pcmidi_snd *pm) { static int dev; struct snd_card *card; struct snd_rawmidi *rwmidi; int err; static struct snd_device_ops ops = { .dev_free = pcmidi_snd_free, }; if (pm->ifnum != 1) return 0; /* only set up midi device ONCE for interace 1 */ if (dev >= SNDRV_CARDS) return -ENODEV; if (!enable[dev]) { dev++; return -ENOENT; } /* Setup sound card */ err = snd_card_create(index[dev], id[dev], THIS_MODULE, 0, &card); if (err < 0) { pk_error("failed to create pc-midi sound card\n"); err = -ENOMEM; goto fail; } pm->card = card; /* Setup sound device */ err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, pm, &ops); if (err < 0) { pk_error("failed to create pc-midi sound device: error %d\n", err); goto fail; } strncpy(card->driver, shortname, sizeof(card->driver)); strncpy(card->shortname, shortname, sizeof(card->shortname)); strncpy(card->longname, longname, sizeof(card->longname)); /* Set up rawmidi */ err = snd_rawmidi_new(card, card->shortname, 0, 0, 1, &rwmidi); if (err < 0) { pk_error("failed to create pc-midi rawmidi device: error %d\n", err); goto fail; } pm->rwmidi = rwmidi; strncpy(rwmidi->name, card->shortname, sizeof(rwmidi->name)); rwmidi->info_flags = SNDRV_RAWMIDI_INFO_INPUT; rwmidi->private_data = pm; snd_rawmidi_set_ops(rwmidi, SNDRV_RAWMIDI_STREAM_INPUT, &pcmidi_in_ops); snd_card_set_dev(card, &pm->pk->hdev->dev); /* create sysfs variables */ err = device_create_file(&pm->pk->hdev->dev, sysfs_device_attr_channel); if (err < 0) { pk_error("failed to create sysfs attribute channel: error %d\n", err); goto fail; } err = device_create_file(&pm->pk->hdev->dev, sysfs_device_attr_sustain); if (err < 0) { pk_error("failed to create sysfs attribute sustain: error %d\n", err); goto fail_attr_sustain; } err = device_create_file(&pm->pk->hdev->dev, sysfs_device_attr_octave); if (err < 0) { pk_error("failed to create sysfs attribute octave: error %d\n", err); goto fail_attr_octave; } 
spin_lock_init(&pm->rawmidi_in_lock); init_sustain_timers(pm); pcmidi_set_operational(pm); /* register it */ err = snd_card_register(card); if (err < 0) { pk_error("failed to register pc-midi sound card: error %d\n", err); goto fail_register; } dbg_hid("pcmidi_snd_initialise finished ok\n"); return 0; fail_register: stop_sustain_timers(pm); device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_octave); fail_attr_octave: device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_sustain); fail_attr_sustain: device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_channel); fail: if (pm->card) { snd_card_free(pm->card); pm->card = NULL; } return err; } static int pcmidi_snd_terminate(struct pcmidi_snd *pm) { if (pm->card) { stop_sustain_timers(pm); device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_channel); device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_sustain); device_remove_file(&pm->pk->hdev->dev, sysfs_device_attr_octave); snd_card_disconnect(pm->card); snd_card_free_when_closed(pm->card); } return 0; } /* * PC-MIDI report descriptor for report id is wrong. 
*/ static __u8 *pk_report_fixup(struct hid_device *hdev, __u8 *rdesc, unsigned int *rsize) { if (*rsize == 178 && rdesc[111] == 0x06 && rdesc[112] == 0x00 && rdesc[113] == 0xff) { hid_info(hdev, "fixing up pc-midi keyboard report descriptor\n"); rdesc[144] = 0x18; /* report 4: was 0x10 report count */ } return rdesc; } static int pk_input_mapping(struct hid_device *hdev, struct hid_input *hi, struct hid_field *field, struct hid_usage *usage, unsigned long **bit, int *max) { struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); struct pcmidi_snd *pm; pm = pk->pm; if (HID_UP_MSVENDOR == (usage->hid & HID_USAGE_PAGE) && 1 == pm->ifnum) { pcmidi_setup_extra_keys(pm, hi->input); return 0; } return 0; } static int pk_raw_event(struct hid_device *hdev, struct hid_report *report, u8 *data, int size) { struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); int ret = 0; if (1 == pk->pm->ifnum) { if (report->id == data[0]) switch (report->id) { case 0x01: /* midi keys (qwerty)*/ case 0x03: /* midi keyboard (musical)*/ case 0x04: /* extra/midi keys (qwerty)*/ ret = pcmidi_handle_report(pk->pm, report->id, data, size); break; } } return ret; } static int pk_probe(struct hid_device *hdev, const struct hid_device_id *id) { int ret; struct usb_interface *intf = to_usb_interface(hdev->dev.parent); unsigned short ifnum = intf->cur_altsetting->desc.bInterfaceNumber; unsigned long quirks = id->driver_data; struct pk_device *pk; struct pcmidi_snd *pm = NULL; pk = kzalloc(sizeof(*pk), GFP_KERNEL); if (pk == NULL) { hid_err(hdev, "can't alloc descriptor\n"); return -ENOMEM; } pk->hdev = hdev; pm = kzalloc(sizeof(*pm), GFP_KERNEL); if (pm == NULL) { hid_err(hdev, "can't alloc descriptor\n"); ret = -ENOMEM; goto err_free_pk; } pm->pk = pk; pk->pm = pm; pm->ifnum = ifnum; hid_set_drvdata(hdev, pk); ret = hid_parse(hdev); if (ret) { hid_err(hdev, "hid parse failed\n"); goto err_free; } if (quirks & PK_QUIRK_NOGET) { /* hid_parse cleared all the quirks */ hdev->quirks 
|= HID_QUIRK_NOGET; } ret = hid_hw_start(hdev, HID_CONNECT_DEFAULT); if (ret) { hid_err(hdev, "hw start failed\n"); goto err_free; } ret = pcmidi_snd_initialise(pm); if (ret < 0) goto err_stop; return 0; err_stop: hid_hw_stop(hdev); err_free: kfree(pm); err_free_pk: kfree(pk); return ret; } static void pk_remove(struct hid_device *hdev) { struct pk_device *pk = (struct pk_device *)hid_get_drvdata(hdev); struct pcmidi_snd *pm; pm = pk->pm; if (pm) { pcmidi_snd_terminate(pm); kfree(pm); } hid_hw_stop(hdev); kfree(pk); } static const struct hid_device_id pk_devices[] = { {HID_USB_DEVICE(USB_VENDOR_ID_CREATIVELABS, USB_DEVICE_ID_PRODIKEYS_PCMIDI), .driver_data = PK_QUIRK_NOGET}, { } }; MODULE_DEVICE_TABLE(hid, pk_devices); static struct hid_driver pk_driver = { .name = "prodikeys", .id_table = pk_devices, .report_fixup = pk_report_fixup, .input_mapping = pk_input_mapping, .raw_event = pk_raw_event, .probe = pk_probe, .remove = pk_remove, }; static int pk_init(void) { int ret; ret = hid_register_driver(&pk_driver); if (ret) pr_err("can't register prodikeys driver\n"); return ret; } static void pk_exit(void) { hid_unregister_driver(&pk_driver); } module_init(pk_init); module_exit(pk_exit); MODULE_LICENSE("GPL");
gpl-2.0
neXusPRIME/android_kernel_sony_msm8960t_pac
net/mac80211/aes_cmac.c
5711
2704
/* * AES-128-CMAC with TLen 16 for IEEE 802.11w BIP * Copyright 2008, Jouni Malinen <j@w1.fi> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/types.h> #include <linux/crypto.h> #include <linux/err.h> #include <crypto/aes.h> #include <net/mac80211.h> #include "key.h" #include "aes_cmac.h" #define AES_CMAC_KEY_LEN 16 #define CMAC_TLEN 8 /* CMAC TLen = 64 bits (8 octets) */ #define AAD_LEN 20 static void gf_mulx(u8 *pad) { int i, carry; carry = pad[0] & 0x80; for (i = 0; i < AES_BLOCK_SIZE - 1; i++) pad[i] = (pad[i] << 1) | (pad[i + 1] >> 7); pad[AES_BLOCK_SIZE - 1] <<= 1; if (carry) pad[AES_BLOCK_SIZE - 1] ^= 0x87; } static void aes_128_cmac_vector(struct crypto_cipher *tfm, size_t num_elem, const u8 *addr[], const size_t *len, u8 *mac) { u8 scratch[2 * AES_BLOCK_SIZE]; u8 *cbc, *pad; const u8 *pos, *end; size_t i, e, left, total_len; cbc = scratch; pad = scratch + AES_BLOCK_SIZE; memset(cbc, 0, AES_BLOCK_SIZE); total_len = 0; for (e = 0; e < num_elem; e++) total_len += len[e]; left = total_len; e = 0; pos = addr[0]; end = pos + len[0]; while (left >= AES_BLOCK_SIZE) { for (i = 0; i < AES_BLOCK_SIZE; i++) { cbc[i] ^= *pos++; if (pos >= end) { e++; pos = addr[e]; end = pos + len[e]; } } if (left > AES_BLOCK_SIZE) crypto_cipher_encrypt_one(tfm, cbc, cbc); left -= AES_BLOCK_SIZE; } memset(pad, 0, AES_BLOCK_SIZE); crypto_cipher_encrypt_one(tfm, pad, pad); gf_mulx(pad); if (left || total_len == 0) { for (i = 0; i < left; i++) { cbc[i] ^= *pos++; if (pos >= end) { e++; pos = addr[e]; end = pos + len[e]; } } cbc[left] ^= 0x80; gf_mulx(pad); } for (i = 0; i < AES_BLOCK_SIZE; i++) pad[i] ^= cbc[i]; crypto_cipher_encrypt_one(tfm, pad, pad); memcpy(mac, pad, CMAC_TLEN); } void ieee80211_aes_cmac(struct crypto_cipher *tfm, const u8 *aad, const u8 *data, size_t data_len, u8 *mic) { const u8 *addr[3]; 
size_t len[3]; u8 zero[CMAC_TLEN]; memset(zero, 0, CMAC_TLEN); addr[0] = aad; len[0] = AAD_LEN; addr[1] = data; len[1] = data_len - CMAC_TLEN; addr[2] = zero; len[2] = CMAC_TLEN; aes_128_cmac_vector(tfm, 3, addr, len, mic); } struct crypto_cipher * ieee80211_aes_cmac_key_setup(const u8 key[]) { struct crypto_cipher *tfm; tfm = crypto_alloc_cipher("aes", 0, CRYPTO_ALG_ASYNC); if (!IS_ERR(tfm)) crypto_cipher_setkey(tfm, key, AES_CMAC_KEY_LEN); return tfm; } void ieee80211_aes_cmac_key_free(struct crypto_cipher *tfm) { crypto_free_cipher(tfm); }
gpl-2.0
TeamGlade-Devices/android_kernel_sony_msm8930
fs/hpfs/map.c
6223
8540
/* * linux/fs/hpfs/map.c * * Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999 * * mapping structures to memory with some minimal checks */ #include "hpfs_fn.h" unsigned *hpfs_map_dnode_bitmap(struct super_block *s, struct quad_buffer_head *qbh) { return hpfs_map_4sectors(s, hpfs_sb(s)->sb_dmap, qbh, 0); } unsigned int *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block, struct quad_buffer_head *qbh, char *id) { secno sec; if (hpfs_sb(s)->sb_chk) if (bmp_block * 16384 > hpfs_sb(s)->sb_fs_size) { hpfs_error(s, "hpfs_map_bitmap called with bad parameter: %08x at %s", bmp_block, id); return NULL; } sec = le32_to_cpu(hpfs_sb(s)->sb_bmp_dir[bmp_block]); if (!sec || sec > hpfs_sb(s)->sb_fs_size-4) { hpfs_error(s, "invalid bitmap block pointer %08x -> %08x at %s", bmp_block, sec, id); return NULL; } return hpfs_map_4sectors(s, sec, qbh, 4); } /* * Load first code page into kernel memory, return pointer to 256-byte array, * first 128 bytes are uppercasing table for chars 128-255, next 128 bytes are * lowercasing table */ unsigned char *hpfs_load_code_page(struct super_block *s, secno cps) { struct buffer_head *bh; secno cpds; unsigned cpi; unsigned char *ptr; unsigned char *cp_table; int i; struct code_page_data *cpd; struct code_page_directory *cp = hpfs_map_sector(s, cps, &bh, 0); if (!cp) return NULL; if (le32_to_cpu(cp->magic) != CP_DIR_MAGIC) { printk("HPFS: Code page directory magic doesn't match (magic = %08x)\n", le32_to_cpu(cp->magic)); brelse(bh); return NULL; } if (!le32_to_cpu(cp->n_code_pages)) { printk("HPFS: n_code_pages == 0\n"); brelse(bh); return NULL; } cpds = le32_to_cpu(cp->array[0].code_page_data); cpi = le16_to_cpu(cp->array[0].index); brelse(bh); if (cpi >= 3) { printk("HPFS: Code page index out of array\n"); return NULL; } if (!(cpd = hpfs_map_sector(s, cpds, &bh, 0))) return NULL; if (le16_to_cpu(cpd->offs[cpi]) > 0x178) { printk("HPFS: Code page index out of sector\n"); brelse(bh); return NULL; } ptr = (unsigned char *)cpd + 
le16_to_cpu(cpd->offs[cpi]) + 6; if (!(cp_table = kmalloc(256, GFP_KERNEL))) { printk("HPFS: out of memory for code page table\n"); brelse(bh); return NULL; } memcpy(cp_table, ptr, 128); brelse(bh); /* Try to build lowercasing table from uppercasing one */ for (i=128; i<256; i++) cp_table[i]=i; for (i=128; i<256; i++) if (cp_table[i-128]!=i && cp_table[i-128]>=128) cp_table[cp_table[i-128]] = i; return cp_table; } secno *hpfs_load_bitmap_directory(struct super_block *s, secno bmp) { struct buffer_head *bh; int n = (hpfs_sb(s)->sb_fs_size + 0x200000 - 1) >> 21; int i; secno *b; if (!(b = kmalloc(n * 512, GFP_KERNEL))) { printk("HPFS: can't allocate memory for bitmap directory\n"); return NULL; } for (i=0;i<n;i++) { secno *d = hpfs_map_sector(s, bmp+i, &bh, n - i - 1); if (!d) { kfree(b); return NULL; } memcpy((char *)b + 512 * i, d, 512); brelse(bh); } return b; } /* * Load fnode to memory */ struct fnode *hpfs_map_fnode(struct super_block *s, ino_t ino, struct buffer_head **bhp) { struct fnode *fnode; if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, ino, 1, "fnode")) { return NULL; } if ((fnode = hpfs_map_sector(s, ino, bhp, FNODE_RD_AHEAD))) { if (hpfs_sb(s)->sb_chk) { struct extended_attribute *ea; struct extended_attribute *ea_end; if (le32_to_cpu(fnode->magic) != FNODE_MAGIC) { hpfs_error(s, "bad magic on fnode %08lx", (unsigned long)ino); goto bail; } if (!fnode->dirflag) { if ((unsigned)fnode->btree.n_used_nodes + (unsigned)fnode->btree.n_free_nodes != (fnode->btree.internal ? 12 : 8)) { hpfs_error(s, "bad number of nodes in fnode %08lx", (unsigned long)ino); goto bail; } if (le16_to_cpu(fnode->btree.first_free) != 8 + fnode->btree.n_used_nodes * (fnode->btree.internal ? 
8 : 12)) { hpfs_error(s, "bad first_free pointer in fnode %08lx", (unsigned long)ino); goto bail; } } if (le16_to_cpu(fnode->ea_size_s) && (le16_to_cpu(fnode->ea_offs) < 0xc4 || le16_to_cpu(fnode->ea_offs) + le16_to_cpu(fnode->acl_size_s) + le16_to_cpu(fnode->ea_size_s) > 0x200)) { hpfs_error(s, "bad EA info in fnode %08lx: ea_offs == %04x ea_size_s == %04x", (unsigned long)ino, le16_to_cpu(fnode->ea_offs), le16_to_cpu(fnode->ea_size_s)); goto bail; } ea = fnode_ea(fnode); ea_end = fnode_end_ea(fnode); while (ea != ea_end) { if (ea > ea_end) { hpfs_error(s, "bad EA in fnode %08lx", (unsigned long)ino); goto bail; } ea = next_ea(ea); } } } return fnode; bail: brelse(*bhp); return NULL; } struct anode *hpfs_map_anode(struct super_block *s, anode_secno ano, struct buffer_head **bhp) { struct anode *anode; if (hpfs_sb(s)->sb_chk) if (hpfs_chk_sectors(s, ano, 1, "anode")) return NULL; if ((anode = hpfs_map_sector(s, ano, bhp, ANODE_RD_AHEAD))) if (hpfs_sb(s)->sb_chk) { if (le32_to_cpu(anode->magic) != ANODE_MAGIC) { hpfs_error(s, "bad magic on anode %08x", ano); goto bail; } if (le32_to_cpu(anode->self) != ano) { hpfs_error(s, "self pointer invalid on anode %08x", ano); goto bail; } if ((unsigned)anode->btree.n_used_nodes + (unsigned)anode->btree.n_free_nodes != (anode->btree.internal ? 60 : 40)) { hpfs_error(s, "bad number of nodes in anode %08x", ano); goto bail; } if (le16_to_cpu(anode->btree.first_free) != 8 + anode->btree.n_used_nodes * (anode->btree.internal ? 
8 : 12)) { hpfs_error(s, "bad first_free pointer in anode %08x", ano); goto bail; } } return anode; bail: brelse(*bhp); return NULL; } /* * Load dnode to memory and do some checks */ struct dnode *hpfs_map_dnode(struct super_block *s, unsigned secno, struct quad_buffer_head *qbh) { struct dnode *dnode; if (hpfs_sb(s)->sb_chk) { if (hpfs_chk_sectors(s, secno, 4, "dnode")) return NULL; if (secno & 3) { hpfs_error(s, "dnode %08x not byte-aligned", secno); return NULL; } } if ((dnode = hpfs_map_4sectors(s, secno, qbh, DNODE_RD_AHEAD))) if (hpfs_sb(s)->sb_chk) { unsigned p, pp = 0; unsigned char *d = (unsigned char *)dnode; int b = 0; if (le32_to_cpu(dnode->magic) != DNODE_MAGIC) { hpfs_error(s, "bad magic on dnode %08x", secno); goto bail; } if (le32_to_cpu(dnode->self) != secno) hpfs_error(s, "bad self pointer on dnode %08x self = %08x", secno, le32_to_cpu(dnode->self)); /* Check dirents - bad dirents would cause infinite loops or shooting to memory */ if (le32_to_cpu(dnode->first_free) > 2048) { hpfs_error(s, "dnode %08x has first_free == %08x", secno, le32_to_cpu(dnode->first_free)); goto bail; } for (p = 20; p < le32_to_cpu(dnode->first_free); p += d[p] + (d[p+1] << 8)) { struct hpfs_dirent *de = (struct hpfs_dirent *)((char *)dnode + p); if (le16_to_cpu(de->length) > 292 || (le16_to_cpu(de->length) < 32) || (le16_to_cpu(de->length) & 3) || p + le16_to_cpu(de->length) > 2048) { hpfs_error(s, "bad dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp); goto bail; } if (((31 + de->namelen + de->down*4 + 3) & ~3) != le16_to_cpu(de->length)) { if (((31 + de->namelen + de->down*4 + 3) & ~3) < le16_to_cpu(de->length) && s->s_flags & MS_RDONLY) goto ok; hpfs_error(s, "namelen does not match dirent size in dnode %08x, dirent %03x, last %03x", secno, p, pp); goto bail; } ok: if (hpfs_sb(s)->sb_chk >= 2) b |= 1 << de->down; if (de->down) if (de_down_pointer(de) < 0x10) { hpfs_error(s, "bad down pointer in dnode %08x, dirent %03x, last %03x", secno, p, pp); goto 
bail; } pp = p; } if (p != le32_to_cpu(dnode->first_free)) { hpfs_error(s, "size on last dirent does not match first_free; dnode %08x", secno); goto bail; } if (d[pp + 30] != 1 || d[pp + 31] != 255) { hpfs_error(s, "dnode %08x does not end with \\377 entry", secno); goto bail; } if (b == 3) printk("HPFS: warning: unbalanced dnode tree, dnode %08x; see hpfs.txt 4 more info\n", secno); } return dnode; bail: hpfs_brelse4(qbh); return NULL; } dnode_secno hpfs_fnode_dno(struct super_block *s, ino_t ino) { struct buffer_head *bh; struct fnode *fnode; dnode_secno dno; fnode = hpfs_map_fnode(s, ino, &bh); if (!fnode) return 0; dno = le32_to_cpu(fnode->u.external[0].disk_secno); brelse(bh); return dno; }
gpl-2.0
Vachounette/Acer_S500_Kernel
drivers/platform/x86/tc1100-wmi.c
8015
6756
/* * HP Compaq TC1100 Tablet WMI Extras Driver * * Copyright (C) 2007 Carlos Corbacho <carlos@strangeworlds.co.uk> * Copyright (C) 2004 Jamey Hicks <jamey.hicks@hp.com> * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com> * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com> * * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or (at * your option) any later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. 
* * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/module.h> #include <linux/slab.h> #include <linux/init.h> #include <linux/types.h> #include <acpi/acpi.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> #include <linux/platform_device.h> #define GUID "C364AC71-36DB-495A-8494-B439D472A505" #define TC1100_INSTANCE_WIRELESS 1 #define TC1100_INSTANCE_JOGDIAL 2 MODULE_AUTHOR("Jamey Hicks, Carlos Corbacho"); MODULE_DESCRIPTION("HP Compaq TC1100 Tablet WMI Extras"); MODULE_LICENSE("GPL"); MODULE_ALIAS("wmi:C364AC71-36DB-495A-8494-B439D472A505"); static struct platform_device *tc1100_device; struct tc1100_data { u32 wireless; u32 jogdial; }; static struct tc1100_data suspend_data; /* -------------------------------------------------------------------------- Device Management -------------------------------------------------------------------------- */ static int get_state(u32 *out, u8 instance) { u32 tmp; acpi_status status; struct acpi_buffer result = { ACPI_ALLOCATE_BUFFER, NULL }; union acpi_object *obj; if (!out) return -EINVAL; if (instance > 2) return -ENODEV; status = wmi_query_block(GUID, instance, &result); if (ACPI_FAILURE(status)) return -ENODEV; obj = (union acpi_object *) result.pointer; if (obj && obj->type == ACPI_TYPE_INTEGER) { tmp = obj->integer.value; } else { tmp = 0; } if (result.length > 0 && result.pointer) kfree(result.pointer); switch (instance) { case TC1100_INSTANCE_WIRELESS: *out = (tmp == 3) ? 1 : 0; return 0; case TC1100_INSTANCE_JOGDIAL: *out = (tmp == 1) ? 0 : 1; return 0; default: return -ENODEV; } } static int set_state(u32 *in, u8 instance) { u32 value; acpi_status status; struct acpi_buffer input; if (!in) return -EINVAL; if (instance > 2) return -ENODEV; switch (instance) { case TC1100_INSTANCE_WIRELESS: value = (*in) ? 1 : 2; break; case TC1100_INSTANCE_JOGDIAL: value = (*in) ? 
0 : 1; break; default: return -ENODEV; } input.length = sizeof(u32); input.pointer = &value; status = wmi_set_block(GUID, instance, &input); if (ACPI_FAILURE(status)) return -ENODEV; return 0; } /* -------------------------------------------------------------------------- FS Interface (/sys) -------------------------------------------------------------------------- */ /* * Read/ write bool sysfs macro */ #define show_set_bool(value, instance) \ static ssize_t \ show_bool_##value(struct device *dev, struct device_attribute *attr, \ char *buf) \ { \ u32 result; \ acpi_status status = get_state(&result, instance); \ if (ACPI_SUCCESS(status)) \ return sprintf(buf, "%d\n", result); \ return sprintf(buf, "Read error\n"); \ } \ \ static ssize_t \ set_bool_##value(struct device *dev, struct device_attribute *attr, \ const char *buf, size_t count) \ { \ u32 tmp = simple_strtoul(buf, NULL, 10); \ acpi_status status = set_state(&tmp, instance); \ if (ACPI_FAILURE(status)) \ return -EINVAL; \ return count; \ } \ static DEVICE_ATTR(value, S_IRUGO | S_IWUSR, \ show_bool_##value, set_bool_##value); show_set_bool(wireless, TC1100_INSTANCE_WIRELESS); show_set_bool(jogdial, TC1100_INSTANCE_JOGDIAL); static struct attribute *tc1100_attributes[] = { &dev_attr_wireless.attr, &dev_attr_jogdial.attr, NULL }; static struct attribute_group tc1100_attribute_group = { .attrs = tc1100_attributes, }; /* -------------------------------------------------------------------------- Driver Model -------------------------------------------------------------------------- */ static int __init tc1100_probe(struct platform_device *device) { return sysfs_create_group(&device->dev.kobj, &tc1100_attribute_group); } static int __devexit tc1100_remove(struct platform_device *device) { sysfs_remove_group(&device->dev.kobj, &tc1100_attribute_group); return 0; } #ifdef CONFIG_PM static int tc1100_suspend(struct device *dev) { int ret; ret = get_state(&suspend_data.wireless, TC1100_INSTANCE_WIRELESS); if (ret) 
return ret; ret = get_state(&suspend_data.jogdial, TC1100_INSTANCE_JOGDIAL); if (ret) return ret; return 0; } static int tc1100_resume(struct device *dev) { int ret; ret = set_state(&suspend_data.wireless, TC1100_INSTANCE_WIRELESS); if (ret) return ret; ret = set_state(&suspend_data.jogdial, TC1100_INSTANCE_JOGDIAL); if (ret) return ret; return 0; } static const struct dev_pm_ops tc1100_pm_ops = { .suspend = tc1100_suspend, .resume = tc1100_resume, .freeze = tc1100_suspend, .restore = tc1100_resume, }; #endif static struct platform_driver tc1100_driver = { .driver = { .name = "tc1100-wmi", .owner = THIS_MODULE, #ifdef CONFIG_PM .pm = &tc1100_pm_ops, #endif }, .remove = __devexit_p(tc1100_remove), }; static int __init tc1100_init(void) { int error; if (!wmi_has_guid(GUID)) return -ENODEV; tc1100_device = platform_device_alloc("tc1100-wmi", -1); if (!tc1100_device) return -ENOMEM; error = platform_device_add(tc1100_device); if (error) goto err_device_put; error = platform_driver_probe(&tc1100_driver, tc1100_probe); if (error) goto err_device_del; pr_info("HP Compaq TC1100 Tablet WMI Extras loaded\n"); return 0; err_device_del: platform_device_del(tc1100_device); err_device_put: platform_device_put(tc1100_device); return error; } static void __exit tc1100_exit(void) { platform_device_unregister(tc1100_device); platform_driver_unregister(&tc1100_driver); } module_init(tc1100_init); module_exit(tc1100_exit);
gpl-2.0
Cl3Kener/UBER-N5
arch/arm/mach-shmobile/console.c
11855
1043
/* * SH-Mobile Console * * Copyright (C) 2010 Magnus Damm * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <mach/common.h> #include <asm/mach/map.h> void __init shmobile_setup_console(void) { parse_early_param(); /* Let earlyprintk output early console messages */ early_platform_driver_probe("earlyprintk", 1, 1); }
gpl-2.0
srfarias/srfarias_kernel_msm8916
arch/arm/mach-shmobile/console.c
11855
1043
/* * SH-Mobile Console * * Copyright (C) 2010 Magnus Damm * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; version 2 of the License. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #include <linux/kernel.h> #include <linux/init.h> #include <linux/platform_device.h> #include <mach/common.h> #include <asm/mach/map.h> void __init shmobile_setup_console(void) { parse_early_param(); /* Let earlyprintk output early console messages */ early_platform_driver_probe("earlyprintk", 1, 1); }
gpl-2.0
monishk10/kernel_kenzo
drivers/rapidio/rio-access.c
14671
5663
/* * RapidIO configuration space access support * * Copyright 2005 MontaVista Software, Inc. * Matt Porter <mporter@kernel.crashing.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. */ #include <linux/rio.h> #include <linux/module.h> /* * These interrupt-safe spinlocks protect all accesses to RIO * configuration space and doorbell access. */ static DEFINE_SPINLOCK(rio_config_lock); static DEFINE_SPINLOCK(rio_doorbell_lock); /* * Wrappers for all RIO configuration access functions. They just check * alignment, do locking and call the low-level functions pointed to * by rio_mport->ops. */ #define RIO_8_BAD 0 #define RIO_16_BAD (offset & 1) #define RIO_32_BAD (offset & 3) /** * RIO_LOP_READ - Generate rio_local_read_config_* functions * @size: Size of configuration space read (8, 16, 32 bits) * @type: C type of value argument * @len: Length of configuration space read (1, 2, 4 bytes) * * Generates rio_local_read_config_* functions used to access * configuration space registers on the local device. */ #define RIO_LOP_READ(size,type,len) \ int __rio_local_read_config_##size \ (struct rio_mport *mport, u32 offset, type *value) \ { \ int res; \ unsigned long flags; \ u32 data = 0; \ if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ spin_lock_irqsave(&rio_config_lock, flags); \ res = mport->ops->lcread(mport, mport->id, offset, len, &data); \ *value = (type)data; \ spin_unlock_irqrestore(&rio_config_lock, flags); \ return res; \ } /** * RIO_LOP_WRITE - Generate rio_local_write_config_* functions * @size: Size of configuration space write (8, 16, 32 bits) * @type: C type of value argument * @len: Length of configuration space write (1, 2, 4 bytes) * * Generates rio_local_write_config_* functions used to access * configuration space registers on the local device. 
*/ #define RIO_LOP_WRITE(size,type,len) \ int __rio_local_write_config_##size \ (struct rio_mport *mport, u32 offset, type value) \ { \ int res; \ unsigned long flags; \ if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ spin_lock_irqsave(&rio_config_lock, flags); \ res = mport->ops->lcwrite(mport, mport->id, offset, len, value);\ spin_unlock_irqrestore(&rio_config_lock, flags); \ return res; \ } RIO_LOP_READ(8, u8, 1) RIO_LOP_READ(16, u16, 2) RIO_LOP_READ(32, u32, 4) RIO_LOP_WRITE(8, u8, 1) RIO_LOP_WRITE(16, u16, 2) RIO_LOP_WRITE(32, u32, 4) EXPORT_SYMBOL_GPL(__rio_local_read_config_8); EXPORT_SYMBOL_GPL(__rio_local_read_config_16); EXPORT_SYMBOL_GPL(__rio_local_read_config_32); EXPORT_SYMBOL_GPL(__rio_local_write_config_8); EXPORT_SYMBOL_GPL(__rio_local_write_config_16); EXPORT_SYMBOL_GPL(__rio_local_write_config_32); /** * RIO_OP_READ - Generate rio_mport_read_config_* functions * @size: Size of configuration space read (8, 16, 32 bits) * @type: C type of value argument * @len: Length of configuration space read (1, 2, 4 bytes) * * Generates rio_mport_read_config_* functions used to access * configuration space registers on the local device. */ #define RIO_OP_READ(size,type,len) \ int rio_mport_read_config_##size \ (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type *value) \ { \ int res; \ unsigned long flags; \ u32 data = 0; \ if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ spin_lock_irqsave(&rio_config_lock, flags); \ res = mport->ops->cread(mport, mport->id, destid, hopcount, offset, len, &data); \ *value = (type)data; \ spin_unlock_irqrestore(&rio_config_lock, flags); \ return res; \ } /** * RIO_OP_WRITE - Generate rio_mport_write_config_* functions * @size: Size of configuration space write (8, 16, 32 bits) * @type: C type of value argument * @len: Length of configuration space write (1, 2, 4 bytes) * * Generates rio_mport_write_config_* functions used to access * configuration space registers on the local device. 
*/ #define RIO_OP_WRITE(size,type,len) \ int rio_mport_write_config_##size \ (struct rio_mport *mport, u16 destid, u8 hopcount, u32 offset, type value) \ { \ int res; \ unsigned long flags; \ if (RIO_##size##_BAD) return RIO_BAD_SIZE; \ spin_lock_irqsave(&rio_config_lock, flags); \ res = mport->ops->cwrite(mport, mport->id, destid, hopcount, offset, len, value); \ spin_unlock_irqrestore(&rio_config_lock, flags); \ return res; \ } RIO_OP_READ(8, u8, 1) RIO_OP_READ(16, u16, 2) RIO_OP_READ(32, u32, 4) RIO_OP_WRITE(8, u8, 1) RIO_OP_WRITE(16, u16, 2) RIO_OP_WRITE(32, u32, 4) EXPORT_SYMBOL_GPL(rio_mport_read_config_8); EXPORT_SYMBOL_GPL(rio_mport_read_config_16); EXPORT_SYMBOL_GPL(rio_mport_read_config_32); EXPORT_SYMBOL_GPL(rio_mport_write_config_8); EXPORT_SYMBOL_GPL(rio_mport_write_config_16); EXPORT_SYMBOL_GPL(rio_mport_write_config_32); /** * rio_mport_send_doorbell - Send a doorbell message * * @mport: RIO master port * @destid: RIO device destination ID * @data: Doorbell message data * * Send a doorbell message to a RIO device. The doorbell message * has a 16-bit info field provided by the data argument. */ int rio_mport_send_doorbell(struct rio_mport *mport, u16 destid, u16 data) { int res; unsigned long flags; spin_lock_irqsave(&rio_doorbell_lock, flags); res = mport->ops->dsend(mport, mport->id, destid, data); spin_unlock_irqrestore(&rio_doorbell_lock, flags); return res; } EXPORT_SYMBOL_GPL(rio_mport_send_doorbell);
gpl-2.0
erikvarga/gcc
gcc/testsuite/gfortran.dg/coarray/collectives_2.f90
80
1792
! { dg-do run } ! ! CO_SUM/CO_MIN/CO_MAX ! program test implicit none intrinsic co_max intrinsic co_min intrinsic co_sum integer :: val(3), tmp_val(3) integer :: vec(3) vec = [2,3,1] if (this_image() == 1) then val(1) = 42 else val(1) = -99 endif val(2) = this_image() if (this_image() == num_images()) then val(3) = -55 else val(3) = 101 endif tmp_val = val call test_min val = tmp_val call test_max val = tmp_val call test_sum contains subroutine test_max integer :: tmp call co_max (val(::2)) if (num_images() > 1) then if (any (val /= [42, this_image(), 101])) call abort() else if (any (val /= [42, this_image(), -55])) call abort() endif val = tmp_val call co_max (val(:)) if (num_images() > 1) then if (any (val /= [42, num_images(), 101])) call abort() else if (any (val /= [42, num_images(), -55])) call abort() endif end subroutine test_max subroutine test_min call co_min (val, result_image=num_images()) if (this_image() == num_images()) then !write(*,*) "Minimal value", val if (num_images() > 1) then if (any (val /= [-99, 1, -55])) call abort() else if (any (val /= [42, 1, -55])) call abort() endif else if (any (val /= tmp_val)) call abort() endif end subroutine test_min subroutine test_sum integer :: n n = 88 call co_sum (val, result_image=1, stat=n) if (n /= 0) call abort() if (this_image() == 1) then n = num_images() !write(*,*) "The sum is ", val if (any (val /= [42 + (n-1)*(-99), (n**2 + n)/2, -55+(n-1)*101])) call abort() else if (any (val /= tmp_val)) call abort() end if end subroutine test_sum end program test
gpl-2.0
jwpi/glibc
resolv/ns_netint.c
80
1584
/* * Copyright (c) 2004 by Internet Systems Consortium, Inc. ("ISC") * Copyright (c) 1996,1999 by Internet Software Consortium. * * Permission to use, copy, modify, and distribute this software for any * purpose with or without fee is hereby granted, provided that the above * copyright notice and this permission notice appear in all copies. * * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. */ #if !defined(_LIBC) && !defined(lint) static const char rcsid[] = "$BINDId: ns_netint.c,v 8.4 1999/10/13 16:39:35 vixie Exp $"; #endif /* Import. */ #include <arpa/nameser.h> #include <resolv.h> /* Public. */ u_int ns_get16(const u_char *src) { u_int dst; NS_GET16(dst, src); return (dst); } libresolv_hidden_def (ns_get16) strong_alias (ns_get16, __ns_get16) u_long ns_get32(const u_char *src) { u_long dst; NS_GET32(dst, src); return (dst); } libresolv_hidden_def (ns_get32) strong_alias (ns_get32, __ns_get32) void ns_put16(u_int src, u_char *dst) { NS_PUT16(src, dst); } libresolv_hidden_def (ns_put16) void ns_put32(u_long src, u_char *dst) { NS_PUT32(src, dst); } libresolv_hidden_def (ns_put32) /*! \file */
gpl-2.0
shminer/kernel-msm-3.18
drivers/pci/host/pci-host-generic.c
80
7539
/* * Simple, generic PCI host controller driver targetting firmware-initialised * systems and virtual machines (e.g. the PCI emulation provided by kvmtool). * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <http://www.gnu.org/licenses/>. * * Copyright (C) 2014 ARM Limited * * Author: Will Deacon <will.deacon@arm.com> */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/of_address.h> #include <linux/of_pci.h> #include <linux/platform_device.h> struct gen_pci_cfg_bus_ops { u32 bus_shift; void __iomem *(*map_bus)(struct pci_bus *, unsigned int, int); }; struct gen_pci_cfg_windows { struct resource res; struct resource *bus_range; void __iomem **win; const struct gen_pci_cfg_bus_ops *ops; }; /* * ARM pcibios functions expect the ARM struct pci_sys_data as the PCI * sysdata. Add pci_sys_data as the first element in struct gen_pci so * that when we use a gen_pci pointer as sysdata, it is also a pointer to * a struct pci_sys_data. 
*/ struct gen_pci { #ifdef CONFIG_ARM struct pci_sys_data sys; #endif struct pci_host_bridge host; struct gen_pci_cfg_windows cfg; struct list_head resources; }; static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus, unsigned int devfn, int where) { struct gen_pci *pci = bus->sysdata; resource_size_t idx = bus->number - pci->cfg.bus_range->start; return pci->cfg.win[idx] + ((devfn << 8) | where); } static struct gen_pci_cfg_bus_ops gen_pci_cfg_cam_bus_ops = { .bus_shift = 16, .map_bus = gen_pci_map_cfg_bus_cam, }; static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus, unsigned int devfn, int where) { struct gen_pci *pci = bus->sysdata; resource_size_t idx = bus->number - pci->cfg.bus_range->start; return pci->cfg.win[idx] + ((devfn << 12) | where); } static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = { .bus_shift = 20, .map_bus = gen_pci_map_cfg_bus_ecam, }; static struct pci_ops gen_pci_ops = { .read = pci_generic_config_read, .write = pci_generic_config_write, }; static const struct of_device_id gen_pci_of_match[] = { { .compatible = "pci-host-cam-generic", .data = &gen_pci_cfg_cam_bus_ops }, { .compatible = "pci-host-ecam-generic", .data = &gen_pci_cfg_ecam_bus_ops }, { }, }; MODULE_DEVICE_TABLE(of, gen_pci_of_match); static void gen_pci_release_of_pci_ranges(struct gen_pci *pci) { pci_free_resource_list(&pci->resources); } static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci) { int err, res_valid = 0; struct device *dev = pci->host.dev.parent; struct device_node *np = dev->of_node; resource_size_t iobase; struct pci_host_bridge_window *win; err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources, &iobase); if (err) return err; list_for_each_entry(win, &pci->resources, list) { struct resource *parent, *res = win->res; switch (resource_type(res)) { case IORESOURCE_IO: parent = &ioport_resource; err = pci_remap_iospace(res, iobase); if (err) { dev_warn(dev, "error %d: failed to map resource %pR\n", err, 
res); continue; } break; case IORESOURCE_MEM: parent = &iomem_resource; res_valid |= !(res->flags & IORESOURCE_PREFETCH); break; case IORESOURCE_BUS: pci->cfg.bus_range = res; default: continue; } err = devm_request_resource(dev, parent, res); if (err) goto out_release_res; } if (!res_valid) { dev_err(dev, "non-prefetchable memory resource required\n"); err = -EINVAL; goto out_release_res; } return 0; out_release_res: gen_pci_release_of_pci_ranges(pci); return err; } static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci) { int err; u8 bus_max; resource_size_t busn; struct resource *bus_range; struct device *dev = pci->host.dev.parent; struct device_node *np = dev->of_node; err = of_address_to_resource(np, 0, &pci->cfg.res); if (err) { dev_err(dev, "missing \"reg\" property\n"); return err; } /* Limit the bus-range to fit within reg */ bus_max = pci->cfg.bus_range->start + (resource_size(&pci->cfg.res) >> pci->cfg.ops->bus_shift) - 1; pci->cfg.bus_range->end = min_t(resource_size_t, pci->cfg.bus_range->end, bus_max); pci->cfg.win = devm_kcalloc(dev, resource_size(pci->cfg.bus_range), sizeof(*pci->cfg.win), GFP_KERNEL); if (!pci->cfg.win) return -ENOMEM; /* Map our Configuration Space windows */ if (!devm_request_mem_region(dev, pci->cfg.res.start, resource_size(&pci->cfg.res), "Configuration Space")) return -ENOMEM; bus_range = pci->cfg.bus_range; for (busn = bus_range->start; busn <= bus_range->end; ++busn) { u32 idx = busn - bus_range->start; u32 sz = 1 << pci->cfg.ops->bus_shift; pci->cfg.win[idx] = devm_ioremap(dev, pci->cfg.res.start + busn * sz, sz); if (!pci->cfg.win[idx]) return -ENOMEM; } return 0; } static int gen_pci_probe(struct platform_device *pdev) { int err; const char *type; const struct of_device_id *of_id; const int *prop; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); struct pci_bus *bus, *child; if (!pci) return -ENOMEM; type = of_get_property(np, 
"device_type", NULL); if (!type || strcmp(type, "pci")) { dev_err(dev, "invalid \"device_type\" %s\n", type); return -EINVAL; } prop = of_get_property(of_chosen, "linux,pci-probe-only", NULL); if (prop) { if (*prop) pci_add_flags(PCI_PROBE_ONLY); else pci_clear_flags(PCI_PROBE_ONLY); } of_id = of_match_node(gen_pci_of_match, np); pci->cfg.ops = of_id->data; gen_pci_ops.map_bus = pci->cfg.ops->map_bus; pci->host.dev.parent = dev; INIT_LIST_HEAD(&pci->host.windows); INIT_LIST_HEAD(&pci->resources); /* Parse our PCI ranges and request their resources */ err = gen_pci_parse_request_of_pci_ranges(pci); if (err) return err; /* Parse and map our Configuration Space windows */ err = gen_pci_parse_map_cfg_windows(pci); if (err) { gen_pci_release_of_pci_ranges(pci); return err; } /* Do not reassign resources if probe only */ if (!pci_has_flag(PCI_PROBE_ONLY)) pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS); bus = pci_scan_root_bus(dev, 0, &gen_pci_ops, pci, &pci->resources); if (!bus) { dev_err(dev, "Scanning rootbus failed"); return -ENODEV; } pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); if (!pci_has_flag(PCI_PROBE_ONLY)) { pci_bus_size_bridges(bus); pci_bus_assign_resources(bus); list_for_each_entry(child, &bus->children, node) pcie_bus_configure_settings(child); } pci_bus_add_devices(bus); return 0; } static struct platform_driver gen_pci_driver = { .driver = { .name = "pci-host-generic", .of_match_table = gen_pci_of_match, }, .probe = gen_pci_probe, }; module_platform_driver(gen_pci_driver); MODULE_DESCRIPTION("Generic PCI host driver"); MODULE_AUTHOR("Will Deacon <will.deacon@arm.com>"); MODULE_LICENSE("GPL v2");
gpl-2.0
percona/debian-percona-xtradb-cluster-5.6
plugin/auth/dialog.c
80
9798
/* Copyright (c) 2010, 2011, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ /** @file dialog client authentication plugin with examples dialog is a general purpose client authentication plugin, it simply asks the user the question, as provided by the server and reports the answer back to the server. No encryption is involved, the answers are sent in clear text. Two examples are provided: two_questions server plugin, that asks the password and an "Are you sure?" question with a reply "yes, of course". It demonstrates the usage of "password" (input is hidden) and "ordinary" (input can be echoed) questions, and how to mark the last question, to avoid an extra roundtrip. And three_attempts plugin that gives the user three attempts to enter a correct password. It shows the situation when a number of questions is not known in advance. */ #if defined (WIN32) && !defined (RTLD_DEFAULT) # define RTLD_DEFAULT GetModuleHandle(NULL) #endif #include <my_global.h> #include <mysql.h> #include <mysql/plugin_auth.h> #include <mysql/client_plugin.h> #include <string.h> #include <stdio.h> #include <stdlib.h> #if !defined (_GNU_SOURCE) # define _GNU_SOURCE /* for RTLD_DEFAULT */ #endif /** first byte of the question string is the question "type". It can be an "ordinary" or a "password" question. 
The last bit set marks a last question in the authentication exchange. */ #define ORDINARY_QUESTION "\2" #define LAST_QUESTION "\3" #define PASSWORD_QUESTION "\4" #define LAST_PASSWORD "\5" /********************* SERVER SIDE ****************************************/ /** dialog demo with two questions, one password and one, the last, ordinary. */ static int two_questions(MYSQL_PLUGIN_VIO *vio, MYSQL_SERVER_AUTH_INFO *info) { unsigned char *pkt; int pkt_len; /* send a password question */ if (vio->write_packet(vio, (const unsigned char *) PASSWORD_QUESTION "Password, please:", 18)) return CR_ERROR; /* read the answer */ if ((pkt_len= vio->read_packet(vio, &pkt)) < 0) return CR_ERROR; info->password_used= PASSWORD_USED_YES; /* fail if the password is wrong */ if (strcmp((const char *) pkt, info->auth_string)) return CR_ERROR; /* send the last, ordinary, question */ if (vio->write_packet(vio, (const unsigned char *) LAST_QUESTION "Are you sure ?", 15)) return CR_ERROR; /* read the answer */ if ((pkt_len= vio->read_packet(vio, &pkt)) < 0) return CR_ERROR; /* check the reply */ return strcmp((const char *) pkt, "yes, of course") ? CR_ERROR : CR_OK; } static struct st_mysql_auth two_handler= { MYSQL_AUTHENTICATION_INTERFACE_VERSION, "dialog", /* requires dialog client plugin */ two_questions }; /* dialog demo where the number of questions is not known in advance */ static int three_attempts(MYSQL_PLUGIN_VIO *vio, MYSQL_SERVER_AUTH_INFO *info) { unsigned char *pkt; int pkt_len, i; for (i= 0; i < 3; i++) { /* send the prompt */ if (vio->write_packet(vio, (const unsigned char *) PASSWORD_QUESTION "Password, please:", 18)) return CR_ERROR; /* read the password */ if ((pkt_len= vio->read_packet(vio, &pkt)) < 0) return CR_ERROR; info->password_used= PASSWORD_USED_YES; /* finish, if the password is correct. 
note, that we did not mark the prompt packet as "last" */ if (strcmp((const char *) pkt, info->auth_string) == 0) return CR_OK; } return CR_ERROR; } static struct st_mysql_auth three_handler= { MYSQL_AUTHENTICATION_INTERFACE_VERSION, "dialog", /* requires dialog client plugin */ three_attempts }; mysql_declare_plugin(dialog) { MYSQL_AUTHENTICATION_PLUGIN, &two_handler, "two_questions", "Sergei Golubchik", "Dialog plugin demo 1", PLUGIN_LICENSE_GPL, NULL, NULL, 0x0100, NULL, NULL, NULL, 0, }, { MYSQL_AUTHENTICATION_PLUGIN, &three_handler, "three_attempts", "Sergei Golubchik", "Dialog plugin demo 2", PLUGIN_LICENSE_GPL, NULL, NULL, 0x0100, NULL, NULL, NULL, 0, } mysql_declare_plugin_end; /********************* CLIENT SIDE ***************************************/ /* This plugin performs a dialog with the user, asking questions and reading answers. Depending on the client it may be desirable to do it using GUI, or console, with or without curses, or read answers from a smartcard, for example. To support all this variety, the dialog plugin has a callback function "authentication_dialog_ask". If the client has a function of this name dialog plugin will use it for communication with the user. Otherwise a default fgets() based implementation will be used. */ /** type of the mysql_authentication_dialog_ask function @param mysql mysql @param type type of the input 1 - ordinary string input 2 - password string @param prompt prompt @param buf a buffer to store the use input @param buf_len the length of the buffer @retval a pointer to the user input string. It may be equal to 'buf' or to 'mysql->password'. In all other cases it is assumed to be an allocated string, and the "dialog" plugin will free() it. 
*/ typedef char *(*mysql_authentication_dialog_ask_t)(struct st_mysql *mysql, int type, const char *prompt, char *buf, int buf_len); static mysql_authentication_dialog_ask_t ask; static char *builtin_ask(MYSQL *mysql __attribute__((unused)), int type __attribute__((unused)), const char *prompt, char *buf, int buf_len) { char *ptr; fputs(prompt, stdout); fputc(' ', stdout); if (fgets(buf, buf_len, stdin) == NULL) return NULL; if ((ptr= strchr(buf, '\n'))) *ptr= 0; return buf; } /** The main function of the dialog plugin. Read the prompt, ask the question, send the reply, repeat until the server is satisfied. @note 1. this plugin shows how a client authentication plugin may read a MySQL protocol OK packet internally - which is important where a number of packets is not known in advance. 2. the first byte of the prompt is special. it is not shown to the user, but signals whether it is the last question (prompt[0] & 1 == 1) or not last (prompt[0] & 1 == 0), and whether the input is a password (not echoed). 3. the prompt is expected to be sent zero-terminated */ static int perform_dialog(MYSQL_PLUGIN_VIO *vio, MYSQL *mysql) { unsigned char *pkt, cmd= 0; int pkt_len, res; char reply_buf[1024], *reply; do { /* read the prompt */ pkt_len= vio->read_packet(vio, &pkt); if (pkt_len < 0) return CR_ERROR; if (pkt == 0) { /* in mysql_change_user() the client sends the first packet, so the first vio->read_packet() does nothing (pkt == 0). We send the "password", assuming the client knows what it's doing. (in other words, the dialog plugin should be only set as a default authentication plugin on the client if the first question asks for a password - which will be sent in clear text, by the way) */ reply= mysql->passwd; } else { cmd= *pkt++; /* is it MySQL protocol packet ? */ if (cmd == 0 || cmd == 254) return CR_OK_HANDSHAKE_COMPLETE; /* yes. 
we're done */ /* asking for a password with an empty prompt means mysql->password otherwise we ask the user and read the reply */ if ((cmd >> 1) == 2 && *pkt == 0) reply= mysql->passwd; else reply= ask(mysql, cmd >> 1, (const char *) pkt, reply_buf, sizeof(reply_buf)); if (!reply) return CR_ERROR; } /* send the reply to the server */ res= vio->write_packet(vio, (const unsigned char *) reply, strlen(reply)+1); if (reply != mysql->passwd && reply != reply_buf) free(reply); if (res) return CR_ERROR; /* repeat unless it was the last question */ } while ((cmd & 1) != 1); /* the job of reading the ok/error packet is left to the server */ return CR_OK; } /** initialization function of the dialog plugin Pick up the client's authentication_dialog_ask() function, if exists, or fall back to the default implementation. */ static int init_dialog(char *unused1 __attribute__((unused)), size_t unused2 __attribute__((unused)), int unused3 __attribute__((unused)), va_list unused4 __attribute__((unused))) { void *sym= dlsym(RTLD_DEFAULT, "mysql_authentication_dialog_ask"); ask= sym ? (mysql_authentication_dialog_ask_t) sym : builtin_ask; return 0; } mysql_declare_client_plugin(AUTHENTICATION) "dialog", "Sergei Golubchik", "Dialog Client Authentication Plugin", {0,1,0}, "GPL", NULL, init_dialog, NULL, NULL, perform_dialog mysql_end_client_plugin;
gpl-2.0
loongson-community/linux-3A
drivers/media/dvb/siano/sms-cards.c
336
8707
/* * Card-specific functions for the Siano SMS1xxx USB dongle * * Copyright (c) 2008 Michael Krufky <mkrufky@linuxtv.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation; * * Software distributed under the License is distributed on an "AS IS" * basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. * * See the GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include "sms-cards.h" #include "smsir.h" static int sms_dbg; module_param_named(cards_dbg, sms_dbg, int, 0644); MODULE_PARM_DESC(cards_dbg, "set debug level (info=1, adv=2 (or-able))"); static struct sms_board sms_boards[] = { [SMS_BOARD_UNKNOWN] = { .name = "Unknown board", }, [SMS1XXX_BOARD_SIANO_STELLAR] = { .name = "Siano Stellar Digital Receiver", .type = SMS_STELLAR, }, [SMS1XXX_BOARD_SIANO_NOVA_A] = { .name = "Siano Nova A Digital Receiver", .type = SMS_NOVA_A0, }, [SMS1XXX_BOARD_SIANO_NOVA_B] = { .name = "Siano Nova B Digital Receiver", .type = SMS_NOVA_B0, }, [SMS1XXX_BOARD_SIANO_VEGA] = { .name = "Siano Vega Digital Receiver", .type = SMS_VEGA, }, [SMS1XXX_BOARD_HAUPPAUGE_CATAMOUNT] = { .name = "Hauppauge Catamount", .type = SMS_STELLAR, .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-stellar-dvbt-01.fw", }, [SMS1XXX_BOARD_HAUPPAUGE_OKEMO_A] = { .name = "Hauppauge Okemo-A", .type = SMS_NOVA_A0, .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-nova-a-dvbt-01.fw", }, [SMS1XXX_BOARD_HAUPPAUGE_OKEMO_B] = { .name = "Hauppauge Okemo-B", .type = SMS_NOVA_B0, .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-nova-b-dvbt-01.fw", }, [SMS1XXX_BOARD_HAUPPAUGE_WINDHAM] = { .name = "Hauppauge WinTV MiniStick", .type = SMS_NOVA_B0, .fw[DEVICE_MODE_ISDBT_BDA] = "sms1xxx-hcw-55xxx-isdbt-02.fw", .fw[DEVICE_MODE_DVBT_BDA] = 
"sms1xxx-hcw-55xxx-dvbt-02.fw", .rc_codes = RC_MAP_RC5_HAUPPAUGE_NEW, .board_cfg.leds_power = 26, .board_cfg.led0 = 27, .board_cfg.led1 = 28, .board_cfg.ir = 9, .led_power = 26, .led_lo = 27, .led_hi = 28, }, [SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD] = { .name = "Hauppauge WinTV MiniCard", .type = SMS_NOVA_B0, .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-hcw-55xxx-dvbt-02.fw", .lna_ctrl = 29, .board_cfg.foreign_lna0_ctrl = 29, .rf_switch = 17, .board_cfg.rf_switch_uhf = 17, }, [SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2] = { .name = "Hauppauge WinTV MiniCard", .type = SMS_NOVA_B0, .fw[DEVICE_MODE_DVBT_BDA] = "sms1xxx-hcw-55xxx-dvbt-02.fw", .lna_ctrl = -1, }, [SMS1XXX_BOARD_SIANO_NICE] = { /* 11 */ .name = "Siano Nice Digital Receiver", .type = SMS_NOVA_B0, }, [SMS1XXX_BOARD_SIANO_VENICE] = { /* 12 */ .name = "Siano Venice Digital Receiver", .type = SMS_VEGA, }, }; struct sms_board *sms_get_board(unsigned id) { BUG_ON(id >= ARRAY_SIZE(sms_boards)); return &sms_boards[id]; } EXPORT_SYMBOL_GPL(sms_get_board); static inline void sms_gpio_assign_11xx_default_led_config( struct smscore_gpio_config *pGpioConfig) { pGpioConfig->Direction = SMS_GPIO_DIRECTION_OUTPUT; pGpioConfig->InputCharacteristics = SMS_GPIO_INPUT_CHARACTERISTICS_NORMAL; pGpioConfig->OutputDriving = SMS_GPIO_OUTPUT_DRIVING_4mA; pGpioConfig->OutputSlewRate = SMS_GPIO_OUTPUT_SLEW_RATE_0_45_V_NS; pGpioConfig->PullUpDown = SMS_GPIO_PULL_UP_DOWN_NONE; } int sms_board_event(struct smscore_device_t *coredev, enum SMS_BOARD_EVENTS gevent) { struct smscore_gpio_config MyGpioConfig; sms_gpio_assign_11xx_default_led_config(&MyGpioConfig); switch (gevent) { case BOARD_EVENT_POWER_INIT: /* including hotplug */ break; /* BOARD_EVENT_BIND */ case BOARD_EVENT_POWER_SUSPEND: break; /* BOARD_EVENT_POWER_SUSPEND */ case BOARD_EVENT_POWER_RESUME: break; /* BOARD_EVENT_POWER_RESUME */ case BOARD_EVENT_BIND: break; /* BOARD_EVENT_BIND */ case BOARD_EVENT_SCAN_PROG: break; /* BOARD_EVENT_SCAN_PROG */ case BOARD_EVENT_SCAN_COMP: break; 
/* BOARD_EVENT_SCAN_COMP */ case BOARD_EVENT_EMERGENCY_WARNING_SIGNAL: break; /* BOARD_EVENT_EMERGENCY_WARNING_SIGNAL */ case BOARD_EVENT_FE_LOCK: break; /* BOARD_EVENT_FE_LOCK */ case BOARD_EVENT_FE_UNLOCK: break; /* BOARD_EVENT_FE_UNLOCK */ case BOARD_EVENT_DEMOD_LOCK: break; /* BOARD_EVENT_DEMOD_LOCK */ case BOARD_EVENT_DEMOD_UNLOCK: break; /* BOARD_EVENT_DEMOD_UNLOCK */ case BOARD_EVENT_RECEPTION_MAX_4: break; /* BOARD_EVENT_RECEPTION_MAX_4 */ case BOARD_EVENT_RECEPTION_3: break; /* BOARD_EVENT_RECEPTION_3 */ case BOARD_EVENT_RECEPTION_2: break; /* BOARD_EVENT_RECEPTION_2 */ case BOARD_EVENT_RECEPTION_1: break; /* BOARD_EVENT_RECEPTION_1 */ case BOARD_EVENT_RECEPTION_LOST_0: break; /* BOARD_EVENT_RECEPTION_LOST_0 */ case BOARD_EVENT_MULTIPLEX_OK: break; /* BOARD_EVENT_MULTIPLEX_OK */ case BOARD_EVENT_MULTIPLEX_ERRORS: break; /* BOARD_EVENT_MULTIPLEX_ERRORS */ default: sms_err("Unknown SMS board event"); break; } return 0; } EXPORT_SYMBOL_GPL(sms_board_event); static int sms_set_gpio(struct smscore_device_t *coredev, int pin, int enable) { int lvl, ret; u32 gpio; struct smscore_config_gpio gpioconfig = { .direction = SMS_GPIO_DIRECTION_OUTPUT, .pullupdown = SMS_GPIO_PULLUPDOWN_NONE, .inputcharacteristics = SMS_GPIO_INPUTCHARACTERISTICS_NORMAL, .outputslewrate = SMS_GPIO_OUTPUTSLEWRATE_FAST, .outputdriving = SMS_GPIO_OUTPUTDRIVING_4mA, }; if (pin == 0) return -EINVAL; if (pin < 0) { /* inverted gpio */ gpio = pin * -1; lvl = enable ? 0 : 1; } else { gpio = pin; lvl = enable ? 
1 : 0; } ret = smscore_configure_gpio(coredev, gpio, &gpioconfig); if (ret < 0) return ret; return smscore_set_gpio(coredev, gpio, lvl); } int sms_board_setup(struct smscore_device_t *coredev) { int board_id = smscore_get_board_id(coredev); struct sms_board *board = sms_get_board(board_id); switch (board_id) { case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM: /* turn off all LEDs */ sms_set_gpio(coredev, board->led_power, 0); sms_set_gpio(coredev, board->led_hi, 0); sms_set_gpio(coredev, board->led_lo, 0); break; case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2: case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD: /* turn off LNA */ sms_set_gpio(coredev, board->lna_ctrl, 0); break; } return 0; } EXPORT_SYMBOL_GPL(sms_board_setup); int sms_board_power(struct smscore_device_t *coredev, int onoff) { int board_id = smscore_get_board_id(coredev); struct sms_board *board = sms_get_board(board_id); switch (board_id) { case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM: /* power LED */ sms_set_gpio(coredev, board->led_power, onoff ? 1 : 0); break; case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2: case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD: /* LNA */ if (!onoff) sms_set_gpio(coredev, board->lna_ctrl, 0); break; } return 0; } EXPORT_SYMBOL_GPL(sms_board_power); int sms_board_led_feedback(struct smscore_device_t *coredev, int led) { int board_id = smscore_get_board_id(coredev); struct sms_board *board = sms_get_board(board_id); /* dont touch GPIO if LEDs are already set */ if (smscore_led_state(coredev, -1) == led) return 0; switch (board_id) { case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM: sms_set_gpio(coredev, board->led_lo, (led & SMS_LED_LO) ? 1 : 0); sms_set_gpio(coredev, board->led_hi, (led & SMS_LED_HI) ? 
1 : 0); smscore_led_state(coredev, led); break; } return 0; } EXPORT_SYMBOL_GPL(sms_board_led_feedback); int sms_board_lna_control(struct smscore_device_t *coredev, int onoff) { int board_id = smscore_get_board_id(coredev); struct sms_board *board = sms_get_board(board_id); sms_debug("%s: LNA %s", __func__, onoff ? "enabled" : "disabled"); switch (board_id) { case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2: case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD: sms_set_gpio(coredev, board->rf_switch, onoff ? 1 : 0); return sms_set_gpio(coredev, board->lna_ctrl, onoff ? 1 : 0); } return -EINVAL; } EXPORT_SYMBOL_GPL(sms_board_lna_control); int sms_board_load_modules(int id) { switch (id) { case SMS1XXX_BOARD_HAUPPAUGE_CATAMOUNT: case SMS1XXX_BOARD_HAUPPAUGE_OKEMO_A: case SMS1XXX_BOARD_HAUPPAUGE_OKEMO_B: case SMS1XXX_BOARD_HAUPPAUGE_WINDHAM: case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD: case SMS1XXX_BOARD_HAUPPAUGE_TIGER_MINICARD_R2: request_module("smsdvb"); break; default: /* do nothing */ break; } return 0; } EXPORT_SYMBOL_GPL(sms_board_load_modules);
gpl-2.0
VorkTeam/vorkKernel-LGP990
sound/soc/fsl/fsl_ssi.c
592
20775
/* * Freescale SSI ALSA SoC Digital Audio Interface (DAI) driver * * Author: Timur Tabi <timur@freescale.com> * * Copyright 2007-2008 Freescale Semiconductor, Inc. This file is licensed * under the terms of the GNU General Public License version 2. This * program is licensed "as is" without any warranty of any kind, whether * express or implied. */ #include <linux/init.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/device.h> #include <linux/delay.h> #include <sound/core.h> #include <sound/pcm.h> #include <sound/pcm_params.h> #include <sound/initval.h> #include <sound/soc.h> #include <asm/immap_86xx.h> #include "fsl_ssi.h" /** * FSLSSI_I2S_RATES: sample rates supported by the I2S * * This driver currently only supports the SSI running in I2S slave mode, * which means the codec determines the sample rate. Therefore, we tell * ALSA that we support all rates and let the codec driver decide what rates * are really supported. */ #define FSLSSI_I2S_RATES (SNDRV_PCM_RATE_5512 | SNDRV_PCM_RATE_8000_192000 | \ SNDRV_PCM_RATE_CONTINUOUS) /** * FSLSSI_I2S_FORMATS: audio formats supported by the SSI * * This driver currently only supports the SSI running in I2S slave mode. * * The SSI has a limitation in that the samples must be in the same byte * order as the host CPU. This is because when multiple bytes are written * to the STX register, the bytes and bits must be written in the same * order. The STX is a shift register, so all the bits need to be aligned * (bit-endianness must match byte-endianness). Processors typically write * the bits within a byte in the same order that the bytes of a word are * written in. So if the host CPU is big-endian, then only big-endian * samples will be written to STX properly. 
*/ #ifdef __BIG_ENDIAN #define FSLSSI_I2S_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_BE | \ SNDRV_PCM_FMTBIT_S18_3BE | SNDRV_PCM_FMTBIT_S20_3BE | \ SNDRV_PCM_FMTBIT_S24_3BE | SNDRV_PCM_FMTBIT_S24_BE) #else #define FSLSSI_I2S_FORMATS (SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_LE | \ SNDRV_PCM_FMTBIT_S18_3LE | SNDRV_PCM_FMTBIT_S20_3LE | \ SNDRV_PCM_FMTBIT_S24_3LE | SNDRV_PCM_FMTBIT_S24_LE) #endif /* SIER bitflag of interrupts to enable */ #define SIER_FLAGS (CCSR_SSI_SIER_TFRC_EN | CCSR_SSI_SIER_TDMAE | \ CCSR_SSI_SIER_TIE | CCSR_SSI_SIER_TUE0_EN | \ CCSR_SSI_SIER_TUE1_EN | CCSR_SSI_SIER_RFRC_EN | \ CCSR_SSI_SIER_RDMAE | CCSR_SSI_SIER_RIE | \ CCSR_SSI_SIER_ROE0_EN | CCSR_SSI_SIER_ROE1_EN) /** * fsl_ssi_private: per-SSI private data * * @name: short name for this device ("SSI0", "SSI1", etc) * @ssi: pointer to the SSI's registers * @ssi_phys: physical address of the SSI registers * @irq: IRQ of this SSI * @first_stream: pointer to the stream that was opened first * @second_stream: pointer to second stream * @dev: struct device pointer * @playback: the number of playback streams opened * @capture: the number of capture streams opened * @asynchronous: 0=synchronous mode, 1=asynchronous mode * @cpu_dai: the CPU DAI for this device * @dev_attr: the sysfs device attribute structure * @stats: SSI statistics */ struct fsl_ssi_private { char name[8]; struct ccsr_ssi __iomem *ssi; dma_addr_t ssi_phys; unsigned int irq; struct snd_pcm_substream *first_stream; struct snd_pcm_substream *second_stream; struct device *dev; unsigned int playback; unsigned int capture; int asynchronous; struct snd_soc_dai cpu_dai; struct device_attribute dev_attr; struct { unsigned int rfrc; unsigned int tfrc; unsigned int cmdau; unsigned int cmddu; unsigned int rxt; unsigned int rdr1; unsigned int rdr0; unsigned int tde1; unsigned int tde0; unsigned int roe1; unsigned int roe0; unsigned int tue1; unsigned int tue0; unsigned int tfs; unsigned int rfs; unsigned int tls; unsigned int rls; 
unsigned int rff1; unsigned int rff0; unsigned int tfe1; unsigned int tfe0; } stats; }; /** * fsl_ssi_isr: SSI interrupt handler * * Although it's possible to use the interrupt handler to send and receive * data to/from the SSI, we use the DMA instead. Programming is more * complicated, but the performance is much better. * * This interrupt handler is used only to gather statistics. * * @irq: IRQ of the SSI device * @dev_id: pointer to the ssi_private structure for this SSI device */ static irqreturn_t fsl_ssi_isr(int irq, void *dev_id) { struct fsl_ssi_private *ssi_private = dev_id; struct ccsr_ssi __iomem *ssi = ssi_private->ssi; irqreturn_t ret = IRQ_NONE; __be32 sisr; __be32 sisr2 = 0; /* We got an interrupt, so read the status register to see what we were interrupted for. We mask it with the Interrupt Enable register so that we only check for events that we're interested in. */ sisr = in_be32(&ssi->sisr) & SIER_FLAGS; if (sisr & CCSR_SSI_SISR_RFRC) { ssi_private->stats.rfrc++; sisr2 |= CCSR_SSI_SISR_RFRC; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_TFRC) { ssi_private->stats.tfrc++; sisr2 |= CCSR_SSI_SISR_TFRC; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_CMDAU) { ssi_private->stats.cmdau++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_CMDDU) { ssi_private->stats.cmddu++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_RXT) { ssi_private->stats.rxt++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_RDR1) { ssi_private->stats.rdr1++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_RDR0) { ssi_private->stats.rdr0++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_TDE1) { ssi_private->stats.tde1++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_TDE0) { ssi_private->stats.tde0++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_ROE1) { ssi_private->stats.roe1++; sisr2 |= CCSR_SSI_SISR_ROE1; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_ROE0) { ssi_private->stats.roe0++; sisr2 |= CCSR_SSI_SISR_ROE0; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_TUE1) { 
ssi_private->stats.tue1++; sisr2 |= CCSR_SSI_SISR_TUE1; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_TUE0) { ssi_private->stats.tue0++; sisr2 |= CCSR_SSI_SISR_TUE0; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_TFS) { ssi_private->stats.tfs++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_RFS) { ssi_private->stats.rfs++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_TLS) { ssi_private->stats.tls++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_RLS) { ssi_private->stats.rls++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_RFF1) { ssi_private->stats.rff1++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_RFF0) { ssi_private->stats.rff0++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_TFE1) { ssi_private->stats.tfe1++; ret = IRQ_HANDLED; } if (sisr & CCSR_SSI_SISR_TFE0) { ssi_private->stats.tfe0++; ret = IRQ_HANDLED; } /* Clear the bits that we set */ if (sisr2) out_be32(&ssi->sisr, sisr2); return ret; } /** * fsl_ssi_startup: create a new substream * * This is the first function called when a stream is opened. * * If this is the first stream open, then grab the IRQ and program most of * the SSI registers. */ static int fsl_ssi_startup(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct fsl_ssi_private *ssi_private = rtd->dai->cpu_dai->private_data; /* * If this is the first stream opened, then request the IRQ * and initialize the SSI registers. */ if (!ssi_private->playback && !ssi_private->capture) { struct ccsr_ssi __iomem *ssi = ssi_private->ssi; int ret; ret = request_irq(ssi_private->irq, fsl_ssi_isr, 0, ssi_private->name, ssi_private); if (ret < 0) { dev_err(substream->pcm->card->dev, "could not claim irq %u\n", ssi_private->irq); return ret; } /* * Section 16.5 of the MPC8610 reference manual says that the * SSI needs to be disabled before updating the registers we set * here. 
*/ clrbits32(&ssi->scr, CCSR_SSI_SCR_SSIEN); /* * Program the SSI into I2S Slave Non-Network Synchronous mode. * Also enable the transmit and receive FIFO. * * FIXME: Little-endian samples require a different shift dir */ clrsetbits_be32(&ssi->scr, CCSR_SSI_SCR_I2S_MODE_MASK | CCSR_SSI_SCR_SYN, CCSR_SSI_SCR_TFR_CLK_DIS | CCSR_SSI_SCR_I2S_MODE_SLAVE | (ssi_private->asynchronous ? 0 : CCSR_SSI_SCR_SYN)); out_be32(&ssi->stcr, CCSR_SSI_STCR_TXBIT0 | CCSR_SSI_STCR_TFEN0 | CCSR_SSI_STCR_TFSI | CCSR_SSI_STCR_TEFS | CCSR_SSI_STCR_TSCKP); out_be32(&ssi->srcr, CCSR_SSI_SRCR_RXBIT0 | CCSR_SSI_SRCR_RFEN0 | CCSR_SSI_SRCR_RFSI | CCSR_SSI_SRCR_REFS | CCSR_SSI_SRCR_RSCKP); /* * The DC and PM bits are only used if the SSI is the clock * master. */ /* 4. Enable the interrupts and DMA requests */ out_be32(&ssi->sier, SIER_FLAGS); /* * Set the watermark for transmit FIFI 0 and receive FIFO 0. We * don't use FIFO 1. Since the SSI only supports stereo, the * watermark should never be an odd number. */ out_be32(&ssi->sfcsr, CCSR_SSI_SFCSR_TFWM0(6) | CCSR_SSI_SFCSR_RFWM0(2)); /* * We keep the SSI disabled because if we enable it, then the * DMA controller will start. It's not supposed to start until * the SCR.TE (or SCR.RE) bit is set, but it does anyway. The * DMA controller will transfer one "BWC" of data (i.e. the * amount of data that the MR.BWC bits are set to). The reason * this is bad is because at this point, the PCM driver has not * finished initializing the DMA controller. */ } if (!ssi_private->first_stream) ssi_private->first_stream = substream; else { /* This is the second stream open, so we need to impose sample * rate and maybe sample size constraints. Note that this can * cause a race condition if the second stream is opened before * the first stream is fully initialized. * * We provide some protection by checking to make sure the first * stream is initialized, but it's not perfect. ALSA sometimes * re-initializes the driver with a different sample rate or * size. 
If the second stream is opened before the first stream * has received its final parameters, then the second stream may * be constrained to the wrong sample rate or size. * * FIXME: This code does not handle opening and closing streams * repeatedly. If you open two streams and then close the first * one, you may not be able to open another stream until you * close the second one as well. */ struct snd_pcm_runtime *first_runtime = ssi_private->first_stream->runtime; if (!first_runtime->sample_bits) { dev_err(substream->pcm->card->dev, "set sample size in %s stream first\n", substream->stream == SNDRV_PCM_STREAM_PLAYBACK ? "capture" : "playback"); return -EAGAIN; } /* If we're in synchronous mode, then we need to constrain * the sample size as well. We don't support independent sample * rates in asynchronous mode. */ if (!ssi_private->asynchronous) snd_pcm_hw_constraint_minmax(substream->runtime, SNDRV_PCM_HW_PARAM_SAMPLE_BITS, first_runtime->sample_bits, first_runtime->sample_bits); ssi_private->second_stream = substream; } if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ssi_private->playback++; if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ssi_private->capture++; return 0; } /** * fsl_ssi_hw_params - program the sample size * * Most of the SSI registers have been programmed in the startup function, * but the word length must be programmed here. Unfortunately, programming * the SxCCR.WL bits requires the SSI to be temporarily disabled. This can * cause a problem with supporting simultaneous playback and capture. If * the SSI is already playing a stream, then that stream may be temporarily * stopped when you start capture. * * Note: The SxCCR.DC and SxCCR.PM bits are only used if the SSI is the * clock master. 
*/ static int fsl_ssi_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *hw_params, struct snd_soc_dai *cpu_dai) { struct fsl_ssi_private *ssi_private = cpu_dai->private_data; if (substream == ssi_private->first_stream) { struct ccsr_ssi __iomem *ssi = ssi_private->ssi; unsigned int sample_size = snd_pcm_format_width(params_format(hw_params)); u32 wl = CCSR_SSI_SxCCR_WL(sample_size); /* The SSI should always be disabled at this points (SSIEN=0) */ /* In synchronous mode, the SSI uses STCCR for capture */ if ((substream->stream == SNDRV_PCM_STREAM_PLAYBACK) || !ssi_private->asynchronous) clrsetbits_be32(&ssi->stccr, CCSR_SSI_SxCCR_WL_MASK, wl); else clrsetbits_be32(&ssi->srccr, CCSR_SSI_SxCCR_WL_MASK, wl); } return 0; } /** * fsl_ssi_trigger: start and stop the DMA transfer. * * This function is called by ALSA to start, stop, pause, and resume the DMA * transfer of data. * * The DMA channel is in external master start and pause mode, which * means the SSI completely controls the flow of data. */ static int fsl_ssi_trigger(struct snd_pcm_substream *substream, int cmd, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct fsl_ssi_private *ssi_private = rtd->dai->cpu_dai->private_data; struct ccsr_ssi __iomem *ssi = ssi_private->ssi; switch (cmd) { case SNDRV_PCM_TRIGGER_START: clrbits32(&ssi->scr, CCSR_SSI_SCR_SSIEN); case SNDRV_PCM_TRIGGER_PAUSE_RELEASE: if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) setbits32(&ssi->scr, CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_TE); else setbits32(&ssi->scr, CCSR_SSI_SCR_SSIEN | CCSR_SSI_SCR_RE); break; case SNDRV_PCM_TRIGGER_STOP: case SNDRV_PCM_TRIGGER_PAUSE_PUSH: if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) clrbits32(&ssi->scr, CCSR_SSI_SCR_TE); else clrbits32(&ssi->scr, CCSR_SSI_SCR_RE); break; default: return -EINVAL; } return 0; } /** * fsl_ssi_shutdown: shutdown the SSI * * Shutdown the SSI if there are no other substreams open. 
*/ static void fsl_ssi_shutdown(struct snd_pcm_substream *substream, struct snd_soc_dai *dai) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct fsl_ssi_private *ssi_private = rtd->dai->cpu_dai->private_data; if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) ssi_private->playback--; if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) ssi_private->capture--; if (ssi_private->first_stream == substream) ssi_private->first_stream = ssi_private->second_stream; ssi_private->second_stream = NULL; /* * If this is the last active substream, disable the SSI and release * the IRQ. */ if (!ssi_private->playback && !ssi_private->capture) { struct ccsr_ssi __iomem *ssi = ssi_private->ssi; clrbits32(&ssi->scr, CCSR_SSI_SCR_SSIEN); free_irq(ssi_private->irq, ssi_private); } } /** * fsl_ssi_set_sysclk: set the clock frequency and direction * * This function is called by the machine driver to tell us what the clock * frequency and direction are. * * Currently, we only support operating as a clock slave (SND_SOC_CLOCK_IN), * and we don't care about the frequency. Return an error if the direction * is not SND_SOC_CLOCK_IN. * * @clk_id: reserved, should be zero * @freq: the frequency of the given clock ID, currently ignored * @dir: SND_SOC_CLOCK_IN (clock slave) or SND_SOC_CLOCK_OUT (clock master) */ static int fsl_ssi_set_sysclk(struct snd_soc_dai *cpu_dai, int clk_id, unsigned int freq, int dir) { return (dir == SND_SOC_CLOCK_IN) ? 0 : -EINVAL; } /** * fsl_ssi_set_fmt: set the serial format. * * This function is called by the machine driver to tell us what serial * format to use. * * Currently, we only support I2S mode. Return an error if the format is * not SND_SOC_DAIFMT_I2S. * * @format: one of SND_SOC_DAIFMT_xxx */ static int fsl_ssi_set_fmt(struct snd_soc_dai *cpu_dai, unsigned int format) { return (format == SND_SOC_DAIFMT_I2S) ? 
0 : -EINVAL; } /** * fsl_ssi_dai_template: template CPU DAI for the SSI */ static struct snd_soc_dai_ops fsl_ssi_dai_ops = { .startup = fsl_ssi_startup, .hw_params = fsl_ssi_hw_params, .shutdown = fsl_ssi_shutdown, .trigger = fsl_ssi_trigger, .set_sysclk = fsl_ssi_set_sysclk, .set_fmt = fsl_ssi_set_fmt, }; static struct snd_soc_dai fsl_ssi_dai_template = { .playback = { /* The SSI does not support monaural audio. */ .channels_min = 2, .channels_max = 2, .rates = FSLSSI_I2S_RATES, .formats = FSLSSI_I2S_FORMATS, }, .capture = { .channels_min = 2, .channels_max = 2, .rates = FSLSSI_I2S_RATES, .formats = FSLSSI_I2S_FORMATS, }, .ops = &fsl_ssi_dai_ops, }; /* Show the statistics of a flag only if its interrupt is enabled. The * compiler will optimze this code to a no-op if the interrupt is not * enabled. */ #define SIER_SHOW(flag, name) \ do { \ if (SIER_FLAGS & CCSR_SSI_SIER_##flag) \ length += sprintf(buf + length, #name "=%u\n", \ ssi_private->stats.name); \ } while (0) /** * fsl_sysfs_ssi_show: display SSI statistics * * Display the statistics for the current SSI device. To avoid confusion, * we only show those counts that are enabled. 
*/ static ssize_t fsl_sysfs_ssi_show(struct device *dev, struct device_attribute *attr, char *buf) { struct fsl_ssi_private *ssi_private = container_of(attr, struct fsl_ssi_private, dev_attr); ssize_t length = 0; SIER_SHOW(RFRC_EN, rfrc); SIER_SHOW(TFRC_EN, tfrc); SIER_SHOW(CMDAU_EN, cmdau); SIER_SHOW(CMDDU_EN, cmddu); SIER_SHOW(RXT_EN, rxt); SIER_SHOW(RDR1_EN, rdr1); SIER_SHOW(RDR0_EN, rdr0); SIER_SHOW(TDE1_EN, tde1); SIER_SHOW(TDE0_EN, tde0); SIER_SHOW(ROE1_EN, roe1); SIER_SHOW(ROE0_EN, roe0); SIER_SHOW(TUE1_EN, tue1); SIER_SHOW(TUE0_EN, tue0); SIER_SHOW(TFS_EN, tfs); SIER_SHOW(RFS_EN, rfs); SIER_SHOW(TLS_EN, tls); SIER_SHOW(RLS_EN, rls); SIER_SHOW(RFF1_EN, rff1); SIER_SHOW(RFF0_EN, rff0); SIER_SHOW(TFE1_EN, tfe1); SIER_SHOW(TFE0_EN, tfe0); return length; } /** * fsl_ssi_create_dai: create a snd_soc_dai structure * * This function is called by the machine driver to create a snd_soc_dai * structure. The function creates an ssi_private object, which contains * the snd_soc_dai. It also creates the sysfs statistics device. 
*/ struct snd_soc_dai *fsl_ssi_create_dai(struct fsl_ssi_info *ssi_info) { struct snd_soc_dai *fsl_ssi_dai; struct fsl_ssi_private *ssi_private; int ret = 0; struct device_attribute *dev_attr; ssi_private = kzalloc(sizeof(struct fsl_ssi_private), GFP_KERNEL); if (!ssi_private) { dev_err(ssi_info->dev, "could not allocate DAI object\n"); return NULL; } memcpy(&ssi_private->cpu_dai, &fsl_ssi_dai_template, sizeof(struct snd_soc_dai)); fsl_ssi_dai = &ssi_private->cpu_dai; dev_attr = &ssi_private->dev_attr; sprintf(ssi_private->name, "ssi%u", (u8) ssi_info->id); ssi_private->ssi = ssi_info->ssi; ssi_private->ssi_phys = ssi_info->ssi_phys; ssi_private->irq = ssi_info->irq; ssi_private->dev = ssi_info->dev; ssi_private->asynchronous = ssi_info->asynchronous; dev_set_drvdata(ssi_private->dev, fsl_ssi_dai); /* Initialize the the device_attribute structure */ dev_attr->attr.name = "ssi-stats"; dev_attr->attr.mode = S_IRUGO; dev_attr->show = fsl_sysfs_ssi_show; ret = device_create_file(ssi_private->dev, dev_attr); if (ret) { dev_err(ssi_info->dev, "could not create sysfs %s file\n", ssi_private->dev_attr.attr.name); kfree(fsl_ssi_dai); return NULL; } fsl_ssi_dai->private_data = ssi_private; fsl_ssi_dai->name = ssi_private->name; fsl_ssi_dai->id = ssi_info->id; fsl_ssi_dai->dev = ssi_info->dev; fsl_ssi_dai->symmetric_rates = 1; ret = snd_soc_register_dai(fsl_ssi_dai); if (ret != 0) { dev_err(ssi_info->dev, "failed to register DAI: %d\n", ret); kfree(fsl_ssi_dai); return NULL; } return fsl_ssi_dai; } EXPORT_SYMBOL_GPL(fsl_ssi_create_dai); /** * fsl_ssi_destroy_dai: destroy the snd_soc_dai object * * This function undoes the operations of fsl_ssi_create_dai() */ void fsl_ssi_destroy_dai(struct snd_soc_dai *fsl_ssi_dai) { struct fsl_ssi_private *ssi_private = container_of(fsl_ssi_dai, struct fsl_ssi_private, cpu_dai); device_remove_file(ssi_private->dev, &ssi_private->dev_attr); snd_soc_unregister_dai(&ssi_private->cpu_dai); kfree(ssi_private); } 
EXPORT_SYMBOL_GPL(fsl_ssi_destroy_dai); static int __init fsl_ssi_init(void) { printk(KERN_INFO "Freescale Synchronous Serial Interface (SSI) ASoC Driver\n"); return 0; } module_init(fsl_ssi_init); MODULE_AUTHOR("Timur Tabi <timur@freescale.com>"); MODULE_DESCRIPTION("Freescale Synchronous Serial Interface (SSI) ASoC Driver"); MODULE_LICENSE("GPL");
gpl-2.0
yncconsulting/HTC_Express_Kernel
drivers/net/bmac.c
848
42586
/* * Network device driver for the BMAC ethernet controller on * Apple Powermacs. Assumes it's under a DBDMA controller. * * Copyright (C) 1998 Randy Gobbel. * * May 1999, Al Viro: proper release of /proc/net/bmac entry, switched to * dynamic procfs inode. */ #include <linux/module.h> #include <linux/kernel.h> #include <linux/netdevice.h> #include <linux/etherdevice.h> #include <linux/delay.h> #include <linux/string.h> #include <linux/timer.h> #include <linux/proc_fs.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/crc32.h> #include <linux/bitrev.h> #include <linux/ethtool.h> #include <linux/slab.h> #include <asm/prom.h> #include <asm/dbdma.h> #include <asm/io.h> #include <asm/page.h> #include <asm/pgtable.h> #include <asm/machdep.h> #include <asm/pmac_feature.h> #include <asm/macio.h> #include <asm/irq.h> #include "bmac.h" #define trunc_page(x) ((void *)(((unsigned long)(x)) & ~((unsigned long)(PAGE_SIZE - 1)))) #define round_page(x) trunc_page(((unsigned long)(x)) + ((unsigned long)(PAGE_SIZE - 1))) /* * CRC polynomial - used in working out multicast filter bits. 
*/ #define ENET_CRCPOLY 0x04c11db7 /* switch to use multicast code lifted from sunhme driver */ #define SUNHME_MULTICAST #define N_RX_RING 64 #define N_TX_RING 32 #define MAX_TX_ACTIVE 1 #define ETHERCRC 4 #define ETHERMINPACKET 64 #define ETHERMTU 1500 #define RX_BUFLEN (ETHERMTU + 14 + ETHERCRC + 2) #define TX_TIMEOUT HZ /* 1 second */ /* Bits in transmit DMA status */ #define TX_DMA_ERR 0x80 #define XXDEBUG(args) struct bmac_data { /* volatile struct bmac *bmac; */ struct sk_buff_head *queue; volatile struct dbdma_regs __iomem *tx_dma; int tx_dma_intr; volatile struct dbdma_regs __iomem *rx_dma; int rx_dma_intr; volatile struct dbdma_cmd *tx_cmds; /* xmit dma command list */ volatile struct dbdma_cmd *rx_cmds; /* recv dma command list */ struct macio_dev *mdev; int is_bmac_plus; struct sk_buff *rx_bufs[N_RX_RING]; int rx_fill; int rx_empty; struct sk_buff *tx_bufs[N_TX_RING]; int tx_fill; int tx_empty; unsigned char tx_fullup; struct timer_list tx_timeout; int timeout_active; int sleeping; int opened; unsigned short hash_use_count[64]; unsigned short hash_table_mask[4]; spinlock_t lock; }; #if 0 /* Move that to ethtool */ typedef struct bmac_reg_entry { char *name; unsigned short reg_offset; } bmac_reg_entry_t; #define N_REG_ENTRIES 31 static bmac_reg_entry_t reg_entries[N_REG_ENTRIES] = { {"MEMADD", MEMADD}, {"MEMDATAHI", MEMDATAHI}, {"MEMDATALO", MEMDATALO}, {"TXPNTR", TXPNTR}, {"RXPNTR", RXPNTR}, {"IPG1", IPG1}, {"IPG2", IPG2}, {"ALIMIT", ALIMIT}, {"SLOT", SLOT}, {"PALEN", PALEN}, {"PAPAT", PAPAT}, {"TXSFD", TXSFD}, {"JAM", JAM}, {"TXCFG", TXCFG}, {"TXMAX", TXMAX}, {"TXMIN", TXMIN}, {"PAREG", PAREG}, {"DCNT", DCNT}, {"NCCNT", NCCNT}, {"NTCNT", NTCNT}, {"EXCNT", EXCNT}, {"LTCNT", LTCNT}, {"TXSM", TXSM}, {"RXCFG", RXCFG}, {"RXMAX", RXMAX}, {"RXMIN", RXMIN}, {"FRCNT", FRCNT}, {"AECNT", AECNT}, {"FECNT", FECNT}, {"RXSM", RXSM}, {"RXCV", RXCV} }; #endif static unsigned char *bmac_emergency_rxbuf; /* * Number of bytes of private data per BMAC: allow enough for * 
the rx and tx dma commands plus a branch dma command each, * and another 16 bytes to allow us to align the dma command * buffers on a 16 byte boundary. */ #define PRIV_BYTES (sizeof(struct bmac_data) \ + (N_RX_RING + N_TX_RING + 4) * sizeof(struct dbdma_cmd) \ + sizeof(struct sk_buff_head)) static int bmac_open(struct net_device *dev); static int bmac_close(struct net_device *dev); static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev); static void bmac_set_multicast(struct net_device *dev); static void bmac_reset_and_enable(struct net_device *dev); static void bmac_start_chip(struct net_device *dev); static void bmac_init_chip(struct net_device *dev); static void bmac_init_registers(struct net_device *dev); static void bmac_enable_and_reset_chip(struct net_device *dev); static int bmac_set_address(struct net_device *dev, void *addr); static irqreturn_t bmac_misc_intr(int irq, void *dev_id); static irqreturn_t bmac_txdma_intr(int irq, void *dev_id); static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id); static void bmac_set_timeout(struct net_device *dev); static void bmac_tx_timeout(unsigned long data); static int bmac_output(struct sk_buff *skb, struct net_device *dev); static void bmac_start(struct net_device *dev); #define DBDMA_SET(x) ( ((x) | (x) << 16) ) #define DBDMA_CLEAR(x) ( (x) << 16) static inline void dbdma_st32(volatile __u32 __iomem *a, unsigned long x) { __asm__ volatile( "stwbrx %0,0,%1" : : "r" (x), "r" (a) : "memory"); } static inline unsigned long dbdma_ld32(volatile __u32 __iomem *a) { __u32 swap; __asm__ volatile ("lwbrx %0,0,%1" : "=r" (swap) : "r" (a)); return swap; } static void dbdma_continue(volatile struct dbdma_regs __iomem *dmap) { dbdma_st32(&dmap->control, DBDMA_SET(RUN|WAKE) | DBDMA_CLEAR(PAUSE|DEAD)); eieio(); } static void dbdma_reset(volatile struct dbdma_regs __iomem *dmap) { dbdma_st32(&dmap->control, DBDMA_CLEAR(ACTIVE|DEAD|WAKE|FLUSH|PAUSE|RUN)); eieio(); while (dbdma_ld32(&dmap->status) & RUN) 
eieio(); } static void dbdma_setcmd(volatile struct dbdma_cmd *cp, unsigned short cmd, unsigned count, unsigned long addr, unsigned long cmd_dep) { out_le16(&cp->command, cmd); out_le16(&cp->req_count, count); out_le32(&cp->phy_addr, addr); out_le32(&cp->cmd_dep, cmd_dep); out_le16(&cp->xfer_status, 0); out_le16(&cp->res_count, 0); } static inline void bmwrite(struct net_device *dev, unsigned long reg_offset, unsigned data ) { out_le16((void __iomem *)dev->base_addr + reg_offset, data); } static inline unsigned short bmread(struct net_device *dev, unsigned long reg_offset ) { return in_le16((void __iomem *)dev->base_addr + reg_offset); } static void bmac_enable_and_reset_chip(struct net_device *dev) { struct bmac_data *bp = netdev_priv(dev); volatile struct dbdma_regs __iomem *rd = bp->rx_dma; volatile struct dbdma_regs __iomem *td = bp->tx_dma; if (rd) dbdma_reset(rd); if (td) dbdma_reset(td); pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 1); } #define MIFDELAY udelay(10) static unsigned int bmac_mif_readbits(struct net_device *dev, int nb) { unsigned int val = 0; while (--nb >= 0) { bmwrite(dev, MIFCSR, 0); MIFDELAY; if (bmread(dev, MIFCSR) & 8) val |= 1 << nb; bmwrite(dev, MIFCSR, 1); MIFDELAY; } bmwrite(dev, MIFCSR, 0); MIFDELAY; bmwrite(dev, MIFCSR, 1); MIFDELAY; return val; } static void bmac_mif_writebits(struct net_device *dev, unsigned int val, int nb) { int b; while (--nb >= 0) { b = (val & (1 << nb))? 
6: 4; bmwrite(dev, MIFCSR, b); MIFDELAY; bmwrite(dev, MIFCSR, b|1); MIFDELAY; } } static unsigned int bmac_mif_read(struct net_device *dev, unsigned int addr) { unsigned int val; bmwrite(dev, MIFCSR, 4); MIFDELAY; bmac_mif_writebits(dev, ~0U, 32); bmac_mif_writebits(dev, 6, 4); bmac_mif_writebits(dev, addr, 10); bmwrite(dev, MIFCSR, 2); MIFDELAY; bmwrite(dev, MIFCSR, 1); MIFDELAY; val = bmac_mif_readbits(dev, 17); bmwrite(dev, MIFCSR, 4); MIFDELAY; return val; } static void bmac_mif_write(struct net_device *dev, unsigned int addr, unsigned int val) { bmwrite(dev, MIFCSR, 4); MIFDELAY; bmac_mif_writebits(dev, ~0U, 32); bmac_mif_writebits(dev, 5, 4); bmac_mif_writebits(dev, addr, 10); bmac_mif_writebits(dev, 2, 2); bmac_mif_writebits(dev, val, 16); bmac_mif_writebits(dev, 3, 2); } static void bmac_init_registers(struct net_device *dev) { struct bmac_data *bp = netdev_priv(dev); volatile unsigned short regValue; unsigned short *pWord16; int i; /* XXDEBUG(("bmac: enter init_registers\n")); */ bmwrite(dev, RXRST, RxResetValue); bmwrite(dev, TXRST, TxResetBit); i = 100; do { --i; udelay(10000); regValue = bmread(dev, TXRST); /* wait for reset to clear..acknowledge */ } while ((regValue & TxResetBit) && i > 0); if (!bp->is_bmac_plus) { regValue = bmread(dev, XCVRIF); regValue |= ClkBit | SerialMode | COLActiveLow; bmwrite(dev, XCVRIF, regValue); udelay(10000); } bmwrite(dev, RSEED, (unsigned short)0x1968); regValue = bmread(dev, XIFC); regValue |= TxOutputEnable; bmwrite(dev, XIFC, regValue); bmread(dev, PAREG); /* set collision counters to 0 */ bmwrite(dev, NCCNT, 0); bmwrite(dev, NTCNT, 0); bmwrite(dev, EXCNT, 0); bmwrite(dev, LTCNT, 0); /* set rx counters to 0 */ bmwrite(dev, FRCNT, 0); bmwrite(dev, LECNT, 0); bmwrite(dev, AECNT, 0); bmwrite(dev, FECNT, 0); bmwrite(dev, RXCV, 0); /* set tx fifo information */ bmwrite(dev, TXTH, 4); /* 4 octets before tx starts */ bmwrite(dev, TXFIFOCSR, 0); /* first disable txFIFO */ bmwrite(dev, TXFIFOCSR, TxFIFOEnable ); /* set rx 
fifo information */ bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */ bmwrite(dev, RXFIFOCSR, RxFIFOEnable ); //bmwrite(dev, TXCFG, TxMACEnable); /* TxNeverGiveUp maybe later */ bmread(dev, STATUS); /* read it just to clear it */ /* zero out the chip Hash Filter registers */ for (i=0; i<4; i++) bp->hash_table_mask[i] = 0; bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */ bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */ bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */ bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */ pWord16 = (unsigned short *)dev->dev_addr; bmwrite(dev, MADD0, *pWord16++); bmwrite(dev, MADD1, *pWord16++); bmwrite(dev, MADD2, *pWord16); bmwrite(dev, RXCFG, RxCRCNoStrip | RxHashFilterEnable | RxRejectOwnPackets); bmwrite(dev, INTDISABLE, EnableNormal); } #if 0 static void bmac_disable_interrupts(struct net_device *dev) { bmwrite(dev, INTDISABLE, DisableAll); } static void bmac_enable_interrupts(struct net_device *dev) { bmwrite(dev, INTDISABLE, EnableNormal); } #endif static void bmac_start_chip(struct net_device *dev) { struct bmac_data *bp = netdev_priv(dev); volatile struct dbdma_regs __iomem *rd = bp->rx_dma; unsigned short oldConfig; /* enable rx dma channel */ dbdma_continue(rd); oldConfig = bmread(dev, TXCFG); bmwrite(dev, TXCFG, oldConfig | TxMACEnable ); /* turn on rx plus any other bits already on (promiscuous possibly) */ oldConfig = bmread(dev, RXCFG); bmwrite(dev, RXCFG, oldConfig | RxMACEnable ); udelay(20000); } static void bmac_init_phy(struct net_device *dev) { unsigned int addr; struct bmac_data *bp = netdev_priv(dev); printk(KERN_DEBUG "phy registers:"); for (addr = 0; addr < 32; ++addr) { if ((addr & 7) == 0) printk(KERN_DEBUG); printk(KERN_CONT " %.4x", bmac_mif_read(dev, addr)); } printk(KERN_CONT "\n"); if (bp->is_bmac_plus) { unsigned int capable, ctrl; ctrl = bmac_mif_read(dev, 0); capable = ((bmac_mif_read(dev, 1) & 0xf800) >> 6) | 1; if (bmac_mif_read(dev, 
4) != capable || (ctrl & 0x1000) == 0) { bmac_mif_write(dev, 4, capable); bmac_mif_write(dev, 0, 0x1200); } else bmac_mif_write(dev, 0, 0x1000); } } static void bmac_init_chip(struct net_device *dev) { bmac_init_phy(dev); bmac_init_registers(dev); } #ifdef CONFIG_PM static int bmac_suspend(struct macio_dev *mdev, pm_message_t state) { struct net_device* dev = macio_get_drvdata(mdev); struct bmac_data *bp = netdev_priv(dev); unsigned long flags; unsigned short config; int i; netif_device_detach(dev); /* prolly should wait for dma to finish & turn off the chip */ spin_lock_irqsave(&bp->lock, flags); if (bp->timeout_active) { del_timer(&bp->tx_timeout); bp->timeout_active = 0; } disable_irq(dev->irq); disable_irq(bp->tx_dma_intr); disable_irq(bp->rx_dma_intr); bp->sleeping = 1; spin_unlock_irqrestore(&bp->lock, flags); if (bp->opened) { volatile struct dbdma_regs __iomem *rd = bp->rx_dma; volatile struct dbdma_regs __iomem *td = bp->tx_dma; config = bmread(dev, RXCFG); bmwrite(dev, RXCFG, (config & ~RxMACEnable)); config = bmread(dev, TXCFG); bmwrite(dev, TXCFG, (config & ~TxMACEnable)); bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */ /* disable rx and tx dma */ st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ /* free some skb's */ for (i=0; i<N_RX_RING; i++) { if (bp->rx_bufs[i] != NULL) { dev_kfree_skb(bp->rx_bufs[i]); bp->rx_bufs[i] = NULL; } } for (i = 0; i<N_TX_RING; i++) { if (bp->tx_bufs[i] != NULL) { dev_kfree_skb(bp->tx_bufs[i]); bp->tx_bufs[i] = NULL; } } } pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); return 0; } static int bmac_resume(struct macio_dev *mdev) { struct net_device* dev = macio_get_drvdata(mdev); struct bmac_data *bp = netdev_priv(dev); /* see if this is enough */ if (bp->opened) bmac_reset_and_enable(dev); enable_irq(dev->irq); enable_irq(bp->tx_dma_intr); enable_irq(bp->rx_dma_intr); 
netif_device_attach(dev); return 0; } #endif /* CONFIG_PM */ static int bmac_set_address(struct net_device *dev, void *addr) { struct bmac_data *bp = netdev_priv(dev); unsigned char *p = addr; unsigned short *pWord16; unsigned long flags; int i; XXDEBUG(("bmac: enter set_address\n")); spin_lock_irqsave(&bp->lock, flags); for (i = 0; i < 6; ++i) { dev->dev_addr[i] = p[i]; } /* load up the hardware address */ pWord16 = (unsigned short *)dev->dev_addr; bmwrite(dev, MADD0, *pWord16++); bmwrite(dev, MADD1, *pWord16++); bmwrite(dev, MADD2, *pWord16); spin_unlock_irqrestore(&bp->lock, flags); XXDEBUG(("bmac: exit set_address\n")); return 0; } static inline void bmac_set_timeout(struct net_device *dev) { struct bmac_data *bp = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&bp->lock, flags); if (bp->timeout_active) del_timer(&bp->tx_timeout); bp->tx_timeout.expires = jiffies + TX_TIMEOUT; bp->tx_timeout.function = bmac_tx_timeout; bp->tx_timeout.data = (unsigned long) dev; add_timer(&bp->tx_timeout); bp->timeout_active = 1; spin_unlock_irqrestore(&bp->lock, flags); } static void bmac_construct_xmt(struct sk_buff *skb, volatile struct dbdma_cmd *cp) { void *vaddr; unsigned long baddr; unsigned long len; len = skb->len; vaddr = skb->data; baddr = virt_to_bus(vaddr); dbdma_setcmd(cp, (OUTPUT_LAST | INTR_ALWAYS | WAIT_IFCLR), len, baddr, 0); } static void bmac_construct_rxbuff(struct sk_buff *skb, volatile struct dbdma_cmd *cp) { unsigned char *addr = skb? 
skb->data: bmac_emergency_rxbuf; dbdma_setcmd(cp, (INPUT_LAST | INTR_ALWAYS), RX_BUFLEN, virt_to_bus(addr), 0); } static void bmac_init_tx_ring(struct bmac_data *bp) { volatile struct dbdma_regs __iomem *td = bp->tx_dma; memset((char *)bp->tx_cmds, 0, (N_TX_RING+1) * sizeof(struct dbdma_cmd)); bp->tx_empty = 0; bp->tx_fill = 0; bp->tx_fullup = 0; /* put a branch at the end of the tx command list */ dbdma_setcmd(&bp->tx_cmds[N_TX_RING], (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->tx_cmds)); /* reset tx dma */ dbdma_reset(td); out_le32(&td->wait_sel, 0x00200020); out_le32(&td->cmdptr, virt_to_bus(bp->tx_cmds)); } static int bmac_init_rx_ring(struct bmac_data *bp) { volatile struct dbdma_regs __iomem *rd = bp->rx_dma; int i; struct sk_buff *skb; /* initialize list of sk_buffs for receiving and set up recv dma */ memset((char *)bp->rx_cmds, 0, (N_RX_RING + 1) * sizeof(struct dbdma_cmd)); for (i = 0; i < N_RX_RING; i++) { if ((skb = bp->rx_bufs[i]) == NULL) { bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2); if (skb != NULL) skb_reserve(skb, 2); } bmac_construct_rxbuff(skb, &bp->rx_cmds[i]); } bp->rx_empty = 0; bp->rx_fill = i; /* Put a branch back to the beginning of the receive command list */ dbdma_setcmd(&bp->rx_cmds[N_RX_RING], (DBDMA_NOP | BR_ALWAYS), 0, 0, virt_to_bus(bp->rx_cmds)); /* start rx dma */ dbdma_reset(rd); out_le32(&rd->cmdptr, virt_to_bus(bp->rx_cmds)); return 1; } static int bmac_transmit_packet(struct sk_buff *skb, struct net_device *dev) { struct bmac_data *bp = netdev_priv(dev); volatile struct dbdma_regs __iomem *td = bp->tx_dma; int i; /* see if there's a free slot in the tx ring */ /* XXDEBUG(("bmac_xmit_start: empty=%d fill=%d\n", */ /* bp->tx_empty, bp->tx_fill)); */ i = bp->tx_fill + 1; if (i >= N_TX_RING) i = 0; if (i == bp->tx_empty) { netif_stop_queue(dev); bp->tx_fullup = 1; XXDEBUG(("bmac_transmit_packet: tx ring full\n")); return -1; /* can't take it at the moment */ } dbdma_setcmd(&bp->tx_cmds[i], DBDMA_STOP, 0, 0, 0); 
bmac_construct_xmt(skb, &bp->tx_cmds[bp->tx_fill]); bp->tx_bufs[bp->tx_fill] = skb; bp->tx_fill = i; dev->stats.tx_bytes += skb->len; dbdma_continue(td); return 0; } static int rxintcount; static irqreturn_t bmac_rxdma_intr(int irq, void *dev_id) { struct net_device *dev = (struct net_device *) dev_id; struct bmac_data *bp = netdev_priv(dev); volatile struct dbdma_regs __iomem *rd = bp->rx_dma; volatile struct dbdma_cmd *cp; int i, nb, stat; struct sk_buff *skb; unsigned int residual; int last; unsigned long flags; spin_lock_irqsave(&bp->lock, flags); if (++rxintcount < 10) { XXDEBUG(("bmac_rxdma_intr\n")); } last = -1; i = bp->rx_empty; while (1) { cp = &bp->rx_cmds[i]; stat = ld_le16(&cp->xfer_status); residual = ld_le16(&cp->res_count); if ((stat & ACTIVE) == 0) break; nb = RX_BUFLEN - residual - 2; if (nb < (ETHERMINPACKET - ETHERCRC)) { skb = NULL; dev->stats.rx_length_errors++; dev->stats.rx_errors++; } else { skb = bp->rx_bufs[i]; bp->rx_bufs[i] = NULL; } if (skb != NULL) { nb -= ETHERCRC; skb_put(skb, nb); skb->protocol = eth_type_trans(skb, dev); netif_rx(skb); ++dev->stats.rx_packets; dev->stats.rx_bytes += nb; } else { ++dev->stats.rx_dropped; } if ((skb = bp->rx_bufs[i]) == NULL) { bp->rx_bufs[i] = skb = dev_alloc_skb(RX_BUFLEN+2); if (skb != NULL) skb_reserve(bp->rx_bufs[i], 2); } bmac_construct_rxbuff(skb, &bp->rx_cmds[i]); st_le16(&cp->res_count, 0); st_le16(&cp->xfer_status, 0); last = i; if (++i >= N_RX_RING) i = 0; } if (last != -1) { bp->rx_fill = last; bp->rx_empty = i; } dbdma_continue(rd); spin_unlock_irqrestore(&bp->lock, flags); if (rxintcount < 10) { XXDEBUG(("bmac_rxdma_intr done\n")); } return IRQ_HANDLED; } static int txintcount; static irqreturn_t bmac_txdma_intr(int irq, void *dev_id) { struct net_device *dev = (struct net_device *) dev_id; struct bmac_data *bp = netdev_priv(dev); volatile struct dbdma_cmd *cp; int stat; unsigned long flags; spin_lock_irqsave(&bp->lock, flags); if (txintcount++ < 10) { XXDEBUG(("bmac_txdma_intr\n")); } 
/* del_timer(&bp->tx_timeout); */ /* bp->timeout_active = 0; */ while (1) { cp = &bp->tx_cmds[bp->tx_empty]; stat = ld_le16(&cp->xfer_status); if (txintcount < 10) { XXDEBUG(("bmac_txdma_xfer_stat=%#0x\n", stat)); } if (!(stat & ACTIVE)) { /* * status field might not have been filled by DBDMA */ if (cp == bus_to_virt(in_le32(&bp->tx_dma->cmdptr))) break; } if (bp->tx_bufs[bp->tx_empty]) { ++dev->stats.tx_packets; dev_kfree_skb_irq(bp->tx_bufs[bp->tx_empty]); } bp->tx_bufs[bp->tx_empty] = NULL; bp->tx_fullup = 0; netif_wake_queue(dev); if (++bp->tx_empty >= N_TX_RING) bp->tx_empty = 0; if (bp->tx_empty == bp->tx_fill) break; } spin_unlock_irqrestore(&bp->lock, flags); if (txintcount < 10) { XXDEBUG(("bmac_txdma_intr done->bmac_start\n")); } bmac_start(dev); return IRQ_HANDLED; } #ifndef SUNHME_MULTICAST /* Real fast bit-reversal algorithm, 6-bit values */ static int reverse6[64] = { 0x0,0x20,0x10,0x30,0x8,0x28,0x18,0x38, 0x4,0x24,0x14,0x34,0xc,0x2c,0x1c,0x3c, 0x2,0x22,0x12,0x32,0xa,0x2a,0x1a,0x3a, 0x6,0x26,0x16,0x36,0xe,0x2e,0x1e,0x3e, 0x1,0x21,0x11,0x31,0x9,0x29,0x19,0x39, 0x5,0x25,0x15,0x35,0xd,0x2d,0x1d,0x3d, 0x3,0x23,0x13,0x33,0xb,0x2b,0x1b,0x3b, 0x7,0x27,0x17,0x37,0xf,0x2f,0x1f,0x3f }; static unsigned int crc416(unsigned int curval, unsigned short nxtval) { register unsigned int counter, cur = curval, next = nxtval; register int high_crc_set, low_data_set; /* Swap bytes */ next = ((next & 0x00FF) << 8) | (next >> 8); /* Compute bit-by-bit */ for (counter = 0; counter < 16; ++counter) { /* is high CRC bit set? 
*/ if ((cur & 0x80000000) == 0) high_crc_set = 0; else high_crc_set = 1; cur = cur << 1; if ((next & 0x0001) == 0) low_data_set = 0; else low_data_set = 1; next = next >> 1; /* do the XOR */ if (high_crc_set ^ low_data_set) cur = cur ^ ENET_CRCPOLY; } return cur; } static unsigned int bmac_crc(unsigned short *address) { unsigned int newcrc; XXDEBUG(("bmac_crc: addr=%#04x, %#04x, %#04x\n", *address, address[1], address[2])); newcrc = crc416(0xffffffff, *address); /* address bits 47 - 32 */ newcrc = crc416(newcrc, address[1]); /* address bits 31 - 16 */ newcrc = crc416(newcrc, address[2]); /* address bits 15 - 0 */ return(newcrc); } /* * Add requested mcast addr to BMac's hash table filter. * */ static void bmac_addhash(struct bmac_data *bp, unsigned char *addr) { unsigned int crc; unsigned short mask; if (!(*addr)) return; crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */ crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */ if (bp->hash_use_count[crc]++) return; /* This bit is already set */ mask = crc % 16; mask = (unsigned char)1 << mask; bp->hash_use_count[crc/16] |= mask; } static void bmac_removehash(struct bmac_data *bp, unsigned char *addr) { unsigned int crc; unsigned char mask; /* Now, delete the address from the filter copy, as indicated */ crc = bmac_crc((unsigned short *)addr) & 0x3f; /* Big-endian alert! */ crc = reverse6[crc]; /* Hyperfast bit-reversing algorithm */ if (bp->hash_use_count[crc] == 0) return; /* That bit wasn't in use! */ if (--bp->hash_use_count[crc]) return; /* That bit is still in use */ mask = crc % 16; mask = ((unsigned char)1 << mask) ^ 0xffff; /* To turn off bit */ bp->hash_table_mask[crc/16] &= mask; } /* * Sync the adapter with the software copy of the multicast mask * (logical address filter). 
*/ static void bmac_rx_off(struct net_device *dev) { unsigned short rx_cfg; rx_cfg = bmread(dev, RXCFG); rx_cfg &= ~RxMACEnable; bmwrite(dev, RXCFG, rx_cfg); do { rx_cfg = bmread(dev, RXCFG); } while (rx_cfg & RxMACEnable); } unsigned short bmac_rx_on(struct net_device *dev, int hash_enable, int promisc_enable) { unsigned short rx_cfg; rx_cfg = bmread(dev, RXCFG); rx_cfg |= RxMACEnable; if (hash_enable) rx_cfg |= RxHashFilterEnable; else rx_cfg &= ~RxHashFilterEnable; if (promisc_enable) rx_cfg |= RxPromiscEnable; else rx_cfg &= ~RxPromiscEnable; bmwrite(dev, RXRST, RxResetValue); bmwrite(dev, RXFIFOCSR, 0); /* first disable rxFIFO */ bmwrite(dev, RXFIFOCSR, RxFIFOEnable ); bmwrite(dev, RXCFG, rx_cfg ); return rx_cfg; } static void bmac_update_hash_table_mask(struct net_device *dev, struct bmac_data *bp) { bmwrite(dev, BHASH3, bp->hash_table_mask[0]); /* bits 15 - 0 */ bmwrite(dev, BHASH2, bp->hash_table_mask[1]); /* bits 31 - 16 */ bmwrite(dev, BHASH1, bp->hash_table_mask[2]); /* bits 47 - 32 */ bmwrite(dev, BHASH0, bp->hash_table_mask[3]); /* bits 63 - 48 */ } #if 0 static void bmac_add_multi(struct net_device *dev, struct bmac_data *bp, unsigned char *addr) { /* XXDEBUG(("bmac: enter bmac_add_multi\n")); */ bmac_addhash(bp, addr); bmac_rx_off(dev); bmac_update_hash_table_mask(dev, bp); bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0); /* XXDEBUG(("bmac: exit bmac_add_multi\n")); */ } static void bmac_remove_multi(struct net_device *dev, struct bmac_data *bp, unsigned char *addr) { bmac_removehash(bp, addr); bmac_rx_off(dev); bmac_update_hash_table_mask(dev, bp); bmac_rx_on(dev, 1, (dev->flags & IFF_PROMISC)? 1 : 0); } #endif /* Set or clear the multicast filter for this adaptor. num_addrs == -1 Promiscuous mode, receive all packets num_addrs == 0 Normal mode, clear multicast list num_addrs > 0 Multicast mode, receive normal and MC packets, and do best-effort filtering. 
*/ static void bmac_set_multicast(struct net_device *dev) { struct netdev_hw_addr *ha; struct bmac_data *bp = netdev_priv(dev); int num_addrs = netdev_mc_count(dev); unsigned short rx_cfg; int i; if (bp->sleeping) return; XXDEBUG(("bmac: enter bmac_set_multicast, n_addrs=%d\n", num_addrs)); if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { for (i=0; i<4; i++) bp->hash_table_mask[i] = 0xffff; bmac_update_hash_table_mask(dev, bp); rx_cfg = bmac_rx_on(dev, 1, 0); XXDEBUG(("bmac: all multi, rx_cfg=%#08x\n")); } else if ((dev->flags & IFF_PROMISC) || (num_addrs < 0)) { rx_cfg = bmread(dev, RXCFG); rx_cfg |= RxPromiscEnable; bmwrite(dev, RXCFG, rx_cfg); rx_cfg = bmac_rx_on(dev, 0, 1); XXDEBUG(("bmac: promisc mode enabled, rx_cfg=%#08x\n", rx_cfg)); } else { for (i=0; i<4; i++) bp->hash_table_mask[i] = 0; for (i=0; i<64; i++) bp->hash_use_count[i] = 0; if (num_addrs == 0) { rx_cfg = bmac_rx_on(dev, 0, 0); XXDEBUG(("bmac: multi disabled, rx_cfg=%#08x\n", rx_cfg)); } else { netdev_for_each_mc_addr(ha, dev) bmac_addhash(bp, ha->addr); bmac_update_hash_table_mask(dev, bp); rx_cfg = bmac_rx_on(dev, 1, 0); XXDEBUG(("bmac: multi enabled, rx_cfg=%#08x\n", rx_cfg)); } } /* XXDEBUG(("bmac: exit bmac_set_multicast\n")); */ } #else /* ifdef SUNHME_MULTICAST */ /* The version of set_multicast below was lifted from sunhme.c */ static void bmac_set_multicast(struct net_device *dev) { struct netdev_hw_addr *ha; char *addrs; int i; unsigned short rx_cfg; u32 crc; if((dev->flags & IFF_ALLMULTI) || (netdev_mc_count(dev) > 64)) { bmwrite(dev, BHASH0, 0xffff); bmwrite(dev, BHASH1, 0xffff); bmwrite(dev, BHASH2, 0xffff); bmwrite(dev, BHASH3, 0xffff); } else if(dev->flags & IFF_PROMISC) { rx_cfg = bmread(dev, RXCFG); rx_cfg |= RxPromiscEnable; bmwrite(dev, RXCFG, rx_cfg); } else { u16 hash_table[4]; rx_cfg = bmread(dev, RXCFG); rx_cfg &= ~RxPromiscEnable; bmwrite(dev, RXCFG, rx_cfg); for(i = 0; i < 4; i++) hash_table[i] = 0; netdev_for_each_mc_addr(ha, dev) { addrs = ha->addr; 
if(!(*addrs & 1)) continue; crc = ether_crc_le(6, addrs); crc >>= 26; hash_table[crc >> 4] |= 1 << (crc & 0xf); } bmwrite(dev, BHASH0, hash_table[0]); bmwrite(dev, BHASH1, hash_table[1]); bmwrite(dev, BHASH2, hash_table[2]); bmwrite(dev, BHASH3, hash_table[3]); } } #endif /* SUNHME_MULTICAST */ static int miscintcount; static irqreturn_t bmac_misc_intr(int irq, void *dev_id) { struct net_device *dev = (struct net_device *) dev_id; unsigned int status = bmread(dev, STATUS); if (miscintcount++ < 10) { XXDEBUG(("bmac_misc_intr\n")); } /* XXDEBUG(("bmac_misc_intr, status=%#08x\n", status)); */ /* bmac_txdma_intr_inner(irq, dev_id); */ /* if (status & FrameReceived) dev->stats.rx_dropped++; */ if (status & RxErrorMask) dev->stats.rx_errors++; if (status & RxCRCCntExp) dev->stats.rx_crc_errors++; if (status & RxLenCntExp) dev->stats.rx_length_errors++; if (status & RxOverFlow) dev->stats.rx_over_errors++; if (status & RxAlignCntExp) dev->stats.rx_frame_errors++; /* if (status & FrameSent) dev->stats.tx_dropped++; */ if (status & TxErrorMask) dev->stats.tx_errors++; if (status & TxUnderrun) dev->stats.tx_fifo_errors++; if (status & TxNormalCollExp) dev->stats.collisions++; return IRQ_HANDLED; } /* * Procedure for reading EEPROM */ #define SROMAddressLength 5 #define DataInOn 0x0008 #define DataInOff 0x0000 #define Clk 0x0002 #define ChipSelect 0x0001 #define SDIShiftCount 3 #define SD0ShiftCount 2 #define DelayValue 1000 /* number of microseconds */ #define SROMStartOffset 10 /* this is in words */ #define SROMReadCount 3 /* number of words to read from SROM */ #define SROMAddressBits 6 #define EnetAddressOffset 20 static unsigned char bmac_clock_out_bit(struct net_device *dev) { unsigned short data; unsigned short val; bmwrite(dev, SROMCSR, ChipSelect | Clk); udelay(DelayValue); data = bmread(dev, SROMCSR); udelay(DelayValue); val = (data >> SD0ShiftCount) & 1; bmwrite(dev, SROMCSR, ChipSelect); udelay(DelayValue); return val; } static void bmac_clock_in_bit(struct 
net_device *dev, unsigned int val) { unsigned short data; if (val != 0 && val != 1) return; data = (val << SDIShiftCount); bmwrite(dev, SROMCSR, data | ChipSelect ); udelay(DelayValue); bmwrite(dev, SROMCSR, data | ChipSelect | Clk ); udelay(DelayValue); bmwrite(dev, SROMCSR, data | ChipSelect); udelay(DelayValue); } static void reset_and_select_srom(struct net_device *dev) { /* first reset */ bmwrite(dev, SROMCSR, 0); udelay(DelayValue); /* send it the read command (110) */ bmac_clock_in_bit(dev, 1); bmac_clock_in_bit(dev, 1); bmac_clock_in_bit(dev, 0); } static unsigned short read_srom(struct net_device *dev, unsigned int addr, unsigned int addr_len) { unsigned short data, val; int i; /* send out the address we want to read from */ for (i = 0; i < addr_len; i++) { val = addr >> (addr_len-i-1); bmac_clock_in_bit(dev, val & 1); } /* Now read in the 16-bit data */ data = 0; for (i = 0; i < 16; i++) { val = bmac_clock_out_bit(dev); data <<= 1; data |= val; } bmwrite(dev, SROMCSR, 0); return data; } /* * It looks like Cogent and SMC use different methods for calculating * checksums. What a pain.. 
*/ static int bmac_verify_checksum(struct net_device *dev) { unsigned short data, storedCS; reset_and_select_srom(dev); data = read_srom(dev, 3, SROMAddressBits); storedCS = ((data >> 8) & 0x0ff) | ((data << 8) & 0xff00); return 0; } static void bmac_get_station_address(struct net_device *dev, unsigned char *ea) { int i; unsigned short data; for (i = 0; i < 6; i++) { reset_and_select_srom(dev); data = read_srom(dev, i + EnetAddressOffset/2, SROMAddressBits); ea[2*i] = bitrev8(data & 0x0ff); ea[2*i+1] = bitrev8((data >> 8) & 0x0ff); } } static void bmac_reset_and_enable(struct net_device *dev) { struct bmac_data *bp = netdev_priv(dev); unsigned long flags; struct sk_buff *skb; unsigned char *data; spin_lock_irqsave(&bp->lock, flags); bmac_enable_and_reset_chip(dev); bmac_init_tx_ring(bp); bmac_init_rx_ring(bp); bmac_init_chip(dev); bmac_start_chip(dev); bmwrite(dev, INTDISABLE, EnableNormal); bp->sleeping = 0; /* * It seems that the bmac can't receive until it's transmitted * a packet. So we give it a dummy packet to transmit. 
*/ skb = dev_alloc_skb(ETHERMINPACKET); if (skb != NULL) { data = skb_put(skb, ETHERMINPACKET); memset(data, 0, ETHERMINPACKET); memcpy(data, dev->dev_addr, 6); memcpy(data+6, dev->dev_addr, 6); bmac_transmit_packet(skb, dev); } spin_unlock_irqrestore(&bp->lock, flags); } static void bmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { struct bmac_data *bp = netdev_priv(dev); strcpy(info->driver, "bmac"); strcpy(info->bus_info, dev_name(&bp->mdev->ofdev.dev)); } static const struct ethtool_ops bmac_ethtool_ops = { .get_drvinfo = bmac_get_drvinfo, .get_link = ethtool_op_get_link, }; static const struct net_device_ops bmac_netdev_ops = { .ndo_open = bmac_open, .ndo_stop = bmac_close, .ndo_start_xmit = bmac_output, .ndo_set_multicast_list = bmac_set_multicast, .ndo_set_mac_address = bmac_set_address, .ndo_change_mtu = eth_change_mtu, .ndo_validate_addr = eth_validate_addr, }; static int __devinit bmac_probe(struct macio_dev *mdev, const struct of_device_id *match) { int j, rev, ret; struct bmac_data *bp; const unsigned char *prop_addr; unsigned char addr[6]; struct net_device *dev; int is_bmac_plus = ((int)match->data) != 0; if (macio_resource_count(mdev) != 3 || macio_irq_count(mdev) != 3) { printk(KERN_ERR "BMAC: can't use, need 3 addrs and 3 intrs\n"); return -ENODEV; } prop_addr = of_get_property(macio_get_of_node(mdev), "mac-address", NULL); if (prop_addr == NULL) { prop_addr = of_get_property(macio_get_of_node(mdev), "local-mac-address", NULL); if (prop_addr == NULL) { printk(KERN_ERR "BMAC: Can't get mac-address\n"); return -ENODEV; } } memcpy(addr, prop_addr, sizeof(addr)); dev = alloc_etherdev(PRIV_BYTES); if (!dev) { printk(KERN_ERR "BMAC: alloc_etherdev failed, out of memory\n"); return -ENOMEM; } bp = netdev_priv(dev); SET_NETDEV_DEV(dev, &mdev->ofdev.dev); macio_set_drvdata(mdev, dev); bp->mdev = mdev; spin_lock_init(&bp->lock); if (macio_request_resources(mdev, "bmac")) { printk(KERN_ERR "BMAC: can't request IO resource !\n"); goto 
out_free; } dev->base_addr = (unsigned long) ioremap(macio_resource_start(mdev, 0), macio_resource_len(mdev, 0)); if (dev->base_addr == 0) goto out_release; dev->irq = macio_irq(mdev, 0); bmac_enable_and_reset_chip(dev); bmwrite(dev, INTDISABLE, DisableAll); rev = addr[0] == 0 && addr[1] == 0xA0; for (j = 0; j < 6; ++j) dev->dev_addr[j] = rev ? bitrev8(addr[j]): addr[j]; /* Enable chip without interrupts for now */ bmac_enable_and_reset_chip(dev); bmwrite(dev, INTDISABLE, DisableAll); dev->netdev_ops = &bmac_netdev_ops; dev->ethtool_ops = &bmac_ethtool_ops; bmac_get_station_address(dev, addr); if (bmac_verify_checksum(dev) != 0) goto err_out_iounmap; bp->is_bmac_plus = is_bmac_plus; bp->tx_dma = ioremap(macio_resource_start(mdev, 1), macio_resource_len(mdev, 1)); if (!bp->tx_dma) goto err_out_iounmap; bp->tx_dma_intr = macio_irq(mdev, 1); bp->rx_dma = ioremap(macio_resource_start(mdev, 2), macio_resource_len(mdev, 2)); if (!bp->rx_dma) goto err_out_iounmap_tx; bp->rx_dma_intr = macio_irq(mdev, 2); bp->tx_cmds = (volatile struct dbdma_cmd *) DBDMA_ALIGN(bp + 1); bp->rx_cmds = bp->tx_cmds + N_TX_RING + 1; bp->queue = (struct sk_buff_head *)(bp->rx_cmds + N_RX_RING + 1); skb_queue_head_init(bp->queue); init_timer(&bp->tx_timeout); ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev); if (ret) { printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq); goto err_out_iounmap_rx; } ret = request_irq(bp->tx_dma_intr, bmac_txdma_intr, 0, "BMAC-txdma", dev); if (ret) { printk(KERN_ERR "BMAC: can't get irq %d\n", bp->tx_dma_intr); goto err_out_irq0; } ret = request_irq(bp->rx_dma_intr, bmac_rxdma_intr, 0, "BMAC-rxdma", dev); if (ret) { printk(KERN_ERR "BMAC: can't get irq %d\n", bp->rx_dma_intr); goto err_out_irq1; } /* Mask chip interrupts and disable chip, will be * re-enabled on open() */ disable_irq(dev->irq); pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); if (register_netdev(dev) != 0) { printk(KERN_ERR "BMAC: Ethernet registration 
failed\n"); goto err_out_irq2; } printk(KERN_INFO "%s: BMAC%s at %pM", dev->name, (is_bmac_plus ? "+" : ""), dev->dev_addr); XXDEBUG((", base_addr=%#0lx", dev->base_addr)); printk("\n"); return 0; err_out_irq2: free_irq(bp->rx_dma_intr, dev); err_out_irq1: free_irq(bp->tx_dma_intr, dev); err_out_irq0: free_irq(dev->irq, dev); err_out_iounmap_rx: iounmap(bp->rx_dma); err_out_iounmap_tx: iounmap(bp->tx_dma); err_out_iounmap: iounmap((void __iomem *)dev->base_addr); out_release: macio_release_resources(mdev); out_free: pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 0, 0); free_netdev(dev); return -ENODEV; } static int bmac_open(struct net_device *dev) { struct bmac_data *bp = netdev_priv(dev); /* XXDEBUG(("bmac: enter open\n")); */ /* reset the chip */ bp->opened = 1; bmac_reset_and_enable(dev); enable_irq(dev->irq); return 0; } static int bmac_close(struct net_device *dev) { struct bmac_data *bp = netdev_priv(dev); volatile struct dbdma_regs __iomem *rd = bp->rx_dma; volatile struct dbdma_regs __iomem *td = bp->tx_dma; unsigned short config; int i; bp->sleeping = 1; /* disable rx and tx */ config = bmread(dev, RXCFG); bmwrite(dev, RXCFG, (config & ~RxMACEnable)); config = bmread(dev, TXCFG); bmwrite(dev, TXCFG, (config & ~TxMACEnable)); bmwrite(dev, INTDISABLE, DisableAll); /* disable all intrs */ /* disable rx and tx dma */ st_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ st_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE)); /* clear run bit */ /* free some skb's */ XXDEBUG(("bmac: free rx bufs\n")); for (i=0; i<N_RX_RING; i++) { if (bp->rx_bufs[i] != NULL) { dev_kfree_skb(bp->rx_bufs[i]); bp->rx_bufs[i] = NULL; } } XXDEBUG(("bmac: free tx bufs\n")); for (i = 0; i<N_TX_RING; i++) { if (bp->tx_bufs[i] != NULL) { dev_kfree_skb(bp->tx_bufs[i]); bp->tx_bufs[i] = NULL; } } XXDEBUG(("bmac: all bufs freed\n")); bp->opened = 0; disable_irq(dev->irq); pmac_call_feature(PMAC_FTR_BMAC_ENABLE, macio_get_of_node(bp->mdev), 
0, 0); return 0; } static void bmac_start(struct net_device *dev) { struct bmac_data *bp = netdev_priv(dev); int i; struct sk_buff *skb; unsigned long flags; if (bp->sleeping) return; spin_lock_irqsave(&bp->lock, flags); while (1) { i = bp->tx_fill + 1; if (i >= N_TX_RING) i = 0; if (i == bp->tx_empty) break; skb = skb_dequeue(bp->queue); if (skb == NULL) break; bmac_transmit_packet(skb, dev); } spin_unlock_irqrestore(&bp->lock, flags); } static int bmac_output(struct sk_buff *skb, struct net_device *dev) { struct bmac_data *bp = netdev_priv(dev); skb_queue_tail(bp->queue, skb); bmac_start(dev); return NETDEV_TX_OK; } static void bmac_tx_timeout(unsigned long data) { struct net_device *dev = (struct net_device *) data; struct bmac_data *bp = netdev_priv(dev); volatile struct dbdma_regs __iomem *td = bp->tx_dma; volatile struct dbdma_regs __iomem *rd = bp->rx_dma; volatile struct dbdma_cmd *cp; unsigned long flags; unsigned short config, oldConfig; int i; XXDEBUG(("bmac: tx_timeout called\n")); spin_lock_irqsave(&bp->lock, flags); bp->timeout_active = 0; /* update various counters */ /* bmac_handle_misc_intrs(bp, 0); */ cp = &bp->tx_cmds[bp->tx_empty]; /* XXDEBUG((KERN_DEBUG "bmac: tx dmastat=%x %x runt=%d pr=%x fs=%x fc=%x\n", */ /* ld_le32(&td->status), ld_le16(&cp->xfer_status), bp->tx_bad_runt, */ /* mb->pr, mb->xmtfs, mb->fifofc)); */ /* turn off both tx and rx and reset the chip */ config = bmread(dev, RXCFG); bmwrite(dev, RXCFG, (config & ~RxMACEnable)); config = bmread(dev, TXCFG); bmwrite(dev, TXCFG, (config & ~TxMACEnable)); out_le32(&td->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD)); printk(KERN_ERR "bmac: transmit timeout - resetting\n"); bmac_enable_and_reset_chip(dev); /* restart rx dma */ cp = bus_to_virt(ld_le32(&rd->cmdptr)); out_le32(&rd->control, DBDMA_CLEAR(RUN|PAUSE|FLUSH|WAKE|ACTIVE|DEAD)); out_le16(&cp->xfer_status, 0); out_le32(&rd->cmdptr, virt_to_bus(cp)); out_le32(&rd->control, DBDMA_SET(RUN|WAKE)); /* fix up the transmit side */ 
XXDEBUG((KERN_DEBUG "bmac: tx empty=%d fill=%d fullup=%d\n", bp->tx_empty, bp->tx_fill, bp->tx_fullup)); i = bp->tx_empty; ++dev->stats.tx_errors; if (i != bp->tx_fill) { dev_kfree_skb(bp->tx_bufs[i]); bp->tx_bufs[i] = NULL; if (++i >= N_TX_RING) i = 0; bp->tx_empty = i; } bp->tx_fullup = 0; netif_wake_queue(dev); if (i != bp->tx_fill) { cp = &bp->tx_cmds[i]; out_le16(&cp->xfer_status, 0); out_le16(&cp->command, OUTPUT_LAST); out_le32(&td->cmdptr, virt_to_bus(cp)); out_le32(&td->control, DBDMA_SET(RUN)); /* bmac_set_timeout(dev); */ XXDEBUG((KERN_DEBUG "bmac: starting %d\n", i)); } /* turn it back on */ oldConfig = bmread(dev, RXCFG); bmwrite(dev, RXCFG, oldConfig | RxMACEnable ); oldConfig = bmread(dev, TXCFG); bmwrite(dev, TXCFG, oldConfig | TxMACEnable ); spin_unlock_irqrestore(&bp->lock, flags); } #if 0 static void dump_dbdma(volatile struct dbdma_cmd *cp,int count) { int i,*ip; for (i=0;i< count;i++) { ip = (int*)(cp+i); printk("dbdma req 0x%x addr 0x%x baddr 0x%x xfer/res 0x%x\n", ld_le32(ip+0), ld_le32(ip+1), ld_le32(ip+2), ld_le32(ip+3)); } } #endif #if 0 static int bmac_proc_info(char *buffer, char **start, off_t offset, int length) { int len = 0; off_t pos = 0; off_t begin = 0; int i; if (bmac_devs == NULL) return (-ENOSYS); len += sprintf(buffer, "BMAC counters & registers\n"); for (i = 0; i<N_REG_ENTRIES; i++) { len += sprintf(buffer + len, "%s: %#08x\n", reg_entries[i].name, bmread(bmac_devs, reg_entries[i].reg_offset)); pos = begin + len; if (pos < offset) { len = 0; begin = pos; } if (pos > offset+length) break; } *start = buffer + (offset - begin); len -= (offset - begin); if (len > length) len = length; return len; } #endif static int __devexit bmac_remove(struct macio_dev *mdev) { struct net_device *dev = macio_get_drvdata(mdev); struct bmac_data *bp = netdev_priv(dev); unregister_netdev(dev); free_irq(dev->irq, dev); free_irq(bp->tx_dma_intr, dev); free_irq(bp->rx_dma_intr, dev); iounmap((void __iomem *)dev->base_addr); iounmap(bp->tx_dma); 
iounmap(bp->rx_dma); macio_release_resources(mdev); free_netdev(dev); return 0; } static struct of_device_id bmac_match[] = { { .name = "bmac", .data = (void *)0, }, { .type = "network", .compatible = "bmac+", .data = (void *)1, }, {}, }; MODULE_DEVICE_TABLE (of, bmac_match); static struct macio_driver bmac_driver = { .driver = { .name = "bmac", .owner = THIS_MODULE, .of_match_table = bmac_match, }, .probe = bmac_probe, .remove = bmac_remove, #ifdef CONFIG_PM .suspend = bmac_suspend, .resume = bmac_resume, #endif }; static int __init bmac_init(void) { if (bmac_emergency_rxbuf == NULL) { bmac_emergency_rxbuf = kmalloc(RX_BUFLEN, GFP_KERNEL); if (bmac_emergency_rxbuf == NULL) { printk(KERN_ERR "BMAC: can't allocate emergency RX buffer\n"); return -ENOMEM; } } return macio_register_driver(&bmac_driver); } static void __exit bmac_exit(void) { macio_unregister_driver(&bmac_driver); kfree(bmac_emergency_rxbuf); bmac_emergency_rxbuf = NULL; } MODULE_AUTHOR("Randy Gobbel/Paul Mackerras"); MODULE_DESCRIPTION("PowerMac BMAC ethernet driver."); MODULE_LICENSE("GPL"); module_init(bmac_init); module_exit(bmac_exit);
gpl-2.0
imnuts/sch-i510_kernel
drivers/zorro/zorro.c
848
5111
/* * Zorro Bus Services * * Copyright (C) 1995-2003 Geert Uytterhoeven * * This file is subject to the terms and conditions of the GNU General Public * License. See the file COPYING in the main directory of this archive * for more details. */ #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> #include <linux/init.h> #include <linux/zorro.h> #include <linux/bitops.h> #include <linux/string.h> #include <linux/platform_device.h> #include <linux/slab.h> #include <asm/setup.h> #include <asm/amigahw.h> #include "zorro.h" /* * Zorro Expansion Devices */ unsigned int zorro_num_autocon; struct zorro_dev zorro_autocon[ZORRO_NUM_AUTO]; /* * Zorro bus */ struct zorro_bus { struct list_head devices; /* list of devices on this bus */ struct device dev; }; /* * Find Zorro Devices */ struct zorro_dev *zorro_find_device(zorro_id id, struct zorro_dev *from) { struct zorro_dev *z; if (!zorro_num_autocon) return NULL; for (z = from ? from+1 : &zorro_autocon[0]; z < zorro_autocon+zorro_num_autocon; z++) if (id == ZORRO_WILDCARD || id == z->id) return z; return NULL; } EXPORT_SYMBOL(zorro_find_device); /* * Bitmask indicating portions of available Zorro II RAM that are unused * by the system. Every bit represents a 64K chunk, for a maximum of 8MB * (128 chunks, physical 0x00200000-0x009fffff). * * If you want to use (= allocate) portions of this RAM, you should clear * the corresponding bits. * * Possible uses: * - z2ram device * - SCSI DMA bounce buffers * * FIXME: use the normal resource management */ DECLARE_BITMAP(zorro_unused_z2ram, 128); EXPORT_SYMBOL(zorro_unused_z2ram); static void __init mark_region(unsigned long start, unsigned long end, int flag) { if (flag) start += Z2RAM_CHUNKMASK; else end += Z2RAM_CHUNKMASK; start &= ~Z2RAM_CHUNKMASK; end &= ~Z2RAM_CHUNKMASK; if (end <= Z2RAM_START || start >= Z2RAM_END) return; start = start < Z2RAM_START ? 0x00000000 : start-Z2RAM_START; end = end > Z2RAM_END ? 
Z2RAM_SIZE : end-Z2RAM_START; while (start < end) { u32 chunk = start>>Z2RAM_CHUNKSHIFT; if (flag) set_bit(chunk, zorro_unused_z2ram); else clear_bit(chunk, zorro_unused_z2ram); start += Z2RAM_CHUNKSIZE; } } static struct resource __init *zorro_find_parent_resource( struct platform_device *bridge, struct zorro_dev *z) { int i; for (i = 0; i < bridge->num_resources; i++) { struct resource *r = &bridge->resource[i]; if (zorro_resource_start(z) >= r->start && zorro_resource_end(z) <= r->end) return r; } return &iomem_resource; } static int __init amiga_zorro_probe(struct platform_device *pdev) { struct zorro_bus *bus; struct zorro_dev *z; struct resource *r; unsigned int i; int error; /* Initialize the Zorro bus */ bus = kzalloc(sizeof(*bus), GFP_KERNEL); if (!bus) return -ENOMEM; INIT_LIST_HEAD(&bus->devices); bus->dev.parent = &pdev->dev; dev_set_name(&bus->dev, "zorro"); error = device_register(&bus->dev); if (error) { pr_err("Zorro: Error registering zorro_bus\n"); kfree(bus); return error; } platform_set_drvdata(pdev, bus); /* Register all devices */ pr_info("Zorro: Probing AutoConfig expansion devices: %u device%s\n", zorro_num_autocon, zorro_num_autocon == 1 ? 
"" : "s"); for (i = 0; i < zorro_num_autocon; i++) { z = &zorro_autocon[i]; z->id = (z->rom.er_Manufacturer<<16) | (z->rom.er_Product<<8); if (z->id == ZORRO_PROD_GVP_EPC_BASE) { /* GVP quirk */ unsigned long magic = zorro_resource_start(z)+0x8000; z->id |= *(u16 *)ZTWO_VADDR(magic) & GVP_PRODMASK; } sprintf(z->name, "Zorro device %08x", z->id); zorro_name_device(z); z->resource.name = z->name; r = zorro_find_parent_resource(pdev, z); error = request_resource(r, &z->resource); if (error) dev_err(&bus->dev, "Address space collision on device %s %pR\n", z->name, &z->resource); dev_set_name(&z->dev, "%02x", i); z->dev.parent = &bus->dev; z->dev.bus = &zorro_bus_type; error = device_register(&z->dev); if (error) { dev_err(&bus->dev, "Error registering device %s\n", z->name); continue; } error = zorro_create_sysfs_dev_files(z); if (error) dev_err(&z->dev, "Error creating sysfs files\n"); } /* Mark all available Zorro II memory */ zorro_for_each_dev(z) { if (z->rom.er_Type & ERTF_MEMLIST) mark_region(zorro_resource_start(z), zorro_resource_end(z)+1, 1); } /* Unmark all used Zorro II memory */ for (i = 0; i < m68k_num_memory; i++) if (m68k_memory[i].addr < 16*1024*1024) mark_region(m68k_memory[i].addr, m68k_memory[i].addr+m68k_memory[i].size, 0); return 0; } static struct platform_driver amiga_zorro_driver = { .driver = { .name = "amiga-zorro", .owner = THIS_MODULE, }, }; static int __init amiga_zorro_init(void) { return platform_driver_probe(&amiga_zorro_driver, amiga_zorro_probe); } module_init(amiga_zorro_init); MODULE_LICENSE("GPL");
gpl-2.0
jollaman999/LGF180-Optimus-G-_Android_KK_v30b_Kernel
arch/arm/mach-msm/batterydata-lib.c
1104
8108
/* Copyright (c) 2012, The Linux Foundation. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and * only version 2 as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. */ #define pr_fmt(fmt) "%s: " fmt, __func__ #include <linux/module.h> #include <linux/mfd/pm8xxx/batterydata-lib.h> int linear_interpolate(int y0, int x0, int y1, int x1, int x) { if (y0 == y1 || x == x0) return y0; if (x1 == x0 || x == x1) return y1; return y0 + ((y1 - y0) * (x - x0) / (x1 - x0)); } int is_between(int left, int right, int value) { if (left >= right && left >= value && value >= right) return 1; if (left <= right && left <= value && value <= right) return 1; return 0; } static int interpolate_single_lut(struct single_row_lut *lut, int x) { int i, result; if (x < lut->x[0]) { pr_debug("x %d less than known range return y = %d lut = %pS\n", x, lut->y[0], lut); return lut->y[0]; } if (x > lut->x[lut->cols - 1]) { pr_debug("x %d more than known range return y = %d lut = %pS\n", x, lut->y[lut->cols - 1], lut); return lut->y[lut->cols - 1]; } for (i = 0; i < lut->cols; i++) if (x <= lut->x[i]) break; if (x == lut->x[i]) { result = lut->y[i]; } else { result = linear_interpolate( lut->y[i - 1], lut->x[i - 1], lut->y[i], lut->x[i], x); } return result; } int interpolate_fcc(struct single_row_lut *fcc_temp_lut, int batt_temp) { /* batt_temp is in tenths of degC - convert it to degC for lookups */ batt_temp = batt_temp/10; return interpolate_single_lut(fcc_temp_lut, batt_temp); } int interpolate_scalingfactor_fcc(struct single_row_lut *fcc_sf_lut, int cycles) { /* * sf table could be null when no battery aging data is available, in * that case return 100% 
*/ if (fcc_sf_lut) return interpolate_single_lut(fcc_sf_lut, cycles); else return 100; } int interpolate_scalingfactor(struct sf_lut *sf_lut, int row_entry, int pc) { int i, scalefactorrow1, scalefactorrow2, scalefactor, rows, cols; int row1 = 0; int row2 = 0; /* * sf table could be null when no battery aging data is available, in * that case return 100% */ if (!sf_lut) return 100; rows = sf_lut->rows; cols = sf_lut->cols; if (pc > sf_lut->percent[0]) { pr_debug("pc %d greater than known pc ranges for sfd\n", pc); row1 = 0; row2 = 0; } if (pc < sf_lut->percent[rows - 1]) { pr_debug("pc %d less than known pc ranges for sf\n", pc); row1 = rows - 1; row2 = rows - 1; } for (i = 0; i < rows; i++) { if (pc == sf_lut->percent[i]) { row1 = i; row2 = i; break; } if (pc > sf_lut->percent[i]) { row1 = i - 1; row2 = i; break; } } if (row_entry < sf_lut->row_entries[0]) row_entry = sf_lut->row_entries[0]; if (row_entry > sf_lut->row_entries[cols - 1]) row_entry = sf_lut->row_entries[cols - 1]; for (i = 0; i < cols; i++) if (row_entry <= sf_lut->row_entries[i]) break; if (row_entry == sf_lut->row_entries[i]) { scalefactor = linear_interpolate( sf_lut->sf[row1][i], sf_lut->percent[row1], sf_lut->sf[row2][i], sf_lut->percent[row2], pc); return scalefactor; } scalefactorrow1 = linear_interpolate( sf_lut->sf[row1][i - 1], sf_lut->row_entries[i - 1], sf_lut->sf[row1][i], sf_lut->row_entries[i], row_entry); scalefactorrow2 = linear_interpolate( sf_lut->sf[row2][i - 1], sf_lut->row_entries[i - 1], sf_lut->sf[row2][i], sf_lut->row_entries[i], row_entry); scalefactor = linear_interpolate( scalefactorrow1, sf_lut->percent[row1], scalefactorrow2, sf_lut->percent[row2], pc); return scalefactor; } /* get ocv given a soc -- reverse lookup */ int interpolate_ocv(struct pc_temp_ocv_lut *pc_temp_ocv, int batt_temp_degc, int pc) { int i, ocvrow1, ocvrow2, ocv, rows, cols; int row1 = 0; int row2 = 0; rows = pc_temp_ocv->rows; cols = pc_temp_ocv->cols; if (pc > pc_temp_ocv->percent[0]) { 
pr_debug("pc %d greater than known pc ranges for sfd\n", pc); row1 = 0; row2 = 0; } if (pc < pc_temp_ocv->percent[rows - 1]) { pr_debug("pc %d less than known pc ranges for sf\n", pc); row1 = rows - 1; row2 = rows - 1; } for (i = 0; i < rows; i++) { if (pc == pc_temp_ocv->percent[i]) { row1 = i; row2 = i; break; } if (pc > pc_temp_ocv->percent[i]) { row1 = i - 1; row2 = i; break; } } if (batt_temp_degc < pc_temp_ocv->temp[0]) batt_temp_degc = pc_temp_ocv->temp[0]; if (batt_temp_degc > pc_temp_ocv->temp[cols - 1]) batt_temp_degc = pc_temp_ocv->temp[cols - 1]; for (i = 0; i < cols; i++) if (batt_temp_degc <= pc_temp_ocv->temp[i]) break; if (batt_temp_degc == pc_temp_ocv->temp[i]) { ocv = linear_interpolate( pc_temp_ocv->ocv[row1][i], pc_temp_ocv->percent[row1], pc_temp_ocv->ocv[row2][i], pc_temp_ocv->percent[row2], pc); return ocv; } ocvrow1 = linear_interpolate( pc_temp_ocv->ocv[row1][i - 1], pc_temp_ocv->temp[i - 1], pc_temp_ocv->ocv[row1][i], pc_temp_ocv->temp[i], batt_temp_degc); ocvrow2 = linear_interpolate( pc_temp_ocv->ocv[row2][i - 1], pc_temp_ocv->temp[i - 1], pc_temp_ocv->ocv[row2][i], pc_temp_ocv->temp[i], batt_temp_degc); ocv = linear_interpolate( ocvrow1, pc_temp_ocv->percent[row1], ocvrow2, pc_temp_ocv->percent[row2], pc); return ocv; } int interpolate_pc(struct pc_temp_ocv_lut *pc_temp_ocv, int batt_temp_degc, int ocv) { int i, j, pcj, pcj_minus_one, pc; int rows = pc_temp_ocv->rows; int cols = pc_temp_ocv->cols; if (batt_temp_degc < pc_temp_ocv->temp[0]) { pr_debug("batt_temp %d < known temp range\n", batt_temp_degc); batt_temp_degc = pc_temp_ocv->temp[0]; } if (batt_temp_degc > pc_temp_ocv->temp[cols - 1]) { pr_debug("batt_temp %d > known temp range\n", batt_temp_degc); batt_temp_degc = pc_temp_ocv->temp[cols - 1]; } for (j = 0; j < cols; j++) if (batt_temp_degc <= pc_temp_ocv->temp[j]) break; if (batt_temp_degc == pc_temp_ocv->temp[j]) { /* found an exact match for temp in the table */ if (ocv >= pc_temp_ocv->ocv[0][j]) return 
pc_temp_ocv->percent[0]; if (ocv <= pc_temp_ocv->ocv[rows - 1][j]) return pc_temp_ocv->percent[rows - 1]; for (i = 0; i < rows; i++) { if (ocv >= pc_temp_ocv->ocv[i][j]) { if (ocv == pc_temp_ocv->ocv[i][j]) return pc_temp_ocv->percent[i]; pc = linear_interpolate( pc_temp_ocv->percent[i], pc_temp_ocv->ocv[i][j], pc_temp_ocv->percent[i - 1], pc_temp_ocv->ocv[i - 1][j], ocv); return pc; } } } /* * batt_temp_degc is within temperature for * column j-1 and j */ if (ocv >= pc_temp_ocv->ocv[0][j]) return pc_temp_ocv->percent[0]; if (ocv <= pc_temp_ocv->ocv[rows - 1][j - 1]) return pc_temp_ocv->percent[rows - 1]; pcj_minus_one = 0; pcj = 0; for (i = 0; i < rows-1; i++) { if (pcj == 0 && is_between(pc_temp_ocv->ocv[i][j], pc_temp_ocv->ocv[i+1][j], ocv)) { pcj = linear_interpolate( pc_temp_ocv->percent[i], pc_temp_ocv->ocv[i][j], pc_temp_ocv->percent[i + 1], pc_temp_ocv->ocv[i+1][j], ocv); } if (pcj_minus_one == 0 && is_between(pc_temp_ocv->ocv[i][j-1], pc_temp_ocv->ocv[i+1][j-1], ocv)) { pcj_minus_one = linear_interpolate( pc_temp_ocv->percent[i], pc_temp_ocv->ocv[i][j-1], pc_temp_ocv->percent[i + 1], pc_temp_ocv->ocv[i+1][j-1], ocv); } if (pcj && pcj_minus_one) { pc = linear_interpolate( pcj_minus_one, pc_temp_ocv->temp[j-1], pcj, pc_temp_ocv->temp[j], batt_temp_degc); return pc; } } if (pcj) return pcj; if (pcj_minus_one) return pcj_minus_one; pr_debug("%d ocv wasn't found for temp %d in the LUT returning 100%%\n", ocv, batt_temp_degc); return 100; }
gpl-2.0
froggy666uk/Froggy_SensMod_CM10.1
drivers/of/base.c
1872
23280
/* * Procedures for creating, accessing and interpreting the device tree. * * Paul Mackerras August 1996. * Copyright (C) 1996-2005 Paul Mackerras. * * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner. * {engebret|bergner}@us.ibm.com * * Adapted for sparc and sparc64 by David S. Miller davem@davemloft.net * * Reconsolidated from arch/x/kernel/prom.c by Stephen Rothwell and * Grant Likely. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/module.h> #include <linux/of.h> #include <linux/spinlock.h> #include <linux/slab.h> #include <linux/proc_fs.h> struct device_node *allnodes; struct device_node *of_chosen; /* use when traversing tree through the allnext, child, sibling, * or parent members of struct device_node. */ DEFINE_RWLOCK(devtree_lock); int of_n_addr_cells(struct device_node *np) { const __be32 *ip; do { if (np->parent) np = np->parent; ip = of_get_property(np, "#address-cells", NULL); if (ip) return be32_to_cpup(ip); } while (np->parent); /* No #address-cells property for the root node */ return OF_ROOT_NODE_ADDR_CELLS_DEFAULT; } EXPORT_SYMBOL(of_n_addr_cells); int of_n_size_cells(struct device_node *np) { const __be32 *ip; do { if (np->parent) np = np->parent; ip = of_get_property(np, "#size-cells", NULL); if (ip) return be32_to_cpup(ip); } while (np->parent); /* No #size-cells property for the root node */ return OF_ROOT_NODE_SIZE_CELLS_DEFAULT; } EXPORT_SYMBOL(of_n_size_cells); #if !defined(CONFIG_SPARC) /* SPARC doesn't do ref counting (yet) */ /** * of_node_get - Increment refcount of a node * @node: Node to inc refcount, NULL is supported to * simplify writing of callers * * Returns node. 
*/ struct device_node *of_node_get(struct device_node *node) { if (node) kref_get(&node->kref); return node; } EXPORT_SYMBOL(of_node_get); static inline struct device_node *kref_to_device_node(struct kref *kref) { return container_of(kref, struct device_node, kref); } /** * of_node_release - release a dynamically allocated node * @kref: kref element of the node to be released * * In of_node_put() this function is passed to kref_put() * as the destructor. */ static void of_node_release(struct kref *kref) { struct device_node *node = kref_to_device_node(kref); struct property *prop = node->properties; /* We should never be releasing nodes that haven't been detached. */ if (!of_node_check_flag(node, OF_DETACHED)) { pr_err("ERROR: Bad of_node_put() on %s\n", node->full_name); dump_stack(); kref_init(&node->kref); return; } if (!of_node_check_flag(node, OF_DYNAMIC)) return; while (prop) { struct property *next = prop->next; kfree(prop->name); kfree(prop->value); kfree(prop); prop = next; if (!prop) { prop = node->deadprops; node->deadprops = NULL; } } kfree(node->full_name); kfree(node->data); kfree(node); } /** * of_node_put - Decrement refcount of a node * @node: Node to dec refcount, NULL is supported to * simplify writing of callers * */ void of_node_put(struct device_node *node) { if (node) kref_put(&node->kref, of_node_release); } EXPORT_SYMBOL(of_node_put); #endif /* !CONFIG_SPARC */ struct property *of_find_property(const struct device_node *np, const char *name, int *lenp) { struct property *pp; if (!np) return NULL; read_lock(&devtree_lock); for (pp = np->properties; pp != 0; pp = pp->next) { if (of_prop_cmp(pp->name, name) == 0) { if (lenp != 0) *lenp = pp->length; break; } } read_unlock(&devtree_lock); return pp; } EXPORT_SYMBOL(of_find_property); /** * of_find_all_nodes - Get next node in global list * @prev: Previous node or NULL to start iteration * of_node_put() will be called on it * * Returns a node pointer with refcount incremented, use * 
of_node_put() on it when done. */ struct device_node *of_find_all_nodes(struct device_node *prev) { struct device_node *np; read_lock(&devtree_lock); np = prev ? prev->allnext : allnodes; for (; np != NULL; np = np->allnext) if (of_node_get(np)) break; of_node_put(prev); read_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_all_nodes); /* * Find a property with a given name for a given node * and return the value. */ const void *of_get_property(const struct device_node *np, const char *name, int *lenp) { struct property *pp = of_find_property(np, name, lenp); return pp ? pp->value : NULL; } EXPORT_SYMBOL(of_get_property); /** Checks if the given "compat" string matches one of the strings in * the device's "compatible" property */ int of_device_is_compatible(const struct device_node *device, const char *compat) { const char* cp; int cplen, l; cp = of_get_property(device, "compatible", &cplen); if (cp == NULL) return 0; while (cplen > 0) { if (of_compat_cmp(cp, compat, strlen(compat)) == 0) return 1; l = strlen(cp) + 1; cp += l; cplen -= l; } return 0; } EXPORT_SYMBOL(of_device_is_compatible); /** * of_machine_is_compatible - Test root of device tree for a given compatible value * @compat: compatible string to look for in root node's compatible property. * * Returns true if the root node has the given value in its * compatible property. 
*/ int of_machine_is_compatible(const char *compat) { struct device_node *root; int rc = 0; root = of_find_node_by_path("/"); if (root) { rc = of_device_is_compatible(root, compat); of_node_put(root); } return rc; } EXPORT_SYMBOL(of_machine_is_compatible); /** * of_device_is_available - check if a device is available for use * * @device: Node to check for availability * * Returns 1 if the status property is absent or set to "okay" or "ok", * 0 otherwise */ int of_device_is_available(const struct device_node *device) { const char *status; int statlen; status = of_get_property(device, "status", &statlen); if (status == NULL) return 1; if (statlen > 0) { if (!strcmp(status, "okay") || !strcmp(status, "ok")) return 1; } return 0; } EXPORT_SYMBOL(of_device_is_available); /** * of_get_parent - Get a node's parent if any * @node: Node to get parent * * Returns a node pointer with refcount incremented, use * of_node_put() on it when done. */ struct device_node *of_get_parent(const struct device_node *node) { struct device_node *np; if (!node) return NULL; read_lock(&devtree_lock); np = of_node_get(node->parent); read_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_get_parent); /** * of_get_next_parent - Iterate to a node's parent * @node: Node to get parent of * * This is like of_get_parent() except that it drops the * refcount on the passed node, making it suitable for iterating * through a node's parents. * * Returns a node pointer with refcount incremented, use * of_node_put() on it when done. */ struct device_node *of_get_next_parent(struct device_node *node) { struct device_node *parent; if (!node) return NULL; read_lock(&devtree_lock); parent = of_node_get(node->parent); of_node_put(node); read_unlock(&devtree_lock); return parent; } /** * of_get_next_child - Iterate a node childs * @node: parent node * @prev: previous child of the parent node, or NULL to get first * * Returns a node pointer with refcount incremented, use * of_node_put() on it when done. 
*/ struct device_node *of_get_next_child(const struct device_node *node, struct device_node *prev) { struct device_node *next; read_lock(&devtree_lock); next = prev ? prev->sibling : node->child; for (; next; next = next->sibling) if (of_node_get(next)) break; of_node_put(prev); read_unlock(&devtree_lock); return next; } EXPORT_SYMBOL(of_get_next_child); /** * of_find_node_by_path - Find a node matching a full OF path * @path: The full path to match * * Returns a node pointer with refcount incremented, use * of_node_put() on it when done. */ struct device_node *of_find_node_by_path(const char *path) { struct device_node *np = allnodes; read_lock(&devtree_lock); for (; np; np = np->allnext) { if (np->full_name && (of_node_cmp(np->full_name, path) == 0) && of_node_get(np)) break; } read_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_node_by_path); /** * of_find_node_by_name - Find a node by its "name" property * @from: The node to start searching from or NULL, the node * you pass will not be searched, only the next one * will; typically, you pass what the previous call * returned. of_node_put() will be called on it * @name: The name string to match against * * Returns a node pointer with refcount incremented, use * of_node_put() on it when done. */ struct device_node *of_find_node_by_name(struct device_node *from, const char *name) { struct device_node *np; read_lock(&devtree_lock); np = from ? from->allnext : allnodes; for (; np; np = np->allnext) if (np->name && (of_node_cmp(np->name, name) == 0) && of_node_get(np)) break; of_node_put(from); read_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_node_by_name); /** * of_find_node_by_type - Find a node by its "device_type" property * @from: The node to start searching from, or NULL to start searching * the entire device tree. The node you pass will not be * searched, only the next one will; typically, you pass * what the previous call returned. of_node_put() will be * called on from for you. 
* @type: The type string to match against * * Returns a node pointer with refcount incremented, use * of_node_put() on it when done. */ struct device_node *of_find_node_by_type(struct device_node *from, const char *type) { struct device_node *np; read_lock(&devtree_lock); np = from ? from->allnext : allnodes; for (; np; np = np->allnext) if (np->type && (of_node_cmp(np->type, type) == 0) && of_node_get(np)) break; of_node_put(from); read_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_node_by_type); /** * of_find_compatible_node - Find a node based on type and one of the * tokens in its "compatible" property * @from: The node to start searching from or NULL, the node * you pass will not be searched, only the next one * will; typically, you pass what the previous call * returned. of_node_put() will be called on it * @type: The type string to match "device_type" or NULL to ignore * @compatible: The string to match to one of the tokens in the device * "compatible" list. * * Returns a node pointer with refcount incremented, use * of_node_put() on it when done. */ struct device_node *of_find_compatible_node(struct device_node *from, const char *type, const char *compatible) { struct device_node *np; read_lock(&devtree_lock); np = from ? from->allnext : allnodes; for (; np; np = np->allnext) { if (type && !(np->type && (of_node_cmp(np->type, type) == 0))) continue; if (of_device_is_compatible(np, compatible) && of_node_get(np)) break; } of_node_put(from); read_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_compatible_node); /** * of_find_node_with_property - Find a node which has a property with * the given name. * @from: The node to start searching from or NULL, the node * you pass will not be searched, only the next one * will; typically, you pass what the previous call * returned. of_node_put() will be called on it * @prop_name: The name of the property to look for. 
* * Returns a node pointer with refcount incremented, use * of_node_put() on it when done. */ struct device_node *of_find_node_with_property(struct device_node *from, const char *prop_name) { struct device_node *np; struct property *pp; read_lock(&devtree_lock); np = from ? from->allnext : allnodes; for (; np; np = np->allnext) { for (pp = np->properties; pp != 0; pp = pp->next) { if (of_prop_cmp(pp->name, prop_name) == 0) { of_node_get(np); goto out; } } } out: of_node_put(from); read_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_node_with_property); /** * of_match_node - Tell if an device_node has a matching of_match structure * @matches: array of of device match structures to search in * @node: the of device structure to match against * * Low level utility function used by device matching. */ const struct of_device_id *of_match_node(const struct of_device_id *matches, const struct device_node *node) { if (!matches) return NULL; while (matches->name[0] || matches->type[0] || matches->compatible[0]) { int match = 1; if (matches->name[0]) match &= node->name && !strcmp(matches->name, node->name); if (matches->type[0]) match &= node->type && !strcmp(matches->type, node->type); if (matches->compatible[0]) match &= of_device_is_compatible(node, matches->compatible); if (match) return matches; matches++; } return NULL; } EXPORT_SYMBOL(of_match_node); /** * of_find_matching_node - Find a node based on an of_device_id match * table. * @from: The node to start searching from or NULL, the node * you pass will not be searched, only the next one * will; typically, you pass what the previous call * returned. of_node_put() will be called on it * @matches: array of of device match structures to search in * * Returns a node pointer with refcount incremented, use * of_node_put() on it when done. */ struct device_node *of_find_matching_node(struct device_node *from, const struct of_device_id *matches) { struct device_node *np; read_lock(&devtree_lock); np = from ? 
from->allnext : allnodes; for (; np; np = np->allnext) { if (of_match_node(matches, np) && of_node_get(np)) break; } of_node_put(from); read_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_matching_node); /** * of_modalias_node - Lookup appropriate modalias for a device node * @node: pointer to a device tree node * @modalias: Pointer to buffer that modalias value will be copied into * @len: Length of modalias value * * Based on the value of the compatible property, this routine will attempt * to choose an appropriate modalias value for a particular device tree node. * It does this by stripping the manufacturer prefix (as delimited by a ',') * from the first entry in the compatible list property. * * This routine returns 0 on success, <0 on failure. */ int of_modalias_node(struct device_node *node, char *modalias, int len) { const char *compatible, *p; int cplen; compatible = of_get_property(node, "compatible", &cplen); if (!compatible || strlen(compatible) > cplen) return -ENODEV; p = strchr(compatible, ','); strlcpy(modalias, p ? p + 1 : compatible, len); return 0; } EXPORT_SYMBOL_GPL(of_modalias_node); /** * of_find_node_by_phandle - Find a node given a phandle * @handle: phandle of the node to find * * Returns a node pointer with refcount incremented, use * of_node_put() on it when done. */ struct device_node *of_find_node_by_phandle(phandle handle) { struct device_node *np; read_lock(&devtree_lock); for (np = allnodes; np; np = np->allnext) if (np->phandle == handle) break; of_node_get(np); read_unlock(&devtree_lock); return np; } EXPORT_SYMBOL(of_find_node_by_phandle); /** * of_parse_phandle - Resolve a phandle property to a device_node pointer * @np: Pointer to device node holding phandle property * @phandle_name: Name of property holding a phandle value * @index: For properties holding a table of phandles, this is the index into * the table * * Returns the device_node pointer with refcount incremented. Use * of_node_put() on it when done. 
*/ struct device_node * of_parse_phandle(struct device_node *np, const char *phandle_name, int index) { const __be32 *phandle; int size; phandle = of_get_property(np, phandle_name, &size); if ((!phandle) || (size < sizeof(*phandle) * (index + 1))) return NULL; return of_find_node_by_phandle(be32_to_cpup(phandle + index)); } EXPORT_SYMBOL(of_parse_phandle); /** * of_parse_phandles_with_args - Find a node pointed by phandle in a list * @np: pointer to a device tree node containing a list * @list_name: property name that contains a list * @cells_name: property name that specifies phandles' arguments count * @index: index of a phandle to parse out * @out_node: optional pointer to device_node struct pointer (will be filled) * @out_args: optional pointer to arguments pointer (will be filled) * * This function is useful to parse lists of phandles and their arguments. * Returns 0 on success and fills out_node and out_args, on error returns * appropriate errno value. * * Example: * * phandle1: node1 { * #list-cells = <2>; * } * * phandle2: node2 { * #list-cells = <1>; * } * * node3 { * list = <&phandle1 1 2 &phandle2 3>; * } * * To get a device_node of the `node2' node you may call this: * of_parse_phandles_with_args(node3, "list", "#list-cells", 2, &node2, &args); */ int of_parse_phandles_with_args(struct device_node *np, const char *list_name, const char *cells_name, int index, struct device_node **out_node, const void **out_args) { int ret = -EINVAL; const __be32 *list; const __be32 *list_end; int size; int cur_index = 0; struct device_node *node = NULL; const void *args = NULL; list = of_get_property(np, list_name, &size); if (!list) { ret = -ENOENT; goto err0; } list_end = list + size / sizeof(*list); while (list < list_end) { const __be32 *cells; phandle phandle; phandle = be32_to_cpup(list++); args = list; /* one cell hole in the list = <>; */ if (!phandle) goto next; node = of_find_node_by_phandle(phandle); if (!node) { pr_debug("%s: could not find phandle\n", 
np->full_name); goto err0; } cells = of_get_property(node, cells_name, &size); if (!cells || size != sizeof(*cells)) { pr_debug("%s: could not get %s for %s\n", np->full_name, cells_name, node->full_name); goto err1; } list += be32_to_cpup(cells); if (list > list_end) { pr_debug("%s: insufficient arguments length\n", np->full_name); goto err1; } next: if (cur_index == index) break; of_node_put(node); node = NULL; args = NULL; cur_index++; } if (!node) { /* * args w/o node indicates that the loop above has stopped at * the 'hole' cell. Report this differently. */ if (args) ret = -EEXIST; else ret = -ENOENT; goto err0; } if (out_node) *out_node = node; if (out_args) *out_args = args; return 0; err1: of_node_put(node); err0: pr_debug("%s failed with status %d\n", __func__, ret); return ret; } EXPORT_SYMBOL(of_parse_phandles_with_args); /** * prom_add_property - Add a property to a node */ int prom_add_property(struct device_node *np, struct property *prop) { struct property **next; unsigned long flags; prop->next = NULL; write_lock_irqsave(&devtree_lock, flags); next = &np->properties; while (*next) { if (strcmp(prop->name, (*next)->name) == 0) { /* duplicate ! don't insert it */ write_unlock_irqrestore(&devtree_lock, flags); return -1; } next = &(*next)->next; } *next = prop; write_unlock_irqrestore(&devtree_lock, flags); #ifdef CONFIG_PROC_DEVICETREE /* try to add to proc as well if it was initialized */ if (np->pde) proc_device_tree_add_prop(np->pde, prop); #endif /* CONFIG_PROC_DEVICETREE */ return 0; } /** * prom_remove_property - Remove a property from a node. * * Note that we don't actually remove it, since we have given out * who-knows-how-many pointers to the data using get-property. * Instead we just move the property to the "dead properties" * list, so it won't be found any more. 
*/ int prom_remove_property(struct device_node *np, struct property *prop) { struct property **next; unsigned long flags; int found = 0; write_lock_irqsave(&devtree_lock, flags); next = &np->properties; while (*next) { if (*next == prop) { /* found the node */ *next = prop->next; prop->next = np->deadprops; np->deadprops = prop; found = 1; break; } next = &(*next)->next; } write_unlock_irqrestore(&devtree_lock, flags); if (!found) return -ENODEV; #ifdef CONFIG_PROC_DEVICETREE /* try to remove the proc node as well */ if (np->pde) proc_device_tree_remove_prop(np->pde, prop); #endif /* CONFIG_PROC_DEVICETREE */ return 0; } /* * prom_update_property - Update a property in a node. * * Note that we don't actually remove it, since we have given out * who-knows-how-many pointers to the data using get-property. * Instead we just move the property to the "dead properties" list, * and add the new property to the property list */ int prom_update_property(struct device_node *np, struct property *newprop, struct property *oldprop) { struct property **next; unsigned long flags; int found = 0; write_lock_irqsave(&devtree_lock, flags); next = &np->properties; while (*next) { if (*next == oldprop) { /* found the node */ newprop->next = oldprop->next; *next = newprop; oldprop->next = np->deadprops; np->deadprops = oldprop; found = 1; break; } next = &(*next)->next; } write_unlock_irqrestore(&devtree_lock, flags); if (!found) return -ENODEV; #ifdef CONFIG_PROC_DEVICETREE /* try to add to proc as well if it was initialized */ if (np->pde) proc_device_tree_update_prop(np->pde, newprop, oldprop); #endif /* CONFIG_PROC_DEVICETREE */ return 0; } #if defined(CONFIG_OF_DYNAMIC) /* * Support for dynamic device trees. * * On some platforms, the device tree can be manipulated at runtime. * The routines in this section support adding, removing and changing * device tree nodes. */ /** * of_attach_node - Plug a device node into the tree and global list. 
*/ void of_attach_node(struct device_node *np) { unsigned long flags; write_lock_irqsave(&devtree_lock, flags); np->sibling = np->parent->child; np->allnext = allnodes; np->parent->child = np; allnodes = np; write_unlock_irqrestore(&devtree_lock, flags); } /** * of_detach_node - "Unplug" a node from the device tree. * * The caller must hold a reference to the node. The memory associated with * the node is not freed until its refcount goes to zero. */ void of_detach_node(struct device_node *np) { struct device_node *parent; unsigned long flags; write_lock_irqsave(&devtree_lock, flags); parent = np->parent; if (!parent) goto out_unlock; if (allnodes == np) allnodes = np->allnext; else { struct device_node *prev; for (prev = allnodes; prev->allnext != np; prev = prev->allnext) ; prev->allnext = np->allnext; } if (parent->child == np) parent->child = np->sibling; else { struct device_node *prevsib; for (prevsib = np->parent->child; prevsib->sibling != np; prevsib = prevsib->sibling) ; prevsib->sibling = np->sibling; } of_node_set_flag(np, OF_DETACHED); out_unlock: write_unlock_irqrestore(&devtree_lock, flags); } #endif /* defined(CONFIG_OF_DYNAMIC) */
gpl-2.0
nychitman1/android_kernel_google_pixel
arch/parisc/kernel/signal32.c
1872
13281
/* Signal support for 32-bit kernel builds * * Copyright (C) 2001 Matthew Wilcox <willy at parisc-linux.org> * Copyright (C) 2006 Kyle McMartin <kyle at parisc-linux.org> * * Code was mostly borrowed from kernel/signal.c. * See kernel/signal.c for additional Copyrights. * * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/compat.h> #include <linux/module.h> #include <linux/unistd.h> #include <linux/init.h> #include <linux/sched.h> #include <linux/syscalls.h> #include <linux/types.h> #include <linux/errno.h> #include <asm/uaccess.h> #include "signal32.h" #define DEBUG_COMPAT_SIG 0 #define DEBUG_COMPAT_SIG_LEVEL 2 #if DEBUG_COMPAT_SIG #define DBG(LEVEL, ...) \ ((DEBUG_COMPAT_SIG_LEVEL >= LEVEL) \ ? printk(__VA_ARGS__) : (void) 0) #else #define DBG(LEVEL, ...) 
#endif inline void sigset_32to64(sigset_t *s64, compat_sigset_t *s32) { s64->sig[0] = s32->sig[0] | ((unsigned long)s32->sig[1] << 32); } inline void sigset_64to32(compat_sigset_t *s32, sigset_t *s64) { s32->sig[0] = s64->sig[0] & 0xffffffffUL; s32->sig[1] = (s64->sig[0] >> 32) & 0xffffffffUL; } long restore_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __user * rf, struct pt_regs *regs) { long err = 0; compat_uint_t compat_reg; compat_uint_t compat_regt; int regn; /* When loading 32-bit values into 64-bit registers make sure to clear the upper 32-bits */ DBG(2,"restore_sigcontext32: PER_LINUX32 process\n"); DBG(2,"restore_sigcontext32: sc = 0x%p, rf = 0x%p, regs = 0x%p\n", sc, rf, regs); DBG(2,"restore_sigcontext32: compat_sigcontext is %#lx bytes\n", sizeof(*sc)); for(regn=0; regn < 32; regn++){ err |= __get_user(compat_reg,&sc->sc_gr[regn]); regs->gr[regn] = compat_reg; /* Load upper half */ err |= __get_user(compat_regt,&rf->rf_gr[regn]); regs->gr[regn] = ((u64)compat_regt << 32) | (u64)compat_reg; DBG(3,"restore_sigcontext32: gr%02d = %#lx (%#x / %#x)\n", regn, regs->gr[regn], compat_regt, compat_reg); } DBG(2,"restore_sigcontext32: sc->sc_fr = 0x%p (%#lx)\n",sc->sc_fr, sizeof(sc->sc_fr)); /* XXX: BE WARNED FR's are 64-BIT! 
*/ err |= __copy_from_user(regs->fr, sc->sc_fr, sizeof(regs->fr)); /* Better safe than sorry, pass __get_user two things of the same size and let gcc do the upward conversion to 64-bits */ err |= __get_user(compat_reg, &sc->sc_iaoq[0]); /* Load upper half */ err |= __get_user(compat_regt, &rf->rf_iaoq[0]); regs->iaoq[0] = ((u64)compat_regt << 32) | (u64)compat_reg; DBG(2,"restore_sigcontext32: upper half of iaoq[0] = %#lx\n", compat_regt); DBG(2,"restore_sigcontext32: sc->sc_iaoq[0] = %p => %#x\n", &sc->sc_iaoq[0], compat_reg); err |= __get_user(compat_reg, &sc->sc_iaoq[1]); /* Load upper half */ err |= __get_user(compat_regt, &rf->rf_iaoq[1]); regs->iaoq[1] = ((u64)compat_regt << 32) | (u64)compat_reg; DBG(2,"restore_sigcontext32: upper half of iaoq[1] = %#lx\n", compat_regt); DBG(2,"restore_sigcontext32: sc->sc_iaoq[1] = %p => %#x\n", &sc->sc_iaoq[1],compat_reg); DBG(2,"restore_sigcontext32: iaoq is %#lx / %#lx\n", regs->iaoq[0],regs->iaoq[1]); err |= __get_user(compat_reg, &sc->sc_iasq[0]); /* Load the upper half for iasq */ err |= __get_user(compat_regt, &rf->rf_iasq[0]); regs->iasq[0] = ((u64)compat_regt << 32) | (u64)compat_reg; DBG(2,"restore_sigcontext32: upper half of iasq[0] = %#lx\n", compat_regt); err |= __get_user(compat_reg, &sc->sc_iasq[1]); /* Load the upper half for iasq */ err |= __get_user(compat_regt, &rf->rf_iasq[1]); regs->iasq[1] = ((u64)compat_regt << 32) | (u64)compat_reg; DBG(2,"restore_sigcontext32: upper half of iasq[1] = %#lx\n", compat_regt); DBG(2,"restore_sigcontext32: iasq is %#lx / %#lx\n", regs->iasq[0],regs->iasq[1]); err |= __get_user(compat_reg, &sc->sc_sar); /* Load the upper half for sar */ err |= __get_user(compat_regt, &rf->rf_sar); regs->sar = ((u64)compat_regt << 32) | (u64)compat_reg; DBG(2,"restore_sigcontext32: upper_half & sar = %#lx\n", compat_regt); DBG(2,"restore_sigcontext32: sar is %#lx\n", regs->sar); DBG(2,"restore_sigcontext32: r28 is %ld\n", regs->gr[28]); return err; } /* * Set up the sigcontext structure 
for this process. * This is not an easy task if the kernel is 64-bit, it will require * that we examine the process personality to determine if we need to * truncate for a 32-bit userspace. */ long setup_sigcontext32(struct compat_sigcontext __user *sc, struct compat_regfile __user * rf, struct pt_regs *regs, int in_syscall) { compat_int_t flags = 0; long err = 0; compat_uint_t compat_reg; compat_uint_t compat_regb; int regn; if (on_sig_stack((unsigned long) sc)) flags |= PARISC_SC_FLAG_ONSTACK; if (in_syscall) { DBG(1,"setup_sigcontext32: in_syscall\n"); flags |= PARISC_SC_FLAG_IN_SYSCALL; /* Truncate gr31 */ compat_reg = (compat_uint_t)(regs->gr[31]); /* regs->iaoq is undefined in the syscall return path */ err |= __put_user(compat_reg, &sc->sc_iaoq[0]); DBG(2,"setup_sigcontext32: sc->sc_iaoq[0] = %p <= %#x\n", &sc->sc_iaoq[0], compat_reg); /* Store upper half */ compat_reg = (compat_uint_t)(regs->gr[31] >> 32); err |= __put_user(compat_reg, &rf->rf_iaoq[0]); DBG(2,"setup_sigcontext32: upper half iaoq[0] = %#x\n", compat_reg); compat_reg = (compat_uint_t)(regs->gr[31]+4); err |= __put_user(compat_reg, &sc->sc_iaoq[1]); DBG(2,"setup_sigcontext32: sc->sc_iaoq[1] = %p <= %#x\n", &sc->sc_iaoq[1], compat_reg); /* Store upper half */ compat_reg = (compat_uint_t)((regs->gr[31]+4) >> 32); err |= __put_user(compat_reg, &rf->rf_iaoq[1]); DBG(2,"setup_sigcontext32: upper half iaoq[1] = %#x\n", compat_reg); /* Truncate sr3 */ compat_reg = (compat_uint_t)(regs->sr[3]); err |= __put_user(compat_reg, &sc->sc_iasq[0]); err |= __put_user(compat_reg, &sc->sc_iasq[1]); /* Store upper half */ compat_reg = (compat_uint_t)(regs->sr[3] >> 32); err |= __put_user(compat_reg, &rf->rf_iasq[0]); err |= __put_user(compat_reg, &rf->rf_iasq[1]); DBG(2,"setup_sigcontext32: upper half iasq[0] = %#x\n", compat_reg); DBG(2,"setup_sigcontext32: upper half iasq[1] = %#x\n", compat_reg); DBG(1,"setup_sigcontext32: iaoq %#lx / %#lx\n", regs->gr[31], regs->gr[31]+4); } else { compat_reg = 
(compat_uint_t)(regs->iaoq[0]); err |= __put_user(compat_reg, &sc->sc_iaoq[0]); DBG(2,"setup_sigcontext32: sc->sc_iaoq[0] = %p <= %#x\n", &sc->sc_iaoq[0], compat_reg); /* Store upper half */ compat_reg = (compat_uint_t)(regs->iaoq[0] >> 32); err |= __put_user(compat_reg, &rf->rf_iaoq[0]); DBG(2,"setup_sigcontext32: upper half iaoq[0] = %#x\n", compat_reg); compat_reg = (compat_uint_t)(regs->iaoq[1]); err |= __put_user(compat_reg, &sc->sc_iaoq[1]); DBG(2,"setup_sigcontext32: sc->sc_iaoq[1] = %p <= %#x\n", &sc->sc_iaoq[1], compat_reg); /* Store upper half */ compat_reg = (compat_uint_t)(regs->iaoq[1] >> 32); err |= __put_user(compat_reg, &rf->rf_iaoq[1]); DBG(2,"setup_sigcontext32: upper half iaoq[1] = %#x\n", compat_reg); compat_reg = (compat_uint_t)(regs->iasq[0]); err |= __put_user(compat_reg, &sc->sc_iasq[0]); DBG(2,"setup_sigcontext32: sc->sc_iasq[0] = %p <= %#x\n", &sc->sc_iasq[0], compat_reg); /* Store upper half */ compat_reg = (compat_uint_t)(regs->iasq[0] >> 32); err |= __put_user(compat_reg, &rf->rf_iasq[0]); DBG(2,"setup_sigcontext32: upper half iasq[0] = %#x\n", compat_reg); compat_reg = (compat_uint_t)(regs->iasq[1]); err |= __put_user(compat_reg, &sc->sc_iasq[1]); DBG(2,"setup_sigcontext32: sc->sc_iasq[1] = %p <= %#x\n", &sc->sc_iasq[1], compat_reg); /* Store upper half */ compat_reg = (compat_uint_t)(regs->iasq[1] >> 32); err |= __put_user(compat_reg, &rf->rf_iasq[1]); DBG(2,"setup_sigcontext32: upper half iasq[1] = %#x\n", compat_reg); /* Print out the IAOQ for debugging */ DBG(1,"setup_sigcontext32: ia0q %#lx / %#lx\n", regs->iaoq[0], regs->iaoq[1]); } err |= __put_user(flags, &sc->sc_flags); DBG(1,"setup_sigcontext32: Truncating general registers.\n"); for(regn=0; regn < 32; regn++){ /* Truncate a general register */ compat_reg = (compat_uint_t)(regs->gr[regn]); err |= __put_user(compat_reg, &sc->sc_gr[regn]); /* Store upper half */ compat_regb = (compat_uint_t)(regs->gr[regn] >> 32); err |= __put_user(compat_regb, &rf->rf_gr[regn]); /* DEBUG: 
Write out the "upper / lower" register data */ DBG(2,"setup_sigcontext32: gr%02d = %#x / %#x\n", regn, compat_regb, compat_reg); } /* Copy the floating point registers (same size) XXX: BE WARNED FR's are 64-BIT! */ DBG(1,"setup_sigcontext32: Copying from regs to sc, " "sc->sc_fr size = %#lx, regs->fr size = %#lx\n", sizeof(regs->fr), sizeof(sc->sc_fr)); err |= __copy_to_user(sc->sc_fr, regs->fr, sizeof(regs->fr)); compat_reg = (compat_uint_t)(regs->sar); err |= __put_user(compat_reg, &sc->sc_sar); DBG(2,"setup_sigcontext32: sar is %#x\n", compat_reg); /* Store upper half */ compat_reg = (compat_uint_t)(regs->sar >> 32); err |= __put_user(compat_reg, &rf->rf_sar); DBG(2,"setup_sigcontext32: upper half sar = %#x\n", compat_reg); DBG(1,"setup_sigcontext32: r28 is %ld\n", regs->gr[28]); return err; } int copy_siginfo_from_user32 (siginfo_t *to, compat_siginfo_t __user *from) { compat_uptr_t addr; int err; if (!access_ok(VERIFY_READ, from, sizeof(compat_siginfo_t))) return -EFAULT; err = __get_user(to->si_signo, &from->si_signo); err |= __get_user(to->si_errno, &from->si_errno); err |= __get_user(to->si_code, &from->si_code); if (to->si_code < 0) err |= __copy_from_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE); else { switch (to->si_code >> 16) { case __SI_CHLD >> 16: err |= __get_user(to->si_utime, &from->si_utime); err |= __get_user(to->si_stime, &from->si_stime); err |= __get_user(to->si_status, &from->si_status); default: err |= __get_user(to->si_pid, &from->si_pid); err |= __get_user(to->si_uid, &from->si_uid); break; case __SI_FAULT >> 16: err |= __get_user(addr, &from->si_addr); to->si_addr = compat_ptr(addr); break; case __SI_POLL >> 16: err |= __get_user(to->si_band, &from->si_band); err |= __get_user(to->si_fd, &from->si_fd); break; case __SI_RT >> 16: /* This is not generated by the kernel as of now. 
*/ case __SI_MESGQ >> 16: err |= __get_user(to->si_pid, &from->si_pid); err |= __get_user(to->si_uid, &from->si_uid); err |= __get_user(to->si_int, &from->si_int); break; } } return err; } int copy_siginfo_to_user32 (compat_siginfo_t __user *to, const siginfo_t *from) { compat_uptr_t addr; compat_int_t val; int err; if (!access_ok(VERIFY_WRITE, to, sizeof(compat_siginfo_t))) return -EFAULT; /* If you change siginfo_t structure, please be sure this code is fixed accordingly. It should never copy any pad contained in the structure to avoid security leaks, but must copy the generic 3 ints plus the relevant union member. This routine must convert siginfo from 64bit to 32bit as well at the same time. */ err = __put_user(from->si_signo, &to->si_signo); err |= __put_user(from->si_errno, &to->si_errno); err |= __put_user((short)from->si_code, &to->si_code); if (from->si_code < 0) err |= __copy_to_user(&to->_sifields._pad, &from->_sifields._pad, SI_PAD_SIZE); else { switch (from->si_code >> 16) { case __SI_CHLD >> 16: err |= __put_user(from->si_utime, &to->si_utime); err |= __put_user(from->si_stime, &to->si_stime); err |= __put_user(from->si_status, &to->si_status); default: err |= __put_user(from->si_pid, &to->si_pid); err |= __put_user(from->si_uid, &to->si_uid); break; case __SI_FAULT >> 16: addr = ptr_to_compat(from->si_addr); err |= __put_user(addr, &to->si_addr); break; case __SI_POLL >> 16: err |= __put_user(from->si_band, &to->si_band); err |= __put_user(from->si_fd, &to->si_fd); break; case __SI_TIMER >> 16: err |= __put_user(from->si_tid, &to->si_tid); err |= __put_user(from->si_overrun, &to->si_overrun); val = (compat_int_t)from->si_int; err |= __put_user(val, &to->si_int); break; case __SI_RT >> 16: /* Not generated by the kernel as of now. */ case __SI_MESGQ >> 16: err |= __put_user(from->si_uid, &to->si_uid); err |= __put_user(from->si_pid, &to->si_pid); val = (compat_int_t)from->si_int; err |= __put_user(val, &to->si_int); break; } } return err; }
gpl-2.0
bkero/android_kernel_teclast_x98pro
arch/mips/sgi-ip27/ip27-klconfig.c
2128
2917
/* * Copyright (C) 1999, 2000 Ralf Baechle (ralf@gnu.org) * Copyright (C) 1999, 2000 Silicon Graphics, Inc. */ #include <linux/kernel.h> #include <linux/sched.h> #include <linux/interrupt.h> #include <linux/kernel_stat.h> #include <linux/param.h> #include <linux/timex.h> #include <linux/mm.h> #include <asm/sn/klconfig.h> #include <asm/sn/arch.h> #include <asm/sn/gda.h> klinfo_t *find_component(lboard_t *brd, klinfo_t *kli, unsigned char struct_type) { int index, j; if (kli == (klinfo_t *)NULL) { index = 0; } else { for (j = 0; j < KLCF_NUM_COMPS(brd); j++) if (kli == KLCF_COMP(brd, j)) break; index = j; if (index == KLCF_NUM_COMPS(brd)) { printk("find_component: Bad pointer: 0x%p\n", kli); return (klinfo_t *)NULL; } index++; /* next component */ } for (; index < KLCF_NUM_COMPS(brd); index++) { kli = KLCF_COMP(brd, index); if (KLCF_COMP_TYPE(kli) == struct_type) return kli; } /* Didn't find it. */ return (klinfo_t *)NULL; } klinfo_t *find_first_component(lboard_t *brd, unsigned char struct_type) { return find_component(brd, (klinfo_t *)NULL, struct_type); } lboard_t *find_lboard(lboard_t *start, unsigned char brd_type) { /* Search all boards stored on this node. */ while (start) { if (start->brd_type == brd_type) return start; start = KLCF_NEXT(start); } /* Didn't find it. */ return (lboard_t *)NULL; } lboard_t *find_lboard_class(lboard_t *start, unsigned char brd_type) { /* Search all boards stored on this node. */ while (start) { if (KLCLASS(start->brd_type) == KLCLASS(brd_type)) return start; start = KLCF_NEXT(start); } /* Didn't find it. 
*/ return (lboard_t *)NULL; } cnodeid_t get_cpu_cnode(cpuid_t cpu) { return CPUID_TO_COMPACT_NODEID(cpu); } klcpu_t *nasid_slice_to_cpuinfo(nasid_t nasid, int slice) { lboard_t *brd; klcpu_t *acpu; if (!(brd = find_lboard((lboard_t *)KL_CONFIG_INFO(nasid), KLTYPE_IP27))) return (klcpu_t *)NULL; if (!(acpu = (klcpu_t *)find_first_component(brd, KLSTRUCT_CPU))) return (klcpu_t *)NULL; do { if ((acpu->cpu_info.physid) == slice) return acpu; } while ((acpu = (klcpu_t *)find_component(brd, (klinfo_t *)acpu, KLSTRUCT_CPU))); return (klcpu_t *)NULL; } klcpu_t *sn_get_cpuinfo(cpuid_t cpu) { nasid_t nasid; int slice; klcpu_t *acpu; gda_t *gdap = GDA; cnodeid_t cnode; if (!(cpu < MAXCPUS)) { printk("sn_get_cpuinfo: illegal cpuid 0x%lx\n", cpu); return NULL; } cnode = get_cpu_cnode(cpu); if (cnode == INVALID_CNODEID) return NULL; if ((nasid = gdap->g_nasidtable[cnode]) == INVALID_NASID) return NULL; for (slice = 0; slice < CPUS_PER_NODE; slice++) { acpu = nasid_slice_to_cpuinfo(nasid, slice); if (acpu && acpu->cpu_info.virtid == cpu) return acpu; } return NULL; } int get_cpu_slice(cpuid_t cpu) { klcpu_t *acpu; if ((acpu = sn_get_cpuinfo(cpu)) == NULL) return -1; return acpu->cpu_info.physid; }
gpl-2.0
Ki113R/android_kernel_samsung_golden
drivers/clocksource/sh_cmt.c
2384
16971
/* * SuperH Timer Support - CMT * * Copyright (C) 2008 Magnus Damm * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/init.h> #include <linux/platform_device.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/ioport.h> #include <linux/io.h> #include <linux/clk.h> #include <linux/irq.h> #include <linux/err.h> #include <linux/clocksource.h> #include <linux/clockchips.h> #include <linux/sh_timer.h> #include <linux/slab.h> struct sh_cmt_priv { void __iomem *mapbase; struct clk *clk; unsigned long width; /* 16 or 32 bit version of hardware block */ unsigned long overflow_bit; unsigned long clear_bits; struct irqaction irqaction; struct platform_device *pdev; unsigned long flags; unsigned long match_value; unsigned long next_match_value; unsigned long max_match_value; unsigned long rate; spinlock_t lock; struct clock_event_device ced; struct clocksource cs; unsigned long total_cycles; }; static DEFINE_SPINLOCK(sh_cmt_lock); #define CMSTR -1 /* shared register */ #define CMCSR 0 /* channel register */ #define CMCNT 1 /* channel register */ #define CMCOR 2 /* channel register */ static inline unsigned long sh_cmt_read(struct sh_cmt_priv *p, int reg_nr) { struct sh_timer_config *cfg = p->pdev->dev.platform_data; void __iomem *base = p->mapbase; unsigned long offs; if (reg_nr == CMSTR) { offs = 0; base -= cfg->channel_offset; } else offs 
= reg_nr; if (p->width == 16) offs <<= 1; else { offs <<= 2; if ((reg_nr == CMCNT) || (reg_nr == CMCOR)) return ioread32(base + offs); } return ioread16(base + offs); } static inline void sh_cmt_write(struct sh_cmt_priv *p, int reg_nr, unsigned long value) { struct sh_timer_config *cfg = p->pdev->dev.platform_data; void __iomem *base = p->mapbase; unsigned long offs; if (reg_nr == CMSTR) { offs = 0; base -= cfg->channel_offset; } else offs = reg_nr; if (p->width == 16) offs <<= 1; else { offs <<= 2; if ((reg_nr == CMCNT) || (reg_nr == CMCOR)) { iowrite32(value, base + offs); return; } } iowrite16(value, base + offs); } static unsigned long sh_cmt_get_counter(struct sh_cmt_priv *p, int *has_wrapped) { unsigned long v1, v2, v3; int o1, o2; o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit; /* Make sure the timer value is stable. Stolen from acpi_pm.c */ do { o2 = o1; v1 = sh_cmt_read(p, CMCNT); v2 = sh_cmt_read(p, CMCNT); v3 = sh_cmt_read(p, CMCNT); o1 = sh_cmt_read(p, CMCSR) & p->overflow_bit; } while (unlikely((o1 != o2) || (v1 > v2 && v1 < v3) || (v2 > v3 && v2 < v1) || (v3 > v1 && v3 < v2))); *has_wrapped = o1; return v2; } static void sh_cmt_start_stop_ch(struct sh_cmt_priv *p, int start) { struct sh_timer_config *cfg = p->pdev->dev.platform_data; unsigned long flags, value; /* start stop register shared by multiple timer channels */ spin_lock_irqsave(&sh_cmt_lock, flags); value = sh_cmt_read(p, CMSTR); if (start) value |= 1 << cfg->timer_bit; else value &= ~(1 << cfg->timer_bit); sh_cmt_write(p, CMSTR, value); spin_unlock_irqrestore(&sh_cmt_lock, flags); } static int sh_cmt_enable(struct sh_cmt_priv *p, unsigned long *rate) { int ret; /* enable clock */ ret = clk_enable(p->clk); if (ret) { dev_err(&p->pdev->dev, "cannot enable clock\n"); return ret; } /* make sure channel is disabled */ sh_cmt_start_stop_ch(p, 0); /* configure channel, periodic mode and maximum timeout */ if (p->width == 16) { *rate = clk_get_rate(p->clk) / 512; sh_cmt_write(p, CMCSR, 0x43); } else 
{ *rate = clk_get_rate(p->clk) / 8; sh_cmt_write(p, CMCSR, 0x01a4); } sh_cmt_write(p, CMCOR, 0xffffffff); sh_cmt_write(p, CMCNT, 0); /* enable channel */ sh_cmt_start_stop_ch(p, 1); return 0; } static void sh_cmt_disable(struct sh_cmt_priv *p) { /* disable channel */ sh_cmt_start_stop_ch(p, 0); /* disable interrupts in CMT block */ sh_cmt_write(p, CMCSR, 0); /* stop clock */ clk_disable(p->clk); } /* private flags */ #define FLAG_CLOCKEVENT (1 << 0) #define FLAG_CLOCKSOURCE (1 << 1) #define FLAG_REPROGRAM (1 << 2) #define FLAG_SKIPEVENT (1 << 3) #define FLAG_IRQCONTEXT (1 << 4) static void sh_cmt_clock_event_program_verify(struct sh_cmt_priv *p, int absolute) { unsigned long new_match; unsigned long value = p->next_match_value; unsigned long delay = 0; unsigned long now = 0; int has_wrapped; now = sh_cmt_get_counter(p, &has_wrapped); p->flags |= FLAG_REPROGRAM; /* force reprogram */ if (has_wrapped) { /* we're competing with the interrupt handler. * -> let the interrupt handler reprogram the timer. * -> interrupt number two handles the event. */ p->flags |= FLAG_SKIPEVENT; return; } if (absolute) now = 0; do { /* reprogram the timer hardware, * but don't save the new match value yet. */ new_match = now + value + delay; if (new_match > p->max_match_value) new_match = p->max_match_value; sh_cmt_write(p, CMCOR, new_match); now = sh_cmt_get_counter(p, &has_wrapped); if (has_wrapped && (new_match > p->match_value)) { /* we are changing to a greater match value, * so this wrap must be caused by the counter * matching the old value. * -> first interrupt reprograms the timer. * -> interrupt number two handles the event. */ p->flags |= FLAG_SKIPEVENT; break; } if (has_wrapped) { /* we are changing to a smaller match value, * so the wrap must be caused by the counter * matching the new value. * -> save programmed match value. * -> let isr handle the event. 
*/ p->match_value = new_match; break; } /* be safe: verify hardware settings */ if (now < new_match) { /* timer value is below match value, all good. * this makes sure we won't miss any match events. * -> save programmed match value. * -> let isr handle the event. */ p->match_value = new_match; break; } /* the counter has reached a value greater * than our new match value. and since the * has_wrapped flag isn't set we must have * programmed a too close event. * -> increase delay and retry. */ if (delay) delay <<= 1; else delay = 1; if (!delay) dev_warn(&p->pdev->dev, "too long delay\n"); } while (delay); } static void __sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta) { if (delta > p->max_match_value) dev_warn(&p->pdev->dev, "delta out of range\n"); p->next_match_value = delta; sh_cmt_clock_event_program_verify(p, 0); } static void sh_cmt_set_next(struct sh_cmt_priv *p, unsigned long delta) { unsigned long flags; spin_lock_irqsave(&p->lock, flags); __sh_cmt_set_next(p, delta); spin_unlock_irqrestore(&p->lock, flags); } static irqreturn_t sh_cmt_interrupt(int irq, void *dev_id) { struct sh_cmt_priv *p = dev_id; /* clear flags */ sh_cmt_write(p, CMCSR, sh_cmt_read(p, CMCSR) & p->clear_bits); /* update clock source counter to begin with if enabled * the wrap flag should be cleared by the timer specific * isr before we end up here. 
*/ if (p->flags & FLAG_CLOCKSOURCE) p->total_cycles += p->match_value + 1; if (!(p->flags & FLAG_REPROGRAM)) p->next_match_value = p->max_match_value; p->flags |= FLAG_IRQCONTEXT; if (p->flags & FLAG_CLOCKEVENT) { if (!(p->flags & FLAG_SKIPEVENT)) { if (p->ced.mode == CLOCK_EVT_MODE_ONESHOT) { p->next_match_value = p->max_match_value; p->flags |= FLAG_REPROGRAM; } p->ced.event_handler(&p->ced); } } p->flags &= ~FLAG_SKIPEVENT; if (p->flags & FLAG_REPROGRAM) { p->flags &= ~FLAG_REPROGRAM; sh_cmt_clock_event_program_verify(p, 1); if (p->flags & FLAG_CLOCKEVENT) if ((p->ced.mode == CLOCK_EVT_MODE_SHUTDOWN) || (p->match_value == p->next_match_value)) p->flags &= ~FLAG_REPROGRAM; } p->flags &= ~FLAG_IRQCONTEXT; return IRQ_HANDLED; } static int sh_cmt_start(struct sh_cmt_priv *p, unsigned long flag) { int ret = 0; unsigned long flags; spin_lock_irqsave(&p->lock, flags); if (!(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) ret = sh_cmt_enable(p, &p->rate); if (ret) goto out; p->flags |= flag; /* setup timeout if no clockevent */ if ((flag == FLAG_CLOCKSOURCE) && (!(p->flags & FLAG_CLOCKEVENT))) __sh_cmt_set_next(p, p->max_match_value); out: spin_unlock_irqrestore(&p->lock, flags); return ret; } static void sh_cmt_stop(struct sh_cmt_priv *p, unsigned long flag) { unsigned long flags; unsigned long f; spin_lock_irqsave(&p->lock, flags); f = p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE); p->flags &= ~flag; if (f && !(p->flags & (FLAG_CLOCKEVENT | FLAG_CLOCKSOURCE))) sh_cmt_disable(p); /* adjust the timeout to maximum if only clocksource left */ if ((flag == FLAG_CLOCKEVENT) && (p->flags & FLAG_CLOCKSOURCE)) __sh_cmt_set_next(p, p->max_match_value); spin_unlock_irqrestore(&p->lock, flags); } static struct sh_cmt_priv *cs_to_sh_cmt(struct clocksource *cs) { return container_of(cs, struct sh_cmt_priv, cs); } static cycle_t sh_cmt_clocksource_read(struct clocksource *cs) { struct sh_cmt_priv *p = cs_to_sh_cmt(cs); unsigned long flags, raw; unsigned long value; int 
has_wrapped; spin_lock_irqsave(&p->lock, flags); value = p->total_cycles; raw = sh_cmt_get_counter(p, &has_wrapped); if (unlikely(has_wrapped)) raw += p->match_value + 1; spin_unlock_irqrestore(&p->lock, flags); return value + raw; } static int sh_cmt_clocksource_enable(struct clocksource *cs) { int ret; struct sh_cmt_priv *p = cs_to_sh_cmt(cs); p->total_cycles = 0; ret = sh_cmt_start(p, FLAG_CLOCKSOURCE); if (!ret) __clocksource_updatefreq_hz(cs, p->rate); return ret; } static void sh_cmt_clocksource_disable(struct clocksource *cs) { sh_cmt_stop(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE); } static void sh_cmt_clocksource_resume(struct clocksource *cs) { sh_cmt_start(cs_to_sh_cmt(cs), FLAG_CLOCKSOURCE); } static int sh_cmt_register_clocksource(struct sh_cmt_priv *p, char *name, unsigned long rating) { struct clocksource *cs = &p->cs; cs->name = name; cs->rating = rating; cs->read = sh_cmt_clocksource_read; cs->enable = sh_cmt_clocksource_enable; cs->disable = sh_cmt_clocksource_disable; cs->suspend = sh_cmt_clocksource_disable; cs->resume = sh_cmt_clocksource_resume; cs->mask = CLOCKSOURCE_MASK(sizeof(unsigned long) * 8); cs->flags = CLOCK_SOURCE_IS_CONTINUOUS; dev_info(&p->pdev->dev, "used as clock source\n"); /* Register with dummy 1 Hz value, gets updated in ->enable() */ clocksource_register_hz(cs, 1); return 0; } static struct sh_cmt_priv *ced_to_sh_cmt(struct clock_event_device *ced) { return container_of(ced, struct sh_cmt_priv, ced); } static void sh_cmt_clock_event_start(struct sh_cmt_priv *p, int periodic) { struct clock_event_device *ced = &p->ced; sh_cmt_start(p, FLAG_CLOCKEVENT); /* TODO: calculate good shift from rate and counter bit width */ ced->shift = 32; ced->mult = div_sc(p->rate, NSEC_PER_SEC, ced->shift); ced->max_delta_ns = clockevent_delta2ns(p->max_match_value, ced); ced->min_delta_ns = clockevent_delta2ns(0x1f, ced); if (periodic) sh_cmt_set_next(p, ((p->rate + HZ/2) / HZ) - 1); else sh_cmt_set_next(p, p->max_match_value); } static void 
sh_cmt_clock_event_mode(enum clock_event_mode mode, struct clock_event_device *ced) { struct sh_cmt_priv *p = ced_to_sh_cmt(ced); /* deal with old setting first */ switch (ced->mode) { case CLOCK_EVT_MODE_PERIODIC: case CLOCK_EVT_MODE_ONESHOT: sh_cmt_stop(p, FLAG_CLOCKEVENT); break; default: break; } switch (mode) { case CLOCK_EVT_MODE_PERIODIC: dev_info(&p->pdev->dev, "used for periodic clock events\n"); sh_cmt_clock_event_start(p, 1); break; case CLOCK_EVT_MODE_ONESHOT: dev_info(&p->pdev->dev, "used for oneshot clock events\n"); sh_cmt_clock_event_start(p, 0); break; case CLOCK_EVT_MODE_SHUTDOWN: case CLOCK_EVT_MODE_UNUSED: sh_cmt_stop(p, FLAG_CLOCKEVENT); break; default: break; } } static int sh_cmt_clock_event_next(unsigned long delta, struct clock_event_device *ced) { struct sh_cmt_priv *p = ced_to_sh_cmt(ced); BUG_ON(ced->mode != CLOCK_EVT_MODE_ONESHOT); if (likely(p->flags & FLAG_IRQCONTEXT)) p->next_match_value = delta - 1; else sh_cmt_set_next(p, delta - 1); return 0; } static void sh_cmt_register_clockevent(struct sh_cmt_priv *p, char *name, unsigned long rating) { struct clock_event_device *ced = &p->ced; memset(ced, 0, sizeof(*ced)); ced->name = name; ced->features = CLOCK_EVT_FEAT_PERIODIC; ced->features |= CLOCK_EVT_FEAT_ONESHOT; ced->rating = rating; ced->cpumask = cpumask_of(0); ced->set_next_event = sh_cmt_clock_event_next; ced->set_mode = sh_cmt_clock_event_mode; dev_info(&p->pdev->dev, "used for clock events\n"); clockevents_register_device(ced); } static int sh_cmt_register(struct sh_cmt_priv *p, char *name, unsigned long clockevent_rating, unsigned long clocksource_rating) { if (p->width == (sizeof(p->max_match_value) * 8)) p->max_match_value = ~0; else p->max_match_value = (1 << p->width) - 1; p->match_value = p->max_match_value; spin_lock_init(&p->lock); if (clockevent_rating) sh_cmt_register_clockevent(p, name, clockevent_rating); if (clocksource_rating) sh_cmt_register_clocksource(p, name, clocksource_rating); return 0; } static int 
sh_cmt_setup(struct sh_cmt_priv *p, struct platform_device *pdev) { struct sh_timer_config *cfg = pdev->dev.platform_data; struct resource *res; int irq, ret; ret = -ENXIO; memset(p, 0, sizeof(*p)); p->pdev = pdev; if (!cfg) { dev_err(&p->pdev->dev, "missing platform data\n"); goto err0; } platform_set_drvdata(pdev, p); res = platform_get_resource(p->pdev, IORESOURCE_MEM, 0); if (!res) { dev_err(&p->pdev->dev, "failed to get I/O memory\n"); goto err0; } irq = platform_get_irq(p->pdev, 0); if (irq < 0) { dev_err(&p->pdev->dev, "failed to get irq\n"); goto err0; } /* map memory, let mapbase point to our channel */ p->mapbase = ioremap_nocache(res->start, resource_size(res)); if (p->mapbase == NULL) { dev_err(&p->pdev->dev, "failed to remap I/O memory\n"); goto err0; } /* request irq using setup_irq() (too early for request_irq()) */ p->irqaction.name = dev_name(&p->pdev->dev); p->irqaction.handler = sh_cmt_interrupt; p->irqaction.dev_id = p; p->irqaction.flags = IRQF_DISABLED | IRQF_TIMER | \ IRQF_IRQPOLL | IRQF_NOBALANCING; /* get hold of clock */ p->clk = clk_get(&p->pdev->dev, "cmt_fck"); if (IS_ERR(p->clk)) { dev_err(&p->pdev->dev, "cannot get clock\n"); ret = PTR_ERR(p->clk); goto err1; } if (resource_size(res) == 6) { p->width = 16; p->overflow_bit = 0x80; p->clear_bits = ~0x80; } else { p->width = 32; p->overflow_bit = 0x8000; p->clear_bits = ~0xc000; } ret = sh_cmt_register(p, (char *)dev_name(&p->pdev->dev), cfg->clockevent_rating, cfg->clocksource_rating); if (ret) { dev_err(&p->pdev->dev, "registration failed\n"); goto err1; } ret = setup_irq(irq, &p->irqaction); if (ret) { dev_err(&p->pdev->dev, "failed to request irq %d\n", irq); goto err1; } return 0; err1: iounmap(p->mapbase); err0: return ret; } static int __devinit sh_cmt_probe(struct platform_device *pdev) { struct sh_cmt_priv *p = platform_get_drvdata(pdev); int ret; if (p) { dev_info(&pdev->dev, "kept as earlytimer\n"); return 0; } p = kmalloc(sizeof(*p), GFP_KERNEL); if (p == NULL) { 
dev_err(&pdev->dev, "failed to allocate driver data\n"); return -ENOMEM; } ret = sh_cmt_setup(p, pdev); if (ret) { kfree(p); platform_set_drvdata(pdev, NULL); } return ret; } static int __devexit sh_cmt_remove(struct platform_device *pdev) { return -EBUSY; /* cannot unregister clockevent and clocksource */ } static struct platform_driver sh_cmt_device_driver = { .probe = sh_cmt_probe, .remove = __devexit_p(sh_cmt_remove), .driver = { .name = "sh_cmt", } }; static int __init sh_cmt_init(void) { return platform_driver_register(&sh_cmt_device_driver); } static void __exit sh_cmt_exit(void) { platform_driver_unregister(&sh_cmt_device_driver); } early_platform_init("earlytimer", &sh_cmt_device_driver); module_init(sh_cmt_init); module_exit(sh_cmt_exit); MODULE_AUTHOR("Magnus Damm"); MODULE_DESCRIPTION("SuperH CMT Timer Driver"); MODULE_LICENSE("GPL v2");
gpl-2.0
XxXPachaXxX/kernel_msm_3.0
drivers/staging/rts_pstor/ms.c
2384
104426
/* Driver for Realtek PCI-Express card reader
 *
 * Copyright(c) 2009 Realtek Semiconductor Corp. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2, or (at your option) any
 * later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * Author:
 *   wwang (wei_wang@realsil.com.cn)
 *   No. 450, Shenhu Road, Suzhou Industry Park, Suzhou, China
 */

#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>

#include "rtsx.h"
#include "rtsx_transport.h"
#include "rtsx_scsi.h"
#include "rtsx_card.h"
#include "ms.h"

/* Record the latest Memory Stick error code in the per-chip ms_info state. */
static inline void ms_set_err_code(struct rtsx_chip *chip, u8 err_code)
{
	struct ms_info *ms_card = &(chip->ms_card);

	ms_card->err_code = err_code;
}

/* Test whether the recorded Memory Stick error code equals err_code. */
static inline int ms_check_err_code(struct rtsx_chip *chip, u8 err_code)
{
	struct ms_info *ms_card = &(chip->ms_card);

	return (ms_card->err_code == err_code);
}

/* Map a previously recorded error code to a driver status; currently
 * always reports STATUS_FAIL (with TRACE_RET bookkeeping). */
static int ms_parse_err_code(struct rtsx_chip *chip)
{
	TRACE_RET(chip, STATUS_FAIL);
}

/*
 * Execute a single TPC through the ping-pong buffer and validate the
 * MS_TRANS_CFG status byte that the command queue reads back.
 * Returns STATUS_SUCCESS, or the result of ms_parse_err_code() on
 * CRC/NACK/timeout errors.
 */
static int ms_transfer_tpc(struct rtsx_chip *chip, u8 trans_mode, u8 tpc,
	u8 cnt, u8 cfg)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval;
	u8 *ptr;

	RTSX_DEBUGP("ms_transfer_tpc: tpc = 0x%x\n", tpc);

	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
		0x01, PINGPONG_BUFFER);

	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER,
		0xFF, MS_TRANSFER_START | trans_mode);
	/* Poll until the controller reports the transfer finished. */
	rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
		MS_TRANSFER_END, MS_TRANSFER_END);

	/* Read back MS_TRANS_CFG so error bits can be inspected below. */
	rtsx_add_cmd(chip, READ_REG_CMD, MS_TRANS_CFG, 0, 0);

	retval = rtsx_send_cmd(chip, MS_CARD, 5000);
	if (retval < 0) {
		rtsx_clear_ms_error(chip);
		ms_set_err_code(chip, MS_TO_ERROR);
		TRACE_RET(chip, ms_parse_err_code(chip));
	}

	/* +1: first byte of the command result buffer is skipped here,
	 * matching rtsx_get_cmd_data() usage elsewhere in this file. */
	ptr = rtsx_get_cmd_data(chip) + 1;

	if (!(tpc & 0x08)) {
		/* Read Packet: only CRC16 errors are meaningful. */
		if (*ptr & MS_CRC16_ERR) {
			ms_set_err_code(chip, MS_CRC16_ERROR);
			TRACE_RET(chip, ms_parse_err_code(chip));
		}
	} else {
		/* Write Packet */
		if (CHK_MSPRO(ms_card) && !(*ptr & 0x80)) {
			if (*ptr & (MS_INT_ERR | MS_INT_CMDNK)) {
				ms_set_err_code(chip, MS_CMD_NK);
				TRACE_RET(chip, ms_parse_err_code(chip));
			}
		}
	}

	if (*ptr & MS_RDY_TIMEOUT) {
		rtsx_clear_ms_error(chip);
		ms_set_err_code(chip, MS_TO_ERROR);
		TRACE_RET(chip, ms_parse_err_code(chip));
	}

	return STATUS_SUCCESS;
}

/*
 * DMA a multiple of 512-byte sectors to/from the card.
 * trans_mode selects direction (MS_TM_AUTO_READ / MS_TM_AUTO_WRITE);
 * anything else fails. mode_2k toggles the controller's 2K-sector mode.
 * Returns STATUS_SUCCESS / STATUS_TIMEDOUT / STATUS_FAIL.
 */
static int ms_transfer_data(struct rtsx_chip *chip, u8 trans_mode, u8 tpc,
	u16 sec_cnt, u8 cfg, int mode_2k, int use_sg, void *buf, int buf_len)
{
	int retval;
	u8 val, err_code = 0;
	enum dma_data_direction dir;

	if (!buf || !buf_len) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (trans_mode == MS_TM_AUTO_READ) {
		dir = DMA_FROM_DEVICE;
		err_code = MS_FLASH_READ_ERROR;
	} else if (trans_mode == MS_TM_AUTO_WRITE) {
		dir = DMA_TO_DEVICE;
		err_code = MS_FLASH_WRITE_ERROR;
	} else {
		TRACE_RET(chip, STATUS_FAIL);
	}

	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_SECTOR_CNT_H,
		0xFF, (u8)(sec_cnt >> 8));
	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_SECTOR_CNT_L, 0xFF, (u8)sec_cnt);
	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);

	if (mode_2k) {
		rtsx_add_cmd(chip, WRITE_REG_CMD, MS_CFG,
			MS_2K_SECTOR_MODE, MS_2K_SECTOR_MODE);
	} else {
		rtsx_add_cmd(chip, WRITE_REG_CMD, MS_CFG, MS_2K_SECTOR_MODE, 0);
	}

	trans_dma_enable(dir, chip, sec_cnt * 512, DMA_512);

	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER,
		0xFF, MS_TRANSFER_START | trans_mode);
	rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
		MS_TRANSFER_END, MS_TRANSFER_END);

	/* Kick the command queue and immediately start the DMA transfer. */
	rtsx_send_cmd_no_wait(chip);

	retval = rtsx_transfer_data(chip, MS_CARD, buf, buf_len,
		use_sg, dir, chip->mspro_timeout);
	if (retval < 0) {
		ms_set_err_code(chip, err_code);
		if (retval == -ETIMEDOUT) {
			retval = STATUS_TIMEDOUT;
		} else {
			retval = STATUS_FAIL;
		}
		TRACE_RET(chip, retval);
	}

	/* NB: RTSX_READ_REG returns from this function on read failure. */
	RTSX_READ_REG(chip, MS_TRANS_CFG, &val);
	if (val & (MS_INT_CMDNK | MS_INT_ERR | MS_CRC16_ERR | MS_RDY_TIMEOUT)) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

/*
 * Write cnt bytes from data[] to the card via the ping-pong buffer
 * (MS_TM_WRITE_BYTES). data_len must be >= cnt. On failure, decodes the
 * MS_TRANS_CFG status byte to pick an error code before failing.
 */
static int ms_write_bytes(struct rtsx_chip *chip, u8 tpc, u8 cnt, u8 cfg,
	u8 *data, int data_len)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval, i;

	if (!data || (data_len < cnt)) {
		TRACE_RET(chip, STATUS_ERROR);
	}

	rtsx_init_cmd(chip);

	for (i = 0; i < cnt; i++) {
		rtsx_add_cmd(chip, WRITE_REG_CMD,
			PPBUF_BASE2 + i, 0xFF, data[i]);
	}
	if (cnt % 2) {
		/* Pad to an even byte count with 0xFF. */
		rtsx_add_cmd(chip, WRITE_REG_CMD, PPBUF_BASE2 + i, 0xFF, 0xFF);
	}

	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
		0x01, PINGPONG_BUFFER);

	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER,
		0xFF, MS_TRANSFER_START | MS_TM_WRITE_BYTES);
	rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
		MS_TRANSFER_END, MS_TRANSFER_END);

	retval = rtsx_send_cmd(chip, MS_CARD, 5000);
	if (retval < 0) {
		u8 val = 0;

		rtsx_read_register(chip, MS_TRANS_CFG, &val);
		RTSX_DEBUGP("MS_TRANS_CFG: 0x%02x\n", val);

		rtsx_clear_ms_error(chip);

		if (!(tpc & 0x08)) {
			if (val & MS_CRC16_ERR) {
				ms_set_err_code(chip, MS_CRC16_ERROR);
				TRACE_RET(chip, ms_parse_err_code(chip));
			}
		} else {
			if (CHK_MSPRO(ms_card) && !(val & 0x80)) {
				if (val & (MS_INT_ERR | MS_INT_CMDNK)) {
					ms_set_err_code(chip, MS_CMD_NK);
					TRACE_RET(chip,
						ms_parse_err_code(chip));
				}
			}
		}

		if (val & MS_RDY_TIMEOUT) {
			ms_set_err_code(chip, MS_TO_ERROR);
			TRACE_RET(chip, ms_parse_err_code(chip));
		}

		/* No specific status bit matched: treat as timeout. */
		ms_set_err_code(chip, MS_TO_ERROR);
		TRACE_RET(chip, ms_parse_err_code(chip));
	}

	return STATUS_SUCCESS;
}

/*
 * Read cnt bytes from the card into data[] via the ping-pong buffer
 * (MS_TM_READ_BYTES). On command failure, decodes MS_TRANS_CFG like
 * ms_write_bytes() does.
 */
static int ms_read_bytes(struct rtsx_chip *chip, u8 tpc, u8 cnt, u8 cfg,
	u8 *data, int data_len)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval, i;
	u8 *ptr;

	if (!data) {
		TRACE_RET(chip, STATUS_ERROR);
	}

	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, tpc);
	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_BYTE_CNT, 0xFF, cnt);
	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, cfg);
	rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE,
		0x01, PINGPONG_BUFFER);

	rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER,
		0xFF, MS_TRANSFER_START | MS_TM_READ_BYTES);
	rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER,
		MS_TRANSFER_END, MS_TRANSFER_END);

	for (i = 0; i < data_len - 1; i++) {
		rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + i, 0, 0);
	}
	if (data_len % 2) {
		/* NOTE(review): for odd data_len this queues a read of
		 * PPBUF_BASE2 + data_len and never reads + data_len - 1;
		 * looks like even-address alignment of the last read, but
		 * confirm against the controller datasheet. */
		rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + data_len, 0, 0);
	} else {
		rtsx_add_cmd(chip, READ_REG_CMD,
			PPBUF_BASE2 + data_len - 1, 0, 0);
	}

	retval = rtsx_send_cmd(chip, MS_CARD, 5000);
	if (retval < 0) {
		u8 val = 0;

		rtsx_read_register(chip, MS_TRANS_CFG, &val);

		rtsx_clear_ms_error(chip);

		if (!(tpc & 0x08)) {
			if (val & MS_CRC16_ERR) {
				ms_set_err_code(chip, MS_CRC16_ERROR);
				TRACE_RET(chip, ms_parse_err_code(chip));
			}
		} else {
			if (CHK_MSPRO(ms_card) && !(val & 0x80)) {
				if (val & (MS_INT_ERR | MS_INT_CMDNK)) {
					ms_set_err_code(chip, MS_CMD_NK);
					TRACE_RET(chip,
						ms_parse_err_code(chip));
				}
			}
		}

		if (val & MS_RDY_TIMEOUT) {
			ms_set_err_code(chip, MS_TO_ERROR);
			TRACE_RET(chip, ms_parse_err_code(chip));
		}

		ms_set_err_code(chip, MS_TO_ERROR);
		TRACE_RET(chip, ms_parse_err_code(chip));
	}

	ptr = rtsx_get_cmd_data(chip) + 1;

	for (i = 0; i < data_len; i++) {
		data[i] = ptr[i];
	}

	if ((tpc == PRO_READ_SHORT_DATA) && (data_len == 8)) {
		RTSX_DEBUGP("Read format progress:\n");
		RTSX_DUMP(ptr, cnt);
	}

	return STATUS_SUCCESS;
}

/*
 * Program the card's read/write register window (SET_RW_REG_ADRS TPC),
 * retrying up to MS_MAX_RETRY_COUNT times.
 */
static int ms_set_rw_reg_addr(struct rtsx_chip *chip,
	u8 read_start, u8 read_cnt, u8 write_start, u8 write_cnt)
{
	int retval, i;
	u8 data[4];

	data[0] = read_start;
	data[1] = read_cnt;
	data[2] = write_start;
	data[3] = write_cnt;

	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_write_bytes(chip, SET_RW_REG_ADRS, 4,
			NO_WAIT_INT, data, 4);
		if (retval == STATUS_SUCCESS)
			return STATUS_SUCCESS;
		rtsx_clear_ms_error(chip);
	}

	TRACE_RET(chip, STATUS_FAIL);
}

/* Issue a single MS-PRO command byte via the PRO_SET_CMD TPC. */
static int ms_send_cmd(struct rtsx_chip *chip, u8 cmd, u8 cfg)
{
	u8 data[2];

	data[0] = cmd;
	data[1] = 0;

	return ms_write_bytes(chip, PRO_SET_CMD, 1, cfg, data, 1);
}

/*
 * Choose the MS bus clock from the detected card type (HG 8-bit, PRO/4-bit
 * or serial 1-bit) and the ASIC/FPGA build, then apply it and select the
 * MS card slot.
 */
static int ms_set_init_para(struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval;

	if (CHK_HG8BIT(ms_card)) {
		if (chip->asic_code) {
			ms_card->ms_clock = chip->asic_ms_hg_clk;
		} else {
			ms_card->ms_clock = chip->fpga_ms_hg_clk;
		}
	} else if (CHK_MSPRO(ms_card) || CHK_MS4BIT(ms_card)) {
		if (chip->asic_code) {
			ms_card->ms_clock = chip->asic_ms_4bit_clk;
		} else {
			ms_card->ms_clock = chip->fpga_ms_4bit_clk;
		}
	} else {
		if (chip->asic_code) {
			ms_card->ms_clock = chip->asic_ms_1bit_clk;
		} else {
			ms_card->ms_clock = chip->fpga_ms_1bit_clk;
		}
	}

	retval = switch_clock(chip, ms_card->ms_clock);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = select_card(chip, MS_CARD);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

/* Re-select the MS slot and re-apply the previously chosen clock. */
static int ms_switch_clock(struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval;

	retval = select_card(chip, MS_CARD);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = switch_clock(chip, ms_card->ms_clock);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

/*
 * Park the MS pad pull-up/pull-down configuration for the idle state.
 * Register values are per-PID (0x5209 / 0x5208 / 0x5288 QFN package).
 */
static int ms_pull_ctl_disable(struct rtsx_chip *chip)
{
	if (CHECK_PID(chip, 0x5209)) {
		RTSX_WRITE_REG(chip, CARD_PULL_CTL4, 0xFF, 0x55);
		RTSX_WRITE_REG(chip, CARD_PULL_CTL5, 0xFF, 0x55);
		RTSX_WRITE_REG(chip, CARD_PULL_CTL6, 0xFF, 0x15);
	} else if (CHECK_PID(chip, 0x5208)) {
		RTSX_WRITE_REG(chip, CARD_PULL_CTL1, 0xFF,
			MS_D1_PD | MS_D2_PD | MS_CLK_PD | MS_D6_PD);
		RTSX_WRITE_REG(chip, CARD_PULL_CTL2, 0xFF,
			MS_D3_PD | MS_D0_PD | MS_BS_PD | XD_D4_PD);
		RTSX_WRITE_REG(chip, CARD_PULL_CTL3, 0xFF,
			MS_D7_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
		RTSX_WRITE_REG(chip, CARD_PULL_CTL4, 0xFF,
			XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD);
		RTSX_WRITE_REG(chip, CARD_PULL_CTL5, 0xFF,
			MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
		RTSX_WRITE_REG(chip, CARD_PULL_CTL6, 0xFF,
			MS_D5_PD | MS_D4_PD);
	} else if (CHECK_PID(chip, 0x5288)) {
		if (CHECK_BARO_PKG(chip, QFN)) {
			RTSX_WRITE_REG(chip, CARD_PULL_CTL1, 0xFF, 0x55);
			RTSX_WRITE_REG(chip, CARD_PULL_CTL2, 0xFF, 0x55);
			RTSX_WRITE_REG(chip, CARD_PULL_CTL3, 0xFF, 0x4B);
			RTSX_WRITE_REG(chip, CARD_PULL_CTL4, 0xFF, 0x69);
		}
	}

	return STATUS_SUCCESS;
}

/*
 * Configure the MS pad pulls for active use (CLK/BS no-pull on 0x5208,
 * package-specific values elsewhere), queued as one command batch.
 */
static int ms_pull_ctl_enable(struct rtsx_chip *chip)
{
	int retval;

	rtsx_init_cmd(chip);

	if (CHECK_PID(chip, 0x5209)) {
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF, 0x55);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF, 0x55);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF, 0x15);
	} else if (CHECK_PID(chip, 0x5208)) {
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL1, 0xFF,
			MS_D1_PD | MS_D2_PD | MS_CLK_NP | MS_D6_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL2, 0xFF,
			MS_D3_PD | MS_D0_PD | MS_BS_NP | XD_D4_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL3, 0xFF,
			MS_D7_PD | XD_CE_PD | XD_CLE_PD | XD_CD_PU);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL4, 0xFF,
			XD_RDY_PD | SD_D3_PD | SD_D2_PD | XD_ALE_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL5, 0xFF,
			MS_INS_PU | SD_WP_PD | SD_CD_PU | SD_CMD_PD);
		rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_PULL_CTL6, 0xFF,
			MS_D5_PD | MS_D4_PD);
	} else if (CHECK_PID(chip, 0x5288)) {
		if (CHECK_BARO_PKG(chip, QFN)) {
			rtsx_add_cmd(chip, WRITE_REG_CMD,
				CARD_PULL_CTL1, 0xFF, 0x55);
			rtsx_add_cmd(chip, WRITE_REG_CMD,
				CARD_PULL_CTL2, 0xFF, 0x45);
			rtsx_add_cmd(chip, WRITE_REG_CMD,
				CARD_PULL_CTL3, 0xFF, 0x4B);
			rtsx_add_cmd(chip, WRITE_REG_CMD,
				CARD_PULL_CTL4, 0xFF, 0x29);
		}
	}

	retval = rtsx_send_cmd(chip, MS_CARD, 100);
	if (retval < 0) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

/*
 * Power-cycle and re-initialize the MS slot before a reset sequence:
 * clear cached card state, power off, re-enable clock and pulls, power on
 * (with over-current check when SUPPORT_OCP), enable output and program
 * the serial 1-bit startup bus configuration.
 */
static int ms_prepare_reset(struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval;
	u8 oc_mask = 0;

	ms_card->ms_type = 0;
	ms_card->check_ms_flow = 0;
	ms_card->switch_8bit_fail = 0;
	ms_card->delay_write.delay_write_flag = 0;

	ms_card->pro_under_formatting = 0;

	retval = ms_power_off_card3v3(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (!chip->ft2_fast_mode)
		wait_timeout(250);

	retval = enable_card_clock(chip, MS_CARD);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (chip->asic_code) {
		retval = ms_pull_ctl_enable(chip);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	} else {
		RTSX_WRITE_REG(chip, FPGA_PULL_CTL,
			FPGA_MS_PULL_CTL_BIT | 0x20, 0);
	}

	if (!chip->ft2_fast_mode) {
		retval = card_power_on(chip, MS_CARD);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		wait_timeout(150);

#ifdef SUPPORT_OCP
		if (CHECK_LUN_MODE(chip, SD_MS_2LUN)) {
			oc_mask = MS_OC_NOW | MS_OC_EVER;
		} else {
			oc_mask = SD_OC_NOW | SD_OC_EVER;
		}
		if (chip->ocp_stat & oc_mask) {
			RTSX_DEBUGP("Over current, OCPSTAT is 0x%x\n",
				chip->ocp_stat);
			TRACE_RET(chip, STATUS_FAIL);
		}
#endif
	}

	RTSX_WRITE_REG(chip, CARD_OE, MS_OUTPUT_EN, MS_OUTPUT_EN);

	if (chip->asic_code) {
		RTSX_WRITE_REG(chip, MS_CFG, 0xFF,
			SAMPLE_TIME_RISING | PUSH_TIME_DEFAULT |
			NO_EXTEND_TOGGLE | MS_BUS_WIDTH_1);
	} else {
		RTSX_WRITE_REG(chip, MS_CFG, 0xFF,
			SAMPLE_TIME_FALLING | PUSH_TIME_DEFAULT |
			NO_EXTEND_TOGGLE | MS_BUS_WIDTH_1);
	}
	RTSX_WRITE_REG(chip, MS_TRANS_CFG, 0xFF,
		NO_WAIT_INT | NO_AUTO_READ_INT_REG);
	RTSX_WRITE_REG(chip, CARD_STOP, MS_STOP | MS_CLR_ERR,
		MS_STOP | MS_CLR_ERR);

	retval = ms_set_init_para(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

/*
 * Read the card's identification registers and classify the media:
 * checks the Type/Category/Class registers, derives write protection,
 * marks the card as MS-PRO and (optionally) HG per the IF Mode register.
 */
static int ms_identify_media_type(struct rtsx_chip *chip, int switch_8bit_bus)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int
retval, i;
	u8 val;

	retval = ms_set_rw_reg_addr(chip, Pro_StatusReg, 6, SystemParm, 1);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES,
			READ_REG, 6, NO_WAIT_INT);
		if (retval == STATUS_SUCCESS) {
			break;
		}
	}
	if (i == MS_MAX_RETRY_COUNT) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	RTSX_READ_REG(chip, PPBUF_BASE2 + 2, &val);
	RTSX_DEBUGP("Type register: 0x%x\n", val);
	if (val != 0x01) {
		if (val != 0x02) {
			/* Not a PRO type: fall back to legacy MS flow. */
			ms_card->check_ms_flow = 1;
		}
		TRACE_RET(chip, STATUS_FAIL);
	}

	RTSX_READ_REG(chip, PPBUF_BASE2 + 4, &val);
	RTSX_DEBUGP("Category register: 0x%x\n", val);
	if (val != 0) {
		ms_card->check_ms_flow = 1;
		TRACE_RET(chip, STATUS_FAIL);
	}

	RTSX_READ_REG(chip, PPBUF_BASE2 + 5, &val);
	RTSX_DEBUGP("Class register: 0x%x\n", val);
	if (val == 0) {
		RTSX_READ_REG(chip, PPBUF_BASE2, &val);
		if (val & WRT_PRTCT) {
			chip->card_wp |= MS_CARD;
		} else {
			chip->card_wp &= ~MS_CARD;
		}
	} else if ((val == 0x01) || (val == 0x02) || (val == 0x03)) {
		/* Read-only class codes: force write protect. */
		chip->card_wp |= MS_CARD;
	} else {
		ms_card->check_ms_flow = 1;
		TRACE_RET(chip, STATUS_FAIL);
	}

	ms_card->ms_type |= TYPE_MSPRO;

	RTSX_READ_REG(chip, PPBUF_BASE2 + 3, &val);
	RTSX_DEBUGP("IF Mode register: 0x%x\n", val);
	if (val == 0) {
		ms_card->ms_type &= 0x0F;
	} else if (val == 7) {
		if (switch_8bit_bus) {
			ms_card->ms_type |= MS_HG;
		} else {
			ms_card->ms_type &= 0x0F;
		}
	} else {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

/*
 * Wait (polling GET_INT, up to ~100 iterations of 100 ms) until the card
 * CPU reports startup complete (INT_REG_CED), then check the final INT
 * state; a CMDNK+ERR combination marks the card write-protected.
 */
static int ms_confirm_cpu_startup(struct rtsx_chip *chip)
{
	int retval, i, k;
	u8 val;

	/* Confirm CPU StartUp */
	k = 0;
	do {
		if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
			ms_set_err_code(chip, MS_NO_CARD);
			TRACE_RET(chip, STATUS_FAIL);
		}

		for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
			retval = ms_read_bytes(chip, GET_INT, 1,
				NO_WAIT_INT, &val, 1);
			if (retval == STATUS_SUCCESS) {
				break;
			}
		}
		if (i == MS_MAX_RETRY_COUNT) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		if (k > 100) {
			TRACE_RET(chip, STATUS_FAIL);
		}
		k++;
		wait_timeout(100);
	} while (!(val & INT_REG_CED));

	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
		if (retval == STATUS_SUCCESS)
			break;
	}
	if (i == MS_MAX_RETRY_COUNT) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (val & INT_REG_ERR) {
		if (val & INT_REG_CMDNK) {
			chip->card_wp |= (MS_CARD);
		} else {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}
	/* -- end confirm CPU startup */

	return STATUS_SUCCESS;
}

/*
 * Ask the card to switch its interface to 4-bit parallel mode
 * (WRITE_REG of PARALLEL_4BIT_IF), with retries.
 */
static int ms_switch_parallel_bus(struct rtsx_chip *chip)
{
	int retval, i;
	u8 data[2];

	data[0] = PARALLEL_4BIT_IF;
	data[1] = 0;
	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_write_bytes(chip, WRITE_REG, 1,
			NO_WAIT_INT, data, 2);
		if (retval == STATUS_SUCCESS)
			break;
	}
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

/*
 * Switch card and controller to the 8-bit HG bus: tell the card
 * (PARALLEL_8BIT_IF), reprogram MS_CFG, re-apply clock settings, and
 * verify the link with GET_INT reads.
 */
static int ms_switch_8bit_bus(struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval, i;
	u8 data[2];

	data[0] = PARALLEL_8BIT_IF;
	data[1] = 0;
	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_write_bytes(chip, WRITE_REG, 1,
			NO_WAIT_INT, data, 2);
		if (retval == STATUS_SUCCESS) {
			break;
		}
	}
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	RTSX_WRITE_REG(chip, MS_CFG, 0x98, MS_BUS_WIDTH_8 | SAMPLE_TIME_FALLING);
	ms_card->ms_type |= MS_8BIT;

	retval = ms_set_init_para(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* Probe the new bus width: any failed GET_INT aborts immediately. */
	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES,
			GET_INT, 1, NO_WAIT_INT);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	return STATUS_SUCCESS;
}

/*
 * Full MS-PRO reset sequence: up to 3 attempts of prepare/identify/
 * CPU-startup/parallel-switch, then move to 4-bit parallel mode and,
 * for HG cards (when enabled), attempt the 8-bit bus upgrade.
 */
static int ms_pro_reset_flow(struct rtsx_chip *chip, int switch_8bit_bus)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval, i;

	for (i = 0; i < 3; i++) {
		retval = ms_prepare_reset(chip);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		retval = ms_identify_media_type(chip, switch_8bit_bus);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		retval = ms_confirm_cpu_startup(chip);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		retval = ms_switch_parallel_bus(chip);
		if (retval != STATUS_SUCCESS) {
			if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
				ms_set_err_code(chip, MS_NO_CARD);
				TRACE_RET(chip, STATUS_FAIL);
			}
			continue;
		} else {
			break;
		}
	}

	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* Switch MS-PRO into Parallel mode */
	RTSX_WRITE_REG(chip, MS_CFG, 0x18, MS_BUS_WIDTH_4);
	RTSX_WRITE_REG(chip, MS_CFG, PUSH_TIME_ODD, PUSH_TIME_ODD);

	retval = ms_set_init_para(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* If MSPro HG Card, We shall try to switch to 8-bit bus */
	if (CHK_MSHG(ms_card) && chip->support_ms_8bit && switch_8bit_bus) {
		retval = ms_switch_8bit_bus(chip);
		if (retval != STATUS_SUCCESS) {
			ms_card->switch_8bit_fail = 1;
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	return STATUS_SUCCESS;
}

#ifdef XC_POWERCLASS
/*
 * Request a power-class change on an MS-XC card: write the mode into
 * the Pro_DataCount window, issue XC_CHG_POWER, and check MS_TRANS_CFG
 * for CMDNK/ERR.
 */
static int msxc_change_power(struct rtsx_chip *chip, u8 mode)
{
	int retval;
	u8 buf[6];

	ms_cleanup_work(chip);

	retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_DataCount1, 6);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	buf[0] = 0;
	buf[1] = mode;
	buf[2] = 0;
	buf[3] = 0;
	buf[4] = 0;
	buf[5] = 0;

	retval = ms_write_bytes(chip, PRO_WRITE_REG, 6, NO_WAIT_INT, buf, 6);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = ms_send_cmd(chip, XC_CHG_POWER, WAIT_INT);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	RTSX_READ_REG(chip, MS_TRANS_CFG, buf);
	if (buf[0] & (MS_INT_CMDNK | MS_INT_ERR)) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}
#endif

/*
 * Read and parse the MS-PRO attribute area (PRO_READ_ATRB): locate the
 * system-information entry (and, with SUPPORT_PCGL_1P18, the model-name
 * entry), validate sizes/addresses, extract class/device/sub-class and
 * capacity, and cache raw blobs in ms_card.
 */
static int ms_read_attribute_info(struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval, i;
	u8 val, *buf, class_code, device_type, sub_class, data[16];
	u16 total_blk = 0, blk_size = 0;
#ifdef SUPPORT_MSXC
	u32 xc_total_blk = 0, xc_blk_size = 0;
#endif
	u32 sys_info_addr = 0, sys_info_size;
#ifdef SUPPORT_PCGL_1P18
	u32 model_name_addr = 0, model_name_size;
	int
found_sys_info = 0, found_model_name = 0;
#endif

	retval = ms_set_rw_reg_addr(chip, Pro_IntReg, 2, Pro_SystemParm, 7);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (CHK_MS8BIT(ms_card)) {
		data[0] = PARALLEL_8BIT_IF;
	} else {
		data[0] = PARALLEL_4BIT_IF;
	}
	data[1] = 0;
	data[2] = 0x40;
	data[3] = 0;
	data[4] = 0;
	data[5] = 0;
	data[6] = 0;
	data[7] = 0;

	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_write_bytes(chip, PRO_WRITE_REG, 7,
			NO_WAIT_INT, data, 8);
		if (retval == STATUS_SUCCESS) {
			break;
		}
	}
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* 64 sectors * 512 bytes: full attribute-area read buffer. */
	buf = kmalloc(64 * 512, GFP_KERNEL);
	if (buf == NULL) {
		TRACE_RET(chip, STATUS_ERROR);
	}

	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_send_cmd(chip, PRO_READ_ATRB, WAIT_INT);
		if (retval != STATUS_SUCCESS) {
			continue;
		}

		retval = rtsx_read_register(chip, MS_TRANS_CFG, &val);
		if (retval != STATUS_SUCCESS) {
			kfree(buf);
			TRACE_RET(chip, STATUS_FAIL);
		}
		if (!(val & MS_INT_BREQ)) {
			kfree(buf);
			TRACE_RET(chip, STATUS_FAIL);
		}
		retval = ms_transfer_data(chip, MS_TM_AUTO_READ,
			PRO_READ_LONG_DATA, 0x40, WAIT_INT, 0, 0,
			buf, 64 * 512);
		if (retval == STATUS_SUCCESS) {
			break;
		} else {
			rtsx_clear_ms_error(chip);
		}
	}
	if (retval != STATUS_SUCCESS) {
		kfree(buf);
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* Drain any extra BREQ data until the card reports CED. */
	i = 0;
	do {
		retval = rtsx_read_register(chip, MS_TRANS_CFG, &val);
		if (retval != STATUS_SUCCESS) {
			kfree(buf);
			TRACE_RET(chip, STATUS_FAIL);
		}

		if ((val & MS_INT_CED) || !(val & MS_INT_BREQ))
			break;

		retval = ms_transfer_tpc(chip, MS_TM_NORMAL_READ,
			PRO_READ_LONG_DATA, 0, WAIT_INT);
		if (retval != STATUS_SUCCESS) {
			kfree(buf);
			TRACE_RET(chip, STATUS_FAIL);
		}

		i++;
	} while (i < 1024);
	if (retval != STATUS_SUCCESS) {
		kfree(buf);
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* NOTE(review): with '&&' this only fails when BOTH signature
	 * bytes mismatch; '||' would be the stricter check — verify
	 * against the MS-PRO attribute-area specification. */
	if ((buf[0] != 0xa5) && (buf[1] != 0xc3)) {
		/* Signature code is wrong */
		kfree(buf);
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* buf[4] is the device-entry count; 1..12 are valid. */
	if ((buf[4] < 1) || (buf[4] > 12)) {
		kfree(buf);
		TRACE_RET(chip, STATUS_FAIL);
	}

	for (i = 0; i < buf[4]; i++) {
		int cur_addr_off = 16 + i * 12;

#ifdef SUPPORT_MSXC
		if ((buf[cur_addr_off + 8] == 0x10) ||
				(buf[cur_addr_off + 8] == 0x13))
#else
		if (buf[cur_addr_off + 8] == 0x10)
#endif
		{
			/* System-information entry: big-endian addr/size. */
			sys_info_addr = ((u32)buf[cur_addr_off + 0] << 24) |
				((u32)buf[cur_addr_off + 1] << 16) |
				((u32)buf[cur_addr_off + 2] << 8) |
				buf[cur_addr_off + 3];
			sys_info_size = ((u32)buf[cur_addr_off + 4] << 24) |
				((u32)buf[cur_addr_off + 5] << 16) |
				((u32)buf[cur_addr_off + 6] << 8) |
				buf[cur_addr_off + 7];
			RTSX_DEBUGP("sys_info_addr = 0x%x, sys_info_size = 0x%x\n",
				sys_info_addr, sys_info_size);
			if (sys_info_size != 96) {
				kfree(buf);
				TRACE_RET(chip, STATUS_FAIL);
			}
			if (sys_info_addr < 0x1A0) {
				kfree(buf);
				TRACE_RET(chip, STATUS_FAIL);
			}
			if ((sys_info_size + sys_info_addr) > 0x8000) {
				kfree(buf);
				TRACE_RET(chip, STATUS_FAIL);
			}

#ifdef SUPPORT_MSXC
			if (buf[cur_addr_off + 8] == 0x13) {
				ms_card->ms_type |= MS_XC;
			}
#endif
#ifdef SUPPORT_PCGL_1P18
			found_sys_info = 1;
#else
			break;
#endif
		}
#ifdef SUPPORT_PCGL_1P18
		if (buf[cur_addr_off + 8] == 0x15) {
			/* Model-name entry. */
			model_name_addr = ((u32)buf[cur_addr_off + 0] << 24) |
				((u32)buf[cur_addr_off + 1] << 16) |
				((u32)buf[cur_addr_off + 2] << 8) |
				buf[cur_addr_off + 3];
			model_name_size = ((u32)buf[cur_addr_off + 4] << 24) |
				((u32)buf[cur_addr_off + 5] << 16) |
				((u32)buf[cur_addr_off + 6] << 8) |
				buf[cur_addr_off + 7];
			RTSX_DEBUGP("model_name_addr = 0x%x, model_name_size = 0x%x\n",
				model_name_addr, model_name_size);
			if (model_name_size != 48) {
				kfree(buf);
				TRACE_RET(chip, STATUS_FAIL);
			}
			if (model_name_addr < 0x1A0) {
				kfree(buf);
				TRACE_RET(chip, STATUS_FAIL);
			}
			if ((model_name_size + model_name_addr) > 0x8000) {
				kfree(buf);
				TRACE_RET(chip, STATUS_FAIL);
			}

			found_model_name = 1;
		}

		if (found_sys_info && found_model_name)
			break;
#endif
	}

	if (i == buf[4]) {
		kfree(buf);
		TRACE_RET(chip, STATUS_FAIL);
	}

	class_code = buf[sys_info_addr + 0];
	device_type = buf[sys_info_addr + 56];
	sub_class = buf[sys_info_addr + 46];
#ifdef SUPPORT_MSXC
	if (CHK_MSXC(ms_card)) {
		xc_total_blk = ((u32)buf[sys_info_addr + 6] << 24) |
			((u32)buf[sys_info_addr + 7] << 16) |
			((u32)buf[sys_info_addr + 8] << 8) |
			buf[sys_info_addr + 9];
		xc_blk_size = ((u32)buf[sys_info_addr + 32] << 24) |
			((u32)buf[sys_info_addr + 33] << 16) |
			((u32)buf[sys_info_addr + 34] << 8) |
			buf[sys_info_addr + 35];
		RTSX_DEBUGP("xc_total_blk = 0x%x, xc_blk_size = 0x%x\n",
			xc_total_blk, xc_blk_size);
	} else {
		total_blk = ((u16)buf[sys_info_addr + 6] << 8) |
			buf[sys_info_addr + 7];
		blk_size = ((u16)buf[sys_info_addr + 2] << 8) |
			buf[sys_info_addr + 3];
		RTSX_DEBUGP("total_blk = 0x%x, blk_size = 0x%x\n",
			total_blk, blk_size);
	}
#else
	total_blk = ((u16)buf[sys_info_addr + 6] << 8) |
		buf[sys_info_addr + 7];
	blk_size = ((u16)buf[sys_info_addr + 2] << 8) |
		buf[sys_info_addr + 3];
	RTSX_DEBUGP("total_blk = 0x%x, blk_size = 0x%x\n",
		total_blk, blk_size);
#endif

	RTSX_DEBUGP("class_code = 0x%x, device_type = 0x%x, sub_class = 0x%x\n",
		class_code, device_type, sub_class);

	memcpy(ms_card->raw_sys_info, buf + sys_info_addr, 96);
#ifdef SUPPORT_PCGL_1P18
	memcpy(ms_card->raw_model_name, buf + model_name_addr, 48);
#endif

	kfree(buf);

#ifdef SUPPORT_MSXC
	if (CHK_MSXC(ms_card)) {
		if (class_code != 0x03) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	} else {
		if (class_code != 0x02) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}
#else
	if (class_code != 0x02) {
		TRACE_RET(chip, STATUS_FAIL);
	}
#endif

	if (device_type != 0x00) {
		if ((device_type == 0x01) || (device_type == 0x02) ||
				(device_type == 0x03)) {
			chip->card_wp |= MS_CARD;
		} else {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	if (sub_class & 0xC0) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	RTSX_DEBUGP("class_code: 0x%x, device_type: 0x%x, sub_class: 0x%x\n",
		class_code, device_type, sub_class);

#ifdef SUPPORT_MSXC
	if (CHK_MSXC(ms_card)) {
		chip->capacity[chip->card2lun[MS_CARD]] = ms_card->capacity =
			xc_total_blk * xc_blk_size;
	} else {
		chip->capacity[chip->card2lun[MS_CARD]] = ms_card->capacity =
			total_blk * blk_size;
	}
#else
	chip->capacity[chip->card2lun[MS_CARD]] = ms_card->capacity =
		total_blk * blk_size;
#endif

	return STATUS_SUCCESS;
}

#ifdef SUPPORT_MAGIC_GATE
static int mg_set_tpc_para_sub(struct rtsx_chip *chip,
	int type, u8 mg_entry_num);
#endif

/*
 * Top-level MS-PRO reset: run the reset flow (retrying without 8-bit on
 * switch failure), read the attribute area, optionally negotiate an
 * MS-XC power class (XC_POWERCLASS), set up MagicGate TPC parameters
 * when enabled, and record the resulting bus width.
 */
static int reset_ms_pro(struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval;
#ifdef XC_POWERCLASS
	u8 change_power_class;

	if (chip->ms_power_class_en & 0x02)
		change_power_class = 2;
	else if (chip->ms_power_class_en & 0x01)
		change_power_class = 1;
	else
		change_power_class = 0;
#endif

#ifdef XC_POWERCLASS
Retry:
#endif
	retval = ms_pro_reset_flow(chip, 1);
	if (retval != STATUS_SUCCESS) {
		if (ms_card->switch_8bit_fail) {
			/* 8-bit switch failed: retry in 4-bit only mode. */
			retval = ms_pro_reset_flow(chip, 0);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, STATUS_FAIL);
			}
		} else {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	retval = ms_read_attribute_info(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

#ifdef XC_POWERCLASS
	if (CHK_HG8BIT(ms_card)) {
		change_power_class = 0;
	}

	if (change_power_class && CHK_MSXC(ms_card)) {
		u8 power_class_en = chip->ms_power_class_en;

		RTSX_DEBUGP("power_class_en = 0x%x\n", power_class_en);
		RTSX_DEBUGP("change_power_class = %d\n", change_power_class);

		if (change_power_class) {
			power_class_en &= (1 << (change_power_class - 1));
		} else {
			power_class_en = 0;
		}

		if (power_class_en) {
			u8 power_class_mode =
				(ms_card->raw_sys_info[46] & 0x18) >> 3;
			RTSX_DEBUGP("power_class_mode = 0x%x",
				power_class_mode);
			if (change_power_class > power_class_mode)
				change_power_class = power_class_mode;
			if (change_power_class) {
				retval = msxc_change_power(chip,
					change_power_class);
				if (retval != STATUS_SUCCESS) {
					/* Step down and redo the reset. */
					change_power_class--;
					goto Retry;
				}
			}
		}
	}
#endif

#ifdef SUPPORT_MAGIC_GATE
	retval = mg_set_tpc_para_sub(chip, 0, 0);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}
#endif

	if (CHK_HG8BIT(ms_card)) {
		chip->card_bus_width[chip->card2lun[MS_CARD]] = 8;
	} else {
		chip->card_bus_width[chip->card2lun[MS_CARD]] = 4;
	}

	return STATUS_SUCCESS;
}

/*
 * Read StatusReg0/1 from a legacy MS card and fail on uncorrectable
 * data/extra/flag errors (STS_UCDT/UCEX/UCFG).
 */
static int ms_read_status_reg(struct rtsx_chip *chip)
{
	int retval;
	u8 val[2];

	retval =
ms_set_rw_reg_addr(chip, StatusReg0, 2, 0, 0);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = ms_read_bytes(chip, READ_REG, 2, NO_WAIT_INT, val, 2);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (val[1] & (STS_UCDT | STS_UCEX | STS_UCFG)) {
		ms_set_err_code(chip, MS_FLASH_READ_ERROR);
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

/*
 * Read the extra (overwrite-flag/management) data of one page on a
 * legacy MS card via BLOCK_READ, copying up to MS_EXTRA_SIZE bytes into
 * buf when provided.
 */
static int ms_read_extra_data(struct rtsx_chip *chip,
	u16 block_addr, u8 page_num, u8 *buf, int buf_len)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval, i;
	u8 val, data[10];

	retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
		MS_EXTRA_SIZE, SystemParm, 6);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (CHK_MS4BIT(ms_card)) {
		/* Parallel interface */
		data[0] = 0x88;
	} else {
		/* Serial interface */
		data[0] = 0x80;
	}
	data[1] = 0;
	data[2] = (u8)(block_addr >> 8);
	data[3] = (u8)block_addr;
	data[4] = 0x40;		/* extra-data access mode */
	data[5] = page_num;

	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_write_bytes(chip, WRITE_REG, 6,
			NO_WAIT_INT, data, 6);
		if (retval == STATUS_SUCCESS)
			break;
	}
	if (i == MS_MAX_RETRY_COUNT) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	ms_set_err_code(chip, MS_NO_ERROR);

	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_send_cmd(chip, BLOCK_READ, WAIT_INT);
		if (retval == STATUS_SUCCESS)
			break;
	}
	if (i == MS_MAX_RETRY_COUNT) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	ms_set_err_code(chip, MS_NO_ERROR);
	retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (val & INT_REG_CMDNK) {
		ms_set_err_code(chip, MS_CMD_NK);
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (val & INT_REG_CED) {
		if (val & INT_REG_ERR) {
			/* Correctable path: re-check status and restore
			 * the register window before reading. */
			retval = ms_read_status_reg(chip);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, STATUS_FAIL);
			}

			retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
				MS_EXTRA_SIZE, SystemParm, 6);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, STATUS_FAIL);
			}
		}
	}

	retval = ms_read_bytes(chip, READ_REG, MS_EXTRA_SIZE,
		NO_WAIT_INT, data, MS_EXTRA_SIZE);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (buf && buf_len) {
		if (buf_len > MS_EXTRA_SIZE)
			buf_len = MS_EXTRA_SIZE;
		memcpy(buf, data, buf_len);
	}

	return STATUS_SUCCESS;
}

/*
 * Write MS_EXTRA_SIZE bytes of extra data for one page on a legacy MS
 * card via BLOCK_WRITE. buf must hold at least MS_EXTRA_SIZE bytes.
 */
static int ms_write_extra_data(struct rtsx_chip *chip,
	u16 block_addr, u8 page_num, u8 *buf, int buf_len)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval, i;
	u8 val, data[16];

	if (!buf || (buf_len < MS_EXTRA_SIZE)) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
		MS_EXTRA_SIZE, SystemParm, 6 + MS_EXTRA_SIZE);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (CHK_MS4BIT(ms_card)) {
		data[0] = 0x88;
	} else {
		data[0] = 0x80;
	}
	data[1] = 0;
	data[2] = (u8)(block_addr >> 8);
	data[3] = (u8)block_addr;
	data[4] = 0x40;
	data[5] = page_num;
	for (i = 6; i < MS_EXTRA_SIZE + 6; i++) {
		data[i] = buf[i - 6];
	}

	retval = ms_write_bytes(chip, WRITE_REG, (6+MS_EXTRA_SIZE),
		NO_WAIT_INT, data, 16);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	ms_set_err_code(chip, MS_NO_ERROR);
	retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (val & INT_REG_CMDNK) {
		ms_set_err_code(chip, MS_CMD_NK);
		TRACE_RET(chip, STATUS_FAIL);
	}
	if (val & INT_REG_CED) {
		if (val & INT_REG_ERR) {
			ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	return STATUS_SUCCESS;
}

/*
 * Read one page of user data from a legacy MS card: set up BLOCK_READ
 * (mode 0x20 = page data), interpret the INT flags (BREQ must be set
 * before data can be fetched), then pull the page via READ_PAGE_DATA.
 */
static int ms_read_page(struct rtsx_chip *chip, u16 block_addr, u8 page_num)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval;
	u8 val, data[6];

	retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
		MS_EXTRA_SIZE, SystemParm, 6);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (CHK_MS4BIT(ms_card)) {
		data[0] = 0x88;
	} else {
		data[0] = 0x80;
	}
	data[1] = 0;
	data[2] = (u8)(block_addr >> 8);
	data[3] = (u8)block_addr;
	data[4] = 0x20;		/* page-data access mode */
	data[5] = page_num;

	retval = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT, data, 6);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = ms_send_cmd(chip, BLOCK_READ, WAIT_INT);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	ms_set_err_code(chip, MS_NO_ERROR);
	retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (val & INT_REG_CMDNK) {
		ms_set_err_code(chip, MS_CMD_NK);
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (val & INT_REG_CED) {
		if (val & INT_REG_ERR) {
			if (!(val & INT_REG_BREQ)) {
				ms_set_err_code(chip, MS_FLASH_READ_ERROR);
				TRACE_RET(chip, STATUS_FAIL);
			}
			retval = ms_read_status_reg(chip);
			if (retval != STATUS_SUCCESS) {
				/* Flag the error but still fetch the page;
				 * checked again after the transfer. */
				ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
			}
		} else {
			if (!(val & INT_REG_BREQ)) {
				ms_set_err_code(chip, MS_BREQ_ERROR);
				TRACE_RET(chip, STATUS_FAIL);
			}
		}
	}

	retval = ms_transfer_tpc(chip, MS_TM_NORMAL_READ,
		READ_PAGE_DATA, 0, NO_WAIT_INT);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (ms_check_err_code(chip, MS_FLASH_WRITE_ERROR)) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

/*
 * Mark a physical block as bad: re-read its extra data, then rewrite
 * the overwrite flag with the block-status bit cleared (extra[0] & 0x7F)
 * using BLOCK_WRITE in overwrite mode (0x80).
 */
static int ms_set_bad_block(struct rtsx_chip *chip, u16 phy_blk)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval;
	u8 val, data[8], extra[MS_EXTRA_SIZE];

	retval = ms_read_extra_data(chip, phy_blk, 0, extra, MS_EXTRA_SIZE);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
		MS_EXTRA_SIZE, SystemParm, 7);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	ms_set_err_code(chip, MS_NO_ERROR);

	if (CHK_MS4BIT(ms_card)) {
		data[0] = 0x88;
	} else {
		data[0] = 0x80;
	}
	data[1] = 0;
	data[2] = (u8)(phy_blk >> 8);
	data[3] = (u8)phy_blk;
	data[4] = 0x80;		/* overwrite-flag access mode */
	data[5] = 0;
	data[6] = extra[0] & 0x7F;	/* clear block-status (OK) bit */
	data[7] = 0xFF;

	retval = ms_write_bytes(chip, WRITE_REG, 7, NO_WAIT_INT, data, 7);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip,
STATUS_FAIL);
	}

	retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	ms_set_err_code(chip, MS_NO_ERROR);
	retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (val & INT_REG_CMDNK) {
		ms_set_err_code(chip, MS_CMD_NK);
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (val & INT_REG_CED) {
		if (val & INT_REG_ERR) {
			ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	return STATUS_SUCCESS;
}

/*
 * Erase a physical block (BLOCK_ERASE), retrying up to 3 times on CMDNK;
 * if the card still refuses, mark the block bad via ms_set_bad_block().
 */
static int ms_erase_block(struct rtsx_chip *chip, u16 phy_blk)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval, i = 0;
	u8 val, data[6];

	retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
		MS_EXTRA_SIZE, SystemParm, 6);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	ms_set_err_code(chip, MS_NO_ERROR);

	if (CHK_MS4BIT(ms_card)) {
		data[0] = 0x88;
	} else {
		data[0] = 0x80;
	}
	data[1] = 0;
	data[2] = (u8)(phy_blk >> 8);
	data[3] = (u8)phy_blk;
	data[4] = 0;
	data[5] = 0;

	retval = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT, data, 6);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

ERASE_RTY:
	retval = ms_send_cmd(chip, BLOCK_ERASE, WAIT_INT);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	ms_set_err_code(chip, MS_NO_ERROR);
	retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (val & INT_REG_CMDNK) {
		if (i < 3) {
			i++;
			goto ERASE_RTY;
		}

		ms_set_err_code(chip, MS_CMD_NK);
		ms_set_bad_block(chip, phy_blk);
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (val & INT_REG_CED) {
		if (val & INT_REG_ERR) {
			ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	return STATUS_SUCCESS;
}

/*
 * Build an extra-data image that flags the page as NG (0xB8) or as a
 * data error (0x98), keeping block status OK, and embeds the logical
 * block number. No-op when extra is NULL or too small.
 */
static void ms_set_page_status(u16 log_blk, u8 type, u8 *extra, int extra_len)
{
	if (!extra || (extra_len < MS_EXTRA_SIZE)) {
		return;
	}

	memset(extra, 0xFF, MS_EXTRA_SIZE);

	if (type == setPS_NG) {
		/* set page status as 1:NG, and block status keep 1:OK */
		extra[0] = 0xB8;
	} else {
		/* set page status as 0:Data Error, and block status
		 * keep 1:OK */
		extra[0] = 0x98;
	}

	extra[2] = (u8)(log_blk >> 8);
	extra[3] = (u8)log_blk;
}

/*
 * Initialize the extra data of pages [start_page, end_page) in phy_blk
 * to "block/page OK, data erased" (0xf8) with the logical block number,
 * aborting if the card is removed mid-loop.
 */
static int ms_init_page(struct rtsx_chip *chip, u16 phy_blk, u16 log_blk,
	u8 start_page, u8 end_page)
{
	int retval;
	u8 extra[MS_EXTRA_SIZE], i;

	memset(extra, 0xff, MS_EXTRA_SIZE);

	extra[0] = 0xf8;	/* Block, page OK, data erased */
	extra[1] = 0xff;
	extra[2] = (u8)(log_blk >> 8);
	extra[3] = (u8)log_blk;

	for (i = start_page; i < end_page; i++) {
		if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
			ms_set_err_code(chip, MS_NO_CARD);
			TRACE_RET(chip, STATUS_FAIL);
		}

		retval = ms_write_extra_data(chip, phy_blk, i,
			extra, MS_EXTRA_SIZE);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	return STATUS_SUCCESS;
}

/*
 * Copy pages [start_page, end_page) of logical block log_blk from
 * old_blk to new_blk, page by page, propagating page-status/extra data
 * and marking uncorrectable source pages bad along the way.
 */
static int ms_copy_page(struct rtsx_chip *chip, u16 old_blk, u16 new_blk,
	u16 log_blk, u8 start_page, u8 end_page)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval, rty_cnt, uncorrect_flag = 0;
	u8 extra[MS_EXTRA_SIZE], val, i, j, data[16];

	RTSX_DEBUGP("Copy page from 0x%x to 0x%x, logical block is 0x%x\n",
		old_blk, new_blk, log_blk);
	RTSX_DEBUGP("start_page = %d, end_page = %d\n",
		start_page, end_page);

	retval = ms_read_extra_data(chip, new_blk, 0, extra, MS_EXTRA_SIZE);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = ms_read_status_reg(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	RTSX_READ_REG(chip, PPBUF_BASE2, &val);

	if (val & BUF_FULL) {
		/* Page buffer left full by a prior operation: clear it. */
		retval = ms_send_cmd(chip, CLEAR_BUF, WAIT_INT);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		retval = ms_read_bytes(chip, GET_INT, 1,
			NO_WAIT_INT, &val, 1);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		if (!(val & INT_REG_CED)) {
			ms_set_err_code(chip, MS_FLASH_WRITE_ERROR);
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	for (i = start_page; i < end_page; i++) {
		if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
			ms_set_err_code(chip, MS_NO_CARD);
			TRACE_RET(chip, STATUS_FAIL);
		}

		ms_read_extra_data(chip, old_blk, i, extra, MS_EXTRA_SIZE);

		retval = ms_set_rw_reg_addr(chip, OverwriteFlag,
			MS_EXTRA_SIZE, SystemParm, 6);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		ms_set_err_code(chip, MS_NO_ERROR);

		if (CHK_MS4BIT(ms_card)) {
			data[0] = 0x88;
		} else {
			data[0] = 0x80;
		}
		data[1] = 0;
		data[2] = (u8)(old_blk >> 8);
		data[3] = (u8)old_blk;
		data[4] = 0x20;
		data[5] = i;

		retval = ms_write_bytes(chip, WRITE_REG, 6,
			NO_WAIT_INT, data, 6);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		retval = ms_send_cmd(chip, BLOCK_READ, WAIT_INT);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		ms_set_err_code(chip, MS_NO_ERROR);
		retval = ms_read_bytes(chip, GET_INT, 1,
			NO_WAIT_INT, &val, 1);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		if (val & INT_REG_CMDNK) {
			ms_set_err_code(chip, MS_CMD_NK);
			TRACE_RET(chip, STATUS_FAIL);
		}

		if (val & INT_REG_CED) {
			if (val & INT_REG_ERR) {
				retval = ms_read_status_reg(chip);
				if (retval != STATUS_SUCCESS) {
					uncorrect_flag = 1;
					RTSX_DEBUGP("Uncorrectable error\n");
				} else {
					uncorrect_flag = 0;
				}

				retval = ms_transfer_tpc(chip,
					MS_TM_NORMAL_READ, READ_PAGE_DATA,
					0, NO_WAIT_INT);
				if (retval != STATUS_SUCCESS) {
					TRACE_RET(chip, STATUS_FAIL);
				}

				if (uncorrect_flag) {
					/* Mark the source page NG and the
					 * destination page as data error,
					 * then skip copying this page. */
					ms_set_page_status(log_blk, setPS_NG,
						extra, MS_EXTRA_SIZE);
					if (i == 0) {
						extra[0] &= 0xEF;
					}
					ms_write_extra_data(chip, old_blk, i,
						extra, MS_EXTRA_SIZE);
					RTSX_DEBUGP("page %d : extra[0] = 0x%x\n",
						i, extra[0]);
					MS_SET_BAD_BLOCK_FLG(ms_card);

					ms_set_page_status(log_blk,
						setPS_Error, extra,
						MS_EXTRA_SIZE);
					ms_write_extra_data(chip, new_blk, i,
						extra, MS_EXTRA_SIZE);
					continue;
				}

				for (rty_cnt = 0; rty_cnt < MS_MAX_RETRY_COUNT;
						rty_cnt++) {
					retval = ms_transfer_tpc(chip,
						MS_TM_NORMAL_WRITE,
						WRITE_PAGE_DATA,
						0, NO_WAIT_INT);
					if (retval == STATUS_SUCCESS) {
						break;
					}
				}
				if (rty_cnt == MS_MAX_RETRY_COUNT) {
					TRACE_RET(chip, STATUS_FAIL);
				}
			}

			if (!(val & INT_REG_BREQ)) {
ms_set_err_code(chip, MS_BREQ_ERROR); TRACE_RET(chip, STATUS_FAIL); } } retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm, (6+MS_EXTRA_SIZE)); ms_set_err_code(chip, MS_NO_ERROR); if (CHK_MS4BIT(ms_card)) { data[0] = 0x88; } else { data[0] = 0x80; } data[1] = 0; data[2] = (u8)(new_blk >> 8); data[3] = (u8)new_blk; data[4] = 0x20; data[5] = i; if ((extra[0] & 0x60) != 0x60) { data[6] = extra[0]; } else { data[6] = 0xF8; } data[6 + 1] = 0xFF; data[6 + 2] = (u8)(log_blk >> 8); data[6 + 3] = (u8)log_blk; for (j = 4; j <= MS_EXTRA_SIZE; j++) { data[6 + j] = 0xFF; } retval = ms_write_bytes(chip, WRITE_REG, (6 + MS_EXTRA_SIZE), NO_WAIT_INT, data, 16); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } ms_set_err_code(chip, MS_NO_ERROR); retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } if (val & INT_REG_CMDNK) { ms_set_err_code(chip, MS_CMD_NK); TRACE_RET(chip, STATUS_FAIL); } if (val & INT_REG_CED) { if (val & INT_REG_ERR) { ms_set_err_code(chip, MS_FLASH_WRITE_ERROR); TRACE_RET(chip, STATUS_FAIL); } } if (i == 0) { retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm, 7); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } ms_set_err_code(chip, MS_NO_ERROR); if (CHK_MS4BIT(ms_card)) { data[0] = 0x88; } else { data[0] = 0x80; } data[1] = 0; data[2] = (u8)(old_blk >> 8); data[3] = (u8)old_blk; data[4] = 0x80; data[5] = 0; data[6] = 0xEF; data[7] = 0xFF; retval = ms_write_bytes(chip, WRITE_REG, 7, NO_WAIT_INT, data, 8); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } ms_set_err_code(chip, MS_NO_ERROR); retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1); if (retval != 
			STATUS_SUCCESS) {
				TRACE_RET(chip, STATUS_FAIL);
			}

			if (val & INT_REG_CMDNK) {
				ms_set_err_code(chip, MS_CMD_NK);
				TRACE_RET(chip, STATUS_FAIL);
			}

			if (val & INT_REG_CED) {
				if (val & INT_REG_ERR) {
					ms_set_err_code(chip,
						MS_FLASH_WRITE_ERROR);
					TRACE_RET(chip, STATUS_FAIL);
				}
			}
		}
	}

	return STATUS_SUCCESS;
}

/*
 * reset_ms - initialise a legacy (non-Pro) Memory Stick.
 *
 * Resets the card, reads the write-protect state, searches the first
 * MAX_DEFECTIVE_BLOCK + 2 physical blocks for the boot block, parses the
 * boot area (block size, block count, capacity) and, if the card supports
 * it, switches the interface to 4-bit parallel mode.
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int reset_ms(struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval;
	u16 i, reg_addr, block_size;
	u8 val, extra[MS_EXTRA_SIZE], j, *ptr;
#ifndef SUPPORT_MAGIC_GATE
	u16 eblock_cnt;
#endif

	retval = ms_prepare_reset(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	ms_card->ms_type |= TYPE_MS;

	retval = ms_send_cmd(chip, MS_RESET, NO_WAIT_INT);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = ms_read_status_reg(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* Status byte left in the ping-pong buffer holds the WP bit */
	RTSX_READ_REG(chip, PPBUF_BASE2, &val);
	if (val & WRT_PRTCT) {
		chip->card_wp |= MS_CARD;
	} else {
		chip->card_wp &= ~MS_CARD;
	}

	i = 0;

RE_SEARCH:
	/* Search Boot Block: a block flagged OK whose extra data says
	 * "boot block" (NOT_BOOT_BLOCK bit clear). */
	while (i < (MAX_DEFECTIVE_BLOCK + 2)) {
		if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
			ms_set_err_code(chip, MS_NO_CARD);
			TRACE_RET(chip, STATUS_FAIL);
		}

		retval = ms_read_extra_data(chip, i, 0, extra, MS_EXTRA_SIZE);
		if (retval != STATUS_SUCCESS) {
			i++;
			continue;
		}

		if (extra[0] & BLOCK_OK) {
			if (!(extra[1] & NOT_BOOT_BLOCK)) {
				ms_card->boot_block = i;
				break;
			}
		}
		i++;
	}

	if (i == (MAX_DEFECTIVE_BLOCK + 2)) {
		RTSX_DEBUGP("No boot block found!");
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* Verify the first 3 pages are readable; on a flash read error
	 * restart the search after this candidate block. */
	for (j = 0; j < 3; j++) {
		retval = ms_read_page(chip, ms_card->boot_block, j);
		if (retval != STATUS_SUCCESS) {
			if (ms_check_err_code(chip, MS_FLASH_WRITE_ERROR)) {
				i = ms_card->boot_block + 1;
				ms_set_err_code(chip, MS_NO_ERROR);
				goto RE_SEARCH;
			}
		}
	}

	retval = ms_read_page(chip, ms_card->boot_block, 0);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	/* Read MS system information as sys_info (96 bytes at offset
	 * 0x1A0 of the ping-pong buffer). */
	rtsx_init_cmd(chip);

	for (i = 0; i < 96; i++) {
		rtsx_add_cmd(chip, READ_REG_CMD, PPBUF_BASE2 + 0x1A0 + i,
			0, 0);
	}

	retval = rtsx_send_cmd(chip, MS_CARD, 100);
	if (retval < 0) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	ptr = rtsx_get_cmd_data(chip);
	memcpy(ms_card->raw_sys_info, ptr, 96);

	/* Read useful block contents */
	rtsx_init_cmd(chip);

	rtsx_add_cmd(chip, READ_REG_CMD, HEADER_ID0, 0, 0);
	rtsx_add_cmd(chip, READ_REG_CMD, HEADER_ID1, 0, 0);

	for (reg_addr = DISABLED_BLOCK0; reg_addr <= DISABLED_BLOCK3;
			reg_addr++) {
		rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
	}

	for (reg_addr = BLOCK_SIZE_0; reg_addr <= PAGE_SIZE_1; reg_addr++) {
		rtsx_add_cmd(chip, READ_REG_CMD, reg_addr, 0, 0);
	}

	rtsx_add_cmd(chip, READ_REG_CMD, MS_Device_Type, 0, 0);
	rtsx_add_cmd(chip, READ_REG_CMD, MS_4bit_Support, 0, 0);

	retval = rtsx_send_cmd(chip, MS_CARD, 100);
	if (retval < 0) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	ptr = rtsx_get_cmd_data(chip);

	RTSX_DEBUGP("Boot block data:\n");
	RTSX_DUMP(ptr, 16);

	/* Block ID error: HEADER_ID0/HEADER_ID1 must read 0x00 0x01 */
	if (ptr[0] != 0x00 || ptr[1] != 0x01) {
		i = ms_card->boot_block + 1;
		goto RE_SEARCH;
	}

	/* Page size error: PAGE_SIZE_0/PAGE_SIZE_1 must read 0x02 0x00 */
	if (ptr[12] != 0x02 || ptr[13] != 0x00) {
		i = ms_card->boot_block + 1;
		goto RE_SEARCH;
	}

	/* Device types 1 and 3 are treated as write protected —
	 * TODO confirm against the MS device-type spec */
	if ((ptr[14] == 1) || (ptr[14] == 3)) {
		chip->card_wp |= MS_CARD;
	}

	/* BLOCK_SIZE_0, BLOCK_SIZE_1 */
	block_size = ((u16)ptr[6] << 8) | ptr[7];
	if (block_size == 0x0010) {
		/* Block size 16KB: 32 pages per block */
		ms_card->block_shift = 5;
		ms_card->page_off = 0x1F;
	} else if (block_size == 0x0008) {
		/* Block size 8KB: 16 pages per block */
		ms_card->block_shift = 4;
		ms_card->page_off = 0x0F;
	}

	/* BLOCK_COUNT_0, BLOCK_COUNT_1 */
	ms_card->total_block = ((u16)ptr[8] << 8) | ptr[9];

#ifdef SUPPORT_MAGIC_GATE
	j = ptr[10];

	if (ms_card->block_shift == 4) {
		/* 4MB or 8MB media */
		if (j < 2) {
			/* Effective blocks for 4MB: 0x1F0 */
			ms_card->capacity = 0x1EE0;
		} else {
			/* Effective blocks for 8MB: 0x3E0 */
			ms_card->capacity = 0x3DE0;
		}
	} else {
		/* 16MB, 32MB, 64MB or 128MB media */
		if (j < 5) {
			/* Effective blocks for 16MB: 0x3E0 */
			ms_card->capacity = 0x7BC0;
		} else if (j < 0xA) {
			/* Effective blocks for 32MB: 0x7C0 */
			ms_card->capacity = 0xF7C0;
		} else if (j < 0x11) {
			/* Effective blocks for 64MB: 0xF80 */
			ms_card->capacity = 0x1EF80;
		} else {
			/* Effective blocks for 128MB: 0x1F00 */
			ms_card->capacity = 0x3DF00;
		}
	}
#else
	/* EBLOCK_COUNT_0, EBLOCK_COUNT_1: capacity in sectors, minus the
	 * two reserved blocks. */
	eblock_cnt = ((u16)ptr[10] << 8) | ptr[11];

	ms_card->capacity = ((u32)eblock_cnt - 2) << ms_card->block_shift;
#endif

	chip->capacity[chip->card2lun[MS_CARD]] = ms_card->capacity;

	/* Switch I/F Mode to 4-bit parallel if MS_4bit_Support says so */
	if (ptr[15]) {
		retval = ms_set_rw_reg_addr(chip, 0, 0, SystemParm, 1);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		RTSX_WRITE_REG(chip, PPBUF_BASE2, 0xFF, 0x88);
		RTSX_WRITE_REG(chip, PPBUF_BASE2 + 1, 0xFF, 0);

		retval = ms_transfer_tpc(chip, MS_TM_WRITE_BYTES, WRITE_REG ,
			1, NO_WAIT_INT);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}

		RTSX_WRITE_REG(chip, MS_CFG, 0x58 | MS_NO_CHECK_INT,
			MS_BUS_WIDTH_4 | PUSH_TIME_ODD | MS_NO_CHECK_INT);

		ms_card->ms_type |= MS_4BIT;
	}

	if (CHK_MS4BIT(ms_card)) {
		chip->card_bus_width[chip->card2lun[MS_CARD]] = 4;
	} else {
		chip->card_bus_width[chip->card2lun[MS_CARD]] = 1;
	}

	return STATUS_SUCCESS;
}

/*
 * ms_init_l2p_tbl - allocate the per-segment logical-to-physical bookkeeping
 * and load the defect list from page 1 of the boot block.
 * (Function continues past this chunk's next edit boundary.)
 */
static int ms_init_l2p_tbl(struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int size, i, seg_no, retval;
	u16 defect_block, reg_addr;
	u8 val1, val2;

	/* One segment per 512 physical blocks */
	ms_card->segment_cnt = ms_card->total_block >> 9;
	RTSX_DEBUGP("ms_card->segment_cnt = %d\n", ms_card->segment_cnt);

	size = ms_card->segment_cnt * sizeof(struct zone_entry);
	ms_card->segment = (struct zone_entry *)vmalloc(size);
	if (ms_card->segment == NULL) {
		TRACE_RET(chip, STATUS_FAIL);
	}
	memset(ms_card->segment, 0, size);

	retval = ms_read_page(chip, ms_card->boot_block, 1);
	if (retval != STATUS_SUCCESS) {
		TRACE_GOTO(chip, INIT_FAIL);
	}

	/* Defect list: pairs of bytes per entry, terminated by 0xFFFF */
	reg_addr = PPBUF_BASE2;
	for (i = 0; i < (((ms_card->total_block >> 9) * 10) + 1); i++) {
		retval = rtsx_read_register(chip, reg_addr++, &val1);
		if (retval != STATUS_SUCCESS) {
			TRACE_GOTO(chip, INIT_FAIL);
		}
		retval =
		rtsx_read_register(chip, reg_addr++, &val2);
		if (retval != STATUS_SUCCESS) {
			TRACE_GOTO(chip, INIT_FAIL);
		}

		defect_block = ((u16)val1 << 8) | val2;
		if (defect_block == 0xFFFF) {
			/* end of defect list */
			break;
		}
		seg_no = defect_block / 512;
		ms_card->segment[seg_no].defect_list[
			ms_card->segment[seg_no].disable_count++] =
				defect_block;
	}

	for (i = 0; i < ms_card->segment_cnt; i++) {
		ms_card->segment[i].build_flag = 0;
		ms_card->segment[i].l2p_table = NULL;
		ms_card->segment[i].free_table = NULL;
		ms_card->segment[i].get_index = 0;
		ms_card->segment[i].set_index = 0;
		ms_card->segment[i].unused_blk_cnt = 0;

		RTSX_DEBUGP("defective block count of segment %d is %d\n",
			i, ms_card->segment[i].disable_count);
	}

	return STATUS_SUCCESS;

INIT_FAIL:
	if (ms_card->segment) {
		vfree(ms_card->segment);
		ms_card->segment = NULL;
	}

	return STATUS_FAIL;
}

/*
 * ms_get_l2p_tbl - look up the physical block mapped to logical offset
 * @log_off inside segment @seg_no; 0xFFFF means "no mapping".
 */
static u16 ms_get_l2p_tbl(struct rtsx_chip *chip, int seg_no, u16 log_off)
{
	struct ms_info *ms_card = &(chip->ms_card);
	struct zone_entry *segment;

	if (ms_card->segment == NULL)
		return 0xFFFF;

	segment = &(ms_card->segment[seg_no]);

	if (segment->l2p_table)
		return segment->l2p_table[log_off];

	return 0xFFFF;
}

/*
 * ms_set_l2p_tbl - record that logical offset @log_off of segment @seg_no
 * now maps to physical block @phy_blk.  Silently ignored if the tables
 * have not been allocated yet.
 */
static void ms_set_l2p_tbl(struct rtsx_chip *chip, int seg_no, u16 log_off,
	u16 phy_blk)
{
	struct ms_info *ms_card = &(chip->ms_card);
	struct zone_entry *segment;

	if (ms_card->segment == NULL)
		return;

	segment = &(ms_card->segment[seg_no]);
	if (segment->l2p_table) {
		segment->l2p_table[log_off] = phy_blk;
	}
}

/*
 * ms_set_unused_block - push @phy_blk onto its segment's circular
 * free-block table and bump the unused-block count.
 */
static void ms_set_unused_block(struct rtsx_chip *chip, u16 phy_blk)
{
	struct ms_info *ms_card = &(chip->ms_card);
	struct zone_entry *segment;
	int seg_no;

	seg_no = (int)phy_blk >> 9;	/* 512 blocks per segment */
	segment = &(ms_card->segment[seg_no]);

	segment->free_table[segment->set_index++] = phy_blk;
	if (segment->set_index >= MS_FREE_TABLE_CNT) {
		segment->set_index = 0;
	}
	segment->unused_blk_cnt++;
}

/*
 * ms_get_unused_block - pop a free physical block from segment @seg_no's
 * circular free-block table; returns 0xFFFF when the segment is exhausted.
 */
static u16 ms_get_unused_block(struct rtsx_chip *chip, int seg_no)
{
	struct ms_info *ms_card = &(chip->ms_card);
	struct zone_entry *segment;
	u16 phy_blk;

	segment = &(ms_card->segment[seg_no]);

	if (segment->unused_blk_cnt <= 0)
		return 0xFFFF;

	phy_blk = segment->free_table[segment->get_index];
	segment->free_table[segment->get_index++] = 0xFFFF;
	if (segment->get_index >= MS_FREE_TABLE_CNT) {
		segment->get_index = 0;
	}
	segment->unused_blk_cnt--;

	return phy_blk;
}

/* First logical block number of each segment (segment 0 holds 494 logical
 * blocks, the rest 496 each). */
static const unsigned short ms_start_idx[] = {0, 494, 990, 1486, 1982, 2478,
	2974, 3470, 3966, 4462, 4958, 5454, 5950, 6446, 6942, 7438, 7934};

/*
 * ms_arbitrate_l2p - two physical blocks claim the same logical offset;
 * keep one mapping and recycle (erase unless write-protected, then free)
 * the loser.  The update-status bits @us1 (new candidate) and @us2
 * (current entry) decide the winner; on a tie the larger block number wins.
 * Always returns STATUS_SUCCESS.
 */
static int ms_arbitrate_l2p(struct rtsx_chip *chip, u16 phy_blk,
	u16 log_off, u8 us1, u8 us2)
{
	struct ms_info *ms_card = &(chip->ms_card);
	struct zone_entry *segment;
	int seg_no;
	u16 tmp_blk;

	seg_no = (int)phy_blk >> 9;
	segment = &(ms_card->segment[seg_no]);
	tmp_blk = segment->l2p_table[log_off];

	if (us1 != us2) {
		if (us1 == 0) {
			/* candidate is newer: replace the current entry */
			if (!(chip->card_wp & MS_CARD)) {
				ms_erase_block(chip, tmp_blk);
			}
			ms_set_unused_block(chip, tmp_blk);
			segment->l2p_table[log_off] = phy_blk;
		} else {
			/* current entry stays; recycle the candidate */
			if (!(chip->card_wp & MS_CARD)) {
				ms_erase_block(chip, phy_blk);
			}
			ms_set_unused_block(chip, phy_blk);
		}
	} else {
		if (phy_blk < tmp_blk) {
			if (!(chip->card_wp & MS_CARD)) {
				ms_erase_block(chip, phy_blk);
			}
			ms_set_unused_block(chip, phy_blk);
		} else {
			if (!(chip->card_wp & MS_CARD)) {
				ms_erase_block(chip, tmp_blk);
			}
			ms_set_unused_block(chip, tmp_blk);
			segment->l2p_table[log_off] = phy_blk;
		}
	}

	return STATUS_SUCCESS;
}

/*
 * ms_build_l2p_tbl - scan all physical blocks of segment @seg_no and build
 * its logical-to-physical table and free-block list.
 * (Function continues past this chunk's next edit boundary.)
 */
static int ms_build_l2p_tbl(struct rtsx_chip *chip, int seg_no)
{
	struct ms_info *ms_card = &(chip->ms_card);
	struct zone_entry *segment;
	int retval, table_size, disable_cnt, defect_flag, i;
	u16 start, end, phy_blk, log_blk, tmp_blk;
	u8 extra[MS_EXTRA_SIZE], us1, us2;

	RTSX_DEBUGP("ms_build_l2p_tbl: %d\n", seg_no);

	if (ms_card->segment == NULL) {
		retval = ms_init_l2p_tbl(chip);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, retval);
		}
	}

	if (ms_card->segment[seg_no].build_flag) {
		RTSX_DEBUGP("l2p table of segment %d has been built\n",
			seg_no);
		return STATUS_SUCCESS;
	}

	/* Segment 0 has 494 logical blocks, others 496 */
	if (seg_no == 0) {
		table_size = 494;
	} else {
		table_size = 496;
	}

	segment =
	&(ms_card->segment[seg_no]);

	if (segment->l2p_table == NULL) {
		segment->l2p_table = (u16 *)vmalloc(table_size * 2);
		if (segment->l2p_table == NULL) {
			TRACE_GOTO(chip, BUILD_FAIL);
		}
	}
	memset((u8 *)(segment->l2p_table), 0xff, table_size * 2);

	if (segment->free_table == NULL) {
		segment->free_table = (u16 *)vmalloc(MS_FREE_TABLE_CNT * 2);
		if (segment->free_table == NULL) {
			TRACE_GOTO(chip, BUILD_FAIL);
		}
	}
	memset((u8 *)(segment->free_table), 0xff, MS_FREE_TABLE_CNT * 2);

	start = (u16)seg_no << 9;
	end = (u16)(seg_no + 1) << 9;

	disable_cnt = segment->disable_count;

	segment->get_index = segment->set_index = 0;
	segment->unused_blk_cnt = 0;

	/* Scan every physical block of this segment */
	for (phy_blk = start; phy_blk < end; phy_blk++) {
		/* Skip blocks on the defect list */
		if (disable_cnt) {
			defect_flag = 0;
			for (i = 0; i < segment->disable_count; i++) {
				if (phy_blk == segment->defect_list[i]) {
					defect_flag = 1;
					break;
				}
			}
			if (defect_flag) {
				disable_cnt--;
				continue;
			}
		}

		retval = ms_read_extra_data(chip, phy_blk, 0,
			extra, MS_EXTRA_SIZE);
		if (retval != STATUS_SUCCESS) {
			RTSX_DEBUGP("read extra data fail\n");
			ms_set_bad_block(chip, phy_blk);
			continue;
		}

		/* In the last segment, wipe any stale translation-table
		 * block left behind by other hosts. */
		if (seg_no == ms_card->segment_cnt - 1) {
			if (!(extra[1] & NOT_TRANSLATION_TABLE)) {
				if (!(chip->card_wp & MS_CARD)) {
					retval = ms_erase_block(chip, phy_blk);
					if (retval != STATUS_SUCCESS)
						continue;
					extra[2] = 0xff;
					extra[3] = 0xff;
				}
			}
		}

		if (!(extra[0] & BLOCK_OK))
			continue;
		if (!(extra[1] & NOT_BOOT_BLOCK))
			continue;
		if ((extra[0] & PAGE_OK) != PAGE_OK)
			continue;

		log_blk = ((u16)extra[2] << 8) | extra[3];

		if (log_blk == 0xFFFF) {
			/* unmapped block: recycle it */
			if (!(chip->card_wp & MS_CARD)) {
				retval = ms_erase_block(chip, phy_blk);
				if (retval != STATUS_SUCCESS)
					continue;
			}
			ms_set_unused_block(chip, phy_blk);
			continue;
		}

		if ((log_blk < ms_start_idx[seg_no]) ||
				(log_blk >= ms_start_idx[seg_no+1])) {
			/* logical number outside this segment: recycle */
			if (!(chip->card_wp & MS_CARD)) {
				retval = ms_erase_block(chip, phy_blk);
				if (retval != STATUS_SUCCESS)
					continue;
			}
			ms_set_unused_block(chip, phy_blk);
			continue;
		}

		if (segment->l2p_table[log_blk - ms_start_idx[seg_no]] ==
				0xFFFF) {
			segment->l2p_table[log_blk - ms_start_idx[seg_no]] =
				phy_blk;
			continue;
		}

		/* Duplicate logical block: arbitrate via update-status bits */
		us1 = extra[0] & 0x10;
		tmp_blk = segment->l2p_table[log_blk - ms_start_idx[seg_no]];
		retval = ms_read_extra_data(chip, tmp_blk, 0,
			extra, MS_EXTRA_SIZE);
		if (retval != STATUS_SUCCESS)
			continue;
		us2 = extra[0] & 0x10;

		(void)ms_arbitrate_l2p(chip, phy_blk,
			log_blk-ms_start_idx[seg_no], us1, us2);
		continue;
	}

	segment->build_flag = 1;

	RTSX_DEBUGP("unused block count: %d\n", segment->unused_blk_cnt);

	/* Logical Address Confirmation Process: keep a spare-block reserve
	 * (2 in the last segment, 1 otherwise) or force write-protect. */
	if (seg_no == ms_card->segment_cnt - 1) {
		if (segment->unused_blk_cnt < 2) {
			chip->card_wp |= MS_CARD;
		}
	} else {
		if (segment->unused_blk_cnt < 1) {
			chip->card_wp |= MS_CARD;
		}
	}

	if (chip->card_wp & MS_CARD)
		return STATUS_SUCCESS;

	/* Assign free blocks to still-unmapped logical blocks */
	for (log_blk = ms_start_idx[seg_no];
			log_blk < ms_start_idx[seg_no + 1]; log_blk++) {
		if (segment->l2p_table[log_blk-ms_start_idx[seg_no]] ==
				0xFFFF) {
			phy_blk = ms_get_unused_block(chip, seg_no);
			if (phy_blk == 0xFFFF) {
				chip->card_wp |= MS_CARD;
				return STATUS_SUCCESS;
			}
			retval = ms_init_page(chip, phy_blk, log_blk, 0, 1);
			if (retval != STATUS_SUCCESS) {
				TRACE_GOTO(chip, BUILD_FAIL);
			}
			segment->l2p_table[log_blk-ms_start_idx[seg_no]] =
				phy_blk;
			if (seg_no == ms_card->segment_cnt - 1) {
				if (segment->unused_blk_cnt < 2) {
					chip->card_wp |= MS_CARD;
					return STATUS_SUCCESS;
				}
			} else {
				if (segment->unused_blk_cnt < 1) {
					chip->card_wp |= MS_CARD;
					return STATUS_SUCCESS;
				}
			}
		}
	}

	/* Make boot block be the first normal block: relocate any data
	 * currently mapped below it. */
	if (seg_no == 0) {
		for (log_blk = 0; log_blk < 494; log_blk++) {
			tmp_blk = segment->l2p_table[log_blk];
			if (tmp_blk < ms_card->boot_block) {
				RTSX_DEBUGP("Boot block is not the first normal block.\n");

				if (chip->card_wp & MS_CARD)
					break;

				phy_blk = ms_get_unused_block(chip, 0);
				retval = ms_copy_page(chip, tmp_blk, phy_blk,
					log_blk, 0, ms_card->page_off + 1);
				if (retval != STATUS_SUCCESS) {
					TRACE_RET(chip, STATUS_FAIL);
				}

				segment->l2p_table[log_blk] = phy_blk;

				retval = ms_set_bad_block(chip, tmp_blk);
				if (retval != STATUS_SUCCESS) {
					TRACE_RET(chip, STATUS_FAIL);
				}
			}
		}
	}

	return STATUS_SUCCESS;

BUILD_FAIL:
	segment->build_flag = 0;
	if (segment->l2p_table) {
		vfree(segment->l2p_table);
		segment->l2p_table = NULL;
	}
	if (segment->free_table) {
		vfree(segment->free_table);
		segment->free_table = NULL;
	}

	return STATUS_FAIL;
}

/*
 * reset_ms_card - full (re-)initialisation entry point for the MS slot.
 *
 * Clears the per-card state, powers the clock, tries MS Pro reset first
 * and falls back to legacy MS reset when the Pro path asks for it, then
 * sets the transfer parameters.  For legacy cards the last segment's L2P
 * table is built immediately to clean up any stale translation table.
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
int reset_ms_card(struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval;

	memset(ms_card, 0, sizeof(struct ms_info));

	retval = enable_card_clock(chip, MS_CARD);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	retval = select_card(chip, MS_CARD);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	ms_card->ms_type = 0;

	retval = reset_ms_pro(chip);
	if (retval != STATUS_SUCCESS) {
		/* check_ms_flow is set by the Pro path when the media looks
		 * like a legacy stick */
		if (ms_card->check_ms_flow) {
			retval = reset_ms(chip);
			if (retval != STATUS_SUCCESS) {
				TRACE_RET(chip, STATUS_FAIL);
			}
		} else {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	retval = ms_set_init_para(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (!CHK_MSPRO(ms_card)) {
		/* Build table for the last segment, to check if an L2P
		 * table block exists, erasing it if so. */
		retval = ms_build_l2p_tbl(chip,
			ms_card->total_block / 512 - 1);
		if (retval != STATUS_SUCCESS) {
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	RTSX_DEBUGP("ms_card->ms_type = 0x%x\n", ms_card->ms_type);

	return STATUS_SUCCESS;
}

/*
 * mspro_set_rw_cmd - send an MS Pro extended SET_CMD (read/write/format)
 * with the start sector and sector count packed big-endian, retrying up
 * to MS_MAX_RETRY_COUNT times.
 */
static int mspro_set_rw_cmd(struct rtsx_chip *chip, u32 start_sec,
	u16 sec_cnt, u8 cmd)
{
	int retval, i;
	u8 data[8];

	data[0] = cmd;
	data[1] = (u8)(sec_cnt >> 8);
	data[2] = (u8)sec_cnt;
	data[3] = (u8)(start_sec >> 24);
	data[4] = (u8)(start_sec >> 16);
	data[5] = (u8)(start_sec >> 8);
	data[6] = (u8)start_sec;
	data[7] = 0;

	for (i = 0; i < MS_MAX_RETRY_COUNT; i++) {
		retval = ms_write_bytes(chip, PRO_EX_SET_CMD, 7,
			WAIT_INT, data, 8);
		if (retval == STATUS_SUCCESS)
			break;
	}
	if (i == MS_MAX_RETRY_COUNT) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

/*
 * mspro_stop_seq_mode - leave sequential (multi-sector streaming) mode.
 * (Function continues past this chunk's next edit boundary.)
 */
void mspro_stop_seq_mode(struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval;
	RTSX_DEBUGP("--%s--\n", __func__);

	if (ms_card->seq_mode) {
		retval = ms_switch_clock(chip);
		if (retval != STATUS_SUCCESS)
			return;

		ms_card->seq_mode = 0;
		ms_card->total_sec_cnt = 0;
		/* best-effort: stop the card and flush the ring buffer */
		ms_send_cmd(chip, PRO_STOP, WAIT_INT);

		rtsx_write_register(chip, RBCTL, RB_FLUSH, RB_FLUSH);
	}
}

/*
 * ms_auto_tune_clock - drop the MS bus clock one step after CRC/timeout
 * errors (ASIC: -20 while above 30; FPGA: 80 -> 60 -> 40) and re-apply it.
 */
static inline int ms_auto_tune_clock(struct rtsx_chip *chip)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval;

	RTSX_DEBUGP("--%s--\n", __func__);

	if (chip->asic_code) {
		if (ms_card->ms_clock > 30) {
			ms_card->ms_clock -= 20;
		}
	} else {
		if (ms_card->ms_clock == CLK_80) {
			ms_card->ms_clock = CLK_60;
		} else if (ms_card->ms_clock == CLK_60) {
			ms_card->ms_clock = CLK_40;
		}
	}

	retval = ms_switch_clock(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	return STATUS_SUCCESS;
}

/*
 * mspro_rw_multi_sector - MS Pro multi-sector read/write for one SCSI
 * command.
 *
 * Picks the TPC/command pair (2K quad access for aligned requests on
 * MS Pro-HG, 512-byte long access otherwise), maintains sequential
 * streaming mode across consecutive same-direction requests, and on
 * transfer errors clears the controller state and requests a clock
 * re-tune for CRC/timeout failures.
 * Returns STATUS_SUCCESS or STATUS_FAIL.
 */
static int mspro_rw_multi_sector(struct scsi_cmnd *srb,
	struct rtsx_chip *chip, u32 start_sector, u16 sector_cnt)
{
	struct ms_info *ms_card = &(chip->ms_card);
	int retval, mode_2k = 0;
	u16 count;
	u8 val, trans_mode, rw_tpc, rw_cmd;

	ms_set_err_code(chip, MS_NO_ERROR);

	ms_card->cleanup_counter = 0;

	if (CHK_MSHG(ms_card)) {
		/* Pro-HG: 2K (quad) access only for 4-sector alignment */
		if ((start_sector % 4) || (sector_cnt % 4)) {
			if (srb->sc_data_direction == DMA_FROM_DEVICE) {
				rw_tpc = PRO_READ_LONG_DATA;
				rw_cmd = PRO_READ_DATA;
			} else {
				rw_tpc = PRO_WRITE_LONG_DATA;
				rw_cmd = PRO_WRITE_DATA;
			}
		} else {
			if (srb->sc_data_direction == DMA_FROM_DEVICE) {
				rw_tpc = PRO_READ_QUAD_DATA;
				rw_cmd = PRO_READ_2K_DATA;
			} else {
				rw_tpc = PRO_WRITE_QUAD_DATA;
				rw_cmd = PRO_WRITE_2K_DATA;
			}
			mode_2k = 1;
		}
	} else {
		if (srb->sc_data_direction == DMA_FROM_DEVICE) {
			rw_tpc = PRO_READ_LONG_DATA;
			rw_cmd = PRO_READ_DATA;
		} else {
			rw_tpc = PRO_WRITE_LONG_DATA;
			rw_cmd = PRO_WRITE_DATA;
		}
	}

	retval = ms_switch_clock(chip);
	if (retval != STATUS_SUCCESS) {
		TRACE_RET(chip, STATUS_FAIL);
	}

	if (srb->sc_data_direction == DMA_FROM_DEVICE) {
		trans_mode = MS_TM_AUTO_READ;
	} else {
		trans_mode = MS_TM_AUTO_WRITE;
	}

	RTSX_READ_REG(chip, MS_TRANS_CFG, &val);

	if (ms_card->seq_mode) {
		/* Break sequential mode on direction change, address gap,
		 * access-size mismatch, missing BREQ, or 0xFE00-sector
		 * window overflow. */
		if ((ms_card->pre_dir != srb->sc_data_direction)
				|| ((ms_card->pre_sec_addr +
					ms_card->pre_sec_cnt) != start_sector)
				|| (mode_2k &&
					(ms_card->seq_mode & MODE_512_SEQ))
				|| (!mode_2k &&
					(ms_card->seq_mode & MODE_2K_SEQ))
				|| !(val & MS_INT_BREQ)
				|| ((ms_card->total_sec_cnt + sector_cnt) >
					0xFE00)) {
			ms_card->seq_mode = 0;
			ms_card->total_sec_cnt = 0;
			if (val & MS_INT_BREQ) {
				retval = ms_send_cmd(chip, PRO_STOP,
					WAIT_INT);
				if (retval != STATUS_SUCCESS) {
					TRACE_RET(chip, STATUS_FAIL);
				}

				rtsx_write_register(chip, RBCTL,
					RB_FLUSH, RB_FLUSH);
			}
		}
	}

	if (!ms_card->seq_mode) {
		ms_card->total_sec_cnt = 0;
		if (sector_cnt >= SEQ_START_CRITERIA) {
			/* Large request: open a streaming window of up to
			 * 0xFE00 sectors (clamped at end of media). */
			if ((ms_card->capacity - start_sector) > 0xFE00) {
				count = 0xFE00;
			} else {
				count = (u16)(ms_card->capacity -
					start_sector);
			}
			if (count > sector_cnt) {
				if (mode_2k) {
					ms_card->seq_mode |= MODE_2K_SEQ;
				} else {
					ms_card->seq_mode |= MODE_512_SEQ;
				}
			}
		} else {
			count = sector_cnt;
		}

		retval = mspro_set_rw_cmd(chip, start_sector, count, rw_cmd);
		if (retval != STATUS_SUCCESS) {
			ms_card->seq_mode = 0;
			TRACE_RET(chip, STATUS_FAIL);
		}
	}

	retval = ms_transfer_data(chip, trans_mode, rw_tpc, sector_cnt,
		WAIT_INT, mode_2k, scsi_sg_count(srb),
		scsi_sglist(srb), scsi_bufflen(srb));
	if (retval != STATUS_SUCCESS) {
		ms_card->seq_mode = 0;
		rtsx_read_register(chip, MS_TRANS_CFG, &val);
		rtsx_clear_ms_error(chip);

		if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) {
			chip->rw_need_retry = 0;
			RTSX_DEBUGP("No card exist, exit mspro_rw_multi_sector\n");
			TRACE_RET(chip, STATUS_FAIL);
		}

		if (val & MS_INT_BREQ) {
			ms_send_cmd(chip, PRO_STOP, WAIT_INT);
		}

		if (val & (MS_CRC16_ERR | MS_RDY_TIMEOUT)) {
			RTSX_DEBUGP("MSPro CRC error, tune clock!\n");
			chip->rw_need_retry = 1;
			ms_auto_tune_clock(chip);
		}

		TRACE_RET(chip, retval);
	}

	if (ms_card->seq_mode) {
		/* Remember where this request ended for the next one */
		ms_card->pre_sec_addr = start_sector;
		ms_card->pre_sec_cnt = sector_cnt;
		ms_card->pre_dir = srb->sc_data_direction;
		ms_card->total_sec_cnt += sector_cnt;
	}

	return STATUS_SUCCESS;
}

/*
 * mspro_read_format_progress - poll the format progress of an MS Pro card.
 * (Function continues past this chunk's next edit boundary.)
 */
static int mspro_read_format_progress(struct rtsx_chip *chip,
const int short_data_len) { struct ms_info *ms_card = &(chip->ms_card); int retval, i; u32 total_progress, cur_progress; u8 cnt, tmp; u8 data[8]; RTSX_DEBUGP("mspro_read_format_progress, short_data_len = %d\n", short_data_len); retval = ms_switch_clock(chip); if (retval != STATUS_SUCCESS) { ms_card->format_status = FORMAT_FAIL; TRACE_RET(chip, STATUS_FAIL); } retval = rtsx_read_register(chip, MS_TRANS_CFG, &tmp); if (retval != STATUS_SUCCESS) { ms_card->format_status = FORMAT_FAIL; TRACE_RET(chip, STATUS_FAIL); } if (!(tmp & MS_INT_BREQ)) { if ((tmp & (MS_INT_CED | MS_INT_BREQ | MS_INT_CMDNK | MS_INT_ERR)) == MS_INT_CED) { ms_card->format_status = FORMAT_SUCCESS; return STATUS_SUCCESS; } ms_card->format_status = FORMAT_FAIL; TRACE_RET(chip, STATUS_FAIL); } if (short_data_len >= 256) { cnt = 0; } else { cnt = (u8)short_data_len; } retval = rtsx_write_register(chip, MS_CFG, MS_NO_CHECK_INT, MS_NO_CHECK_INT); if (retval != STATUS_SUCCESS) { ms_card->format_status = FORMAT_FAIL; TRACE_RET(chip, STATUS_FAIL); } retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, cnt, WAIT_INT, data, 8); if (retval != STATUS_SUCCESS) { ms_card->format_status = FORMAT_FAIL; TRACE_RET(chip, STATUS_FAIL); } total_progress = (data[0] << 24) | (data[1] << 16) | (data[2] << 8) | data[3]; cur_progress = (data[4] << 24) | (data[5] << 16) | (data[6] << 8) | data[7]; RTSX_DEBUGP("total_progress = %d, cur_progress = %d\n", total_progress, cur_progress); if (total_progress == 0) { ms_card->progress = 0; } else { u64 ulltmp = (u64)cur_progress * (u64)65535; do_div(ulltmp, total_progress); ms_card->progress = (u16)ulltmp; } RTSX_DEBUGP("progress = %d\n", ms_card->progress); for (i = 0; i < 5000; i++) { retval = rtsx_read_register(chip, MS_TRANS_CFG, &tmp); if (retval != STATUS_SUCCESS) { ms_card->format_status = FORMAT_FAIL; TRACE_RET(chip, STATUS_FAIL); } if (tmp & (MS_INT_CED | MS_INT_CMDNK | MS_INT_BREQ | MS_INT_ERR)) { break; } wait_timeout(1); } retval = rtsx_write_register(chip, MS_CFG, 
MS_NO_CHECK_INT, 0); if (retval != STATUS_SUCCESS) { ms_card->format_status = FORMAT_FAIL; TRACE_RET(chip, STATUS_FAIL); } if (i == 5000) { ms_card->format_status = FORMAT_FAIL; TRACE_RET(chip, STATUS_FAIL); } if (tmp & (MS_INT_CMDNK | MS_INT_ERR)) { ms_card->format_status = FORMAT_FAIL; TRACE_RET(chip, STATUS_FAIL); } if (tmp & MS_INT_CED) { ms_card->format_status = FORMAT_SUCCESS; ms_card->pro_under_formatting = 0; } else if (tmp & MS_INT_BREQ) { ms_card->format_status = FORMAT_IN_PROGRESS; } else { ms_card->format_status = FORMAT_FAIL; ms_card->pro_under_formatting = 0; TRACE_RET(chip, STATUS_FAIL); } return STATUS_SUCCESS; } void mspro_polling_format_status(struct rtsx_chip *chip) { struct ms_info *ms_card = &(chip->ms_card); int i; if (ms_card->pro_under_formatting && (rtsx_get_stat(chip) != RTSX_STAT_SS)) { rtsx_set_stat(chip, RTSX_STAT_RUN); for (i = 0; i < 65535; i++) { mspro_read_format_progress(chip, MS_SHORT_DATA_LEN); if (ms_card->format_status != FORMAT_IN_PROGRESS) break; } } return; } int mspro_format(struct scsi_cmnd *srb, struct rtsx_chip *chip, int short_data_len, int quick_format) { struct ms_info *ms_card = &(chip->ms_card); int retval, i; u8 buf[8], tmp; u16 para; RTSX_DEBUGP("--%s--\n", __func__); retval = ms_switch_clock(chip); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } retval = ms_set_rw_reg_addr(chip, 0x00, 0x00, Pro_TPCParm, 0x01); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } memset(buf, 0, 2); switch (short_data_len) { case 32: buf[0] = 0; break; case 64: buf[0] = 1; break; case 128: buf[0] = 2; break; case 256: default: buf[0] = 3; break; } for (i = 0; i < MS_MAX_RETRY_COUNT; i++) { retval = ms_write_bytes(chip, PRO_WRITE_REG, 1, NO_WAIT_INT, buf, 2); if (retval == STATUS_SUCCESS) break; } if (i == MS_MAX_RETRY_COUNT) { TRACE_RET(chip, STATUS_FAIL); } if (quick_format) { para = 0x0000; } else { para = 0x0001; } retval = mspro_set_rw_cmd(chip, 0, para, PRO_FORMAT); if (retval != STATUS_SUCCESS) { 
TRACE_RET(chip, STATUS_FAIL); } RTSX_READ_REG(chip, MS_TRANS_CFG, &tmp); if (tmp & (MS_INT_CMDNK | MS_INT_ERR)) { TRACE_RET(chip, STATUS_FAIL); } if ((tmp & (MS_INT_BREQ | MS_INT_CED)) == MS_INT_BREQ) { ms_card->pro_under_formatting = 1; ms_card->progress = 0; ms_card->format_status = FORMAT_IN_PROGRESS; return STATUS_SUCCESS; } if (tmp & MS_INT_CED) { ms_card->pro_under_formatting = 0; ms_card->progress = 0; ms_card->format_status = FORMAT_SUCCESS; set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_NO_SENSE); return STATUS_SUCCESS; } TRACE_RET(chip, STATUS_FAIL); } static int ms_read_multiple_pages(struct rtsx_chip *chip, u16 phy_blk, u16 log_blk, u8 start_page, u8 end_page, u8 *buf, unsigned int *index, unsigned int *offset) { struct ms_info *ms_card = &(chip->ms_card); int retval, i; u8 extra[MS_EXTRA_SIZE], page_addr, val, trans_cfg, data[6]; u8 *ptr; retval = ms_read_extra_data(chip, phy_blk, start_page, extra, MS_EXTRA_SIZE); if (retval == STATUS_SUCCESS) { if ((extra[1] & 0x30) != 0x30) { ms_set_err_code(chip, MS_FLASH_READ_ERROR); TRACE_RET(chip, STATUS_FAIL); } } retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm, 6); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } if (CHK_MS4BIT(ms_card)) { data[0] = 0x88; } else { data[0] = 0x80; } data[1] = 0; data[2] = (u8)(phy_blk >> 8); data[3] = (u8)phy_blk; data[4] = 0; data[5] = start_page; for (i = 0; i < MS_MAX_RETRY_COUNT; i++) { retval = ms_write_bytes(chip, WRITE_REG, 6, NO_WAIT_INT, data, 6); if (retval == STATUS_SUCCESS) break; } if (i == MS_MAX_RETRY_COUNT) { TRACE_RET(chip, STATUS_FAIL); } ms_set_err_code(chip, MS_NO_ERROR); retval = ms_send_cmd(chip, BLOCK_READ, WAIT_INT); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } ptr = buf; for (page_addr = start_page; page_addr < end_page; page_addr++) { ms_set_err_code(chip, MS_NO_ERROR); if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) { ms_set_err_code(chip, MS_NO_CARD); TRACE_RET(chip, STATUS_FAIL); } 
retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } if (val & INT_REG_CMDNK) { ms_set_err_code(chip, MS_CMD_NK); TRACE_RET(chip, STATUS_FAIL); } if (val & INT_REG_ERR) { if (val & INT_REG_BREQ) { retval = ms_read_status_reg(chip); if (retval != STATUS_SUCCESS) { if (!(chip->card_wp & MS_CARD)) { reset_ms(chip); ms_set_page_status(log_blk, setPS_NG, extra, MS_EXTRA_SIZE); ms_write_extra_data(chip, phy_blk, page_addr, extra, MS_EXTRA_SIZE); } ms_set_err_code(chip, MS_FLASH_READ_ERROR); TRACE_RET(chip, STATUS_FAIL); } } else { ms_set_err_code(chip, MS_FLASH_READ_ERROR); TRACE_RET(chip, STATUS_FAIL); } } else { if (!(val & INT_REG_BREQ)) { ms_set_err_code(chip, MS_BREQ_ERROR); TRACE_RET(chip, STATUS_FAIL); } } if (page_addr == (end_page - 1)) { if (!(val & INT_REG_CED)) { retval = ms_send_cmd(chip, BLOCK_END, WAIT_INT); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } } retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } if (!(val & INT_REG_CED)) { ms_set_err_code(chip, MS_FLASH_READ_ERROR); TRACE_RET(chip, STATUS_FAIL); } trans_cfg = NO_WAIT_INT; } else { trans_cfg = WAIT_INT; } rtsx_init_cmd(chip); rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, READ_PAGE_DATA); rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, trans_cfg); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER); trans_dma_enable(DMA_FROM_DEVICE, chip, 512, DMA_512); rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF, MS_TRANSFER_START | MS_TM_NORMAL_READ); rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END, MS_TRANSFER_END); rtsx_send_cmd_no_wait(chip); retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr, 512, scsi_sg_count(chip->srb), index, offset, DMA_FROM_DEVICE, chip->ms_timeout); if (retval < 0) { if (retval == -ETIMEDOUT) { ms_set_err_code(chip, MS_TO_ERROR); rtsx_clear_ms_error(chip); 
TRACE_RET(chip, STATUS_TIMEDOUT); } retval = rtsx_read_register(chip, MS_TRANS_CFG, &val); if (retval != STATUS_SUCCESS) { ms_set_err_code(chip, MS_TO_ERROR); rtsx_clear_ms_error(chip); TRACE_RET(chip, STATUS_TIMEDOUT); } if (val & (MS_CRC16_ERR | MS_RDY_TIMEOUT)) { ms_set_err_code(chip, MS_CRC16_ERROR); rtsx_clear_ms_error(chip); TRACE_RET(chip, STATUS_FAIL); } } if (scsi_sg_count(chip->srb) == 0) ptr += 512; } return STATUS_SUCCESS; } static int ms_write_multiple_pages(struct rtsx_chip *chip, u16 old_blk, u16 new_blk, u16 log_blk, u8 start_page, u8 end_page, u8 *buf, unsigned int *index, unsigned int *offset) { struct ms_info *ms_card = &(chip->ms_card); int retval, i; u8 page_addr, val, data[16]; u8 *ptr; if (!start_page) { retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm, 7); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } if (CHK_MS4BIT(ms_card)) { data[0] = 0x88; } else { data[0] = 0x80; } data[1] = 0; data[2] = (u8)(old_blk >> 8); data[3] = (u8)old_blk; data[4] = 0x80; data[5] = 0; data[6] = 0xEF; data[7] = 0xFF; retval = ms_write_bytes(chip, WRITE_REG, 7, NO_WAIT_INT, data, 8); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } ms_set_err_code(chip, MS_NO_ERROR); retval = ms_transfer_tpc(chip, MS_TM_READ_BYTES, GET_INT, 1, NO_WAIT_INT); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } } retval = ms_set_rw_reg_addr(chip, OverwriteFlag, MS_EXTRA_SIZE, SystemParm, (6 + MS_EXTRA_SIZE)); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } ms_set_err_code(chip, MS_NO_ERROR); if (CHK_MS4BIT(ms_card)) { data[0] = 0x88; } else { data[0] = 0x80; } data[1] = 0; data[2] = (u8)(new_blk >> 8); data[3] = (u8)new_blk; if ((end_page - start_page) == 1) { data[4] = 0x20; } else { data[4] = 0; } data[5] = start_page; data[6] = 0xF8; data[7] = 0xFF; data[8] = (u8)(log_blk >> 8); 
data[9] = (u8)log_blk; for (i = 0x0A; i < 0x10; i++) { data[i] = 0xFF; } for (i = 0; i < MS_MAX_RETRY_COUNT; i++) { retval = ms_write_bytes(chip, WRITE_REG, 6 + MS_EXTRA_SIZE, NO_WAIT_INT, data, 16); if (retval == STATUS_SUCCESS) break; } if (i == MS_MAX_RETRY_COUNT) { TRACE_RET(chip, STATUS_FAIL); } for (i = 0; i < MS_MAX_RETRY_COUNT; i++) { retval = ms_send_cmd(chip, BLOCK_WRITE, WAIT_INT); if (retval == STATUS_SUCCESS) break; } if (i == MS_MAX_RETRY_COUNT) { TRACE_RET(chip, STATUS_FAIL); } retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } ptr = buf; for (page_addr = start_page; page_addr < end_page; page_addr++) { ms_set_err_code(chip, MS_NO_ERROR); if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) { ms_set_err_code(chip, MS_NO_CARD); TRACE_RET(chip, STATUS_FAIL); } if (val & INT_REG_CMDNK) { ms_set_err_code(chip, MS_CMD_NK); TRACE_RET(chip, STATUS_FAIL); } if (val & INT_REG_ERR) { ms_set_err_code(chip, MS_FLASH_WRITE_ERROR); TRACE_RET(chip, STATUS_FAIL); } if (!(val & INT_REG_BREQ)) { ms_set_err_code(chip, MS_BREQ_ERROR); TRACE_RET(chip, STATUS_FAIL); } udelay(30); rtsx_init_cmd(chip); rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, WRITE_PAGE_DATA); rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, WAIT_INT); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER); trans_dma_enable(DMA_TO_DEVICE, chip, 512, DMA_512); rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF, MS_TRANSFER_START | MS_TM_NORMAL_WRITE); rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END, MS_TRANSFER_END); rtsx_send_cmd_no_wait(chip); retval = rtsx_transfer_data_partial(chip, MS_CARD, ptr, 512, scsi_sg_count(chip->srb), index, offset, DMA_TO_DEVICE, chip->ms_timeout); if (retval < 0) { ms_set_err_code(chip, MS_TO_ERROR); rtsx_clear_ms_error(chip); if (retval == -ETIMEDOUT) { TRACE_RET(chip, STATUS_TIMEDOUT); } else { TRACE_RET(chip, STATUS_FAIL); } } retval = ms_read_bytes(chip, 
GET_INT, 1, NO_WAIT_INT, &val, 1); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } if ((end_page - start_page) == 1) { if (!(val & INT_REG_CED)) { ms_set_err_code(chip, MS_FLASH_WRITE_ERROR); TRACE_RET(chip, STATUS_FAIL); } } else { if (page_addr == (end_page - 1)) { if (!(val & INT_REG_CED)) { retval = ms_send_cmd(chip, BLOCK_END, WAIT_INT); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } } retval = ms_read_bytes(chip, GET_INT, 1, NO_WAIT_INT, &val, 1); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } } if ((page_addr == (end_page - 1)) || (page_addr == ms_card->page_off)) { if (!(val & INT_REG_CED)) { ms_set_err_code(chip, MS_FLASH_WRITE_ERROR); TRACE_RET(chip, STATUS_FAIL); } } } if (scsi_sg_count(chip->srb) == 0) ptr += 512; } return STATUS_SUCCESS; } static int ms_finish_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk, u16 log_blk, u8 page_off) { struct ms_info *ms_card = &(chip->ms_card); int retval, seg_no; retval = ms_copy_page(chip, old_blk, new_blk, log_blk, page_off, ms_card->page_off + 1); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } seg_no = old_blk >> 9; if (MS_TST_BAD_BLOCK_FLG(ms_card)) { MS_CLR_BAD_BLOCK_FLG(ms_card); ms_set_bad_block(chip, old_blk); } else { retval = ms_erase_block(chip, old_blk); if (retval == STATUS_SUCCESS) { ms_set_unused_block(chip, old_blk); } } ms_set_l2p_tbl(chip, seg_no, log_blk - ms_start_idx[seg_no], new_blk); return STATUS_SUCCESS; } static int ms_prepare_write(struct rtsx_chip *chip, u16 old_blk, u16 new_blk, u16 log_blk, u8 start_page) { int retval; if (start_page) { retval = ms_copy_page(chip, old_blk, new_blk, log_blk, 0, start_page); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } } return STATUS_SUCCESS; } #ifdef MS_DELAY_WRITE int ms_delay_write(struct rtsx_chip *chip) { struct ms_info *ms_card = &(chip->ms_card); struct ms_delay_write_tag *delay_write = &(ms_card->delay_write); int retval; if (delay_write->delay_write_flag) 
{ retval = ms_set_init_para(chip); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } delay_write->delay_write_flag = 0; retval = ms_finish_write(chip, delay_write->old_phyblock, delay_write->new_phyblock, delay_write->logblock, delay_write->pageoff); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } } return STATUS_SUCCESS; } #endif static inline void ms_rw_fail(struct scsi_cmnd *srb, struct rtsx_chip *chip) { if (srb->sc_data_direction == DMA_FROM_DEVICE) { set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); } else { set_sense_type(chip, SCSI_LUN(srb), SENSE_TYPE_MEDIA_WRITE_ERR); } } static int ms_rw_multi_sector(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector, u16 sector_cnt) { struct ms_info *ms_card = &(chip->ms_card); unsigned int lun = SCSI_LUN(srb); int retval, seg_no; unsigned int index = 0, offset = 0; u16 old_blk = 0, new_blk = 0, log_blk, total_sec_cnt = sector_cnt; u8 start_page, end_page = 0, page_cnt; u8 *ptr; #ifdef MS_DELAY_WRITE struct ms_delay_write_tag *delay_write = &(ms_card->delay_write); #endif ms_set_err_code(chip, MS_NO_ERROR); ms_card->cleanup_counter = 0; ptr = (u8 *)scsi_sglist(srb); retval = ms_switch_clock(chip); if (retval != STATUS_SUCCESS) { ms_rw_fail(srb, chip); TRACE_RET(chip, STATUS_FAIL); } log_blk = (u16)(start_sector >> ms_card->block_shift); start_page = (u8)(start_sector & ms_card->page_off); for (seg_no = 0; seg_no < ARRAY_SIZE(ms_start_idx) - 1; seg_no++) { if (log_blk < ms_start_idx[seg_no+1]) break; } if (ms_card->segment[seg_no].build_flag == 0) { retval = ms_build_l2p_tbl(chip, seg_no); if (retval != STATUS_SUCCESS) { chip->card_fail |= MS_CARD; set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); TRACE_RET(chip, STATUS_FAIL); } } if (srb->sc_data_direction == DMA_TO_DEVICE) { #ifdef MS_DELAY_WRITE if (delay_write->delay_write_flag && (delay_write->logblock == log_blk) && (start_page > delay_write->pageoff)) { delay_write->delay_write_flag = 0; 
retval = ms_copy_page(chip, delay_write->old_phyblock, delay_write->new_phyblock, log_blk, delay_write->pageoff, start_page); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR); TRACE_RET(chip, STATUS_FAIL); } old_blk = delay_write->old_phyblock; new_blk = delay_write->new_phyblock; } else if (delay_write->delay_write_flag && (delay_write->logblock == log_blk) && (start_page == delay_write->pageoff)) { delay_write->delay_write_flag = 0; old_blk = delay_write->old_phyblock; new_blk = delay_write->new_phyblock; } else { retval = ms_delay_write(chip); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR); TRACE_RET(chip, STATUS_FAIL); } #endif old_blk = ms_get_l2p_tbl(chip, seg_no, log_blk - ms_start_idx[seg_no]); new_blk = ms_get_unused_block(chip, seg_no); if ((old_blk == 0xFFFF) || (new_blk == 0xFFFF)) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR); TRACE_RET(chip, STATUS_FAIL); } retval = ms_prepare_write(chip, old_blk, new_blk, log_blk, start_page); if (retval != STATUS_SUCCESS) { if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); TRACE_RET(chip, STATUS_FAIL); } set_sense_type(chip, lun, SENSE_TYPE_MEDIA_WRITE_ERR); TRACE_RET(chip, STATUS_FAIL); } #ifdef MS_DELAY_WRITE } #endif } else { #ifdef MS_DELAY_WRITE retval = ms_delay_write(chip); if (retval != STATUS_SUCCESS) { if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); TRACE_RET(chip, STATUS_FAIL); } set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); TRACE_RET(chip, STATUS_FAIL); } #endif old_blk = ms_get_l2p_tbl(chip, seg_no, log_blk - ms_start_idx[seg_no]); if (old_blk == 0xFFFF) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); TRACE_RET(chip, STATUS_FAIL); } } RTSX_DEBUGP("seg_no = %d, old_blk = 0x%x, new_blk = 0x%x\n", seg_no, old_blk, new_blk); while (total_sec_cnt) { if ((start_page 
+ total_sec_cnt) > (ms_card->page_off + 1)) { end_page = ms_card->page_off + 1; } else { end_page = start_page + (u8)total_sec_cnt; } page_cnt = end_page - start_page; RTSX_DEBUGP("start_page = %d, end_page = %d, page_cnt = %d\n", start_page, end_page, page_cnt); if (srb->sc_data_direction == DMA_FROM_DEVICE) { retval = ms_read_multiple_pages(chip, old_blk, log_blk, start_page, end_page, ptr, &index, &offset); } else { retval = ms_write_multiple_pages(chip, old_blk, new_blk, log_blk, start_page, end_page, ptr, &index, &offset); } if (retval != STATUS_SUCCESS) { toggle_gpio(chip, 1); if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); TRACE_RET(chip, STATUS_FAIL); } ms_rw_fail(srb, chip); TRACE_RET(chip, STATUS_FAIL); } if (srb->sc_data_direction == DMA_TO_DEVICE) { if (end_page == (ms_card->page_off + 1)) { retval = ms_erase_block(chip, old_blk); if (retval == STATUS_SUCCESS) { ms_set_unused_block(chip, old_blk); } ms_set_l2p_tbl(chip, seg_no, log_blk - ms_start_idx[seg_no], new_blk); } } total_sec_cnt -= page_cnt; if (scsi_sg_count(srb) == 0) ptr += page_cnt * 512; if (total_sec_cnt == 0) break; log_blk++; for (seg_no = 0; seg_no < sizeof(ms_start_idx)/2; seg_no++) { if (log_blk < ms_start_idx[seg_no+1]) break; } if (ms_card->segment[seg_no].build_flag == 0) { retval = ms_build_l2p_tbl(chip, seg_no); if (retval != STATUS_SUCCESS) { chip->card_fail |= MS_CARD; set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); TRACE_RET(chip, STATUS_FAIL); } } old_blk = ms_get_l2p_tbl(chip, seg_no, log_blk - ms_start_idx[seg_no]); if (old_blk == 0xFFFF) { ms_rw_fail(srb, chip); TRACE_RET(chip, STATUS_FAIL); } if (srb->sc_data_direction == DMA_TO_DEVICE) { new_blk = ms_get_unused_block(chip, seg_no); if (new_blk == 0xFFFF) { ms_rw_fail(srb, chip); TRACE_RET(chip, STATUS_FAIL); } } RTSX_DEBUGP("seg_no = %d, old_blk = 0x%x, new_blk = 0x%x\n", seg_no, old_blk, new_blk); start_page = 0; } if (srb->sc_data_direction == 
DMA_TO_DEVICE) { if (end_page < (ms_card->page_off + 1)) { #ifdef MS_DELAY_WRITE delay_write->delay_write_flag = 1; delay_write->old_phyblock = old_blk; delay_write->new_phyblock = new_blk; delay_write->logblock = log_blk; delay_write->pageoff = end_page; #else retval = ms_finish_write(chip, old_blk, new_blk, log_blk, end_page); if (retval != STATUS_SUCCESS) { if (detect_card_cd(chip, MS_CARD) != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_NOT_PRESENT); TRACE_RET(chip, STATUS_FAIL); } ms_rw_fail(srb, chip); TRACE_RET(chip, STATUS_FAIL); } #endif } } scsi_set_resid(srb, 0); return STATUS_SUCCESS; } int ms_rw(struct scsi_cmnd *srb, struct rtsx_chip *chip, u32 start_sector, u16 sector_cnt) { struct ms_info *ms_card = &(chip->ms_card); int retval; if (CHK_MSPRO(ms_card)) { retval = mspro_rw_multi_sector(srb, chip, start_sector, sector_cnt); } else { retval = ms_rw_multi_sector(srb, chip, start_sector, sector_cnt); } return retval; } void ms_free_l2p_tbl(struct rtsx_chip *chip) { struct ms_info *ms_card = &(chip->ms_card); int i = 0; if (ms_card->segment != NULL) { for (i = 0; i < ms_card->segment_cnt; i++) { if (ms_card->segment[i].l2p_table != NULL) { vfree(ms_card->segment[i].l2p_table); ms_card->segment[i].l2p_table = NULL; } if (ms_card->segment[i].free_table != NULL) { vfree(ms_card->segment[i].free_table); ms_card->segment[i].free_table = NULL; } } vfree(ms_card->segment); ms_card->segment = NULL; } } #ifdef SUPPORT_MAGIC_GATE #ifdef READ_BYTES_WAIT_INT static int ms_poll_int(struct rtsx_chip *chip) { int retval; u8 val; rtsx_init_cmd(chip); rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANS_CFG, MS_INT_CED, MS_INT_CED); retval = rtsx_send_cmd(chip, MS_CARD, 5000); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } val = *rtsx_get_cmd_data(chip); if (val & MS_INT_ERR) { TRACE_RET(chip, STATUS_FAIL); } return STATUS_SUCCESS; } #endif #ifdef MS_SAMPLE_INT_ERR static int check_ms_err(struct rtsx_chip *chip) { int retval; u8 val; retval = 
rtsx_read_register(chip, MS_TRANSFER, &val); if (retval != STATUS_SUCCESS) return 1; if (val & MS_TRANSFER_ERR) return 1; retval = rtsx_read_register(chip, MS_TRANS_CFG, &val); if (retval != STATUS_SUCCESS) return 1; if (val & (MS_INT_ERR | MS_INT_CMDNK)) return 1; return 0; } #else static int check_ms_err(struct rtsx_chip *chip) { int retval; u8 val; retval = rtsx_read_register(chip, MS_TRANSFER, &val); if (retval != STATUS_SUCCESS) return 1; if (val & MS_TRANSFER_ERR) return 1; return 0; } #endif static int mg_send_ex_cmd(struct rtsx_chip *chip, u8 cmd, u8 entry_num) { int retval, i; u8 data[8]; data[0] = cmd; data[1] = 0; data[2] = 0; data[3] = 0; data[4] = 0; data[5] = 0; data[6] = entry_num; data[7] = 0; for (i = 0; i < MS_MAX_RETRY_COUNT; i++) { retval = ms_write_bytes(chip, PRO_EX_SET_CMD, 7, WAIT_INT, data, 8); if (retval == STATUS_SUCCESS) break; } if (i == MS_MAX_RETRY_COUNT) { TRACE_RET(chip, STATUS_FAIL); } if (check_ms_err(chip)) { rtsx_clear_ms_error(chip); TRACE_RET(chip, STATUS_FAIL); } return STATUS_SUCCESS; } static int mg_set_tpc_para_sub(struct rtsx_chip *chip, int type, u8 mg_entry_num) { int retval; u8 buf[6]; RTSX_DEBUGP("--%s--\n", __func__); if (type == 0) { retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_TPCParm, 1); } else { retval = ms_set_rw_reg_addr(chip, 0, 0, Pro_DataCount1, 6); } if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } buf[0] = 0; buf[1] = 0; if (type == 1) { buf[2] = 0; buf[3] = 0; buf[4] = 0; buf[5] = mg_entry_num; } retval = ms_write_bytes(chip, PRO_WRITE_REG, (type == 0) ? 
1 : 6, NO_WAIT_INT, buf, 6); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } return STATUS_SUCCESS; } int mg_set_leaf_id(struct scsi_cmnd *srb, struct rtsx_chip *chip) { int retval; int i; unsigned int lun = SCSI_LUN(srb); u8 buf1[32], buf2[12]; RTSX_DEBUGP("--%s--\n", __func__); if (scsi_bufflen(srb) < 12) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_INVALID_CMD_FIELD); TRACE_RET(chip, STATUS_FAIL); } ms_cleanup_work(chip); retval = ms_switch_clock(chip); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } retval = mg_send_ex_cmd(chip, MG_SET_LID, 0); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB); TRACE_RET(chip, STATUS_FAIL); } memset(buf1, 0, 32); rtsx_stor_get_xfer_buf(buf2, min(12, (int)scsi_bufflen(srb)), srb); for (i = 0; i < 8; i++) { buf1[8+i] = buf2[4+i]; } retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA, 32, WAIT_INT, buf1, 32); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB); TRACE_RET(chip, STATUS_FAIL); } if (check_ms_err(chip)) { set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB); rtsx_clear_ms_error(chip); TRACE_RET(chip, STATUS_FAIL); } return STATUS_SUCCESS; } int mg_get_local_EKB(struct scsi_cmnd *srb, struct rtsx_chip *chip) { int retval = STATUS_FAIL; int bufflen; unsigned int lun = SCSI_LUN(srb); u8 *buf = NULL; RTSX_DEBUGP("--%s--\n", __func__); ms_cleanup_work(chip); retval = ms_switch_clock(chip); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } buf = kmalloc(1540, GFP_KERNEL); if (!buf) { TRACE_RET(chip, STATUS_ERROR); } buf[0] = 0x04; buf[1] = 0x1A; buf[2] = 0x00; buf[3] = 0x00; retval = mg_send_ex_cmd(chip, MG_GET_LEKB, 0); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN); TRACE_GOTO(chip, GetEKBFinish); } retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA, 3, WAIT_INT, 0, 0, buf + 4, 1536); if (retval != STATUS_SUCCESS) { 
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN); rtsx_clear_ms_error(chip); TRACE_GOTO(chip, GetEKBFinish); } if (check_ms_err(chip)) { set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN); rtsx_clear_ms_error(chip); TRACE_RET(chip, STATUS_FAIL); } bufflen = min(1052, (int)scsi_bufflen(srb)); rtsx_stor_set_xfer_buf(buf, bufflen, srb); GetEKBFinish: kfree(buf); return retval; } int mg_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip) { struct ms_info *ms_card = &(chip->ms_card); int retval; int bufflen; int i; unsigned int lun = SCSI_LUN(srb); u8 buf[32]; RTSX_DEBUGP("--%s--\n", __func__); ms_cleanup_work(chip); retval = ms_switch_clock(chip); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } retval = mg_send_ex_cmd(chip, MG_GET_ID, 0); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM); TRACE_RET(chip, STATUS_FAIL); } retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, 32, WAIT_INT, buf, 32); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM); TRACE_RET(chip, STATUS_FAIL); } if (check_ms_err(chip)) { set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM); rtsx_clear_ms_error(chip); TRACE_RET(chip, STATUS_FAIL); } memcpy(ms_card->magic_gate_id, buf, 16); #ifdef READ_BYTES_WAIT_INT retval = ms_poll_int(chip); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM); TRACE_RET(chip, STATUS_FAIL); } #endif retval = mg_send_ex_cmd(chip, MG_SET_RD, 0); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM); TRACE_RET(chip, STATUS_FAIL); } bufflen = min(12, (int)scsi_bufflen(srb)); rtsx_stor_get_xfer_buf(buf, bufflen, srb); for (i = 0; i < 8; i++) { buf[i] = buf[4+i]; } for (i = 0; i < 24; i++) { buf[8+i] = 0; } retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA, 32, WAIT_INT, buf, 32); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, 
SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM); TRACE_RET(chip, STATUS_FAIL); } if (check_ms_err(chip)) { set_sense_type(chip, lun, SENSE_TYPE_MG_INCOMPATIBLE_MEDIUM); rtsx_clear_ms_error(chip); TRACE_RET(chip, STATUS_FAIL); } ms_card->mg_auth = 0; return STATUS_SUCCESS; } int mg_get_rsp_chg(struct scsi_cmnd *srb, struct rtsx_chip *chip) { struct ms_info *ms_card = &(chip->ms_card); int retval; int bufflen; unsigned int lun = SCSI_LUN(srb); u8 buf1[32], buf2[36]; RTSX_DEBUGP("--%s--\n", __func__); ms_cleanup_work(chip); retval = ms_switch_clock(chip); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } retval = mg_send_ex_cmd(chip, MG_MAKE_RMS, 0); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN); TRACE_RET(chip, STATUS_FAIL); } retval = ms_read_bytes(chip, PRO_READ_SHORT_DATA, 32, WAIT_INT, buf1, 32); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN); TRACE_RET(chip, STATUS_FAIL); } if (check_ms_err(chip)) { set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN); rtsx_clear_ms_error(chip); TRACE_RET(chip, STATUS_FAIL); } buf2[0] = 0x00; buf2[1] = 0x22; buf2[2] = 0x00; buf2[3] = 0x00; memcpy(buf2 + 4, ms_card->magic_gate_id, 16); memcpy(buf2 + 20, buf1, 16); bufflen = min(36, (int)scsi_bufflen(srb)); rtsx_stor_set_xfer_buf(buf2, bufflen, srb); #ifdef READ_BYTES_WAIT_INT retval = ms_poll_int(chip); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN); TRACE_RET(chip, STATUS_FAIL); } #endif return STATUS_SUCCESS; } int mg_rsp(struct scsi_cmnd *srb, struct rtsx_chip *chip) { struct ms_info *ms_card = &(chip->ms_card); int retval; int i; int bufflen; unsigned int lun = SCSI_LUN(srb); u8 buf[32]; RTSX_DEBUGP("--%s--\n", __func__); ms_cleanup_work(chip); retval = ms_switch_clock(chip); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } retval = mg_send_ex_cmd(chip, MG_MAKE_KSE, 0); if (retval != STATUS_SUCCESS) { 
set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN); TRACE_RET(chip, STATUS_FAIL); } bufflen = min(12, (int)scsi_bufflen(srb)); rtsx_stor_get_xfer_buf(buf, bufflen, srb); for (i = 0; i < 8; i++) { buf[i] = buf[4+i]; } for (i = 0; i < 24; i++) { buf[8+i] = 0; } retval = ms_write_bytes(chip, PRO_WRITE_SHORT_DATA, 32, WAIT_INT, buf, 32); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN); TRACE_RET(chip, STATUS_FAIL); } if (check_ms_err(chip)) { set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_AUTHEN); rtsx_clear_ms_error(chip); TRACE_RET(chip, STATUS_FAIL); } ms_card->mg_auth = 1; return STATUS_SUCCESS; } int mg_get_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip) { struct ms_info *ms_card = &(chip->ms_card); int retval; int bufflen; unsigned int lun = SCSI_LUN(srb); u8 *buf = NULL; RTSX_DEBUGP("--%s--\n", __func__); ms_cleanup_work(chip); retval = ms_switch_clock(chip); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } buf = kmalloc(1028, GFP_KERNEL); if (!buf) { TRACE_RET(chip, STATUS_ERROR); } buf[0] = 0x04; buf[1] = 0x02; buf[2] = 0x00; buf[3] = 0x00; retval = mg_send_ex_cmd(chip, MG_GET_IBD, ms_card->mg_entry_num); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); TRACE_GOTO(chip, GetICVFinish); } retval = ms_transfer_data(chip, MS_TM_AUTO_READ, PRO_READ_LONG_DATA, 2, WAIT_INT, 0, 0, buf + 4, 1024); if (retval != STATUS_SUCCESS) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); rtsx_clear_ms_error(chip); TRACE_GOTO(chip, GetICVFinish); } if (check_ms_err(chip)) { set_sense_type(chip, lun, SENSE_TYPE_MEDIA_UNRECOVER_READ_ERR); rtsx_clear_ms_error(chip); TRACE_RET(chip, STATUS_FAIL); } bufflen = min(1028, (int)scsi_bufflen(srb)); rtsx_stor_set_xfer_buf(buf, bufflen, srb); GetICVFinish: kfree(buf); return retval; } int mg_set_ICV(struct scsi_cmnd *srb, struct rtsx_chip *chip) { struct ms_info *ms_card = &(chip->ms_card); int retval; 
int bufflen; #ifdef MG_SET_ICV_SLOW int i; #endif unsigned int lun = SCSI_LUN(srb); u8 *buf = NULL; RTSX_DEBUGP("--%s--\n", __func__); ms_cleanup_work(chip); retval = ms_switch_clock(chip); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } buf = kmalloc(1028, GFP_KERNEL); if (!buf) { TRACE_RET(chip, STATUS_ERROR); } bufflen = min(1028, (int)scsi_bufflen(srb)); rtsx_stor_get_xfer_buf(buf, bufflen, srb); retval = mg_send_ex_cmd(chip, MG_SET_IBD, ms_card->mg_entry_num); if (retval != STATUS_SUCCESS) { if (ms_card->mg_auth == 0) { if ((buf[5] & 0xC0) != 0) { set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB); } else { set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR); } } else { set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR); } TRACE_GOTO(chip, SetICVFinish); } #ifdef MG_SET_ICV_SLOW for (i = 0; i < 2; i++) { udelay(50); rtsx_init_cmd(chip); rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TPC, 0xFF, PRO_WRITE_LONG_DATA); rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANS_CFG, 0xFF, WAIT_INT); rtsx_add_cmd(chip, WRITE_REG_CMD, CARD_DATA_SOURCE, 0x01, RING_BUFFER); trans_dma_enable(DMA_TO_DEVICE, chip, 512, DMA_512); rtsx_add_cmd(chip, WRITE_REG_CMD, MS_TRANSFER, 0xFF, MS_TRANSFER_START | MS_TM_NORMAL_WRITE); rtsx_add_cmd(chip, CHECK_REG_CMD, MS_TRANSFER, MS_TRANSFER_END, MS_TRANSFER_END); rtsx_send_cmd_no_wait(chip); retval = rtsx_transfer_data(chip, MS_CARD, buf + 4 + i*512, 512, 0, DMA_TO_DEVICE, 3000); if ((retval < 0) || check_ms_err(chip)) { rtsx_clear_ms_error(chip); if (ms_card->mg_auth == 0) { if ((buf[5] & 0xC0) != 0) { set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB); } else { set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR); } } else { set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR); } retval = STATUS_FAIL; TRACE_GOTO(chip, SetICVFinish); } } #else retval = ms_transfer_data(chip, MS_TM_AUTO_WRITE, PRO_WRITE_LONG_DATA, 2, WAIT_INT, 0, 0, buf + 4, 1024); if ((retval != STATUS_SUCCESS) || check_ms_err(chip) { rtsx_clear_ms_error(chip); 
if (ms_card->mg_auth == 0) { if ((buf[5] & 0xC0) != 0) { set_sense_type(chip, lun, SENSE_TYPE_MG_KEY_FAIL_NOT_ESTAB); } else { set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR); } } else { set_sense_type(chip, lun, SENSE_TYPE_MG_WRITE_ERR); } TRACE_GOTO(chip, SetICVFinish); } #endif SetICVFinish: kfree(buf); return retval; } #endif /* SUPPORT_MAGIC_GATE */ void ms_cleanup_work(struct rtsx_chip *chip) { struct ms_info *ms_card = &(chip->ms_card); if (CHK_MSPRO(ms_card)) { if (ms_card->seq_mode) { RTSX_DEBUGP("MS Pro: stop transmission\n"); mspro_stop_seq_mode(chip); ms_card->cleanup_counter = 0; } if (CHK_MSHG(ms_card)) { rtsx_write_register(chip, MS_CFG, MS_2K_SECTOR_MODE, 0x00); } } #ifdef MS_DELAY_WRITE else if ((!CHK_MSPRO(ms_card)) && ms_card->delay_write.delay_write_flag) { RTSX_DEBUGP("MS: delay write\n"); ms_delay_write(chip); ms_card->cleanup_counter = 0; } #endif } int ms_power_off_card3v3(struct rtsx_chip *chip) { int retval; retval = disable_card_clock(chip, MS_CARD); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } if (chip->asic_code) { retval = ms_pull_ctl_disable(chip); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } } else { RTSX_WRITE_REG(chip, FPGA_PULL_CTL, FPGA_MS_PULL_CTL_BIT | 0x20, FPGA_MS_PULL_CTL_BIT); } RTSX_WRITE_REG(chip, CARD_OE, MS_OUTPUT_EN, 0); if (!chip->ft2_fast_mode) { retval = card_power_off(chip, MS_CARD); if (retval != STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } } return STATUS_SUCCESS; } int release_ms_card(struct rtsx_chip *chip) { struct ms_info *ms_card = &(chip->ms_card); int retval; RTSX_DEBUGP("release_ms_card\n"); #ifdef MS_DELAY_WRITE ms_card->delay_write.delay_write_flag = 0; #endif ms_card->pro_under_formatting = 0; chip->card_ready &= ~MS_CARD; chip->card_fail &= ~MS_CARD; chip->card_wp &= ~MS_CARD; ms_free_l2p_tbl(chip); memset(ms_card->raw_sys_info, 0, 96); #ifdef SUPPORT_PCGL_1P18 memset(ms_card->raw_model_name, 0, 48); #endif retval = ms_power_off_card3v3(chip); if (retval 
!= STATUS_SUCCESS) { TRACE_RET(chip, STATUS_FAIL); } return STATUS_SUCCESS; }
gpl-2.0
kozmikkick/KozmiKKernel
drivers/staging/hv/ring_buffer.c
2384
11763
/* * * Copyright (c) 2009, Microsoft Corporation. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for * more details. * * You should have received a copy of the GNU General Public License along with * this program; if not, write to the Free Software Foundation, Inc., 59 Temple * Place - Suite 330, Boston, MA 02111-1307 USA. * * Authors: * Haiyang Zhang <haiyangz@microsoft.com> * Hank Janssen <hjanssen@microsoft.com> * K. Y. Srinivasan <kys@microsoft.com> * */ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/kernel.h> #include <linux/mm.h> #include "hyperv.h" #include "hyperv_vmbus.h" /* #defines */ /* Amount of space to write to */ #define BYTES_AVAIL_TO_WRITE(r, w, z) ((w) >= (r)) ? 
((z) - ((w) - (r))) : ((r) - (w)) /* * * hv_get_ringbuffer_availbytes() * * Get number of bytes available to read and to write to * for the specified ring buffer */ static inline void hv_get_ringbuffer_availbytes(struct hv_ring_buffer_info *rbi, u32 *read, u32 *write) { u32 read_loc, write_loc; /* Capture the read/write indices before they changed */ read_loc = rbi->ring_buffer->read_index; write_loc = rbi->ring_buffer->write_index; *write = BYTES_AVAIL_TO_WRITE(read_loc, write_loc, rbi->ring_datasize); *read = rbi->ring_datasize - *write; } /* * hv_get_next_write_location() * * Get the next write location for the specified ring buffer * */ static inline u32 hv_get_next_write_location(struct hv_ring_buffer_info *ring_info) { u32 next = ring_info->ring_buffer->write_index; return next; } /* * hv_set_next_write_location() * * Set the next write location for the specified ring buffer * */ static inline void hv_set_next_write_location(struct hv_ring_buffer_info *ring_info, u32 next_write_location) { ring_info->ring_buffer->write_index = next_write_location; } /* * hv_get_next_read_location() * * Get the next read location for the specified ring buffer */ static inline u32 hv_get_next_read_location(struct hv_ring_buffer_info *ring_info) { u32 next = ring_info->ring_buffer->read_index; return next; } /* * hv_get_next_readlocation_withoffset() * * Get the next read location + offset for the specified ring buffer. 
* This allows the caller to skip */ static inline u32 hv_get_next_readlocation_withoffset(struct hv_ring_buffer_info *ring_info, u32 offset) { u32 next = ring_info->ring_buffer->read_index; next += offset; next %= ring_info->ring_datasize; return next; } /* * * hv_set_next_read_location() * * Set the next read location for the specified ring buffer * */ static inline void hv_set_next_read_location(struct hv_ring_buffer_info *ring_info, u32 next_read_location) { ring_info->ring_buffer->read_index = next_read_location; } /* * * hv_get_ring_buffer() * * Get the start of the ring buffer */ static inline void * hv_get_ring_buffer(struct hv_ring_buffer_info *ring_info) { return (void *)ring_info->ring_buffer->buffer; } /* * * hv_get_ring_buffersize() * * Get the size of the ring buffer */ static inline u32 hv_get_ring_buffersize(struct hv_ring_buffer_info *ring_info) { return ring_info->ring_datasize; } /* * * hv_get_ring_bufferindices() * * Get the read and write indices as u64 of the specified ring buffer * */ static inline u64 hv_get_ring_bufferindices(struct hv_ring_buffer_info *ring_info) { return (u64)ring_info->ring_buffer->write_index << 32; } /* * * hv_dump_ring_info() * * Dump out to console the ring buffer info * */ void hv_dump_ring_info(struct hv_ring_buffer_info *ring_info, char *prefix) { u32 bytes_avail_towrite; u32 bytes_avail_toread; hv_get_ringbuffer_availbytes(ring_info, &bytes_avail_toread, &bytes_avail_towrite); DPRINT(VMBUS, DEBUG_RING_LVL, "%s <<ringinfo %p buffer %p avail write %u " "avail read %u read idx %u write idx %u>>", prefix, ring_info, ring_info->ring_buffer->buffer, bytes_avail_towrite, bytes_avail_toread, ring_info->ring_buffer->read_index, ring_info->ring_buffer->write_index); } /* * * hv_copyfrom_ringbuffer() * * Helper routine to copy to source from ring buffer. * Assume there is enough room. Handles wrap-around in src case only!! 
* */ static u32 hv_copyfrom_ringbuffer( struct hv_ring_buffer_info *ring_info, void *dest, u32 destlen, u32 start_read_offset) { void *ring_buffer = hv_get_ring_buffer(ring_info); u32 ring_buffer_size = hv_get_ring_buffersize(ring_info); u32 frag_len; /* wrap-around detected at the src */ if (destlen > ring_buffer_size - start_read_offset) { frag_len = ring_buffer_size - start_read_offset; memcpy(dest, ring_buffer + start_read_offset, frag_len); memcpy(dest + frag_len, ring_buffer, destlen - frag_len); } else memcpy(dest, ring_buffer + start_read_offset, destlen); start_read_offset += destlen; start_read_offset %= ring_buffer_size; return start_read_offset; } /* * * hv_copyto_ringbuffer() * * Helper routine to copy from source to ring buffer. * Assume there is enough room. Handles wrap-around in dest case only!! * */ static u32 hv_copyto_ringbuffer( struct hv_ring_buffer_info *ring_info, u32 start_write_offset, void *src, u32 srclen) { void *ring_buffer = hv_get_ring_buffer(ring_info); u32 ring_buffer_size = hv_get_ring_buffersize(ring_info); u32 frag_len; /* wrap-around detected! 
*/ if (srclen > ring_buffer_size - start_write_offset) { frag_len = ring_buffer_size - start_write_offset; memcpy(ring_buffer + start_write_offset, src, frag_len); memcpy(ring_buffer, src + frag_len, srclen - frag_len); } else memcpy(ring_buffer + start_write_offset, src, srclen); start_write_offset += srclen; start_write_offset %= ring_buffer_size; return start_write_offset; } /* * * hv_ringbuffer_get_debuginfo() * * Get various debug metrics for the specified ring buffer * */ void hv_ringbuffer_get_debuginfo(struct hv_ring_buffer_info *ring_info, struct hv_ring_buffer_debug_info *debug_info) { u32 bytes_avail_towrite; u32 bytes_avail_toread; if (ring_info->ring_buffer) { hv_get_ringbuffer_availbytes(ring_info, &bytes_avail_toread, &bytes_avail_towrite); debug_info->bytes_avail_toread = bytes_avail_toread; debug_info->bytes_avail_towrite = bytes_avail_towrite; debug_info->current_read_index = ring_info->ring_buffer->read_index; debug_info->current_write_index = ring_info->ring_buffer->write_index; debug_info->current_interrupt_mask = ring_info->ring_buffer->interrupt_mask; } } /* * * hv_get_ringbuffer_interrupt_mask() * * Get the interrupt mask for the specified ring buffer * */ u32 hv_get_ringbuffer_interrupt_mask(struct hv_ring_buffer_info *rbi) { return rbi->ring_buffer->interrupt_mask; } /* * * hv_ringbuffer_init() * *Initialize the ring buffer * */ int hv_ringbuffer_init(struct hv_ring_buffer_info *ring_info, void *buffer, u32 buflen) { if (sizeof(struct hv_ring_buffer) != PAGE_SIZE) return -EINVAL; memset(ring_info, 0, sizeof(struct hv_ring_buffer_info)); ring_info->ring_buffer = (struct hv_ring_buffer *)buffer; ring_info->ring_buffer->read_index = ring_info->ring_buffer->write_index = 0; ring_info->ring_size = buflen; ring_info->ring_datasize = buflen - sizeof(struct hv_ring_buffer); spin_lock_init(&ring_info->ring_lock); return 0; } /* * * hv_ringbuffer_cleanup() * * Cleanup the ring buffer * */ void hv_ringbuffer_cleanup(struct hv_ring_buffer_info 
*ring_info) { } /* * * hv_ringbuffer_write() * * Write to the ring buffer * */ int hv_ringbuffer_write(struct hv_ring_buffer_info *outring_info, struct scatterlist *sglist, u32 sgcount) { int i = 0; u32 bytes_avail_towrite; u32 bytes_avail_toread; u32 totalbytes_towrite = 0; struct scatterlist *sg; u32 next_write_location; u64 prev_indices = 0; unsigned long flags; for_each_sg(sglist, sg, sgcount, i) { totalbytes_towrite += sg->length; } totalbytes_towrite += sizeof(u64); spin_lock_irqsave(&outring_info->ring_lock, flags); hv_get_ringbuffer_availbytes(outring_info, &bytes_avail_toread, &bytes_avail_towrite); /* If there is only room for the packet, assume it is full. */ /* Otherwise, the next time around, we think the ring buffer */ /* is empty since the read index == write index */ if (bytes_avail_towrite <= totalbytes_towrite) { spin_unlock_irqrestore(&outring_info->ring_lock, flags); return -1; } /* Write to the ring buffer */ next_write_location = hv_get_next_write_location(outring_info); for_each_sg(sglist, sg, sgcount, i) { next_write_location = hv_copyto_ringbuffer(outring_info, next_write_location, sg_virt(sg), sg->length); } /* Set previous packet start */ prev_indices = hv_get_ring_bufferindices(outring_info); next_write_location = hv_copyto_ringbuffer(outring_info, next_write_location, &prev_indices, sizeof(u64)); /* Make sure we flush all writes before updating the writeIndex */ mb(); /* Now, update the write location */ hv_set_next_write_location(outring_info, next_write_location); spin_unlock_irqrestore(&outring_info->ring_lock, flags); return 0; } /* * * hv_ringbuffer_peek() * * Read without advancing the read index * */ int hv_ringbuffer_peek(struct hv_ring_buffer_info *Inring_info, void *Buffer, u32 buflen) { u32 bytes_avail_towrite; u32 bytes_avail_toread; u32 next_read_location = 0; unsigned long flags; spin_lock_irqsave(&Inring_info->ring_lock, flags); hv_get_ringbuffer_availbytes(Inring_info, &bytes_avail_toread, &bytes_avail_towrite); /* Make 
sure there is something to read */ if (bytes_avail_toread < buflen) { spin_unlock_irqrestore(&Inring_info->ring_lock, flags); return -1; } /* Convert to byte offset */ next_read_location = hv_get_next_read_location(Inring_info); next_read_location = hv_copyfrom_ringbuffer(Inring_info, Buffer, buflen, next_read_location); spin_unlock_irqrestore(&Inring_info->ring_lock, flags); return 0; } /* * * hv_ringbuffer_read() * * Read and advance the read index * */ int hv_ringbuffer_read(struct hv_ring_buffer_info *inring_info, void *buffer, u32 buflen, u32 offset) { u32 bytes_avail_towrite; u32 bytes_avail_toread; u32 next_read_location = 0; u64 prev_indices = 0; unsigned long flags; if (buflen <= 0) return -EINVAL; spin_lock_irqsave(&inring_info->ring_lock, flags); hv_get_ringbuffer_availbytes(inring_info, &bytes_avail_toread, &bytes_avail_towrite); /* Make sure there is something to read */ if (bytes_avail_toread < buflen) { spin_unlock_irqrestore(&inring_info->ring_lock, flags); return -1; } next_read_location = hv_get_next_readlocation_withoffset(inring_info, offset); next_read_location = hv_copyfrom_ringbuffer(inring_info, buffer, buflen, next_read_location); next_read_location = hv_copyfrom_ringbuffer(inring_info, &prev_indices, sizeof(u64), next_read_location); /* Make sure all reads are done before we update the read index since */ /* the writer may start writing to the read area once the read index */ /*is updated */ mb(); /* Update the read index */ hv_set_next_read_location(inring_info, next_read_location); spin_unlock_irqrestore(&inring_info->ring_lock, flags); return 0; }
gpl-2.0
TeamRegular/android_kernel_samsung_codinatmo
arch/microblaze/kernel/reset.c
3152
2849
/* * Copyright (C) 2009 Michal Simek <monstr@monstr.eu> * Copyright (C) 2009 PetaLogix * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/init.h> #include <linux/of_platform.h> #include <asm/prom.h> /* Trigger specific functions */ #ifdef CONFIG_GPIOLIB #include <linux/of_gpio.h> static int handle; /* reset pin handle */ static unsigned int reset_val; static int of_reset_gpio_handle(void) { int ret; /* variable which stored handle reset gpio pin */ struct device_node *root; /* root node */ struct device_node *gpio; /* gpio node */ struct gpio_chip *gc; u32 flags; const void *gpio_spec; /* find out root node */ root = of_find_node_by_path("/"); /* give me handle for gpio node to be possible allocate pin */ ret = of_parse_phandles_with_args(root, "hard-reset-gpios", "#gpio-cells", 0, &gpio, &gpio_spec); if (ret) { pr_debug("%s: can't parse gpios property\n", __func__); goto err0; } gc = of_node_to_gpiochip(gpio); if (!gc) { pr_debug("%s: gpio controller %s isn't registered\n", root->full_name, gpio->full_name); ret = -ENODEV; goto err1; } ret = gc->of_xlate(gc, root, gpio_spec, &flags); if (ret < 0) goto err1; ret += gc->base; err1: of_node_put(gpio); err0: pr_debug("%s exited with status %d\n", __func__, ret); return ret; } void of_platform_reset_gpio_probe(void) { int ret; handle = of_reset_gpio_handle(); if (!gpio_is_valid(handle)) { printk(KERN_INFO "Skipping unavailable RESET gpio %d (%s)\n", handle, "reset"); } ret = gpio_request(handle, "reset"); if (ret < 0) { printk(KERN_INFO "GPIO pin is already allocated\n"); return; } /* get current setup value */ reset_val = gpio_get_value(handle); /* FIXME maybe worth to perform any action */ pr_debug("Reset: Gpio output state: 0x%x\n", reset_val); /* Setup GPIO as output */ ret = gpio_direction_output(handle, 0); if (ret < 0) goto err; /* Setup output direction */ 
gpio_set_value(handle, 0); printk(KERN_INFO "RESET: Registered gpio device: %d, current val: %d\n", handle, reset_val); return; err: gpio_free(handle); return; } static void gpio_system_reset(void) { gpio_set_value(handle, 1 - reset_val); } #else #define gpio_system_reset() do {} while (0) void of_platform_reset_gpio_probe(void) { return; } #endif void machine_restart(char *cmd) { printk(KERN_NOTICE "Machine restart...\n"); gpio_system_reset(); dump_stack(); while (1) ; } void machine_shutdown(void) { printk(KERN_NOTICE "Machine shutdown...\n"); while (1) ; } void machine_halt(void) { printk(KERN_NOTICE "Machine halt...\n"); while (1) ; } void machine_power_off(void) { printk(KERN_NOTICE "Machine power off...\n"); while (1) ; }
gpl-2.0
nunogia/Z7Max_NX505J_H129_kernel
arch/powerpc/platforms/pseries/dtl.c
4688
8682
/* * Virtual Processor Dispatch Trace Log * * (C) Copyright IBM Corporation 2009 * * Author: Jeremy Kerr <jk@ozlabs.org> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/slab.h> #include <linux/debugfs.h> #include <linux/spinlock.h> #include <asm/smp.h> #include <asm/uaccess.h> #include <asm/firmware.h> #include <asm/lppaca.h> #include <asm/debug.h> #include "plpar_wrappers.h" struct dtl { struct dtl_entry *buf; struct dentry *file; int cpu; int buf_entries; u64 last_idx; spinlock_t lock; }; static DEFINE_PER_CPU(struct dtl, cpu_dtl); /* * Dispatch trace log event mask: * 0x7: 0x1: voluntary virtual processor waits * 0x2: time-slice preempts * 0x4: virtual partition memory page faults */ static u8 dtl_event_mask = 0x7; /* * Size of per-cpu log buffers. Firmware requires that the buffer does * not cross a 4k boundary. */ static int dtl_buf_entries = N_DISPATCH_LOG; #ifdef CONFIG_VIRT_CPU_ACCOUNTING struct dtl_ring { u64 write_index; struct dtl_entry *write_ptr; struct dtl_entry *buf; struct dtl_entry *buf_end; u8 saved_dtl_mask; }; static DEFINE_PER_CPU(struct dtl_ring, dtl_rings); static atomic_t dtl_count; /* * The cpu accounting code controls the DTL ring buffer, and we get * given entries as they are processed. 
*/ static void consume_dtle(struct dtl_entry *dtle, u64 index) { struct dtl_ring *dtlr = &__get_cpu_var(dtl_rings); struct dtl_entry *wp = dtlr->write_ptr; struct lppaca *vpa = local_paca->lppaca_ptr; if (!wp) return; *wp = *dtle; barrier(); /* check for hypervisor ring buffer overflow, ignore this entry if so */ if (index + N_DISPATCH_LOG < vpa->dtl_idx) return; ++wp; if (wp == dtlr->buf_end) wp = dtlr->buf; dtlr->write_ptr = wp; /* incrementing write_index makes the new entry visible */ smp_wmb(); ++dtlr->write_index; } static int dtl_start(struct dtl *dtl) { struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu); dtlr->buf = dtl->buf; dtlr->buf_end = dtl->buf + dtl->buf_entries; dtlr->write_index = 0; /* setting write_ptr enables logging into our buffer */ smp_wmb(); dtlr->write_ptr = dtl->buf; /* enable event logging */ dtlr->saved_dtl_mask = lppaca_of(dtl->cpu).dtl_enable_mask; lppaca_of(dtl->cpu).dtl_enable_mask |= dtl_event_mask; dtl_consumer = consume_dtle; atomic_inc(&dtl_count); return 0; } static void dtl_stop(struct dtl *dtl) { struct dtl_ring *dtlr = &per_cpu(dtl_rings, dtl->cpu); dtlr->write_ptr = NULL; smp_wmb(); dtlr->buf = NULL; /* restore dtl_enable_mask */ lppaca_of(dtl->cpu).dtl_enable_mask = dtlr->saved_dtl_mask; if (atomic_dec_and_test(&dtl_count)) dtl_consumer = NULL; } static u64 dtl_current_index(struct dtl *dtl) { return per_cpu(dtl_rings, dtl->cpu).write_index; } #else /* CONFIG_VIRT_CPU_ACCOUNTING */ static int dtl_start(struct dtl *dtl) { unsigned long addr; int ret, hwcpu; /* Register our dtl buffer with the hypervisor. 
The HV expects the * buffer size to be passed in the second word of the buffer */ ((u32 *)dtl->buf)[1] = DISPATCH_LOG_BYTES; hwcpu = get_hard_smp_processor_id(dtl->cpu); addr = __pa(dtl->buf); ret = register_dtl(hwcpu, addr); if (ret) { printk(KERN_WARNING "%s: DTL registration for cpu %d (hw %d) " "failed with %d\n", __func__, dtl->cpu, hwcpu, ret); return -EIO; } /* set our initial buffer indices */ lppaca_of(dtl->cpu).dtl_idx = 0; /* ensure that our updates to the lppaca fields have occurred before * we actually enable the logging */ smp_wmb(); /* enable event logging */ lppaca_of(dtl->cpu).dtl_enable_mask = dtl_event_mask; return 0; } static void dtl_stop(struct dtl *dtl) { int hwcpu = get_hard_smp_processor_id(dtl->cpu); lppaca_of(dtl->cpu).dtl_enable_mask = 0x0; unregister_dtl(hwcpu); } static u64 dtl_current_index(struct dtl *dtl) { return lppaca_of(dtl->cpu).dtl_idx; } #endif /* CONFIG_VIRT_CPU_ACCOUNTING */ static int dtl_enable(struct dtl *dtl) { long int n_entries; long int rc; struct dtl_entry *buf = NULL; if (!dtl_cache) return -ENOMEM; /* only allow one reader */ if (dtl->buf) return -EBUSY; n_entries = dtl_buf_entries; buf = kmem_cache_alloc_node(dtl_cache, GFP_KERNEL, cpu_to_node(dtl->cpu)); if (!buf) { printk(KERN_WARNING "%s: buffer alloc failed for cpu %d\n", __func__, dtl->cpu); return -ENOMEM; } spin_lock(&dtl->lock); rc = -EBUSY; if (!dtl->buf) { /* store the original allocation size for use during read */ dtl->buf_entries = n_entries; dtl->buf = buf; dtl->last_idx = 0; rc = dtl_start(dtl); if (rc) dtl->buf = NULL; } spin_unlock(&dtl->lock); if (rc) kmem_cache_free(dtl_cache, buf); return rc; } static void dtl_disable(struct dtl *dtl) { spin_lock(&dtl->lock); dtl_stop(dtl); kmem_cache_free(dtl_cache, dtl->buf); dtl->buf = NULL; dtl->buf_entries = 0; spin_unlock(&dtl->lock); } /* file interface */ static int dtl_file_open(struct inode *inode, struct file *filp) { struct dtl *dtl = inode->i_private; int rc; rc = dtl_enable(dtl); if (rc) return 
rc; filp->private_data = dtl; return 0; } static int dtl_file_release(struct inode *inode, struct file *filp) { struct dtl *dtl = inode->i_private; dtl_disable(dtl); return 0; } static ssize_t dtl_file_read(struct file *filp, char __user *buf, size_t len, loff_t *pos) { long int rc, n_read, n_req, read_size; struct dtl *dtl; u64 cur_idx, last_idx, i; if ((len % sizeof(struct dtl_entry)) != 0) return -EINVAL; dtl = filp->private_data; /* requested number of entries to read */ n_req = len / sizeof(struct dtl_entry); /* actual number of entries read */ n_read = 0; spin_lock(&dtl->lock); cur_idx = dtl_current_index(dtl); last_idx = dtl->last_idx; if (last_idx + dtl->buf_entries <= cur_idx) last_idx = cur_idx - dtl->buf_entries + 1; if (last_idx + n_req > cur_idx) n_req = cur_idx - last_idx; if (n_req > 0) dtl->last_idx = last_idx + n_req; spin_unlock(&dtl->lock); if (n_req <= 0) return 0; i = last_idx % dtl->buf_entries; /* read the tail of the buffer if we've wrapped */ if (i + n_req > dtl->buf_entries) { read_size = dtl->buf_entries - i; rc = copy_to_user(buf, &dtl->buf[i], read_size * sizeof(struct dtl_entry)); if (rc) return -EFAULT; i = 0; n_req -= read_size; n_read += read_size; buf += read_size * sizeof(struct dtl_entry); } /* .. 
and now the head */ rc = copy_to_user(buf, &dtl->buf[i], n_req * sizeof(struct dtl_entry)); if (rc) return -EFAULT; n_read += n_req; return n_read * sizeof(struct dtl_entry); } static const struct file_operations dtl_fops = { .open = dtl_file_open, .release = dtl_file_release, .read = dtl_file_read, .llseek = no_llseek, }; static struct dentry *dtl_dir; static int dtl_setup_file(struct dtl *dtl) { char name[10]; sprintf(name, "cpu-%d", dtl->cpu); dtl->file = debugfs_create_file(name, 0400, dtl_dir, dtl, &dtl_fops); if (!dtl->file) return -ENOMEM; return 0; } static int dtl_init(void) { struct dentry *event_mask_file, *buf_entries_file; int rc, i; if (!firmware_has_feature(FW_FEATURE_SPLPAR)) return -ENODEV; /* set up common debugfs structure */ rc = -ENOMEM; dtl_dir = debugfs_create_dir("dtl", powerpc_debugfs_root); if (!dtl_dir) { printk(KERN_WARNING "%s: can't create dtl root dir\n", __func__); goto err; } event_mask_file = debugfs_create_x8("dtl_event_mask", 0600, dtl_dir, &dtl_event_mask); buf_entries_file = debugfs_create_u32("dtl_buf_entries", 0400, dtl_dir, &dtl_buf_entries); if (!event_mask_file || !buf_entries_file) { printk(KERN_WARNING "%s: can't create dtl files\n", __func__); goto err_remove_dir; } /* set up the per-cpu log structures */ for_each_possible_cpu(i) { struct dtl *dtl = &per_cpu(cpu_dtl, i); spin_lock_init(&dtl->lock); dtl->cpu = i; rc = dtl_setup_file(dtl); if (rc) goto err_remove_dir; } return 0; err_remove_dir: debugfs_remove_recursive(dtl_dir); err: return rc; } arch_initcall(dtl_init);
gpl-2.0
xdajog/kernel_fx3q_aosp
arch/powerpc/platforms/cell/spu_callbacks.c
4944
2181
/* * System call callback functions for SPUs */ #undef DEBUG #include <linux/kallsyms.h> #include <linux/export.h> #include <linux/syscalls.h> #include <asm/spu.h> #include <asm/syscalls.h> #include <asm/unistd.h> /* * This table defines the system calls that an SPU can call. * It is currently a subset of the 64 bit powerpc system calls, * with the exact semantics. * * The reasons for disabling some of the system calls are: * 1. They interact with the way SPU syscalls are handled * and we can't let them execute ever: * restart_syscall, exit, for, execve, ptrace, ... * 2. They are deprecated and replaced by other means: * uselib, pciconfig_*, sysfs, ... * 3. They are somewhat interacting with the system in a way * we don't want an SPU to: * reboot, init_module, mount, kexec_load * 4. They are optional and we can't rely on them being * linked into the kernel. Unfortunately, the cond_syscall * helper does not work here as it does not add the necessary * opd symbols: * mbind, mq_open, ipc, ... 
*/ static void *spu_syscall_table[] = { #define SYSCALL(func) sys_ni_syscall, #define COMPAT_SYS(func) sys_ni_syscall, #define PPC_SYS(func) sys_ni_syscall, #define OLDSYS(func) sys_ni_syscall, #define SYS32ONLY(func) sys_ni_syscall, #define SYSX(f, f3264, f32) sys_ni_syscall, #define SYSCALL_SPU(func) sys_##func, #define COMPAT_SYS_SPU(func) sys_##func, #define PPC_SYS_SPU(func) ppc_##func, #define SYSX_SPU(f, f3264, f32) f, #include <asm/systbl.h> }; long spu_sys_callback(struct spu_syscall_block *s) { long (*syscall)(u64 a1, u64 a2, u64 a3, u64 a4, u64 a5, u64 a6); if (s->nr_ret >= ARRAY_SIZE(spu_syscall_table)) { pr_debug("%s: invalid syscall #%lld", __func__, s->nr_ret); return -ENOSYS; } syscall = spu_syscall_table[s->nr_ret]; #ifdef DEBUG print_symbol(KERN_DEBUG "SPU-syscall %s:", (unsigned long)syscall); printk("syscall%ld(%lx, %lx, %lx, %lx, %lx, %lx)\n", s->nr_ret, s->parm[0], s->parm[1], s->parm[2], s->parm[3], s->parm[4], s->parm[5]); #endif return syscall(s->parm[0], s->parm[1], s->parm[2], s->parm[3], s->parm[4], s->parm[5]); } EXPORT_SYMBOL_GPL(spu_sys_callback);
gpl-2.0
walter79/android_kernel_sony_tsubasa
arch/mips/mipssim/sim_setup.c
5200
2178
/* * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved. * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * */ #include <linux/init.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/io.h> #include <linux/irq.h> #include <linux/ioport.h> #include <linux/tty.h> #include <linux/serial.h> #include <linux/serial_core.h> #include <linux/serial_8250.h> #include <asm/cpu.h> #include <asm/bootinfo.h> #include <asm/mips-boards/generic.h> #include <asm/mips-boards/prom.h> #include <asm/time.h> #include <asm/mips-boards/sim.h> #include <asm/mips-boards/simint.h> #include <asm/smp-ops.h> static void __init serial_init(void); unsigned int _isbonito; const char *get_system_type(void) { return "MIPSsim"; } void __init plat_mem_setup(void) { set_io_port_base(0xbfd00000); serial_init(); } extern struct plat_smp_ops ssmtc_smp_ops; void __init prom_init(void) { set_io_port_base(0xbfd00000); prom_meminit(); if (cpu_has_mipsmt) { if (!register_vsmp_smp_ops()) return; #ifdef CONFIG_MIPS_MT_SMTC register_smp_ops(&ssmtc_smp_ops); return; #endif } register_up_smp_ops(); } static void __init serial_init(void) { #ifdef CONFIG_SERIAL_8250 struct uart_port s; memset(&s, 0, sizeof(s)); s.iobase = 0x3f8; /* hardware int 4 - the serial int, is CPU int 6 but poll for now */ s.irq = 0; s.uartclk = 1843200; s.flags = UPF_BOOT_AUTOCONF | UPF_SKIP_TEST; s.iotype = UPIO_PORT; s.regshift = 0; 
s.timeout = 4; if (early_serial_setup(&s) != 0) { printk(KERN_ERR "Serial setup failed!\n"); } #endif }
gpl-2.0
kirilllavrov/android_kernel_huawei_s10101l
scripts/mod/sumversion.c
10320
12227
#include <netinet/in.h> #ifdef __sun__ #include <inttypes.h> #else #include <stdint.h> #endif #include <ctype.h> #include <errno.h> #include <string.h> #include <limits.h> #include "modpost.h" /* * Stolen form Cryptographic API. * * MD4 Message Digest Algorithm (RFC1320). * * Implementation derived from Andrew Tridgell and Steve French's * CIFS MD4 implementation, and the cryptoapi implementation * originally based on the public domain implementation written * by Colin Plumb in 1993. * * Copyright (c) Andrew Tridgell 1997-1998. * Modified by Steve French (sfrench@us.ibm.com) 2002 * Copyright (c) Cryptoapi developers. * Copyright (c) 2002 David S. Miller (davem@redhat.com) * Copyright (c) 2002 James Morris <jmorris@intercode.com.au> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #define MD4_DIGEST_SIZE 16 #define MD4_HMAC_BLOCK_SIZE 64 #define MD4_BLOCK_WORDS 16 #define MD4_HASH_WORDS 4 struct md4_ctx { uint32_t hash[MD4_HASH_WORDS]; uint32_t block[MD4_BLOCK_WORDS]; uint64_t byte_count; }; static inline uint32_t lshift(uint32_t x, unsigned int s) { x &= 0xFFFFFFFF; return ((x << s) & 0xFFFFFFFF) | (x >> (32 - s)); } static inline uint32_t F(uint32_t x, uint32_t y, uint32_t z) { return (x & y) | ((~x) & z); } static inline uint32_t G(uint32_t x, uint32_t y, uint32_t z) { return (x & y) | (x & z) | (y & z); } static inline uint32_t H(uint32_t x, uint32_t y, uint32_t z) { return x ^ y ^ z; } #define ROUND1(a,b,c,d,k,s) (a = lshift(a + F(b,c,d) + k, s)) #define ROUND2(a,b,c,d,k,s) (a = lshift(a + G(b,c,d) + k + (uint32_t)0x5A827999,s)) #define ROUND3(a,b,c,d,k,s) (a = lshift(a + H(b,c,d) + k + (uint32_t)0x6ED9EBA1,s)) /* XXX: this stuff can be optimized */ static inline void le32_to_cpu_array(uint32_t *buf, unsigned int words) { while (words--) { *buf = ntohl(*buf); 
buf++; } } static inline void cpu_to_le32_array(uint32_t *buf, unsigned int words) { while (words--) { *buf = htonl(*buf); buf++; } } static void md4_transform(uint32_t *hash, uint32_t const *in) { uint32_t a, b, c, d; a = hash[0]; b = hash[1]; c = hash[2]; d = hash[3]; ROUND1(a, b, c, d, in[0], 3); ROUND1(d, a, b, c, in[1], 7); ROUND1(c, d, a, b, in[2], 11); ROUND1(b, c, d, a, in[3], 19); ROUND1(a, b, c, d, in[4], 3); ROUND1(d, a, b, c, in[5], 7); ROUND1(c, d, a, b, in[6], 11); ROUND1(b, c, d, a, in[7], 19); ROUND1(a, b, c, d, in[8], 3); ROUND1(d, a, b, c, in[9], 7); ROUND1(c, d, a, b, in[10], 11); ROUND1(b, c, d, a, in[11], 19); ROUND1(a, b, c, d, in[12], 3); ROUND1(d, a, b, c, in[13], 7); ROUND1(c, d, a, b, in[14], 11); ROUND1(b, c, d, a, in[15], 19); ROUND2(a, b, c, d,in[ 0], 3); ROUND2(d, a, b, c, in[4], 5); ROUND2(c, d, a, b, in[8], 9); ROUND2(b, c, d, a, in[12], 13); ROUND2(a, b, c, d, in[1], 3); ROUND2(d, a, b, c, in[5], 5); ROUND2(c, d, a, b, in[9], 9); ROUND2(b, c, d, a, in[13], 13); ROUND2(a, b, c, d, in[2], 3); ROUND2(d, a, b, c, in[6], 5); ROUND2(c, d, a, b, in[10], 9); ROUND2(b, c, d, a, in[14], 13); ROUND2(a, b, c, d, in[3], 3); ROUND2(d, a, b, c, in[7], 5); ROUND2(c, d, a, b, in[11], 9); ROUND2(b, c, d, a, in[15], 13); ROUND3(a, b, c, d,in[ 0], 3); ROUND3(d, a, b, c, in[8], 9); ROUND3(c, d, a, b, in[4], 11); ROUND3(b, c, d, a, in[12], 15); ROUND3(a, b, c, d, in[2], 3); ROUND3(d, a, b, c, in[10], 9); ROUND3(c, d, a, b, in[6], 11); ROUND3(b, c, d, a, in[14], 15); ROUND3(a, b, c, d, in[1], 3); ROUND3(d, a, b, c, in[9], 9); ROUND3(c, d, a, b, in[5], 11); ROUND3(b, c, d, a, in[13], 15); ROUND3(a, b, c, d, in[3], 3); ROUND3(d, a, b, c, in[11], 9); ROUND3(c, d, a, b, in[7], 11); ROUND3(b, c, d, a, in[15], 15); hash[0] += a; hash[1] += b; hash[2] += c; hash[3] += d; } static inline void md4_transform_helper(struct md4_ctx *ctx) { le32_to_cpu_array(ctx->block, sizeof(ctx->block) / sizeof(uint32_t)); md4_transform(ctx->hash, ctx->block); } static void 
md4_init(struct md4_ctx *mctx) { mctx->hash[0] = 0x67452301; mctx->hash[1] = 0xefcdab89; mctx->hash[2] = 0x98badcfe; mctx->hash[3] = 0x10325476; mctx->byte_count = 0; } static void md4_update(struct md4_ctx *mctx, const unsigned char *data, unsigned int len) { const uint32_t avail = sizeof(mctx->block) - (mctx->byte_count & 0x3f); mctx->byte_count += len; if (avail > len) { memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data, len); return; } memcpy((char *)mctx->block + (sizeof(mctx->block) - avail), data, avail); md4_transform_helper(mctx); data += avail; len -= avail; while (len >= sizeof(mctx->block)) { memcpy(mctx->block, data, sizeof(mctx->block)); md4_transform_helper(mctx); data += sizeof(mctx->block); len -= sizeof(mctx->block); } memcpy(mctx->block, data, len); } static void md4_final_ascii(struct md4_ctx *mctx, char *out, unsigned int len) { const unsigned int offset = mctx->byte_count & 0x3f; char *p = (char *)mctx->block + offset; int padding = 56 - (offset + 1); *p++ = 0x80; if (padding < 0) { memset(p, 0x00, padding + sizeof (uint64_t)); md4_transform_helper(mctx); p = (char *)mctx->block; padding = 56; } memset(p, 0, padding); mctx->block[14] = mctx->byte_count << 3; mctx->block[15] = mctx->byte_count >> 29; le32_to_cpu_array(mctx->block, (sizeof(mctx->block) - sizeof(uint64_t)) / sizeof(uint32_t)); md4_transform(mctx->hash, mctx->block); cpu_to_le32_array(mctx->hash, sizeof(mctx->hash) / sizeof(uint32_t)); snprintf(out, len, "%08X%08X%08X%08X", mctx->hash[0], mctx->hash[1], mctx->hash[2], mctx->hash[3]); } static inline void add_char(unsigned char c, struct md4_ctx *md) { md4_update(md, &c, 1); } static int parse_string(const char *file, unsigned long len, struct md4_ctx *md) { unsigned long i; add_char(file[0], md); for (i = 1; i < len; i++) { add_char(file[i], md); if (file[i] == '"' && file[i-1] != '\\') break; } return i; } static int parse_comment(const char *file, unsigned long len) { unsigned long i; for (i = 2; i < len; i++) { 
if (file[i-1] == '*' && file[i] == '/') break; } return i; } /* FIXME: Handle .s files differently (eg. # starts comments) --RR */ static int parse_file(const char *fname, struct md4_ctx *md) { char *file; unsigned long i, len; file = grab_file(fname, &len); if (!file) return 0; for (i = 0; i < len; i++) { /* Collapse and ignore \ and CR. */ if (file[i] == '\\' && (i+1 < len) && file[i+1] == '\n') { i++; continue; } /* Ignore whitespace */ if (isspace(file[i])) continue; /* Handle strings as whole units */ if (file[i] == '"') { i += parse_string(file+i, len - i, md); continue; } /* Comments: ignore */ if (file[i] == '/' && file[i+1] == '*') { i += parse_comment(file+i, len - i); continue; } add_char(file[i], md); } release_file(file, len); return 1; } /* Check whether the file is a static library or not */ static int is_static_library(const char *objfile) { int len = strlen(objfile); if (objfile[len - 2] == '.' && objfile[len - 1] == 'a') return 1; else return 0; } /* We have dir/file.o. Open dir/.file.o.cmd, look for source_ and deps_ line * to figure out source files. */ static int parse_source_files(const char *objfile, struct md4_ctx *md) { char *cmd, *file, *line, *dir; const char *base; unsigned long flen, pos = 0; int dirlen, ret = 0, check_files = 0; cmd = NOFAIL(malloc(strlen(objfile) + sizeof("..cmd"))); base = strrchr(objfile, '/'); if (base) { base++; dirlen = base - objfile; sprintf(cmd, "%.*s.%s.cmd", dirlen, objfile, base); } else { dirlen = 0; sprintf(cmd, ".%s.cmd", objfile); } dir = NOFAIL(malloc(dirlen + 1)); strncpy(dir, objfile, dirlen); dir[dirlen] = '\0'; file = grab_file(cmd, &flen); if (!file) { warn("could not find %s for %s\n", cmd, objfile); goto out; } /* There will be a line like so: deps_drivers/net/dummy.o := \ drivers/net/dummy.c \ $(wildcard include/config/net/fastroute.h) \ include/linux/module.h \ Sum all files in the same dir or subdirs. 
*/ while ((line = get_next_line(&pos, file, flen)) != NULL) { char* p = line; if (strncmp(line, "source_", sizeof("source_")-1) == 0) { p = strrchr(line, ' '); if (!p) { warn("malformed line: %s\n", line); goto out_file; } p++; if (!parse_file(p, md)) { warn("could not open %s: %s\n", p, strerror(errno)); goto out_file; } continue; } if (strncmp(line, "deps_", sizeof("deps_")-1) == 0) { check_files = 1; continue; } if (!check_files) continue; /* Continue until line does not end with '\' */ if ( *(p + strlen(p)-1) != '\\') break; /* Terminate line at first space, to get rid of final ' \' */ while (*p) { if (isspace(*p)) { *p = '\0'; break; } p++; } /* Check if this file is in same dir as objfile */ if ((strstr(line, dir)+strlen(dir)-1) == strrchr(line, '/')) { if (!parse_file(line, md)) { warn("could not open %s: %s\n", line, strerror(errno)); goto out_file; } } } /* Everyone parsed OK */ ret = 1; out_file: release_file(file, flen); out: free(dir); free(cmd); return ret; } /* Calc and record src checksum. */ void get_src_version(const char *modname, char sum[], unsigned sumlen) { void *file; unsigned long len; struct md4_ctx md; char *sources, *end, *fname; const char *basename; char filelist[PATH_MAX + 1]; char *modverdir = getenv("MODVERDIR"); if (!modverdir) modverdir = "."; /* Source files for module are in .tmp_versions/modname.mod, after the first line. 
*/ if (strrchr(modname, '/')) basename = strrchr(modname, '/') + 1; else basename = modname; sprintf(filelist, "%s/%.*s.mod", modverdir, (int) strlen(basename) - 2, basename); file = grab_file(filelist, &len); if (!file) /* not a module or .mod file missing - ignore */ return; sources = strchr(file, '\n'); if (!sources) { warn("malformed versions file for %s\n", modname); goto release; } sources++; end = strchr(sources, '\n'); if (!end) { warn("bad ending versions file for %s\n", modname); goto release; } *end = '\0'; md4_init(&md); while ((fname = strsep(&sources, " ")) != NULL) { if (!*fname) continue; if (!(is_static_library(fname)) && !parse_source_files(fname, &md)) goto release; } md4_final_ascii(&md, sum, sumlen); release: release_file(file, len); } static void write_version(const char *filename, const char *sum, unsigned long offset) { int fd; fd = open(filename, O_RDWR); if (fd < 0) { warn("changing sum in %s failed: %s\n", filename, strerror(errno)); return; } if (lseek(fd, offset, SEEK_SET) == (off_t)-1) { warn("changing sum in %s:%lu failed: %s\n", filename, offset, strerror(errno)); goto out; } if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) { warn("writing sum in %s failed: %s\n", filename, strerror(errno)); goto out; } out: close(fd); } static int strip_rcs_crap(char *version) { unsigned int len, full_len; if (strncmp(version, "$Revision", strlen("$Revision")) != 0) return 0; /* Space for version string follows. */ full_len = strlen(version) + strlen(version + strlen(version) + 1) + 2; /* Move string to start with version number: prefix will be * $Revision$ or $Revision: */ len = strlen("$Revision"); if (version[len] == ':' || version[len] == '$') len++; while (isspace(version[len])) len++; memmove(version, version+len, full_len-len); full_len -= len; /* Preserve up to next whitespace. 
*/ len = 0; while (version[len] && !isspace(version[len])) len++; memmove(version + len, version + strlen(version), full_len - strlen(version)); return 1; } /* Clean up RCS-style version numbers. */ void maybe_frob_rcs_version(const char *modfilename, char *version, void *modinfo, unsigned long version_offset) { if (strip_rcs_crap(version)) write_version(modfilename, version, version_offset); }
gpl-2.0
Nicklas373/AoiCore-Kernel-MSM8627-CM13
scripts/kconfig/kxgettext.c
10576
4196
/* * Arnaldo Carvalho de Melo <acme@conectiva.com.br>, 2005 * * Released under the terms of the GNU GPL v2.0 */ #include <stdlib.h> #include <string.h> #include "lkc.h" static char *escape(const char* text, char *bf, int len) { char *bfp = bf; int multiline = strchr(text, '\n') != NULL; int eol = 0; int textlen = strlen(text); if ((textlen > 0) && (text[textlen-1] == '\n')) eol = 1; *bfp++ = '"'; --len; if (multiline) { *bfp++ = '"'; *bfp++ = '\n'; *bfp++ = '"'; len -= 3; } while (*text != '\0' && len > 1) { if (*text == '"') *bfp++ = '\\'; else if (*text == '\n') { *bfp++ = '\\'; *bfp++ = 'n'; *bfp++ = '"'; *bfp++ = '\n'; *bfp++ = '"'; len -= 5; ++text; goto next; } else if (*text == '\\') { *bfp++ = '\\'; len--; } *bfp++ = *text++; next: --len; } if (multiline && eol) bfp -= 3; *bfp++ = '"'; *bfp = '\0'; return bf; } struct file_line { struct file_line *next; const char *file; int lineno; }; static struct file_line *file_line__new(const char *file, int lineno) { struct file_line *self = malloc(sizeof(*self)); if (self == NULL) goto out; self->file = file; self->lineno = lineno; self->next = NULL; out: return self; } struct message { const char *msg; const char *option; struct message *next; struct file_line *files; }; static struct message *message__list; static struct message *message__new(const char *msg, char *option, const char *file, int lineno) { struct message *self = malloc(sizeof(*self)); if (self == NULL) goto out; self->files = file_line__new(file, lineno); if (self->files == NULL) goto out_fail; self->msg = strdup(msg); if (self->msg == NULL) goto out_fail_msg; self->option = option; self->next = NULL; out: return self; out_fail_msg: free(self->files); out_fail: free(self); self = NULL; goto out; } static struct message *mesage__find(const char *msg) { struct message *m = message__list; while (m != NULL) { if (strcmp(m->msg, msg) == 0) break; m = m->next; } return m; } static int message__add_file_line(struct message *self, const char *file, int 
lineno) { int rc = -1; struct file_line *fl = file_line__new(file, lineno); if (fl == NULL) goto out; fl->next = self->files; self->files = fl; rc = 0; out: return rc; } static int message__add(const char *msg, char *option, const char *file, int lineno) { int rc = 0; char bf[16384]; char *escaped = escape(msg, bf, sizeof(bf)); struct message *m = mesage__find(escaped); if (m != NULL) rc = message__add_file_line(m, file, lineno); else { m = message__new(escaped, option, file, lineno); if (m != NULL) { m->next = message__list; message__list = m; } else rc = -1; } return rc; } static void menu_build_message_list(struct menu *menu) { struct menu *child; message__add(menu_get_prompt(menu), NULL, menu->file == NULL ? "Root Menu" : menu->file->name, menu->lineno); if (menu->sym != NULL && menu_has_help(menu)) message__add(menu_get_help(menu), menu->sym->name, menu->file == NULL ? "Root Menu" : menu->file->name, menu->lineno); for (child = menu->list; child != NULL; child = child->next) if (child->prompt != NULL) menu_build_message_list(child); } static void message__print_file_lineno(struct message *self) { struct file_line *fl = self->files; putchar('\n'); if (self->option != NULL) printf("# %s:00000\n", self->option); printf("#: %s:%d", fl->file, fl->lineno); fl = fl->next; while (fl != NULL) { printf(", %s:%d", fl->file, fl->lineno); fl = fl->next; } putchar('\n'); } static void message__print_gettext_msgid_msgstr(struct message *self) { message__print_file_lineno(self); printf("msgid %s\n" "msgstr \"\"\n", self->msg); } static void menu__xgettext(void) { struct message *m = message__list; while (m != NULL) { /* skip empty lines ("") */ if (strlen(m->msg) > sizeof("\"\"")) message__print_gettext_msgid_msgstr(m); m = m->next; } } int main(int ac, char **av) { conf_parse(av[1]); menu_build_message_list(menu_get_root_menu(NULL)); menu__xgettext(); return 0; }
gpl-2.0
pasnox/recalbox-buildroot
support/kconfig/kxgettext.c
10576
4196
/* * Arnaldo Carvalho de Melo <acme@conectiva.com.br>, 2005 * * Released under the terms of the GNU GPL v2.0 */ #include <stdlib.h> #include <string.h> #include "lkc.h" static char *escape(const char* text, char *bf, int len) { char *bfp = bf; int multiline = strchr(text, '\n') != NULL; int eol = 0; int textlen = strlen(text); if ((textlen > 0) && (text[textlen-1] == '\n')) eol = 1; *bfp++ = '"'; --len; if (multiline) { *bfp++ = '"'; *bfp++ = '\n'; *bfp++ = '"'; len -= 3; } while (*text != '\0' && len > 1) { if (*text == '"') *bfp++ = '\\'; else if (*text == '\n') { *bfp++ = '\\'; *bfp++ = 'n'; *bfp++ = '"'; *bfp++ = '\n'; *bfp++ = '"'; len -= 5; ++text; goto next; } else if (*text == '\\') { *bfp++ = '\\'; len--; } *bfp++ = *text++; next: --len; } if (multiline && eol) bfp -= 3; *bfp++ = '"'; *bfp = '\0'; return bf; } struct file_line { struct file_line *next; const char *file; int lineno; }; static struct file_line *file_line__new(const char *file, int lineno) { struct file_line *self = malloc(sizeof(*self)); if (self == NULL) goto out; self->file = file; self->lineno = lineno; self->next = NULL; out: return self; } struct message { const char *msg; const char *option; struct message *next; struct file_line *files; }; static struct message *message__list; static struct message *message__new(const char *msg, char *option, const char *file, int lineno) { struct message *self = malloc(sizeof(*self)); if (self == NULL) goto out; self->files = file_line__new(file, lineno); if (self->files == NULL) goto out_fail; self->msg = strdup(msg); if (self->msg == NULL) goto out_fail_msg; self->option = option; self->next = NULL; out: return self; out_fail_msg: free(self->files); out_fail: free(self); self = NULL; goto out; } static struct message *mesage__find(const char *msg) { struct message *m = message__list; while (m != NULL) { if (strcmp(m->msg, msg) == 0) break; m = m->next; } return m; } static int message__add_file_line(struct message *self, const char *file, int 
lineno) { int rc = -1; struct file_line *fl = file_line__new(file, lineno); if (fl == NULL) goto out; fl->next = self->files; self->files = fl; rc = 0; out: return rc; } static int message__add(const char *msg, char *option, const char *file, int lineno) { int rc = 0; char bf[16384]; char *escaped = escape(msg, bf, sizeof(bf)); struct message *m = mesage__find(escaped); if (m != NULL) rc = message__add_file_line(m, file, lineno); else { m = message__new(escaped, option, file, lineno); if (m != NULL) { m->next = message__list; message__list = m; } else rc = -1; } return rc; } static void menu_build_message_list(struct menu *menu) { struct menu *child; message__add(menu_get_prompt(menu), NULL, menu->file == NULL ? "Root Menu" : menu->file->name, menu->lineno); if (menu->sym != NULL && menu_has_help(menu)) message__add(menu_get_help(menu), menu->sym->name, menu->file == NULL ? "Root Menu" : menu->file->name, menu->lineno); for (child = menu->list; child != NULL; child = child->next) if (child->prompt != NULL) menu_build_message_list(child); } static void message__print_file_lineno(struct message *self) { struct file_line *fl = self->files; putchar('\n'); if (self->option != NULL) printf("# %s:00000\n", self->option); printf("#: %s:%d", fl->file, fl->lineno); fl = fl->next; while (fl != NULL) { printf(", %s:%d", fl->file, fl->lineno); fl = fl->next; } putchar('\n'); } static void message__print_gettext_msgid_msgstr(struct message *self) { message__print_file_lineno(self); printf("msgid %s\n" "msgstr \"\"\n", self->msg); } static void menu__xgettext(void) { struct message *m = message__list; while (m != NULL) { /* skip empty lines ("") */ if (strlen(m->msg) > sizeof("\"\"")) message__print_gettext_msgid_msgstr(m); m = m->next; } } int main(int ac, char **av) { conf_parse(av[1]); menu_build_message_list(menu_get_root_menu(NULL)); menu__xgettext(); return 0; }
gpl-2.0
bigzz/linux-xfs
drivers/input/mouse/alps.c
81
82921
/* * ALPS touchpad PS/2 mouse driver * * Copyright (c) 2003 Neil Brown <neilb@cse.unsw.edu.au> * Copyright (c) 2003-2005 Peter Osterlund <petero2@telia.com> * Copyright (c) 2004 Dmitry Torokhov <dtor@mail.ru> * Copyright (c) 2005 Vojtech Pavlik <vojtech@suse.cz> * Copyright (c) 2009 Sebastian Kapfer <sebastian_kapfer@gmx.net> * * ALPS detection, tap switching and status querying info is taken from * tpconfig utility (by C. Scott Ananian and Bruce Kall). * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published by * the Free Software Foundation. */ #include <linux/slab.h> #include <linux/input.h> #include <linux/input/mt.h> #include <linux/serio.h> #include <linux/libps2.h> #include "psmouse.h" #include "alps.h" /* * Definitions for ALPS version 3 and 4 command mode protocol */ #define ALPS_CMD_NIBBLE_10 0x01f2 #define ALPS_REG_BASE_RUSHMORE 0xc2c0 #define ALPS_REG_BASE_PINNACLE 0x0000 static const struct alps_nibble_commands alps_v3_nibble_commands[] = { { PSMOUSE_CMD_SETPOLL, 0x00 }, /* 0 */ { PSMOUSE_CMD_RESET_DIS, 0x00 }, /* 1 */ { PSMOUSE_CMD_SETSCALE21, 0x00 }, /* 2 */ { PSMOUSE_CMD_SETRATE, 0x0a }, /* 3 */ { PSMOUSE_CMD_SETRATE, 0x14 }, /* 4 */ { PSMOUSE_CMD_SETRATE, 0x28 }, /* 5 */ { PSMOUSE_CMD_SETRATE, 0x3c }, /* 6 */ { PSMOUSE_CMD_SETRATE, 0x50 }, /* 7 */ { PSMOUSE_CMD_SETRATE, 0x64 }, /* 8 */ { PSMOUSE_CMD_SETRATE, 0xc8 }, /* 9 */ { ALPS_CMD_NIBBLE_10, 0x00 }, /* a */ { PSMOUSE_CMD_SETRES, 0x00 }, /* b */ { PSMOUSE_CMD_SETRES, 0x01 }, /* c */ { PSMOUSE_CMD_SETRES, 0x02 }, /* d */ { PSMOUSE_CMD_SETRES, 0x03 }, /* e */ { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */ }; static const struct alps_nibble_commands alps_v4_nibble_commands[] = { { PSMOUSE_CMD_ENABLE, 0x00 }, /* 0 */ { PSMOUSE_CMD_RESET_DIS, 0x00 }, /* 1 */ { PSMOUSE_CMD_SETSCALE21, 0x00 }, /* 2 */ { PSMOUSE_CMD_SETRATE, 0x0a }, /* 3 */ { PSMOUSE_CMD_SETRATE, 0x14 }, /* 4 */ { PSMOUSE_CMD_SETRATE, 0x28 }, /* 5 
*/ { PSMOUSE_CMD_SETRATE, 0x3c }, /* 6 */ { PSMOUSE_CMD_SETRATE, 0x50 }, /* 7 */ { PSMOUSE_CMD_SETRATE, 0x64 }, /* 8 */ { PSMOUSE_CMD_SETRATE, 0xc8 }, /* 9 */ { ALPS_CMD_NIBBLE_10, 0x00 }, /* a */ { PSMOUSE_CMD_SETRES, 0x00 }, /* b */ { PSMOUSE_CMD_SETRES, 0x01 }, /* c */ { PSMOUSE_CMD_SETRES, 0x02 }, /* d */ { PSMOUSE_CMD_SETRES, 0x03 }, /* e */ { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */ }; static const struct alps_nibble_commands alps_v6_nibble_commands[] = { { PSMOUSE_CMD_ENABLE, 0x00 }, /* 0 */ { PSMOUSE_CMD_SETRATE, 0x0a }, /* 1 */ { PSMOUSE_CMD_SETRATE, 0x14 }, /* 2 */ { PSMOUSE_CMD_SETRATE, 0x28 }, /* 3 */ { PSMOUSE_CMD_SETRATE, 0x3c }, /* 4 */ { PSMOUSE_CMD_SETRATE, 0x50 }, /* 5 */ { PSMOUSE_CMD_SETRATE, 0x64 }, /* 6 */ { PSMOUSE_CMD_SETRATE, 0xc8 }, /* 7 */ { PSMOUSE_CMD_GETID, 0x00 }, /* 8 */ { PSMOUSE_CMD_GETINFO, 0x00 }, /* 9 */ { PSMOUSE_CMD_SETRES, 0x00 }, /* a */ { PSMOUSE_CMD_SETRES, 0x01 }, /* b */ { PSMOUSE_CMD_SETRES, 0x02 }, /* c */ { PSMOUSE_CMD_SETRES, 0x03 }, /* d */ { PSMOUSE_CMD_SETSCALE21, 0x00 }, /* e */ { PSMOUSE_CMD_SETSCALE11, 0x00 }, /* f */ }; #define ALPS_DUALPOINT 0x02 /* touchpad has trackstick */ #define ALPS_PASS 0x04 /* device has a pass-through port */ #define ALPS_WHEEL 0x08 /* hardware wheel present */ #define ALPS_FW_BK_1 0x10 /* front & back buttons present */ #define ALPS_FW_BK_2 0x20 /* front & back buttons present */ #define ALPS_FOUR_BUTTONS 0x40 /* 4 direction button present */ #define ALPS_PS2_INTERLEAVED 0x80 /* 3-byte PS/2 packet interleaved with 6-byte ALPS packet */ #define ALPS_BUTTONPAD 0x200 /* device is a clickpad */ static const struct alps_model_info alps_model_data[] = { { { 0x32, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } }, /* Toshiba Salellite Pro M10 */ { { 0x33, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V1, 0x88, 0xf8, 0 } }, /* UMAX-530T */ { { 0x53, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } }, { { 0x53, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } }, { { 
0x60, 0x03, 0xc8 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } }, /* HP ze1115 */ { { 0x63, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } }, { { 0x63, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } }, { { 0x63, 0x02, 0x28 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 } }, /* Fujitsu Siemens S6010 */ { { 0x63, 0x02, 0x3c }, 0x00, { ALPS_PROTO_V2, 0x8f, 0x8f, ALPS_WHEEL } }, /* Toshiba Satellite S2400-103 */ { { 0x63, 0x02, 0x50 }, 0x00, { ALPS_PROTO_V2, 0xef, 0xef, ALPS_FW_BK_1 } }, /* NEC Versa L320 */ { { 0x63, 0x02, 0x64 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } }, { { 0x63, 0x03, 0xc8 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } }, /* Dell Latitude D800 */ { { 0x73, 0x00, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_DUALPOINT } }, /* ThinkPad R61 8918-5QG */ { { 0x73, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, 0 } }, { { 0x73, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_FW_BK_2 } }, /* Ahtec Laptop */ /* * XXX This entry is suspicious. First byte has zero lower nibble, * which is what a normal mouse would report. Also, the value 0x0e * isn't valid per PS/2 spec. 
*/ { { 0x20, 0x02, 0x0e }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } }, { { 0x22, 0x02, 0x0a }, 0x00, { ALPS_PROTO_V2, 0xf8, 0xf8, ALPS_PASS | ALPS_DUALPOINT } }, { { 0x22, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT } }, /* Dell Latitude D600 */ /* Dell Latitude E5500, E6400, E6500, Precision M4400 */ { { 0x62, 0x02, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED } }, { { 0x73, 0x00, 0x14 }, 0x00, { ALPS_PROTO_V6, 0xff, 0xff, ALPS_DUALPOINT } }, /* Dell XT2 */ { { 0x73, 0x02, 0x50 }, 0x00, { ALPS_PROTO_V2, 0xcf, 0xcf, ALPS_FOUR_BUTTONS } }, /* Dell Vostro 1400 */ { { 0x52, 0x01, 0x14 }, 0x00, { ALPS_PROTO_V2, 0xff, 0xff, ALPS_PASS | ALPS_DUALPOINT | ALPS_PS2_INTERLEAVED } }, /* Toshiba Tecra A11-11L */ { { 0x73, 0x02, 0x64 }, 0x8a, { ALPS_PROTO_V4, 0x8f, 0x8f, 0 } }, }; static const struct alps_protocol_info alps_v3_protocol_data = { ALPS_PROTO_V3, 0x8f, 0x8f, ALPS_DUALPOINT }; static const struct alps_protocol_info alps_v3_rushmore_data = { ALPS_PROTO_V3_RUSHMORE, 0x8f, 0x8f, ALPS_DUALPOINT }; static const struct alps_protocol_info alps_v5_protocol_data = { ALPS_PROTO_V5, 0xc8, 0xd8, 0 }; static const struct alps_protocol_info alps_v7_protocol_data = { ALPS_PROTO_V7, 0x48, 0x48, ALPS_DUALPOINT }; static const struct alps_protocol_info alps_v8_protocol_data = { ALPS_PROTO_V8, 0x18, 0x18, 0 }; static void alps_set_abs_params_st(struct alps_data *priv, struct input_dev *dev1); static void alps_set_abs_params_semi_mt(struct alps_data *priv, struct input_dev *dev1); static void alps_set_abs_params_v7(struct alps_data *priv, struct input_dev *dev1); static void alps_set_abs_params_ss4_v2(struct alps_data *priv, struct input_dev *dev1); /* Packet formats are described in Documentation/input/alps.txt */ static bool alps_is_valid_first_byte(struct alps_data *priv, unsigned char data) { return (data & priv->mask0) == priv->byte0; } static void alps_report_buttons(struct 
input_dev *dev1, struct input_dev *dev2, int left, int right, int middle) { struct input_dev *dev; /* * If shared button has already been reported on the * other device (dev2) then this event should be also * sent through that device. */ dev = (dev2 && test_bit(BTN_LEFT, dev2->key)) ? dev2 : dev1; input_report_key(dev, BTN_LEFT, left); dev = (dev2 && test_bit(BTN_RIGHT, dev2->key)) ? dev2 : dev1; input_report_key(dev, BTN_RIGHT, right); dev = (dev2 && test_bit(BTN_MIDDLE, dev2->key)) ? dev2 : dev1; input_report_key(dev, BTN_MIDDLE, middle); /* * Sync the _other_ device now, we'll do the first * device later once we report the rest of the events. */ if (dev2) input_sync(dev2); } static void alps_process_packet_v1_v2(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; unsigned char *packet = psmouse->packet; struct input_dev *dev = psmouse->dev; struct input_dev *dev2 = priv->dev2; int x, y, z, ges, fin, left, right, middle; int back = 0, forward = 0; if (priv->proto_version == ALPS_PROTO_V1) { left = packet[2] & 0x10; right = packet[2] & 0x08; middle = 0; x = packet[1] | ((packet[0] & 0x07) << 7); y = packet[4] | ((packet[3] & 0x07) << 7); z = packet[5]; } else { left = packet[3] & 1; right = packet[3] & 2; middle = packet[3] & 4; x = packet[1] | ((packet[2] & 0x78) << (7 - 3)); y = packet[4] | ((packet[3] & 0x70) << (7 - 4)); z = packet[5]; } if (priv->flags & ALPS_FW_BK_1) { back = packet[0] & 0x10; forward = packet[2] & 4; } if (priv->flags & ALPS_FW_BK_2) { back = packet[3] & 4; forward = packet[2] & 4; if ((middle = forward && back)) forward = back = 0; } ges = packet[2] & 1; fin = packet[2] & 2; if ((priv->flags & ALPS_DUALPOINT) && z == 127) { input_report_rel(dev2, REL_X, (x > 383 ? (x - 768) : x)); input_report_rel(dev2, REL_Y, -(y > 255 ? 
(y - 512) : y)); alps_report_buttons(dev2, dev, left, right, middle); input_sync(dev2); return; } /* Non interleaved V2 dualpoint has separate stick button bits */ if (priv->proto_version == ALPS_PROTO_V2 && priv->flags == (ALPS_PASS | ALPS_DUALPOINT)) { left |= packet[0] & 1; right |= packet[0] & 2; middle |= packet[0] & 4; } alps_report_buttons(dev, dev2, left, right, middle); /* Convert hardware tap to a reasonable Z value */ if (ges && !fin) z = 40; /* * A "tap and drag" operation is reported by the hardware as a transition * from (!fin && ges) to (fin && ges). This should be translated to the * sequence Z>0, Z==0, Z>0, so the Z==0 event has to be generated manually. */ if (ges && fin && !priv->prev_fin) { input_report_abs(dev, ABS_X, x); input_report_abs(dev, ABS_Y, y); input_report_abs(dev, ABS_PRESSURE, 0); input_report_key(dev, BTN_TOOL_FINGER, 0); input_sync(dev); } priv->prev_fin = fin; if (z > 30) input_report_key(dev, BTN_TOUCH, 1); if (z < 25) input_report_key(dev, BTN_TOUCH, 0); if (z > 0) { input_report_abs(dev, ABS_X, x); input_report_abs(dev, ABS_Y, y); } input_report_abs(dev, ABS_PRESSURE, z); input_report_key(dev, BTN_TOOL_FINGER, z > 0); if (priv->flags & ALPS_WHEEL) input_report_rel(dev, REL_WHEEL, ((packet[2] << 1) & 0x08) - ((packet[0] >> 4) & 0x07)); if (priv->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) { input_report_key(dev, BTN_FORWARD, forward); input_report_key(dev, BTN_BACK, back); } if (priv->flags & ALPS_FOUR_BUTTONS) { input_report_key(dev, BTN_0, packet[2] & 4); input_report_key(dev, BTN_1, packet[0] & 0x10); input_report_key(dev, BTN_2, packet[3] & 4); input_report_key(dev, BTN_3, packet[0] & 0x20); } input_sync(dev); } static void alps_get_bitmap_points(unsigned int map, struct alps_bitmap_point *low, struct alps_bitmap_point *high, int *fingers) { struct alps_bitmap_point *point; int i, bit, prev_bit = 0; point = low; for (i = 0; map != 0; i++, map >>= 1) { bit = map & 1; if (bit) { if (!prev_bit) { point->start_bit = i; 
point->num_bits = 0; (*fingers)++; } point->num_bits++; } else { if (prev_bit) point = high; } prev_bit = bit; } } /* * Process bitmap data from semi-mt protocols. Returns the number of * fingers detected. A return value of 0 means at least one of the * bitmaps was empty. * * The bitmaps don't have enough data to track fingers, so this function * only generates points representing a bounding box of all contacts. * These points are returned in fields->mt when the return value * is greater than 0. */ static int alps_process_bitmap(struct alps_data *priv, struct alps_fields *fields) { int i, fingers_x = 0, fingers_y = 0, fingers, closest; struct alps_bitmap_point x_low = {0,}, x_high = {0,}; struct alps_bitmap_point y_low = {0,}, y_high = {0,}; struct input_mt_pos corner[4]; if (!fields->x_map || !fields->y_map) return 0; alps_get_bitmap_points(fields->x_map, &x_low, &x_high, &fingers_x); alps_get_bitmap_points(fields->y_map, &y_low, &y_high, &fingers_y); /* * Fingers can overlap, so we use the maximum count of fingers * on either axis as the finger count. */ fingers = max(fingers_x, fingers_y); /* * If an axis reports only a single contact, we have overlapping or * adjacent fingers. Divide the single contact between the two points. 
*/ if (fingers_x == 1) { i = (x_low.num_bits - 1) / 2; x_low.num_bits = x_low.num_bits - i; x_high.start_bit = x_low.start_bit + i; x_high.num_bits = max(i, 1); } if (fingers_y == 1) { i = (y_low.num_bits - 1) / 2; y_low.num_bits = y_low.num_bits - i; y_high.start_bit = y_low.start_bit + i; y_high.num_bits = max(i, 1); } /* top-left corner */ corner[0].x = (priv->x_max * (2 * x_low.start_bit + x_low.num_bits - 1)) / (2 * (priv->x_bits - 1)); corner[0].y = (priv->y_max * (2 * y_low.start_bit + y_low.num_bits - 1)) / (2 * (priv->y_bits - 1)); /* top-right corner */ corner[1].x = (priv->x_max * (2 * x_high.start_bit + x_high.num_bits - 1)) / (2 * (priv->x_bits - 1)); corner[1].y = (priv->y_max * (2 * y_low.start_bit + y_low.num_bits - 1)) / (2 * (priv->y_bits - 1)); /* bottom-right corner */ corner[2].x = (priv->x_max * (2 * x_high.start_bit + x_high.num_bits - 1)) / (2 * (priv->x_bits - 1)); corner[2].y = (priv->y_max * (2 * y_high.start_bit + y_high.num_bits - 1)) / (2 * (priv->y_bits - 1)); /* bottom-left corner */ corner[3].x = (priv->x_max * (2 * x_low.start_bit + x_low.num_bits - 1)) / (2 * (priv->x_bits - 1)); corner[3].y = (priv->y_max * (2 * y_high.start_bit + y_high.num_bits - 1)) / (2 * (priv->y_bits - 1)); /* x-bitmap order is reversed on v5 touchpads */ if (priv->proto_version == ALPS_PROTO_V5) { for (i = 0; i < 4; i++) corner[i].x = priv->x_max - corner[i].x; } /* y-bitmap order is reversed on v3 and v4 touchpads */ if (priv->proto_version == ALPS_PROTO_V3 || priv->proto_version == ALPS_PROTO_V4) { for (i = 0; i < 4; i++) corner[i].y = priv->y_max - corner[i].y; } /* * We only select a corner for the second touch once per 2 finger * touch sequence to avoid the chosen corner (and thus the coordinates) * jumping around when the first touch is in the middle. 
	 */
	if (priv->second_touch == -1) {
		/* Find corner closest to our st coordinates */
		closest = 0x7fffffff;
		for (i = 0; i < 4; i++) {
			int dx = fields->st.x - corner[i].x;
			int dy = fields->st.y - corner[i].y;
			int distance = dx * dx + dy * dy;

			if (distance < closest) {
				priv->second_touch = i;
				closest = distance;
			}
		}
		/* And select the opposite corner to use for the 2nd touch */
		priv->second_touch = (priv->second_touch + 2) % 4;
	}

	fields->mt[0] = fields->st;
	fields->mt[1] = corner[priv->second_touch];

	return fingers;
}

/* Report one contact's coordinates on the given MT slot. */
static void alps_set_slot(struct input_dev *dev, int slot, int x, int y)
{
	input_mt_slot(dev, slot);
	input_mt_report_slot_state(dev, MT_TOOL_FINGER, true);
	input_report_abs(dev, ABS_MT_POSITION_X, x);
	input_report_abs(dev, ABS_MT_POSITION_Y, y);
}

/* Report n contacts from priv->f.mt, letting the MT core assign slots. */
static void alps_report_mt_data(struct psmouse *psmouse, int n)
{
	struct alps_data *priv = psmouse->private;
	struct input_dev *dev = psmouse->dev;
	struct alps_fields *f = &priv->f;
	int i, slot[MAX_TOUCHES];

	input_mt_assign_slots(dev, slot, f->mt, n, 0);
	for (i = 0; i < n; i++)
		alps_set_slot(dev, slot[i], f->mt[i].x, f->mt[i].y);

	input_mt_sync_frame(dev);
}

/* Report semi-mt data: st coordinates for 0/1 finger, bounding box for 2+. */
static void alps_report_semi_mt_data(struct psmouse *psmouse, int fingers)
{
	struct alps_data *priv = psmouse->private;
	struct input_dev *dev = psmouse->dev;
	struct alps_fields *f = &priv->f;

	/* Use st data when we don't have mt data */
	if (fingers < 2) {
		f->mt[0].x = f->st.x;
		f->mt[0].y = f->st.y;
		fingers = f->pressure > 0 ?
				1 : 0;
		/* touch ended: re-arm corner selection for the next 2f touch */
		priv->second_touch = -1;
	}

	if (fingers >= 1)
		alps_set_slot(dev, 0, f->mt[0].x, f->mt[0].y);
	if (fingers >= 2)
		alps_set_slot(dev, 1, f->mt[1].x, f->mt[1].y);

	input_mt_sync_frame(dev);

	input_mt_report_finger_count(dev, fingers);

	input_report_key(dev, BTN_LEFT, f->left);
	input_report_key(dev, BTN_RIGHT, f->right);
	input_report_key(dev, BTN_MIDDLE, f->middle);

	input_report_abs(dev, ABS_PRESSURE, f->pressure);

	input_sync(dev);
}

static void alps_process_trackstick_packet_v3(struct psmouse *psmouse)
{
	struct alps_data *priv = psmouse->private;
	unsigned char *packet = psmouse->packet;
	struct input_dev *dev = priv->dev2;
	int x, y, z, left, right, middle;

	/* It should be a DualPoint when received trackstick packet */
	if (!(priv->flags & ALPS_DUALPOINT)) {
		psmouse_warn(psmouse,
			     "Rejected trackstick packet from non DualPoint device");
		return;
	}

	/* Sanity check packet */
	if (!(packet[0] & 0x40)) {
		psmouse_dbg(psmouse, "Bad trackstick packet, discarding\n");
		return;
	}

	/*
	 * There's a special packet that seems to indicate the end
	 * of a stream of trackstick data. Filter these out.
	 */
	if (packet[1] == 0x7f && packet[2] == 0x7f && packet[4] == 0x7f)
		return;

	x = (s8)(((packet[0] & 0x20) << 2) | (packet[1] & 0x7f));
	y = (s8)(((packet[0] & 0x10) << 3) | (packet[2] & 0x7f));
	z = (packet[4] & 0x7c) >> 2;

	/*
	 * The x and y values tend to be quite large, and when used
	 * alone the trackstick is difficult to use. Scale them down
	 * to compensate.
	 */
	x /= 8;
	y /= 8;

	input_report_rel(dev, REL_X, x);
	input_report_rel(dev, REL_Y, -y);

	/*
	 * Most ALPS models report the trackstick buttons in the touchpad
	 * packets, but a few report them here. No reliable way has been
	 * found to differentiate between the models upfront, so we enable
	 * the quirk in response to seeing a button press in the trackstick
	 * packet.
	 */
	left = packet[3] & 0x01;
	right = packet[3] & 0x02;
	middle = packet[3] & 0x04;

	if (!(priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS) &&
	    (left || right || middle))
		priv->quirks |= ALPS_QUIRK_TRACKSTICK_BUTTONS;

	if (priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS) {
		input_report_key(dev, BTN_LEFT, left);
		input_report_key(dev, BTN_RIGHT, right);
		input_report_key(dev, BTN_MIDDLE, middle);
	}

	input_sync(dev);
	return;
}

/* Decode touchpad and trackstick button bits shared by v3-layout packets. */
static void alps_decode_buttons_v3(struct alps_fields *f, unsigned char *p)
{
	f->left = !!(p[3] & 0x01);
	f->right = !!(p[3] & 0x02);
	f->middle = !!(p[3] & 0x04);

	f->ts_left = !!(p[3] & 0x10);
	f->ts_right = !!(p[3] & 0x20);
	f->ts_middle = !!(p[3] & 0x40);
}

/* Decode a Pinnacle (v3) packet: either bitmap (mp) or position data. */
static int alps_decode_pinnacle(struct alps_fields *f, unsigned char *p,
				struct psmouse *psmouse)
{
	f->first_mp = !!(p[4] & 0x40);
	f->is_mp = !!(p[0] & 0x40);

	if (f->is_mp) {
		f->fingers = (p[5] & 0x3) + 1;
		f->x_map = ((p[4] & 0x7e) << 8) |
			   ((p[1] & 0x7f) << 2) |
			   ((p[0] & 0x30) >> 4);
		f->y_map = ((p[3] & 0x70) << 4) |
			   ((p[2] & 0x7f) << 1) |
			   (p[4] & 0x01);
	} else {
		f->st.x = ((p[1] & 0x7f) << 4) | ((p[4] & 0x30) >> 2) |
			  ((p[0] & 0x30) >> 4);
		f->st.y = ((p[2] & 0x7f) << 4) | (p[4] & 0x0f);
		f->pressure = p[5] & 0x7f;

		alps_decode_buttons_v3(f, p);
	}
	return 0;
}

/* Decode a Rushmore packet: like Pinnacle but with wider bitmaps. */
static int alps_decode_rushmore(struct alps_fields *f, unsigned char *p,
				struct psmouse *psmouse)
{
	f->first_mp = !!(p[4] & 0x40);
	f->is_mp = !!(p[5] & 0x40);

	if (f->is_mp) {
		f->fingers = max((p[5] & 0x3), ((p[5] >> 2) & 0x3)) + 1;
		f->x_map = ((p[5] & 0x10) << 11) |
			   ((p[4] & 0x7e) << 8) |
			   ((p[1] & 0x7f) << 2) |
			   ((p[0] & 0x30) >> 4);
		f->y_map = ((p[5] & 0x20) << 6) |
			   ((p[3] & 0x70) << 4) |
			   ((p[2] & 0x7f) << 1) |
			   (p[4] & 0x01);
	} else {
		f->st.x = ((p[1] & 0x7f) << 4) | ((p[4] & 0x30) >> 2) |
			  ((p[0] & 0x30) >> 4);
		f->st.y = ((p[2] & 0x7f) << 4) | (p[4] & 0x0f);
		f->pressure = p[5] & 0x7f;

		alps_decode_buttons_v3(f, p);
	}
	return 0;
}

/* Decode a Dolphin (v5) packet; bitmap data is packed into one 35-bit word. */
static int alps_decode_dolphin(struct alps_fields *f, unsigned char *p,
			       struct psmouse *psmouse)
{
	u64 palm_data = 0;
	struct
	       alps_data *priv = psmouse->private;

	f->first_mp = !!(p[0] & 0x02);
	f->is_mp = !!(p[0] & 0x20);

	if (!f->is_mp) {
		f->st.x = ((p[1] & 0x7f) | ((p[4] & 0x0f) << 7));
		f->st.y = ((p[2] & 0x7f) | ((p[4] & 0xf0) << 3));
		/* bit 2 of byte 0 flags an invalid/palm sample: force z = 0 */
		f->pressure = (p[0] & 4) ? 0 : p[5] & 0x7f;
		alps_decode_buttons_v3(f, p);
	} else {
		f->fingers = ((p[0] & 0x6) >> 1 |
			     (p[0] & 0x10) >> 2);

		palm_data = (p[1] & 0x7f) |
			    ((p[2] & 0x7f) << 7) |
			    ((p[4] & 0x7f) << 14) |
			    ((p[5] & 0x7f) << 21) |
			    ((p[3] & 0x07) << 28) |
			    (((u64)p[3] & 0x70) << 27) |
			    (((u64)p[0] & 0x01) << 34);

		/* Y-profile is stored in P(0) to p(n-1), n = y_bits; */
		f->y_map = palm_data & (BIT(priv->y_bits) - 1);

		/* X-profile is stored in p(n) to p(n+m-1), m = x_bits; */
		f->x_map = (palm_data >> priv->y_bits) &
			   (BIT(priv->x_bits) - 1);
	}
	return 0;
}

static void alps_process_touchpad_packet_v3_v5(struct psmouse *psmouse)
{
	struct alps_data *priv = psmouse->private;
	unsigned char *packet = psmouse->packet;
	struct input_dev *dev2 = priv->dev2;
	struct alps_fields *f = &priv->f;
	int fingers = 0;

	memset(f, 0, sizeof(*f));

	priv->decode_fields(f, packet, psmouse);

	/*
	 * There's no single feature of touchpad position and bitmap packets
	 * that can be used to distinguish between them. We rely on the fact
	 * that a bitmap packet should always follow a position packet with
	 * bit 6 of packet[4] set.
	 */
	if (priv->multi_packet) {
		/*
		 * Sometimes a position packet will indicate a multi-packet
		 * sequence, but then what follows is another position
		 * packet. Check for this, and when it happens process the
		 * position packet as usual.
		 */
		if (f->is_mp) {
			fingers = f->fingers;
			/*
			 * Bitmap processing uses position packet's coordinate
			 * data, so we need to do decode it first.
			 */
			priv->decode_fields(f, priv->multi_data, psmouse);
			if (alps_process_bitmap(priv, f) == 0)
				fingers = 0; /* Use st data */
		} else {
			priv->multi_packet = 0;
		}
	}

	/*
	 * Bit 6 of byte 0 is not usually set in position packets.
	 * The only
	 * times it seems to be set is in situations where the data is
	 * suspect anyway, e.g. a palm resting flat on the touchpad. Given
	 * this combined with the fact that this bit is useful for filtering
	 * out misidentified bitmap packets, we reject anything with this
	 * bit set.
	 */
	if (f->is_mp)
		return;

	if (!priv->multi_packet && f->first_mp) {
		/* Remember the first packet of a multi-packet sequence */
		priv->multi_packet = 1;
		memcpy(priv->multi_data, packet, sizeof(priv->multi_data));
		return;
	}

	priv->multi_packet = 0;

	/*
	 * Sometimes the hardware sends a single packet with z = 0
	 * in the middle of a stream. Real releases generate packets
	 * with x, y, and z all zero, so these seem to be flukes.
	 * Ignore them.
	 */
	if (f->st.x && f->st.y && !f->pressure)
		return;

	alps_report_semi_mt_data(psmouse, fingers);

	if ((priv->flags & ALPS_DUALPOINT) &&
	    !(priv->quirks & ALPS_QUIRK_TRACKSTICK_BUTTONS)) {
		input_report_key(dev2, BTN_LEFT, f->ts_left);
		input_report_key(dev2, BTN_RIGHT, f->ts_right);
		input_report_key(dev2, BTN_MIDDLE, f->ts_middle);
		input_sync(dev2);
	}
}

static void alps_process_packet_v3(struct psmouse *psmouse)
{
	unsigned char *packet = psmouse->packet;

	/*
	 * v3 protocol packets come in three types, two representing
	 * touchpad data and one representing trackstick data.
	 * Trackstick packets seem to be distinguished by always
	 * having 0x3f in the last byte. This value has never been
	 * observed in the last byte of either of the other types
	 * of packets.
	 */
	if (packet[5] == 0x3f) {
		alps_process_trackstick_packet_v3(psmouse);
		return;
	}

	alps_process_touchpad_packet_v3_v5(psmouse);
}

static void alps_process_packet_v6(struct psmouse *psmouse)
{
	struct alps_data *priv = psmouse->private;
	unsigned char *packet = psmouse->packet;
	struct input_dev *dev = psmouse->dev;
	struct input_dev *dev2 = priv->dev2;
	int x, y, z, left, right, middle;

	/*
	 * We can use Byte5 to distinguish if the packet is from Touchpad
	 * or Trackpoint.
	 * Touchpad:	0 - 0x7E
	 * Trackpoint:	0x7F
	 */
	if (packet[5] == 0x7F) {
		/* It should be a DualPoint when received Trackpoint packet */
		if (!(priv->flags & ALPS_DUALPOINT)) {
			psmouse_warn(psmouse,
				     "Rejected trackstick packet from non DualPoint device");
			return;
		}

		/* Trackpoint packet */
		x = packet[1] | ((packet[3] & 0x20) << 2);
		y = packet[2] | ((packet[3] & 0x40) << 1);
		z = packet[4];
		left = packet[3] & 0x01;
		right = packet[3] & 0x02;
		middle = packet[3] & 0x04;

		/* To prevent the cursor jump when finger lifted */
		if (x == 0x7F && y == 0x7F && z == 0x7F)
			x = y = z = 0;

		/* Divide 4 since trackpoint's speed is too fast */
		input_report_rel(dev2, REL_X, (char)x / 4);
		input_report_rel(dev2, REL_Y, -((char)y / 4));
		input_report_key(dev2, BTN_LEFT, left);
		input_report_key(dev2, BTN_RIGHT, right);
		input_report_key(dev2, BTN_MIDDLE, middle);
		input_sync(dev2);
		return;
	}

	/* Touchpad packet */
	x = packet[1] | ((packet[3] & 0x78) << 4);
	y = packet[2] | ((packet[4] & 0x78) << 4);
	z = packet[5];
	left = packet[3] & 0x01;
	right = packet[3] & 0x02;

	/* hysteresis between touch (z > 30) and release (z < 25) */
	if (z > 30)
		input_report_key(dev, BTN_TOUCH, 1);
	if (z < 25)
		input_report_key(dev, BTN_TOUCH, 0);

	if (z > 0) {
		input_report_abs(dev, ABS_X, x);
		input_report_abs(dev, ABS_Y, y);
	}

	input_report_abs(dev, ABS_PRESSURE, z);
	input_report_key(dev, BTN_TOOL_FINGER, z > 0);

	/* v6 touchpad does not have middle button */
	input_report_key(dev, BTN_LEFT, left);
	input_report_key(dev, BTN_RIGHT, right);

	input_sync(dev);
}

static void alps_process_packet_v4(struct psmouse *psmouse)
{
	struct alps_data *priv = psmouse->private;
	unsigned char *packet = psmouse->packet;
	struct alps_fields *f = &priv->f;
	int offset;

	/*
	 * v4 has a 6-byte encoding for bitmap data, but this data is
	 * broken up between 3 normal packets. Use priv->multi_packet to
	 * track our position in the bitmap packet.
	 */
	if (packet[6] & 0x40) {
		/* sync, reset position */
		priv->multi_packet = 0;
	}

	if (WARN_ON_ONCE(priv->multi_packet > 2))
		return;

	offset = 2 * priv->multi_packet;
	priv->multi_data[offset] = packet[6];
	priv->multi_data[offset + 1] = packet[7];

	f->left = !!(packet[4] & 0x01);
	f->right = !!(packet[4] & 0x02);

	f->st.x = ((packet[1] & 0x7f) << 4) | ((packet[3] & 0x30) >> 2) |
		  ((packet[0] & 0x30) >> 4);
	f->st.y = ((packet[2] & 0x7f) << 4) | (packet[3] & 0x0f);
	f->pressure = packet[5] & 0x7f;

	if (++priv->multi_packet > 2) {
		/* third packet completes the 6-byte bitmap */
		priv->multi_packet = 0;

		f->x_map = ((priv->multi_data[2] & 0x1f) << 10) |
			   ((priv->multi_data[3] & 0x60) << 3) |
			   ((priv->multi_data[0] & 0x3f) << 2) |
			   ((priv->multi_data[1] & 0x60) >> 5);
		f->y_map = ((priv->multi_data[5] & 0x01) << 10) |
			   ((priv->multi_data[3] & 0x1f) << 5) |
			   (priv->multi_data[1] & 0x1f);

		f->fingers = alps_process_bitmap(priv, f);
	}

	alps_report_semi_mt_data(psmouse, f->fingers);
}

/* Validate the constant bits of a v7 packet as it is being assembled. */
static bool alps_is_valid_package_v7(struct psmouse *psmouse)
{
	switch (psmouse->pktcnt) {
	case 3:
		return (psmouse->packet[2] & 0x40) == 0x40;
	case 4:
		return (psmouse->packet[3] & 0x48) == 0x48;
	case 6:
		return (psmouse->packet[5] & 0x40) == 0x00;
	}
	return true;
}

/* Classify a complete v7 packet by its discriminator bits. */
static unsigned char alps_get_packet_id_v7(char *byte)
{
	unsigned char packet_id;

	if (byte[4] & 0x40)
		packet_id = V7_PACKET_ID_TWO;
	else if (byte[4] & 0x01)
		packet_id = V7_PACKET_ID_MULTI;
	else if ((byte[0] & 0x10) && !(byte[4] & 0x43))
		packet_id = V7_PACKET_ID_NEW;
	else if (byte[1] == 0x00 && byte[4] == 0x00)
		packet_id = V7_PACKET_ID_IDLE;
	else
		packet_id = V7_PACKET_ID_UNKNOWN;

	return packet_id;
}

static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
					  unsigned char *pkt,
					  unsigned char pkt_id)
{
	mt[0].x = ((pkt[2] & 0x80) << 4);
	mt[0].x |= ((pkt[2] & 0x3F) << 5);
	mt[0].x |= ((pkt[3] & 0x30) >> 1);
	mt[0].x |= (pkt[3] & 0x07);
	mt[0].y = (pkt[1] << 3) | (pkt[0] & 0x07);

	mt[1].x = ((pkt[3] & 0x80) << 4);
	mt[1].x |= ((pkt[4] & 0x80) << 3);
	mt[1].x |= ((pkt[4] & 0x3F) << 4);
	mt[1].y = ((pkt[5] &
		    0x80) << 3);
	mt[1].y |= ((pkt[5] & 0x3F) << 4);

	switch (pkt_id) {
	case V7_PACKET_ID_TWO:
		mt[1].x &= ~0x000F;
		mt[1].y |= 0x000F;
		/* Detect false-postive touches where x & y report max value */
		if (mt[1].y == 0x7ff && mt[1].x == 0xff0) {
			mt[1].x = 0;
			/* y gets set to 0 at the end of this function */
		}
		break;

	case V7_PACKET_ID_MULTI:
		mt[1].x &= ~0x003F;
		mt[1].y &= ~0x0020;
		mt[1].y |= ((pkt[4] & 0x02) << 4);
		mt[1].y |= 0x001F;
		break;

	case V7_PACKET_ID_NEW:
		mt[1].x &= ~0x003F;
		mt[1].x |= (pkt[0] & 0x20);
		mt[1].y |= 0x000F;
		break;
	}

	/* hardware reports y inverted; flip into normal orientation */
	mt[0].y = 0x7FF - mt[0].y;
	mt[1].y = 0x7FF - mt[1].y;
}

/* Count contacts with non-zero coordinates in mt[]. */
static int alps_get_mt_count(struct input_mt_pos *mt)
{
	int i, fingers = 0;

	for (i = 0; i < MAX_TOUCHES; i++) {
		if (mt[i].x != 0 || mt[i].y != 0)
			fingers++;
	}

	return fingers;
}

static int alps_decode_packet_v7(struct alps_fields *f,
				 unsigned char *p,
				 struct psmouse *psmouse)
{
	struct alps_data *priv = psmouse->private;
	unsigned char pkt_id;

	pkt_id = alps_get_packet_id_v7(p);
	if (pkt_id == V7_PACKET_ID_IDLE)
		return 0;
	if (pkt_id == V7_PACKET_ID_UNKNOWN)
		return -1;
	/*
	 * NEW packets are send to indicate a discontinuity in the finger
	 * coordinate reporting. Specifically a finger may have moved from
	 * slot 0 to 1 or vice versa. INPUT_MT_TRACK takes care of this for
	 * us.
	 *
	 * NEW packets have 3 problems:
	 * 1) They do not contain middle / right button info (on non clickpads)
	 *    this can be worked around by preserving the old button state
	 * 2) They do not contain an accurate fingercount, and they are
	 *    typically send when the number of fingers changes. We cannot use
	 *    the old finger count as that may mismatch with the amount of
	 *    touch coordinates we've available in the NEW packet
	 * 3) Their x data for the second touch is inaccurate leading to
	 *    a possible jump of the x coordinate by 16 units when the first
	 *    non NEW packet comes in
	 * Since problems 2 & 3 cannot be worked around, just ignore them.
	 */
	if (pkt_id == V7_PACKET_ID_NEW)
		return 1;

	alps_get_finger_coordinate_v7(f->mt, p, pkt_id);

	if (pkt_id == V7_PACKET_ID_TWO)
		f->fingers = alps_get_mt_count(f->mt);
	else /* pkt_id == V7_PACKET_ID_MULTI */
		f->fingers = 3 + (p[5] & 0x03);

	f->left = (p[0] & 0x80) >> 7;
	if (priv->flags & ALPS_BUTTONPAD) {
		/* on clickpads these bits carry extra finger count, not buttons */
		if (p[0] & 0x20)
			f->fingers++;
		if (p[0] & 0x10)
			f->fingers++;
	} else {
		f->right = (p[0] & 0x20) >> 5;
		f->middle = (p[0] & 0x10) >> 4;
	}

	/* Sometimes a single touch is reported in mt[1] rather then mt[0] */
	if (f->fingers == 1 && f->mt[0].x == 0 && f->mt[0].y == 0) {
		f->mt[0].x = f->mt[1].x;
		f->mt[0].y = f->mt[1].y;
		f->mt[1].x = 0;
		f->mt[1].y = 0;
	}

	return 0;
}

static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
{
	struct alps_data *priv = psmouse->private;
	unsigned char *packet = psmouse->packet;
	struct input_dev *dev2 = priv->dev2;
	int x, y, z, left, right, middle;

	/* It should be a DualPoint when received trackstick packet */
	if (!(priv->flags & ALPS_DUALPOINT)) {
		psmouse_warn(psmouse,
			     "Rejected trackstick packet from non DualPoint device");
		return;
	}

	x = ((packet[2] & 0xbf)) | ((packet[3] & 0x10) << 2);
	y = (packet[3] & 0x07) | (packet[4] & 0xb8) |
	    ((packet[3] & 0x20) << 1);
	z = (packet[5] & 0x3f) | ((packet[3] & 0x80) >> 1);

	left = (packet[1] & 0x01);
	right = (packet[1] & 0x02) >> 1;
	middle = (packet[1] & 0x04) >> 2;

	input_report_rel(dev2, REL_X, (char)x);
	input_report_rel(dev2, REL_Y, -((char)y));

	input_report_key(dev2, BTN_LEFT, left);
	input_report_key(dev2, BTN_RIGHT, right);
	input_report_key(dev2, BTN_MIDDLE, middle);

	input_sync(dev2);
}

static void alps_process_touchpad_packet_v7(struct psmouse *psmouse)
{
	struct alps_data *priv = psmouse->private;
	struct input_dev *dev = psmouse->dev;
	struct alps_fields *f = &priv->f;

	memset(f, 0, sizeof(*f));

	/* non-zero return: IDLE/NEW/UNKNOWN packet, nothing to report */
	if (priv->decode_fields(f, psmouse->packet, psmouse))
		return;

	alps_report_mt_data(psmouse, alps_get_mt_count(f->mt));

	input_mt_report_finger_count(dev, f->fingers);

	input_report_key(dev, BTN_LEFT,
			 f->left);
	input_report_key(dev, BTN_RIGHT, f->right);
	input_report_key(dev, BTN_MIDDLE, f->middle);

	input_sync(dev);
}

static void alps_process_packet_v7(struct psmouse *psmouse)
{
	unsigned char *packet = psmouse->packet;

	/* Trackstick packets carry a fixed signature in bytes 0 and 4 */
	if (packet[0] == 0x48 && (packet[4] & 0x47) == 0x06)
		alps_process_trackstick_packet_v7(psmouse);
	else
		alps_process_touchpad_packet_v7(psmouse);
}

/* Classify a complete SS4 (v8) packet by its discriminator bits. */
static unsigned char alps_get_pkt_id_ss4_v2(unsigned char *byte)
{
	unsigned char pkt_id = SS4_PACKET_ID_IDLE;

	if (byte[0] == 0x18 && byte[1] == 0x10 && byte[2] == 0x00 &&
	    (byte[3] & 0x88) == 0x08 && byte[4] == 0x10 && byte[5] == 0x00) {
		pkt_id = SS4_PACKET_ID_IDLE;
	} else if (!(byte[3] & 0x10)) {
		pkt_id = SS4_PACKET_ID_ONE;
	} else if (!(byte[3] & 0x20)) {
		pkt_id = SS4_PACKET_ID_TWO;
	} else {
		pkt_id = SS4_PACKET_ID_MULTI;
	}

	return pkt_id;
}

static int alps_decode_ss4_v2(struct alps_fields *f,
			      unsigned char *p, struct psmouse *psmouse)
{
	struct alps_data *priv = psmouse->private;
	unsigned char pkt_id;
	unsigned int no_data_x, no_data_y;

	pkt_id = alps_get_pkt_id_ss4_v2(p);

	/* Current packet is 1Finger coordinate packet */
	switch (pkt_id) {
	case SS4_PACKET_ID_ONE:
		f->mt[0].x = SS4_1F_X_V2(p);
		f->mt[0].y = SS4_1F_Y_V2(p);
		f->pressure = ((SS4_1F_Z_V2(p)) * 2) & 0x7f;
		f->fingers = 1;
		f->first_mp = 0;
		f->is_mp = 0;
		break;

	case SS4_PACKET_ID_TWO:
		if (priv->flags & ALPS_BUTTONPAD) {
			f->mt[0].x = SS4_BTL_MF_X_V2(p, 0);
			f->mt[0].y = SS4_BTL_MF_Y_V2(p, 0);
			f->mt[1].x = SS4_BTL_MF_X_V2(p, 1);
			f->mt[1].y = SS4_BTL_MF_Y_V2(p, 1);
		} else {
			f->mt[0].x = SS4_STD_MF_X_V2(p, 0);
			f->mt[0].y = SS4_STD_MF_Y_V2(p, 0);
			f->mt[1].x = SS4_STD_MF_X_V2(p, 1);
			f->mt[1].y = SS4_STD_MF_Y_V2(p, 1);
		}
		f->pressure = SS4_MF_Z_V2(p, 0) ?
			      0x30 : 0;

		if (SS4_IS_MF_CONTINUE(p)) {
			/* more finger data follows in the next packet */
			f->first_mp = 1;
		} else {
			f->fingers = 2;
			f->first_mp = 0;
		}
		f->is_mp = 0;
		break;

	case SS4_PACKET_ID_MULTI:
		if (priv->flags & ALPS_BUTTONPAD) {
			f->mt[2].x = SS4_BTL_MF_X_V2(p, 0);
			f->mt[2].y = SS4_BTL_MF_Y_V2(p, 0);
			f->mt[3].x = SS4_BTL_MF_X_V2(p, 1);
			f->mt[3].y = SS4_BTL_MF_Y_V2(p, 1);
			no_data_x = SS4_MFPACKET_NO_AX_BL;
			no_data_y = SS4_MFPACKET_NO_AY_BL;
		} else {
			f->mt[2].x = SS4_STD_MF_X_V2(p, 0);
			f->mt[2].y = SS4_STD_MF_Y_V2(p, 0);
			f->mt[3].x = SS4_STD_MF_X_V2(p, 1);
			f->mt[3].y = SS4_STD_MF_Y_V2(p, 1);
			no_data_x = SS4_MFPACKET_NO_AX;
			no_data_y = SS4_MFPACKET_NO_AY;
		}

		f->first_mp = 0;
		f->is_mp = 1;

		if (SS4_IS_5F_DETECTED(p)) {
			f->fingers = 5;
		} else if (f->mt[3].x == no_data_x &&
			   f->mt[3].y == no_data_y) {
			/* 4th slot carries the "no contact" sentinel */
			f->mt[3].x = 0;
			f->mt[3].y = 0;
			f->fingers = 3;
		} else {
			f->fingers = 4;
		}
		break;

	case SS4_PACKET_ID_IDLE:
	default:
		memset(f, 0, sizeof(struct alps_fields));
		break;
	}

	f->left = !!(SS4_BTN_V2(p) & 0x01);
	if (!(priv->flags & ALPS_BUTTONPAD)) {
		f->right = !!(SS4_BTN_V2(p) & 0x02);
		f->middle = !!(SS4_BTN_V2(p) & 0x04);
	}

	return 0;
}

static void alps_process_packet_ss4_v2(struct psmouse *psmouse)
{
	struct alps_data *priv = psmouse->private;
	unsigned char *packet = psmouse->packet;
	struct input_dev *dev = psmouse->dev;
	struct alps_fields *f = &priv->f;

	memset(f, 0, sizeof(struct alps_fields));
	priv->decode_fields(f, packet, psmouse);
	if (priv->multi_packet) {
		/*
		 * Sometimes the first packet will indicate a multi-packet
		 * sequence, but sometimes the next multi-packet would not
		 * come. Check for this, and when it happens process the
		 * position packet as usual.
		 */
		if (f->is_mp) {
			/* Now process the 1st packet */
			priv->decode_fields(f, priv->multi_data, psmouse);
		} else {
			priv->multi_packet = 0;
		}
	}

	/*
	 * "f.is_mp" would always be '0' after merging the 1st and 2nd packet.
	 * When it is set, it means 2nd packet comes without 1st packet come.
	 */
	if (f->is_mp)
		return;

	/* Save the first packet */
	if (!priv->multi_packet && f->first_mp) {
		priv->multi_packet = 1;
		memcpy(priv->multi_data, packet, sizeof(priv->multi_data));
		return;
	}

	priv->multi_packet = 0;

	/* only 4 MT slots are available; 5-finger count is still reported */
	alps_report_mt_data(psmouse, (f->fingers <= 4) ? f->fingers : 4);

	input_mt_report_finger_count(dev, f->fingers);

	input_report_key(dev, BTN_LEFT, f->left);
	input_report_key(dev, BTN_RIGHT, f->right);
	input_report_key(dev, BTN_MIDDLE, f->middle);

	input_report_abs(dev, ABS_PRESSURE, f->pressure);

	input_sync(dev);
}

/* Validate constant bits of an in-progress SS4 v2 packet. */
static bool alps_is_valid_package_ss4_v2(struct psmouse *psmouse)
{
	if (psmouse->pktcnt == 4 && ((psmouse->packet[3] & 0x08) != 0x08))
		return false;
	if (psmouse->pktcnt == 6 && ((psmouse->packet[5] & 0x10) != 0x0))
		return false;
	return true;
}

/* Serializes dev3 creation across instances/work items. */
static DEFINE_MUTEX(alps_mutex);

/* Workqueue callback: lazily register the dev3 bare PS/2 mouse device. */
static void alps_register_bare_ps2_mouse(struct work_struct *work)
{
	struct alps_data *priv =
		container_of(work, struct alps_data, dev3_register_work.work);
	struct psmouse *psmouse = priv->psmouse;
	struct input_dev *dev3;
	int error = 0;

	mutex_lock(&alps_mutex);

	if (priv->dev3)
		goto out;

	dev3 = input_allocate_device();
	if (!dev3) {
		psmouse_err(psmouse, "failed to allocate secondary device\n");
		error = -ENOMEM;
		goto out;
	}

	snprintf(priv->phys3, sizeof(priv->phys3), "%s/%s",
		 psmouse->ps2dev.serio->phys,
		 (priv->dev2 ?
"input2" : "input1")); dev3->phys = priv->phys3; /* * format of input device name is: "protocol vendor name" * see function psmouse_switch_protocol() in psmouse-base.c */ dev3->name = "PS/2 ALPS Mouse"; dev3->id.bustype = BUS_I8042; dev3->id.vendor = 0x0002; dev3->id.product = PSMOUSE_PS2; dev3->id.version = 0x0000; dev3->dev.parent = &psmouse->ps2dev.serio->dev; input_set_capability(dev3, EV_REL, REL_X); input_set_capability(dev3, EV_REL, REL_Y); input_set_capability(dev3, EV_KEY, BTN_LEFT); input_set_capability(dev3, EV_KEY, BTN_RIGHT); input_set_capability(dev3, EV_KEY, BTN_MIDDLE); __set_bit(INPUT_PROP_POINTER, dev3->propbit); error = input_register_device(dev3); if (error) { psmouse_err(psmouse, "failed to register secondary device: %d\n", error); input_free_device(dev3); goto out; } priv->dev3 = dev3; out: /* * Save the error code so that we can detect that we * already tried to create the device. */ if (error) priv->dev3 = ERR_PTR(error); mutex_unlock(&alps_mutex); } static void alps_report_bare_ps2_packet(struct psmouse *psmouse, unsigned char packet[], bool report_buttons) { struct alps_data *priv = psmouse->private; struct input_dev *dev, *dev2 = NULL; /* Figure out which device to use to report the bare packet */ if (priv->proto_version == ALPS_PROTO_V2 && (priv->flags & ALPS_DUALPOINT)) { /* On V2 devices the DualPoint Stick reports bare packets */ dev = priv->dev2; dev2 = psmouse->dev; } else if (unlikely(IS_ERR_OR_NULL(priv->dev3))) { /* Register dev3 mouse if we received PS/2 packet first time */ if (!IS_ERR(priv->dev3)) psmouse_queue_work(psmouse, &priv->dev3_register_work, 0); return; } else { dev = priv->dev3; } if (report_buttons) alps_report_buttons(dev, dev2, packet[0] & 1, packet[0] & 2, packet[0] & 4); input_report_rel(dev, REL_X, packet[1] ? packet[1] - ((packet[0] << 4) & 0x100) : 0); input_report_rel(dev, REL_Y, packet[2] ? 
			 ((packet[0] << 3) & 0x100) - packet[2] : 0);

	input_sync(dev);
}

static psmouse_ret_t alps_handle_interleaved_ps2(struct psmouse *psmouse)
{
	struct alps_data *priv = psmouse->private;

	if (psmouse->pktcnt < 6)
		return PSMOUSE_GOOD_DATA;

	if (psmouse->pktcnt == 6) {
		/*
		 * Start a timer to flush the packet if it ends up last
		 * 6-byte packet in the stream. Timer needs to fire
		 * psmouse core times out itself. 20 ms should be enough
		 * to decide if we are getting more data or not.
		 */
		mod_timer(&priv->timer, jiffies + msecs_to_jiffies(20));
		return PSMOUSE_GOOD_DATA;
	}

	del_timer(&priv->timer);

	if (psmouse->packet[6] & 0x80) {

		/*
		 * Highest bit is set - that means we either had
		 * complete ALPS packet and this is start of the
		 * next packet or we got garbage.
		 */

		if (((psmouse->packet[3] |
		      psmouse->packet[4] |
		      psmouse->packet[5]) & 0x80) ||
		    (!alps_is_valid_first_byte(priv, psmouse->packet[6]))) {
			psmouse_dbg(psmouse,
				    "refusing packet %4ph (suspected interleaved ps/2)\n",
				    psmouse->packet + 3);
			return PSMOUSE_BAD_DATA;
		}

		priv->process_packet(psmouse);

		/* Continue with the next packet */
		psmouse->packet[0] = psmouse->packet[6];
		psmouse->pktcnt = 1;

	} else {

		/*
		 * High bit is 0 - that means that we indeed got a PS/2
		 * packet in the middle of ALPS packet.
		 *
		 * There is also possibility that we got 6-byte ALPS
		 * packet followed by 3-byte packet from trackpoint. We
		 * can not distinguish between these 2 scenarios but
		 * because the latter is unlikely to happen in course of
		 * normal operation (user would need to press all
		 * buttons on the pad and start moving trackpoint
		 * without touching the pad surface) we assume former.
		 * Even if we are wrong the worst thing that would happen
		 * the cursor would jump but we should not get protocol
		 * de-synchronization.
		 */

		alps_report_bare_ps2_packet(psmouse, &psmouse->packet[3],
					    false);

		/*
		 * Continue with the standard ALPS protocol handling,
		 * but make sure we won't process it as an interleaved
		 * packet again, which may happen if all buttons are
		 * pressed.
		 * To avoid this let's reset the 4th bit which
		 * is normally 1.
		 */
		psmouse->packet[3] = psmouse->packet[6] & 0xf7;
		psmouse->pktcnt = 4;
	}

	return PSMOUSE_GOOD_DATA;
}

/* Timer callback: flush a stalled 6-byte packet that never got bytes 7-8. */
static void alps_flush_packet(unsigned long data)
{
	struct psmouse *psmouse = (struct psmouse *)data;
	struct alps_data *priv = psmouse->private;

	serio_pause_rx(psmouse->ps2dev.serio);

	if (psmouse->pktcnt == psmouse->pktsize) {

		/*
		 * We did not get any more data in reasonable amount of time.
		 * Validate the last 3 bytes and process as a standard
		 * ALPS packet.
		 */
		if ((psmouse->packet[3] |
		     psmouse->packet[4] |
		     psmouse->packet[5]) & 0x80) {
			psmouse_dbg(psmouse,
				    "refusing packet %3ph (suspected interleaved ps/2)\n",
				    psmouse->packet + 3);
		} else {
			priv->process_packet(psmouse);
		}
		psmouse->pktcnt = 0;
	}

	serio_continue_rx(psmouse->ps2dev.serio);
}

static psmouse_ret_t alps_process_byte(struct psmouse *psmouse)
{
	struct alps_data *priv = psmouse->private;

	/*
	 * Check if we are dealing with a bare PS/2 packet, presumably from
	 * a device connected to the external PS/2 port. Because bare PS/2
	 * protocol does not have enough constant bits to self-synchronize
	 * properly we only do this if the device is fully synchronized.
	 * Can not distinguish V8's first byte from PS/2 packet's
	 */
	if (priv->proto_version != ALPS_PROTO_V8 &&
	    !psmouse->out_of_sync_cnt &&
	    (psmouse->packet[0] & 0xc8) == 0x08) {
		if (psmouse->pktcnt == 3) {
			alps_report_bare_ps2_packet(psmouse, psmouse->packet,
						    true);
			return PSMOUSE_FULL_PACKET;
		}
		return PSMOUSE_GOOD_DATA;
	}

	/* Check for PS/2 packet stuffed in the middle of ALPS packet.
	 */
	if ((priv->flags & ALPS_PS2_INTERLEAVED) &&
	    psmouse->pktcnt >= 4 && (psmouse->packet[3] & 0x0f) == 0x0f) {
		return alps_handle_interleaved_ps2(psmouse);
	}

	if (!alps_is_valid_first_byte(priv, psmouse->packet[0])) {
		psmouse_dbg(psmouse,
			    "refusing packet[0] = %x (mask0 = %x, byte0 = %x)\n",
			    psmouse->packet[0], priv->mask0, priv->byte0);
		return PSMOUSE_BAD_DATA;
	}

	/* Bytes 2 - pktsize should have 0 in the highest bit */
	if (priv->proto_version < ALPS_PROTO_V5 &&
	    psmouse->pktcnt >= 2 && psmouse->pktcnt <= psmouse->pktsize &&
	    (psmouse->packet[psmouse->pktcnt - 1] & 0x80)) {
		psmouse_dbg(psmouse, "refusing packet[%i] = %x\n",
			    psmouse->pktcnt - 1,
			    psmouse->packet[psmouse->pktcnt - 1]);

		if (priv->proto_version == ALPS_PROTO_V3_RUSHMORE &&
		    psmouse->pktcnt == psmouse->pktsize) {
			/*
			 * Some Dell boxes, such as Latitude E6440 or E7440
			 * with closed lid, quite often smash last byte of
			 * otherwise valid packet with 0xff. Given that the
			 * next packet is very likely to be valid let's
			 * report PSMOUSE_FULL_PACKET but not process data,
			 * rather than reporting PSMOUSE_BAD_DATA and
			 * filling the logs.
			 */
			return PSMOUSE_FULL_PACKET;
		}

		return PSMOUSE_BAD_DATA;
	}

	if ((priv->proto_version == ALPS_PROTO_V7 &&
	     !alps_is_valid_package_v7(psmouse)) ||
	    (priv->proto_version == ALPS_PROTO_V8 &&
	     !alps_is_valid_package_ss4_v2(psmouse))) {
		psmouse_dbg(psmouse, "refusing packet[%i] = %x\n",
			    psmouse->pktcnt - 1,
			    psmouse->packet[psmouse->pktcnt - 1]);
		return PSMOUSE_BAD_DATA;
	}

	if (psmouse->pktcnt == psmouse->pktsize) {
		priv->process_packet(psmouse);
		return PSMOUSE_FULL_PACKET;
	}

	return PSMOUSE_GOOD_DATA;
}

/* Send one nibble of a command-mode address/value via the nibble table. */
static int alps_command_mode_send_nibble(struct psmouse *psmouse, int nibble)
{
	struct ps2dev *ps2dev = &psmouse->ps2dev;
	struct alps_data *priv = psmouse->private;
	int command;
	unsigned char *param;
	unsigned char dummy[4];

	BUG_ON(nibble > 0xf);

	command = priv->nibble_commands[nibble].command;
	/* commands that read data need a scratch buffer; others send data */
	param = (command & 0x0f00) ?
		dummy : (unsigned char *)&priv->nibble_commands[nibble].data;

	if (ps2_command(ps2dev, param, command))
		return -1;

	return 0;
}

/* Latch a 16-bit register address, sent high nibble first. */
static int alps_command_mode_set_addr(struct psmouse *psmouse, int addr)
{
	struct ps2dev *ps2dev = &psmouse->ps2dev;
	struct alps_data *priv = psmouse->private;
	int i, nibble;

	if (ps2_command(ps2dev, NULL, priv->addr_command))
		return -1;

	for (i = 12; i >= 0; i -= 4) {
		nibble = (addr >> i) & 0xf;
		if (alps_command_mode_send_nibble(psmouse, nibble))
			return -1;
	}

	return 0;
}

/* Read the register at the previously latched address. */
static int __alps_command_mode_read_reg(struct psmouse *psmouse, int addr)
{
	struct ps2dev *ps2dev = &psmouse->ps2dev;
	unsigned char param[4];

	if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO))
		return -1;

	/*
	 * The address being read is returned in the first two bytes
	 * of the result. Check that this address matches the expected
	 * address.
	 */
	if (addr != ((param[0] << 8) | param[1]))
		return -1;

	return param[2];
}

/* Set the address then read the register; -1 on failure. */
static int alps_command_mode_read_reg(struct psmouse *psmouse, int addr)
{
	if (alps_command_mode_set_addr(psmouse, addr))
		return -1;
	return __alps_command_mode_read_reg(psmouse, addr);
}

/* Write a byte to the previously latched address, nibble by nibble. */
static int __alps_command_mode_write_reg(struct psmouse *psmouse, u8 value)
{
	if (alps_command_mode_send_nibble(psmouse, (value >> 4) & 0xf))
		return -1;
	if (alps_command_mode_send_nibble(psmouse, value & 0xf))
		return -1;
	return 0;
}

/* Set the address then write the register; -1 on failure. */
static int alps_command_mode_write_reg(struct psmouse *psmouse, int addr,
				       u8 value)
{
	if (alps_command_mode_set_addr(psmouse, addr))
		return -1;
	return __alps_command_mode_write_reg(psmouse, value);
}

/*
 * Issue init_command (if any), repeat repeated_command three times, then
 * fetch the 3-byte report into param[].
 */
static int alps_rpt_cmd(struct psmouse *psmouse, int init_command,
			int repeated_command, unsigned char *param)
{
	struct ps2dev *ps2dev = &psmouse->ps2dev;

	param[0] = 0;
	if (init_command && ps2_command(ps2dev, param, init_command))
		return -EIO;

	if (ps2_command(ps2dev, NULL, repeated_command) ||
	    ps2_command(ps2dev, NULL, repeated_command) ||
	    ps2_command(ps2dev, NULL, repeated_command))
		return -EIO;

	param[0] = param[1] = param[2] = 0xff;
	if (ps2_command(ps2dev,
			param, PSMOUSE_CMD_GETINFO))
		return -EIO;

	psmouse_dbg(psmouse, "%2.2X report: %3ph\n", repeated_command, param);
	return 0;
}

/* Recognize the E7/EC signature bytes of known ALPS firmware families. */
static bool alps_check_valid_firmware_id(unsigned char id[])
{
	if (id[0] == 0x73)
		return true;

	if (id[0] == 0x88 &&
	    (id[1] == 0x07 ||
	     id[1] == 0x08 ||
	     (id[1] & 0xf0) == 0xb0 ||
	     (id[1] & 0xf0) == 0xc0)) {
		return true;
	}

	return false;
}

static int alps_enter_command_mode(struct psmouse *psmouse)
{
	unsigned char param[4];

	if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_RESET_WRAP, param)) {
		psmouse_err(psmouse, "failed to enter command mode\n");
		return -1;
	}

	if (!alps_check_valid_firmware_id(param)) {
		psmouse_dbg(psmouse,
			    "unknown response while entering command mode\n");
		return -1;
	}
	return 0;
}

static inline int alps_exit_command_mode(struct psmouse *psmouse)
{
	struct ps2dev *ps2dev = &psmouse->ps2dev;

	if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSTREAM))
		return -1;
	return 0;
}

/*
 * For DualPoint devices select the device that should respond to
 * subsequent commands. It looks like glidepad is behind stickpointer,
 * I'd thought it would be other way around...
 */
static int alps_passthrough_mode_v2(struct psmouse *psmouse, bool enable)
{
	struct ps2dev *ps2dev = &psmouse->ps2dev;
	int cmd = enable ?
PSMOUSE_CMD_SETSCALE21 : PSMOUSE_CMD_SETSCALE11; if (ps2_command(ps2dev, NULL, cmd) || ps2_command(ps2dev, NULL, cmd) || ps2_command(ps2dev, NULL, cmd) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE)) return -1; /* we may get 3 more bytes, just ignore them */ ps2_drain(ps2dev, 3, 100); return 0; } static int alps_absolute_mode_v1_v2(struct psmouse *psmouse) { struct ps2dev *ps2dev = &psmouse->ps2dev; /* Try ALPS magic knock - 4 disable before enable */ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE)) return -1; /* * Switch mouse to poll (remote) mode so motion data will not * get in our way */ return ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETPOLL); } static int alps_monitor_mode_send_word(struct psmouse *psmouse, u16 word) { int i, nibble; /* * b0-b11 are valid bits, send sequence is inverse. * e.g. 
when word = 0x0123, nibble send sequence is 3, 2, 1 */ for (i = 0; i <= 8; i += 4) { nibble = (word >> i) & 0xf; if (alps_command_mode_send_nibble(psmouse, nibble)) return -1; } return 0; } static int alps_monitor_mode_write_reg(struct psmouse *psmouse, u16 addr, u16 value) { struct ps2dev *ps2dev = &psmouse->ps2dev; /* 0x0A0 is the command to write the word */ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE) || alps_monitor_mode_send_word(psmouse, 0x0A0) || alps_monitor_mode_send_word(psmouse, addr) || alps_monitor_mode_send_word(psmouse, value) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE)) return -1; return 0; } static int alps_monitor_mode(struct psmouse *psmouse, bool enable) { struct ps2dev *ps2dev = &psmouse->ps2dev; if (enable) { /* EC E9 F5 F5 E7 E6 E7 E9 to enter monitor mode */ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_GETINFO) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE21) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_GETINFO)) return -1; } else { /* EC to exit monitor mode */ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP)) return -1; } return 0; } static int alps_absolute_mode_v6(struct psmouse *psmouse) { u16 reg_val = 0x181; int ret = -1; /* enter monitor mode, to write the register */ if (alps_monitor_mode(psmouse, true)) return -1; ret = alps_monitor_mode_write_reg(psmouse, 0x000, reg_val); if (alps_monitor_mode(psmouse, false)) ret = -1; return ret; } static int alps_get_status(struct psmouse *psmouse, char *param) { /* Get status: 0xF5 0xF5 0xF5 0xE9 */ if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_DISABLE, param)) return -1; return 0; } /* * Turn touchpad tapping on or off. The sequences are: * 0xE9 0xF5 0xF5 0xF3 0x0A to enable, * 0xE9 0xF5 0xF5 0xE8 0x00 to disable. 
* My guess that 0xE9 (GetInfo) is here as a sync point. * For models that also have stickpointer (DualPoints) its tapping * is controlled separately (0xE6 0xE6 0xE6 0xF3 0x14|0x0A) but * we don't fiddle with it. */ static int alps_tap_mode(struct psmouse *psmouse, int enable) { struct ps2dev *ps2dev = &psmouse->ps2dev; int cmd = enable ? PSMOUSE_CMD_SETRATE : PSMOUSE_CMD_SETRES; unsigned char tap_arg = enable ? 0x0A : 0x00; unsigned char param[4]; if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_DISABLE) || ps2_command(ps2dev, &tap_arg, cmd)) return -1; if (alps_get_status(psmouse, param)) return -1; return 0; } /* * alps_poll() - poll the touchpad for current motion packet. * Used in resync. */ static int alps_poll(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; unsigned char buf[sizeof(psmouse->packet)]; bool poll_failed; if (priv->flags & ALPS_PASS) alps_passthrough_mode_v2(psmouse, true); poll_failed = ps2_command(&psmouse->ps2dev, buf, PSMOUSE_CMD_POLL | (psmouse->pktsize << 8)) < 0; if (priv->flags & ALPS_PASS) alps_passthrough_mode_v2(psmouse, false); if (poll_failed || (buf[0] & priv->mask0) != priv->byte0) return -1; if ((psmouse->badbyte & 0xc8) == 0x08) { /* * Poll the track stick ... 
*/ if (ps2_command(&psmouse->ps2dev, buf, PSMOUSE_CMD_POLL | (3 << 8))) return -1; } memcpy(psmouse->packet, buf, sizeof(buf)); return 0; } static int alps_hw_init_v1_v2(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; if ((priv->flags & ALPS_PASS) && alps_passthrough_mode_v2(psmouse, true)) { return -1; } if (alps_tap_mode(psmouse, true)) { psmouse_warn(psmouse, "Failed to enable hardware tapping\n"); return -1; } if (alps_absolute_mode_v1_v2(psmouse)) { psmouse_err(psmouse, "Failed to enable absolute mode\n"); return -1; } if ((priv->flags & ALPS_PASS) && alps_passthrough_mode_v2(psmouse, false)) { return -1; } /* ALPS needs stream mode, otherwise it won't report any data */ if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSTREAM)) { psmouse_err(psmouse, "Failed to enable stream mode\n"); return -1; } return 0; } static int alps_hw_init_v6(struct psmouse *psmouse) { unsigned char param[2] = {0xC8, 0x14}; /* Enter passthrough mode to let trackpoint enter 6byte raw mode */ if (alps_passthrough_mode_v2(psmouse, true)) return -1; if (ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) || ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) || ps2_command(&psmouse->ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) || ps2_command(&psmouse->ps2dev, &param[0], PSMOUSE_CMD_SETRATE) || ps2_command(&psmouse->ps2dev, &param[1], PSMOUSE_CMD_SETRATE)) return -1; if (alps_passthrough_mode_v2(psmouse, false)) return -1; if (alps_absolute_mode_v6(psmouse)) { psmouse_err(psmouse, "Failed to enable absolute mode\n"); return -1; } return 0; } /* * Enable or disable passthrough mode to the trackstick. 
*/ static int alps_passthrough_mode_v3(struct psmouse *psmouse, int reg_base, bool enable) { int reg_val, ret = -1; if (alps_enter_command_mode(psmouse)) return -1; reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x0008); if (reg_val == -1) goto error; if (enable) reg_val |= 0x01; else reg_val &= ~0x01; ret = __alps_command_mode_write_reg(psmouse, reg_val); error: if (alps_exit_command_mode(psmouse)) ret = -1; return ret; } /* Must be in command mode when calling this function */ static int alps_absolute_mode_v3(struct psmouse *psmouse) { int reg_val; reg_val = alps_command_mode_read_reg(psmouse, 0x0004); if (reg_val == -1) return -1; reg_val |= 0x06; if (__alps_command_mode_write_reg(psmouse, reg_val)) return -1; return 0; } static int alps_probe_trackstick_v3(struct psmouse *psmouse, int reg_base) { int ret = -EIO, reg_val; if (alps_enter_command_mode(psmouse)) goto error; reg_val = alps_command_mode_read_reg(psmouse, reg_base + 0x08); if (reg_val == -1) goto error; /* bit 7: trackstick is present */ ret = reg_val & 0x80 ? 0 : -ENODEV; error: alps_exit_command_mode(psmouse); return ret; } static int alps_setup_trackstick_v3(struct psmouse *psmouse, int reg_base) { struct ps2dev *ps2dev = &psmouse->ps2dev; int ret = 0; unsigned char param[4]; if (alps_passthrough_mode_v3(psmouse, reg_base, true)) return -EIO; /* * E7 report for the trackstick * * There have been reports of failures to seem to trace back * to the above trackstick check failing. When these occur * this E7 report fails, so when that happens we continue * with the assumption that there isn't a trackstick after * all. */ if (alps_rpt_cmd(psmouse, 0, PSMOUSE_CMD_SETSCALE21, param)) { psmouse_warn(psmouse, "Failed to initialize trackstick (E7 report failed)\n"); ret = -ENODEV; } else { psmouse_dbg(psmouse, "trackstick E7 report: %3ph\n", param); /* * Not sure what this does, but it is absolutely * essential. 
Without it, the touchpad does not * work at all and the trackstick just emits normal * PS/2 packets. */ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSCALE11) || alps_command_mode_send_nibble(psmouse, 0x9) || alps_command_mode_send_nibble(psmouse, 0x4)) { psmouse_err(psmouse, "Error sending magic E6 sequence\n"); ret = -EIO; goto error; } /* * This ensures the trackstick packets are in the format * supported by this driver. If bit 1 isn't set the packet * format is different. */ if (alps_enter_command_mode(psmouse) || alps_command_mode_write_reg(psmouse, reg_base + 0x08, 0x82) || alps_exit_command_mode(psmouse)) ret = -EIO; } error: if (alps_passthrough_mode_v3(psmouse, reg_base, false)) ret = -EIO; return ret; } static int alps_hw_init_v3(struct psmouse *psmouse) { struct ps2dev *ps2dev = &psmouse->ps2dev; int reg_val; unsigned char param[4]; reg_val = alps_probe_trackstick_v3(psmouse, ALPS_REG_BASE_PINNACLE); if (reg_val == -EIO) goto error; if (reg_val == 0 && alps_setup_trackstick_v3(psmouse, ALPS_REG_BASE_PINNACLE) == -EIO) goto error; if (alps_enter_command_mode(psmouse) || alps_absolute_mode_v3(psmouse)) { psmouse_err(psmouse, "Failed to enter absolute mode\n"); goto error; } reg_val = alps_command_mode_read_reg(psmouse, 0x0006); if (reg_val == -1) goto error; if (__alps_command_mode_write_reg(psmouse, reg_val | 0x01)) goto error; reg_val = alps_command_mode_read_reg(psmouse, 0x0007); if (reg_val == -1) goto error; if (__alps_command_mode_write_reg(psmouse, reg_val | 0x01)) goto error; if (alps_command_mode_read_reg(psmouse, 0x0144) == -1) goto error; if (__alps_command_mode_write_reg(psmouse, 0x04)) goto error; if (alps_command_mode_read_reg(psmouse, 0x0159) == -1) goto error; if (__alps_command_mode_write_reg(psmouse, 0x03)) goto error; if (alps_command_mode_read_reg(psmouse, 0x0163) == -1) goto error; if (alps_command_mode_write_reg(psmouse, 0x0163, 
0x03)) goto error; if (alps_command_mode_read_reg(psmouse, 0x0162) == -1) goto error; if (alps_command_mode_write_reg(psmouse, 0x0162, 0x04)) goto error; alps_exit_command_mode(psmouse); /* Set rate and enable data reporting */ param[0] = 0x64; if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE)) { psmouse_err(psmouse, "Failed to enable data reporting\n"); return -1; } return 0; error: /* * Leaving the touchpad in command mode will essentially render * it unusable until the machine reboots, so exit it here just * to be safe */ alps_exit_command_mode(psmouse); return -1; } static int alps_get_v3_v7_resolution(struct psmouse *psmouse, int reg_pitch) { int reg, x_pitch, y_pitch, x_electrode, y_electrode, x_phys, y_phys; struct alps_data *priv = psmouse->private; reg = alps_command_mode_read_reg(psmouse, reg_pitch); if (reg < 0) return reg; x_pitch = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */ x_pitch = 50 + 2 * x_pitch; /* In 0.1 mm units */ y_pitch = (char)reg >> 4; /* sign extend upper 4 bits */ y_pitch = 36 + 2 * y_pitch; /* In 0.1 mm units */ reg = alps_command_mode_read_reg(psmouse, reg_pitch + 1); if (reg < 0) return reg; x_electrode = (char)(reg << 4) >> 4; /* sign extend lower 4 bits */ x_electrode = 17 + x_electrode; y_electrode = (char)reg >> 4; /* sign extend upper 4 bits */ y_electrode = 13 + y_electrode; x_phys = x_pitch * (x_electrode - 1); /* In 0.1 mm units */ y_phys = y_pitch * (y_electrode - 1); /* In 0.1 mm units */ priv->x_res = priv->x_max * 10 / x_phys; /* units / mm */ priv->y_res = priv->y_max * 10 / y_phys; /* units / mm */ psmouse_dbg(psmouse, "pitch %dx%d num-electrodes %dx%d physical size %dx%d mm res %dx%d\n", x_pitch, y_pitch, x_electrode, y_electrode, x_phys / 10, y_phys / 10, priv->x_res, priv->y_res); return 0; } static int alps_hw_init_rushmore_v3(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; struct ps2dev *ps2dev = &psmouse->ps2dev; int reg_val, ret = 
-1; if (priv->flags & ALPS_DUALPOINT) { reg_val = alps_setup_trackstick_v3(psmouse, ALPS_REG_BASE_RUSHMORE); if (reg_val == -EIO) goto error; } if (alps_enter_command_mode(psmouse) || alps_command_mode_read_reg(psmouse, 0xc2d9) == -1 || alps_command_mode_write_reg(psmouse, 0xc2cb, 0x00)) goto error; if (alps_get_v3_v7_resolution(psmouse, 0xc2da)) goto error; reg_val = alps_command_mode_read_reg(psmouse, 0xc2c6); if (reg_val == -1) goto error; if (__alps_command_mode_write_reg(psmouse, reg_val & 0xfd)) goto error; if (alps_command_mode_write_reg(psmouse, 0xc2c9, 0x64)) goto error; /* enter absolute mode */ reg_val = alps_command_mode_read_reg(psmouse, 0xc2c4); if (reg_val == -1) goto error; if (__alps_command_mode_write_reg(psmouse, reg_val | 0x02)) goto error; alps_exit_command_mode(psmouse); return ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE); error: alps_exit_command_mode(psmouse); return ret; } /* Must be in command mode when calling this function */ static int alps_absolute_mode_v4(struct psmouse *psmouse) { int reg_val; reg_val = alps_command_mode_read_reg(psmouse, 0x0004); if (reg_val == -1) return -1; reg_val |= 0x02; if (__alps_command_mode_write_reg(psmouse, reg_val)) return -1; return 0; } static int alps_hw_init_v4(struct psmouse *psmouse) { struct ps2dev *ps2dev = &psmouse->ps2dev; unsigned char param[4]; if (alps_enter_command_mode(psmouse)) goto error; if (alps_absolute_mode_v4(psmouse)) { psmouse_err(psmouse, "Failed to enter absolute mode\n"); goto error; } if (alps_command_mode_write_reg(psmouse, 0x0007, 0x8c)) goto error; if (alps_command_mode_write_reg(psmouse, 0x0149, 0x03)) goto error; if (alps_command_mode_write_reg(psmouse, 0x0160, 0x03)) goto error; if (alps_command_mode_write_reg(psmouse, 0x017f, 0x15)) goto error; if (alps_command_mode_write_reg(psmouse, 0x0151, 0x01)) goto error; if (alps_command_mode_write_reg(psmouse, 0x0168, 0x03)) goto error; if (alps_command_mode_write_reg(psmouse, 0x014a, 0x03)) goto error; if 
(alps_command_mode_write_reg(psmouse, 0x0161, 0x03)) goto error; alps_exit_command_mode(psmouse); /* * This sequence changes the output from a 9-byte to an * 8-byte format. All the same data seems to be present, * just in a more compact format. */ param[0] = 0xc8; param[1] = 0x64; param[2] = 0x50; if (ps2_command(ps2dev, &param[0], PSMOUSE_CMD_SETRATE) || ps2_command(ps2dev, &param[1], PSMOUSE_CMD_SETRATE) || ps2_command(ps2dev, &param[2], PSMOUSE_CMD_SETRATE) || ps2_command(ps2dev, param, PSMOUSE_CMD_GETID)) return -1; /* Set rate and enable data reporting */ param[0] = 0x64; if (ps2_command(ps2dev, param, PSMOUSE_CMD_SETRATE) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE)) { psmouse_err(psmouse, "Failed to enable data reporting\n"); return -1; } return 0; error: /* * Leaving the touchpad in command mode will essentially render * it unusable until the machine reboots, so exit it here just * to be safe */ alps_exit_command_mode(psmouse); return -1; } static int alps_get_otp_values_ss4_v2(struct psmouse *psmouse, unsigned char index, unsigned char otp[]) { struct ps2dev *ps2dev = &psmouse->ps2dev; switch (index) { case 0: if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSTREAM) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSTREAM) || ps2_command(ps2dev, otp, PSMOUSE_CMD_GETINFO)) return -1; break; case 1: if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETPOLL) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETPOLL) || ps2_command(ps2dev, otp, PSMOUSE_CMD_GETINFO)) return -1; break; } return 0; } static int alps_update_device_area_ss4_v2(unsigned char otp[][4], struct alps_data *priv) { int num_x_electrode; int num_y_electrode; int x_pitch, y_pitch, x_phys, y_phys; num_x_electrode = SS4_NUMSENSOR_XOFFSET + (otp[1][0] & 0x0F); num_y_electrode = SS4_NUMSENSOR_YOFFSET + ((otp[1][0] >> 4) & 0x0F); priv->x_max = (num_x_electrode - 1) * SS4_COUNT_PER_ELECTRODE; priv->y_max = (num_y_electrode - 1) * SS4_COUNT_PER_ELECTRODE; x_pitch = ((otp[1][2] >> 2) & 0x07) + SS4_MIN_PITCH_MM; y_pitch = 
((otp[1][2] >> 5) & 0x07) + SS4_MIN_PITCH_MM; x_phys = x_pitch * (num_x_electrode - 1); /* In 0.1 mm units */ y_phys = y_pitch * (num_y_electrode - 1); /* In 0.1 mm units */ priv->x_res = priv->x_max * 10 / x_phys; /* units / mm */ priv->y_res = priv->y_max * 10 / y_phys; /* units / mm */ return 0; } static int alps_update_btn_info_ss4_v2(unsigned char otp[][4], struct alps_data *priv) { unsigned char is_btnless; is_btnless = (otp[1][1] >> 3) & 0x01; if (is_btnless) priv->flags |= ALPS_BUTTONPAD; return 0; } static int alps_set_defaults_ss4_v2(struct psmouse *psmouse, struct alps_data *priv) { unsigned char otp[2][4]; memset(otp, 0, sizeof(otp)); if (alps_get_otp_values_ss4_v2(psmouse, 0, &otp[0][0]) || alps_get_otp_values_ss4_v2(psmouse, 1, &otp[1][0])) return -1; alps_update_device_area_ss4_v2(otp, priv); alps_update_btn_info_ss4_v2(otp, priv); return 0; } static int alps_dolphin_get_device_area(struct psmouse *psmouse, struct alps_data *priv) { struct ps2dev *ps2dev = &psmouse->ps2dev; unsigned char param[4] = {0}; int num_x_electrode, num_y_electrode; if (alps_enter_command_mode(psmouse)) return -1; param[0] = 0x0a; if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_RESET_WRAP) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETPOLL) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETPOLL) || ps2_command(ps2dev, &param[0], PSMOUSE_CMD_SETRATE) || ps2_command(ps2dev, &param[0], PSMOUSE_CMD_SETRATE)) return -1; if (ps2_command(ps2dev, param, PSMOUSE_CMD_GETINFO)) return -1; /* * Dolphin's sensor line number is not fixed. It can be calculated * by adding the device's register value with DOLPHIN_PROFILE_X/YOFFSET. * Further more, we can get device's x_max and y_max by multiplying * sensor line number with DOLPHIN_COUNT_PER_ELECTRODE. * * e.g. When we get register's sensor_x = 11 & sensor_y = 8, * real sensor line number X = 11 + 8 = 19, and * real sensor line number Y = 8 + 1 = 9. * So, x_max = (19 - 1) * 64 = 1152, and * y_max = (9 - 1) * 64 = 512. 
*/ num_x_electrode = DOLPHIN_PROFILE_XOFFSET + (param[2] & 0x0F); num_y_electrode = DOLPHIN_PROFILE_YOFFSET + ((param[2] >> 4) & 0x0F); priv->x_bits = num_x_electrode; priv->y_bits = num_y_electrode; priv->x_max = (num_x_electrode - 1) * DOLPHIN_COUNT_PER_ELECTRODE; priv->y_max = (num_y_electrode - 1) * DOLPHIN_COUNT_PER_ELECTRODE; if (alps_exit_command_mode(psmouse)) return -1; return 0; } static int alps_hw_init_dolphin_v1(struct psmouse *psmouse) { struct ps2dev *ps2dev = &psmouse->ps2dev; unsigned char param[2]; /* This is dolphin "v1" as empirically defined by florin9doi */ param[0] = 0x64; param[1] = 0x28; if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSTREAM) || ps2_command(ps2dev, &param[0], PSMOUSE_CMD_SETRATE) || ps2_command(ps2dev, &param[1], PSMOUSE_CMD_SETRATE)) return -1; return 0; } static int alps_hw_init_v7(struct psmouse *psmouse) { struct ps2dev *ps2dev = &psmouse->ps2dev; int reg_val, ret = -1; if (alps_enter_command_mode(psmouse) || alps_command_mode_read_reg(psmouse, 0xc2d9) == -1) goto error; if (alps_get_v3_v7_resolution(psmouse, 0xc397)) goto error; if (alps_command_mode_write_reg(psmouse, 0xc2c9, 0x64)) goto error; reg_val = alps_command_mode_read_reg(psmouse, 0xc2c4); if (reg_val == -1) goto error; if (__alps_command_mode_write_reg(psmouse, reg_val | 0x02)) goto error; alps_exit_command_mode(psmouse); return ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE); error: alps_exit_command_mode(psmouse); return ret; } static int alps_hw_init_ss4_v2(struct psmouse *psmouse) { struct ps2dev *ps2dev = &psmouse->ps2dev; char param[2] = {0x64, 0x28}; int ret = -1; /* enter absolute mode */ if (ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSTREAM) || ps2_command(ps2dev, NULL, PSMOUSE_CMD_SETSTREAM) || ps2_command(ps2dev, &param[0], PSMOUSE_CMD_SETRATE) || ps2_command(ps2dev, &param[1], PSMOUSE_CMD_SETRATE)) { goto error; } /* T.B.D. 
Decread noise packet number, delete in the future */ if (alps_exit_command_mode(psmouse) || alps_enter_command_mode(psmouse) || alps_command_mode_write_reg(psmouse, 0x001D, 0x20)) { goto error; } alps_exit_command_mode(psmouse); return ps2_command(ps2dev, NULL, PSMOUSE_CMD_ENABLE); error: alps_exit_command_mode(psmouse); return ret; } static int alps_set_protocol(struct psmouse *psmouse, struct alps_data *priv, const struct alps_protocol_info *protocol) { psmouse->private = priv; setup_timer(&priv->timer, alps_flush_packet, (unsigned long)psmouse); priv->proto_version = protocol->version; priv->byte0 = protocol->byte0; priv->mask0 = protocol->mask0; priv->flags = protocol->flags; priv->x_max = 2000; priv->y_max = 1400; priv->x_bits = 15; priv->y_bits = 11; switch (priv->proto_version) { case ALPS_PROTO_V1: case ALPS_PROTO_V2: priv->hw_init = alps_hw_init_v1_v2; priv->process_packet = alps_process_packet_v1_v2; priv->set_abs_params = alps_set_abs_params_st; priv->x_max = 1023; priv->y_max = 767; break; case ALPS_PROTO_V3: priv->hw_init = alps_hw_init_v3; priv->process_packet = alps_process_packet_v3; priv->set_abs_params = alps_set_abs_params_semi_mt; priv->decode_fields = alps_decode_pinnacle; priv->nibble_commands = alps_v3_nibble_commands; priv->addr_command = PSMOUSE_CMD_RESET_WRAP; break; case ALPS_PROTO_V3_RUSHMORE: priv->hw_init = alps_hw_init_rushmore_v3; priv->process_packet = alps_process_packet_v3; priv->set_abs_params = alps_set_abs_params_semi_mt; priv->decode_fields = alps_decode_rushmore; priv->nibble_commands = alps_v3_nibble_commands; priv->addr_command = PSMOUSE_CMD_RESET_WRAP; priv->x_bits = 16; priv->y_bits = 12; if (alps_probe_trackstick_v3(psmouse, ALPS_REG_BASE_RUSHMORE) < 0) priv->flags &= ~ALPS_DUALPOINT; break; case ALPS_PROTO_V4: priv->hw_init = alps_hw_init_v4; priv->process_packet = alps_process_packet_v4; priv->set_abs_params = alps_set_abs_params_semi_mt; priv->nibble_commands = alps_v4_nibble_commands; priv->addr_command = 
PSMOUSE_CMD_DISABLE; break; case ALPS_PROTO_V5: priv->hw_init = alps_hw_init_dolphin_v1; priv->process_packet = alps_process_touchpad_packet_v3_v5; priv->decode_fields = alps_decode_dolphin; priv->set_abs_params = alps_set_abs_params_semi_mt; priv->nibble_commands = alps_v3_nibble_commands; priv->addr_command = PSMOUSE_CMD_RESET_WRAP; priv->x_bits = 23; priv->y_bits = 12; if (alps_dolphin_get_device_area(psmouse, priv)) return -EIO; break; case ALPS_PROTO_V6: priv->hw_init = alps_hw_init_v6; priv->process_packet = alps_process_packet_v6; priv->set_abs_params = alps_set_abs_params_st; priv->nibble_commands = alps_v6_nibble_commands; priv->x_max = 2047; priv->y_max = 1535; break; case ALPS_PROTO_V7: priv->hw_init = alps_hw_init_v7; priv->process_packet = alps_process_packet_v7; priv->decode_fields = alps_decode_packet_v7; priv->set_abs_params = alps_set_abs_params_v7; priv->nibble_commands = alps_v3_nibble_commands; priv->addr_command = PSMOUSE_CMD_RESET_WRAP; priv->x_max = 0xfff; priv->y_max = 0x7ff; if (priv->fw_ver[1] != 0xba) priv->flags |= ALPS_BUTTONPAD; break; case ALPS_PROTO_V8: priv->hw_init = alps_hw_init_ss4_v2; priv->process_packet = alps_process_packet_ss4_v2; priv->decode_fields = alps_decode_ss4_v2; priv->set_abs_params = alps_set_abs_params_ss4_v2; priv->nibble_commands = alps_v3_nibble_commands; priv->addr_command = PSMOUSE_CMD_RESET_WRAP; if (alps_set_defaults_ss4_v2(psmouse, priv)) return -EIO; break; } return 0; } static const struct alps_protocol_info *alps_match_table(unsigned char *e7, unsigned char *ec) { const struct alps_model_info *model; int i; for (i = 0; i < ARRAY_SIZE(alps_model_data); i++) { model = &alps_model_data[i]; if (!memcmp(e7, model->signature, sizeof(model->signature)) && (!model->command_mode_resp || model->command_mode_resp == ec[2])) { return &model->protocol_info; } } return NULL; } static int alps_identify(struct psmouse *psmouse, struct alps_data *priv) { const struct alps_protocol_info *protocol; unsigned char e6[4], 
e7[4], ec[4]; int error; /* * First try "E6 report". * ALPS should return 0,0,10 or 0,0,100 if no buttons are pressed. * The bits 0-2 of the first byte will be 1s if some buttons are * pressed. */ if (alps_rpt_cmd(psmouse, PSMOUSE_CMD_SETRES, PSMOUSE_CMD_SETSCALE11, e6)) return -EIO; if ((e6[0] & 0xf8) != 0 || e6[1] != 0 || (e6[2] != 10 && e6[2] != 100)) return -EINVAL; /* * Now get the "E7" and "EC" reports. These will uniquely identify * most ALPS touchpads. */ if (alps_rpt_cmd(psmouse, PSMOUSE_CMD_SETRES, PSMOUSE_CMD_SETSCALE21, e7) || alps_rpt_cmd(psmouse, PSMOUSE_CMD_SETRES, PSMOUSE_CMD_RESET_WRAP, ec) || alps_exit_command_mode(psmouse)) return -EIO; protocol = alps_match_table(e7, ec); if (!protocol) { if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x50 && ec[0] == 0x73 && (ec[1] == 0x01 || ec[1] == 0x02)) { protocol = &alps_v5_protocol_data; } else if (ec[0] == 0x88 && ((ec[1] & 0xf0) == 0xb0 || (ec[1] & 0xf0) == 0xc0)) { protocol = &alps_v7_protocol_data; } else if (ec[0] == 0x88 && ec[1] == 0x08) { protocol = &alps_v3_rushmore_data; } else if (ec[0] == 0x88 && ec[1] == 0x07 && ec[2] >= 0x90 && ec[2] <= 0x9d) { protocol = &alps_v3_protocol_data; } else if (e7[0] == 0x73 && e7[1] == 0x03 && e7[2] == 0x14 && ec[1] == 0x02) { protocol = &alps_v8_protocol_data; } else { psmouse_dbg(psmouse, "Likely not an ALPS touchpad: E7=%3ph, EC=%3ph\n", e7, ec); return -EINVAL; } } if (priv) { /* Save the Firmware version */ memcpy(priv->fw_ver, ec, 3); error = alps_set_protocol(psmouse, priv, protocol); if (error) return error; } return 0; } static int alps_reconnect(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; psmouse_reset(psmouse); if (alps_identify(psmouse, priv) < 0) return -1; return priv->hw_init(psmouse); } static void alps_disconnect(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; psmouse_reset(psmouse); del_timer_sync(&priv->timer); if (priv->dev2) input_unregister_device(priv->dev2); if (!IS_ERR_OR_NULL(priv->dev3)) 
input_unregister_device(priv->dev3); kfree(priv); } static void alps_set_abs_params_st(struct alps_data *priv, struct input_dev *dev1) { input_set_abs_params(dev1, ABS_X, 0, priv->x_max, 0, 0); input_set_abs_params(dev1, ABS_Y, 0, priv->y_max, 0, 0); input_set_abs_params(dev1, ABS_PRESSURE, 0, 127, 0, 0); } static void alps_set_abs_params_mt_common(struct alps_data *priv, struct input_dev *dev1) { input_set_abs_params(dev1, ABS_MT_POSITION_X, 0, priv->x_max, 0, 0); input_set_abs_params(dev1, ABS_MT_POSITION_Y, 0, priv->y_max, 0, 0); input_abs_set_res(dev1, ABS_MT_POSITION_X, priv->x_res); input_abs_set_res(dev1, ABS_MT_POSITION_Y, priv->y_res); set_bit(BTN_TOOL_TRIPLETAP, dev1->keybit); set_bit(BTN_TOOL_QUADTAP, dev1->keybit); } static void alps_set_abs_params_semi_mt(struct alps_data *priv, struct input_dev *dev1) { alps_set_abs_params_mt_common(priv, dev1); input_set_abs_params(dev1, ABS_PRESSURE, 0, 127, 0, 0); input_mt_init_slots(dev1, MAX_TOUCHES, INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED | INPUT_MT_SEMI_MT); } static void alps_set_abs_params_v7(struct alps_data *priv, struct input_dev *dev1) { alps_set_abs_params_mt_common(priv, dev1); set_bit(BTN_TOOL_QUINTTAP, dev1->keybit); input_mt_init_slots(dev1, MAX_TOUCHES, INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED | INPUT_MT_TRACK); set_bit(BTN_TOOL_QUINTTAP, dev1->keybit); } static void alps_set_abs_params_ss4_v2(struct alps_data *priv, struct input_dev *dev1) { alps_set_abs_params_mt_common(priv, dev1); input_set_abs_params(dev1, ABS_PRESSURE, 0, 127, 0, 0); set_bit(BTN_TOOL_QUINTTAP, dev1->keybit); input_mt_init_slots(dev1, MAX_TOUCHES, INPUT_MT_POINTER | INPUT_MT_DROP_UNUSED | INPUT_MT_TRACK); } int alps_init(struct psmouse *psmouse) { struct alps_data *priv = psmouse->private; struct input_dev *dev1 = psmouse->dev; int error; error = priv->hw_init(psmouse); if (error) goto init_fail; /* * Undo part of setup done for us by psmouse core since touchpad * is not a relative device. 
*/ __clear_bit(EV_REL, dev1->evbit); __clear_bit(REL_X, dev1->relbit); __clear_bit(REL_Y, dev1->relbit); /* * Now set up our capabilities. */ dev1->evbit[BIT_WORD(EV_KEY)] |= BIT_MASK(EV_KEY); dev1->keybit[BIT_WORD(BTN_TOUCH)] |= BIT_MASK(BTN_TOUCH); dev1->keybit[BIT_WORD(BTN_TOOL_FINGER)] |= BIT_MASK(BTN_TOOL_FINGER); dev1->keybit[BIT_WORD(BTN_LEFT)] |= BIT_MASK(BTN_LEFT) | BIT_MASK(BTN_RIGHT); dev1->evbit[BIT_WORD(EV_ABS)] |= BIT_MASK(EV_ABS); priv->set_abs_params(priv, dev1); if (priv->flags & ALPS_WHEEL) { dev1->evbit[BIT_WORD(EV_REL)] |= BIT_MASK(EV_REL); dev1->relbit[BIT_WORD(REL_WHEEL)] |= BIT_MASK(REL_WHEEL); } if (priv->flags & (ALPS_FW_BK_1 | ALPS_FW_BK_2)) { dev1->keybit[BIT_WORD(BTN_FORWARD)] |= BIT_MASK(BTN_FORWARD); dev1->keybit[BIT_WORD(BTN_BACK)] |= BIT_MASK(BTN_BACK); } if (priv->flags & ALPS_FOUR_BUTTONS) { dev1->keybit[BIT_WORD(BTN_0)] |= BIT_MASK(BTN_0); dev1->keybit[BIT_WORD(BTN_1)] |= BIT_MASK(BTN_1); dev1->keybit[BIT_WORD(BTN_2)] |= BIT_MASK(BTN_2); dev1->keybit[BIT_WORD(BTN_3)] |= BIT_MASK(BTN_3); } else if (priv->flags & ALPS_BUTTONPAD) { set_bit(INPUT_PROP_BUTTONPAD, dev1->propbit); clear_bit(BTN_RIGHT, dev1->keybit); } else { dev1->keybit[BIT_WORD(BTN_MIDDLE)] |= BIT_MASK(BTN_MIDDLE); } if (priv->flags & ALPS_DUALPOINT) { struct input_dev *dev2; dev2 = input_allocate_device(); if (!dev2) { psmouse_err(psmouse, "failed to allocate trackstick device\n"); error = -ENOMEM; goto init_fail; } snprintf(priv->phys2, sizeof(priv->phys2), "%s/input1", psmouse->ps2dev.serio->phys); dev2->phys = priv->phys2; /* * format of input device name is: "protocol vendor name" * see function psmouse_switch_protocol() in psmouse-base.c */ dev2->name = "AlpsPS/2 ALPS DualPoint Stick"; dev2->id.bustype = BUS_I8042; dev2->id.vendor = 0x0002; dev2->id.product = PSMOUSE_ALPS; dev2->id.version = priv->proto_version; dev2->dev.parent = &psmouse->ps2dev.serio->dev; input_set_capability(dev2, EV_REL, REL_X); input_set_capability(dev2, EV_REL, REL_Y); 
input_set_capability(dev2, EV_KEY, BTN_LEFT); input_set_capability(dev2, EV_KEY, BTN_RIGHT); input_set_capability(dev2, EV_KEY, BTN_MIDDLE); __set_bit(INPUT_PROP_POINTER, dev2->propbit); __set_bit(INPUT_PROP_POINTING_STICK, dev2->propbit); error = input_register_device(dev2); if (error) { psmouse_err(psmouse, "failed to register trackstick device: %d\n", error); input_free_device(dev2); goto init_fail; } priv->dev2 = dev2; } priv->psmouse = psmouse; INIT_DELAYED_WORK(&priv->dev3_register_work, alps_register_bare_ps2_mouse); psmouse->protocol_handler = alps_process_byte; psmouse->poll = alps_poll; psmouse->disconnect = alps_disconnect; psmouse->reconnect = alps_reconnect; psmouse->pktsize = priv->proto_version == ALPS_PROTO_V4 ? 8 : 6; /* We are having trouble resyncing ALPS touchpads so disable it for now */ psmouse->resync_time = 0; /* Allow 2 invalid packets without resetting device */ psmouse->resetafter = psmouse->pktsize * 2; return 0; init_fail: psmouse_reset(psmouse); /* * Even though we did not allocate psmouse->private we do free * it here. */ kfree(psmouse->private); psmouse->private = NULL; return error; } int alps_detect(struct psmouse *psmouse, bool set_properties) { struct alps_data *priv; int error; error = alps_identify(psmouse, NULL); if (error) return error; /* * Reset the device to make sure it is fully operational: * on some laptops, like certain Dell Latitudes, we may * fail to properly detect presence of trackstick if device * has not been reset. */ psmouse_reset(psmouse); priv = kzalloc(sizeof(struct alps_data), GFP_KERNEL); if (!priv) return -ENOMEM; error = alps_identify(psmouse, priv); if (error) { kfree(priv); return error; } if (set_properties) { psmouse->vendor = "ALPS"; psmouse->name = priv->flags & ALPS_DUALPOINT ? "DualPoint TouchPad" : "GlidePoint"; psmouse->model = priv->proto_version; } else { /* * Destroy alps_data structure we allocated earlier since * this was just a "trial run". 
Otherwise we'll keep it * to be used by alps_init() which has to be called if * we succeed and set_properties is true. */ kfree(priv); psmouse->private = NULL; } return 0; }
gpl-2.0
yuhc/linux-3.6.5-for-gxen
drivers/power/avs/smartreflex.c
81
30709
/* * OMAP SmartReflex Voltage Control * * Author: Thara Gopinath <thara@ti.com> * * Copyright (C) 2012 Texas Instruments, Inc. * Thara Gopinath <thara@ti.com> * * Copyright (C) 2008 Nokia Corporation * Kalle Jokiniemi * * Copyright (C) 2007 Texas Instruments, Inc. * Lesly A M <x0080970@ti.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/module.h> #include <linux/interrupt.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/slab.h> #include <linux/pm_runtime.h> #include <linux/power/smartreflex.h> #define SMARTREFLEX_NAME_LEN 16 #define NVALUE_NAME_LEN 40 #define SR_DISABLE_TIMEOUT 200 /* sr_list contains all the instances of smartreflex module */ static LIST_HEAD(sr_list); static struct omap_sr_class_data *sr_class; static struct omap_sr_pmic_data *sr_pmic_data; static struct dentry *sr_dbg_dir; static inline void sr_write_reg(struct omap_sr *sr, unsigned offset, u32 value) { __raw_writel(value, (sr->base + offset)); } static inline void sr_modify_reg(struct omap_sr *sr, unsigned offset, u32 mask, u32 value) { u32 reg_val; /* * Smartreflex error config register is special as it contains * certain status bits which if written a 1 into means a clear * of those bits. So in order to make sure no accidental write of * 1 happens to those status bits, do a clear of them in the read * value. This mean this API doesn't rewrite values in these bits * if they are currently set, but does allow the caller to write * those bits. 
*/ if (sr->ip_type == SR_TYPE_V1 && offset == ERRCONFIG_V1) mask |= ERRCONFIG_STATUS_V1_MASK; else if (sr->ip_type == SR_TYPE_V2 && offset == ERRCONFIG_V2) mask |= ERRCONFIG_VPBOUNDINTST_V2; reg_val = __raw_readl(sr->base + offset); reg_val &= ~mask; value &= mask; reg_val |= value; __raw_writel(reg_val, (sr->base + offset)); } static inline u32 sr_read_reg(struct omap_sr *sr, unsigned offset) { return __raw_readl(sr->base + offset); } static struct omap_sr *_sr_lookup(struct voltagedomain *voltdm) { struct omap_sr *sr_info; if (!voltdm) { pr_err("%s: Null voltage domain passed!\n", __func__); return ERR_PTR(-EINVAL); } list_for_each_entry(sr_info, &sr_list, node) { if (voltdm == sr_info->voltdm) return sr_info; } return ERR_PTR(-ENODATA); } static irqreturn_t sr_interrupt(int irq, void *data) { struct omap_sr *sr_info = data; u32 status = 0; switch (sr_info->ip_type) { case SR_TYPE_V1: /* Read the status bits */ status = sr_read_reg(sr_info, ERRCONFIG_V1); /* Clear them by writing back */ sr_write_reg(sr_info, ERRCONFIG_V1, status); break; case SR_TYPE_V2: /* Read the status bits */ status = sr_read_reg(sr_info, IRQSTATUS); /* Clear them by writing back */ sr_write_reg(sr_info, IRQSTATUS, status); break; default: dev_err(&sr_info->pdev->dev, "UNKNOWN IP type %d\n", sr_info->ip_type); return IRQ_NONE; } if (sr_class->notify) sr_class->notify(sr_info, status); return IRQ_HANDLED; } static void sr_set_clk_length(struct omap_sr *sr) { struct clk *sys_ck; u32 sys_clk_speed; if (cpu_is_omap34xx()) sys_ck = clk_get(NULL, "sys_ck"); else sys_ck = clk_get(NULL, "sys_clkin_ck"); if (IS_ERR(sys_ck)) { dev_err(&sr->pdev->dev, "%s: unable to get sys clk\n", __func__); return; } sys_clk_speed = clk_get_rate(sys_ck); clk_put(sys_ck); switch (sys_clk_speed) { case 12000000: sr->clk_length = SRCLKLENGTH_12MHZ_SYSCLK; break; case 13000000: sr->clk_length = SRCLKLENGTH_13MHZ_SYSCLK; break; case 19200000: sr->clk_length = SRCLKLENGTH_19MHZ_SYSCLK; break; case 26000000: sr->clk_length 
= SRCLKLENGTH_26MHZ_SYSCLK; break; case 38400000: sr->clk_length = SRCLKLENGTH_38MHZ_SYSCLK; break; default: dev_err(&sr->pdev->dev, "%s: Invalid sysclk value: %d\n", __func__, sys_clk_speed); break; } } static void sr_set_regfields(struct omap_sr *sr) { /* * For time being these values are defined in smartreflex.h * and populated during init. May be they can be moved to board * file or pmic specific data structure. In that case these structure * fields will have to be populated using the pdata or pmic structure. */ if (cpu_is_omap34xx() || cpu_is_omap44xx()) { sr->err_weight = OMAP3430_SR_ERRWEIGHT; sr->err_maxlimit = OMAP3430_SR_ERRMAXLIMIT; sr->accum_data = OMAP3430_SR_ACCUMDATA; if (!(strcmp(sr->name, "smartreflex_mpu_iva"))) { sr->senn_avgweight = OMAP3430_SR1_SENNAVGWEIGHT; sr->senp_avgweight = OMAP3430_SR1_SENPAVGWEIGHT; } else { sr->senn_avgweight = OMAP3430_SR2_SENNAVGWEIGHT; sr->senp_avgweight = OMAP3430_SR2_SENPAVGWEIGHT; } } } static void sr_start_vddautocomp(struct omap_sr *sr) { if (!sr_class || !(sr_class->enable) || !(sr_class->configure)) { dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not registered\n", __func__); return; } if (!sr_class->enable(sr)) sr->autocomp_active = true; } static void sr_stop_vddautocomp(struct omap_sr *sr) { if (!sr_class || !(sr_class->disable)) { dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not registered\n", __func__); return; } if (sr->autocomp_active) { sr_class->disable(sr, 1); sr->autocomp_active = false; } } /* * This function handles the intializations which have to be done * only when both sr device and class driver regiter has * completed. This will be attempted to be called from both sr class * driver register and sr device intializtion API's. Only one call * will ultimately succeed. * * Currently this function registers interrupt handler for a particular SR * if smartreflex class driver is already registered and has * requested for interrupts and the SR interrupt line in present. 
*/ static int sr_late_init(struct omap_sr *sr_info) { struct omap_sr_data *pdata = sr_info->pdev->dev.platform_data; struct resource *mem; int ret = 0; if (sr_class->notify && sr_class->notify_flags && sr_info->irq) { ret = request_irq(sr_info->irq, sr_interrupt, 0, sr_info->name, sr_info); if (ret) goto error; disable_irq(sr_info->irq); } if (pdata && pdata->enable_on_init) sr_start_vddautocomp(sr_info); return ret; error: iounmap(sr_info->base); mem = platform_get_resource(sr_info->pdev, IORESOURCE_MEM, 0); release_mem_region(mem->start, resource_size(mem)); list_del(&sr_info->node); dev_err(&sr_info->pdev->dev, "%s: ERROR in registering" "interrupt handler. Smartreflex will" "not function as desired\n", __func__); kfree(sr_info); return ret; } static void sr_v1_disable(struct omap_sr *sr) { int timeout = 0; int errconf_val = ERRCONFIG_MCUACCUMINTST | ERRCONFIG_MCUVALIDINTST | ERRCONFIG_MCUBOUNDINTST; /* Enable MCUDisableAcknowledge interrupt */ sr_modify_reg(sr, ERRCONFIG_V1, ERRCONFIG_MCUDISACKINTEN, ERRCONFIG_MCUDISACKINTEN); /* SRCONFIG - disable SR */ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0); /* Disable all other SR interrupts and clear the status as needed */ if (sr_read_reg(sr, ERRCONFIG_V1) & ERRCONFIG_VPBOUNDINTST_V1) errconf_val |= ERRCONFIG_VPBOUNDINTST_V1; sr_modify_reg(sr, ERRCONFIG_V1, (ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUVALIDINTEN | ERRCONFIG_MCUBOUNDINTEN | ERRCONFIG_VPBOUNDINTEN_V1), errconf_val); /* * Wait for SR to be disabled. * wait until ERRCONFIG.MCUDISACKINTST = 1. Typical latency is 1us. 
*/ sr_test_cond_timeout((sr_read_reg(sr, ERRCONFIG_V1) & ERRCONFIG_MCUDISACKINTST), SR_DISABLE_TIMEOUT, timeout); if (timeout >= SR_DISABLE_TIMEOUT) dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timedout\n", __func__); /* Disable MCUDisableAcknowledge interrupt & clear pending interrupt */ sr_modify_reg(sr, ERRCONFIG_V1, ERRCONFIG_MCUDISACKINTEN, ERRCONFIG_MCUDISACKINTST); } static void sr_v2_disable(struct omap_sr *sr) { int timeout = 0; /* Enable MCUDisableAcknowledge interrupt */ sr_write_reg(sr, IRQENABLE_SET, IRQENABLE_MCUDISABLEACKINT); /* SRCONFIG - disable SR */ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, 0x0); /* * Disable all other SR interrupts and clear the status * write to status register ONLY on need basis - only if status * is set. */ if (sr_read_reg(sr, ERRCONFIG_V2) & ERRCONFIG_VPBOUNDINTST_V2) sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2, ERRCONFIG_VPBOUNDINTST_V2); else sr_modify_reg(sr, ERRCONFIG_V2, ERRCONFIG_VPBOUNDINTEN_V2, 0x0); sr_write_reg(sr, IRQENABLE_CLR, (IRQENABLE_MCUACCUMINT | IRQENABLE_MCUVALIDINT | IRQENABLE_MCUBOUNDSINT)); sr_write_reg(sr, IRQSTATUS, (IRQSTATUS_MCUACCUMINT | IRQSTATUS_MCVALIDINT | IRQSTATUS_MCBOUNDSINT)); /* * Wait for SR to be disabled. * wait until IRQSTATUS.MCUDISACKINTST = 1. Typical latency is 1us. 
*/ sr_test_cond_timeout((sr_read_reg(sr, IRQSTATUS) & IRQSTATUS_MCUDISABLEACKINT), SR_DISABLE_TIMEOUT, timeout); if (timeout >= SR_DISABLE_TIMEOUT) dev_warn(&sr->pdev->dev, "%s: Smartreflex disable timedout\n", __func__); /* Disable MCUDisableAcknowledge interrupt & clear pending interrupt */ sr_write_reg(sr, IRQENABLE_CLR, IRQENABLE_MCUDISABLEACKINT); sr_write_reg(sr, IRQSTATUS, IRQSTATUS_MCUDISABLEACKINT); } static struct omap_sr_nvalue_table *sr_retrieve_nvalue_row( struct omap_sr *sr, u32 efuse_offs) { int i; if (!sr->nvalue_table) { dev_warn(&sr->pdev->dev, "%s: Missing ntarget value table\n", __func__); return NULL; } for (i = 0; i < sr->nvalue_count; i++) { if (sr->nvalue_table[i].efuse_offs == efuse_offs) return &sr->nvalue_table[i]; } return NULL; } /* Public Functions */ /** * sr_configure_errgen() - Configures the smrtreflex to perform AVS using the * error generator module. * @voltdm: VDD pointer to which the SR module to be configured belongs to. * * This API is to be called from the smartreflex class driver to * configure the error generator module inside the smartreflex module. * SR settings if using the ERROR module inside Smartreflex. * SR CLASS 3 by default uses only the ERROR module where as * SR CLASS 2 can choose between ERROR module and MINMAXAVG * module. Returns 0 on success and error value in case of failure. 
*/
int sr_configure_errgen(struct voltagedomain *voltdm)
{
	u32 config, errconfig, errconfig_offs;
	u32 vpboundint_en, vpboundint_st;
	u32 senp_en, senn_en;
	u8 senp_shift, senn_shift;
	struct omap_sr *sr = _sr_lookup(voltdm);

	if (IS_ERR(sr)) {
		pr_warning("%s: omap_sr struct for voltdm not found\n",
			__func__);
		return PTR_ERR(sr);
	}

	/* SRCONFIG needs a valid clock length; compute it lazily. */
	if (!sr->clk_length)
		sr_set_clk_length(sr);

	senp_en = sr->senp_mod;
	senn_en = sr->senn_mod;

	config = (sr->clk_length << SRCONFIG_SRCLKLENGTH_SHIFT) |
		SRCONFIG_SENENABLE | SRCONFIG_ERRGEN_EN;

	/* The register layout differs between the two SR IP revisions. */
	switch (sr->ip_type) {
	case SR_TYPE_V1:
		config |= SRCONFIG_DELAYCTRL;
		senn_shift = SRCONFIG_SENNENABLE_V1_SHIFT;
		senp_shift = SRCONFIG_SENPENABLE_V1_SHIFT;
		errconfig_offs = ERRCONFIG_V1;
		vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V1;
		vpboundint_st = ERRCONFIG_VPBOUNDINTST_V1;
		break;
	case SR_TYPE_V2:
		senn_shift = SRCONFIG_SENNENABLE_V2_SHIFT;
		senp_shift = SRCONFIG_SENPENABLE_V2_SHIFT;
		errconfig_offs = ERRCONFIG_V2;
		vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V2;
		vpboundint_st = ERRCONFIG_VPBOUNDINTST_V2;
		break;
	default:
		dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex"
			"module without specifying the ip\n", __func__);
		return -EINVAL;
	}

	config |= (senn_en << senn_shift) | (senp_en << senp_shift);
	sr_write_reg(sr, SRCONFIG, config);

	errconfig = (sr->err_weight << ERRCONFIG_ERRWEIGHT_SHIFT) |
		(sr->err_maxlimit << ERRCONFIG_ERRMAXLIMIT_SHIFT) |
		(sr->err_minlimit << ERRCONFIG_ERRMINLIMIT_SHIFT);

	sr_modify_reg(sr, errconfig_offs,
		(SR_ERRWEIGHT_MASK | SR_ERRMAXLIMIT_MASK | SR_ERRMINLIMIT_MASK),
		errconfig);

	/* The ERROR module is in use: unmask the VP bound interrupt. */
	sr_modify_reg(sr, errconfig_offs, (vpboundint_en | vpboundint_st),
		vpboundint_en);

	return 0;
}

/**
 * sr_disable_errgen() - Disables SmartReflex AVS module's errgen component
 * @voltdm:	VDD pointer to which the SR module to be configured belongs to.
* * This API is to be called from the smartreflex class driver to * disable the error generator module inside the smartreflex module. * * Returns 0 on success and error value in case of failure. */ int sr_disable_errgen(struct voltagedomain *voltdm) { u32 errconfig_offs; u32 vpboundint_en, vpboundint_st; struct omap_sr *sr = _sr_lookup(voltdm); if (IS_ERR(sr)) { pr_warning("%s: omap_sr struct for voltdm not found\n", __func__); return PTR_ERR(sr); } switch (sr->ip_type) { case SR_TYPE_V1: errconfig_offs = ERRCONFIG_V1; vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V1; vpboundint_st = ERRCONFIG_VPBOUNDINTST_V1; break; case SR_TYPE_V2: errconfig_offs = ERRCONFIG_V2; vpboundint_en = ERRCONFIG_VPBOUNDINTEN_V2; vpboundint_st = ERRCONFIG_VPBOUNDINTST_V2; break; default: dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex" "module without specifying the ip\n", __func__); return -EINVAL; } /* Disable the interrupts of ERROR module */ sr_modify_reg(sr, errconfig_offs, vpboundint_en | vpboundint_st, 0); /* Disable the Sensor and errorgen */ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SENENABLE | SRCONFIG_ERRGEN_EN, 0); return 0; } /** * sr_configure_minmax() - Configures the smrtreflex to perform AVS using the * minmaxavg module. * @voltdm: VDD pointer to which the SR module to be configured belongs to. * * This API is to be called from the smartreflex class driver to * configure the minmaxavg module inside the smartreflex module. * SR settings if using the ERROR module inside Smartreflex. * SR CLASS 3 by default uses only the ERROR module where as * SR CLASS 2 can choose between ERROR module and MINMAXAVG * module. Returns 0 on success and error value in case of failure. 
*/
int sr_configure_minmax(struct voltagedomain *voltdm)
{
	u32 config, avgweight;
	u32 senp_en, senn_en;
	u8 senp_shift, senn_shift;
	struct omap_sr *sr = _sr_lookup(voltdm);

	if (IS_ERR(sr)) {
		pr_warning("%s: omap_sr struct for voltdm not found\n",
			__func__);
		return PTR_ERR(sr);
	}

	/* SRCONFIG needs a valid clock length; compute it lazily. */
	if (!sr->clk_length)
		sr_set_clk_length(sr);

	senp_en = sr->senp_mod;
	senn_en = sr->senn_mod;

	config = (sr->clk_length << SRCONFIG_SRCLKLENGTH_SHIFT) |
		SRCONFIG_SENENABLE |
		(sr->accum_data << SRCONFIG_ACCUMDATA_SHIFT);

	/* The sensor-enable bit positions differ per SR IP revision. */
	switch (sr->ip_type) {
	case SR_TYPE_V1:
		config |= SRCONFIG_DELAYCTRL;
		senn_shift = SRCONFIG_SENNENABLE_V1_SHIFT;
		senp_shift = SRCONFIG_SENPENABLE_V1_SHIFT;
		break;
	case SR_TYPE_V2:
		senn_shift = SRCONFIG_SENNENABLE_V2_SHIFT;
		senp_shift = SRCONFIG_SENPENABLE_V2_SHIFT;
		break;
	default:
		dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex"
			"module without specifying the ip\n", __func__);
		return -EINVAL;
	}

	config |= (senn_en << senn_shift) | (senp_en << senp_shift);
	sr_write_reg(sr, SRCONFIG, config);

	avgweight = (sr->senp_avgweight << AVGWEIGHT_SENPAVGWEIGHT_SHIFT) |
		(sr->senn_avgweight << AVGWEIGHT_SENNAVGWEIGHT_SHIFT);
	sr_write_reg(sr, AVGWEIGHT, avgweight);

	/*
	 * Enabling the interrupts if MINMAXAVG module is used.
	 * TODO: check if all the interrupts are mandatory
	 */
	switch (sr->ip_type) {
	case SR_TYPE_V1:
		sr_modify_reg(sr, ERRCONFIG_V1,
			(ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUVALIDINTEN |
			ERRCONFIG_MCUBOUNDINTEN),
			(ERRCONFIG_MCUACCUMINTEN | ERRCONFIG_MCUACCUMINTST |
			ERRCONFIG_MCUVALIDINTEN | ERRCONFIG_MCUVALIDINTST |
			ERRCONFIG_MCUBOUNDINTEN | ERRCONFIG_MCUBOUNDINTST));
		break;
	case SR_TYPE_V2:
		/* Clear any stale status before unmasking the interrupts. */
		sr_write_reg(sr, IRQSTATUS,
			IRQSTATUS_MCUACCUMINT | IRQSTATUS_MCVALIDINT |
			IRQSTATUS_MCBOUNDSINT | IRQSTATUS_MCUDISABLEACKINT);
		sr_write_reg(sr, IRQENABLE_SET,
			IRQENABLE_MCUACCUMINT | IRQENABLE_MCUVALIDINT |
			IRQENABLE_MCUBOUNDSINT | IRQENABLE_MCUDISABLEACKINT);
		break;
	default:
		dev_err(&sr->pdev->dev, "%s: Trying to Configure smartreflex"
			"module without specifying the ip\n", __func__);
		return -EINVAL;
	}

	return 0;
}

/**
 * sr_enable() - Enables the smartreflex module.
 * @voltdm:	VDD pointer to which the SR module to be configured belongs to.
 * @volt:	The voltage at which the Voltage domain associated with
 *		the smartreflex module is operating at.
 *		This is required only to program the correct Ntarget value.
 *
 * This API is to be called from the smartreflex class driver to
 * enable a smartreflex module. Returns 0 on success. Returns error
 * value if the voltage passed is wrong or if ntarget value is wrong.
*/ int sr_enable(struct voltagedomain *voltdm, unsigned long volt) { struct omap_volt_data *volt_data; struct omap_sr *sr = _sr_lookup(voltdm); struct omap_sr_nvalue_table *nvalue_row; int ret; if (IS_ERR(sr)) { pr_warning("%s: omap_sr struct for voltdm not found\n", __func__); return PTR_ERR(sr); } volt_data = omap_voltage_get_voltdata(sr->voltdm, volt); if (IS_ERR(volt_data)) { dev_warn(&sr->pdev->dev, "%s: Unable to get voltage table" "for nominal voltage %ld\n", __func__, volt); return PTR_ERR(volt_data); } nvalue_row = sr_retrieve_nvalue_row(sr, volt_data->sr_efuse_offs); if (!nvalue_row) { dev_warn(&sr->pdev->dev, "%s: failure getting SR data for this voltage %ld\n", __func__, volt); return -ENODATA; } /* errminlimit is opp dependent and hence linked to voltage */ sr->err_minlimit = nvalue_row->errminlimit; pm_runtime_get_sync(&sr->pdev->dev); /* Check if SR is already enabled. If yes do nothing */ if (sr_read_reg(sr, SRCONFIG) & SRCONFIG_SRENABLE) return 0; /* Configure SR */ ret = sr_class->configure(sr); if (ret) return ret; sr_write_reg(sr, NVALUERECIPROCAL, nvalue_row->nvalue); /* SRCONFIG - enable SR */ sr_modify_reg(sr, SRCONFIG, SRCONFIG_SRENABLE, SRCONFIG_SRENABLE); return 0; } /** * sr_disable() - Disables the smartreflex module. * @voltdm: VDD pointer to which the SR module to be configured belongs to. * * This API is to be called from the smartreflex class driver to * disable a smartreflex module. */ void sr_disable(struct voltagedomain *voltdm) { struct omap_sr *sr = _sr_lookup(voltdm); if (IS_ERR(sr)) { pr_warning("%s: omap_sr struct for voltdm not found\n", __func__); return; } /* Check if SR clocks are already disabled. If yes do nothing */ if (pm_runtime_suspended(&sr->pdev->dev)) return; /* * Disable SR if only it is indeed enabled. Else just * disable the clocks. 
*/ if (sr_read_reg(sr, SRCONFIG) & SRCONFIG_SRENABLE) { switch (sr->ip_type) { case SR_TYPE_V1: sr_v1_disable(sr); break; case SR_TYPE_V2: sr_v2_disable(sr); break; default: dev_err(&sr->pdev->dev, "UNKNOWN IP type %d\n", sr->ip_type); } } pm_runtime_put_sync_suspend(&sr->pdev->dev); } /** * sr_register_class() - API to register a smartreflex class parameters. * @class_data: The structure containing various sr class specific data. * * This API is to be called by the smartreflex class driver to register itself * with the smartreflex driver during init. Returns 0 on success else the * error value. */ int sr_register_class(struct omap_sr_class_data *class_data) { struct omap_sr *sr_info; if (!class_data) { pr_warning("%s:, Smartreflex class data passed is NULL\n", __func__); return -EINVAL; } if (sr_class) { pr_warning("%s: Smartreflex class driver already registered\n", __func__); return -EBUSY; } sr_class = class_data; /* * Call into late init to do intializations that require * both sr driver and sr class driver to be initiallized. */ list_for_each_entry(sr_info, &sr_list, node) sr_late_init(sr_info); return 0; } /** * omap_sr_enable() - API to enable SR clocks and to call into the * registered smartreflex class enable API. * @voltdm: VDD pointer to which the SR module to be configured belongs to. * * This API is to be called from the kernel in order to enable * a particular smartreflex module. This API will do the initial * configurations to turn on the smartreflex module and in turn call * into the registered smartreflex class enable API. 
*/ void omap_sr_enable(struct voltagedomain *voltdm) { struct omap_sr *sr = _sr_lookup(voltdm); if (IS_ERR(sr)) { pr_warning("%s: omap_sr struct for voltdm not found\n", __func__); return; } if (!sr->autocomp_active) return; if (!sr_class || !(sr_class->enable) || !(sr_class->configure)) { dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not" "registered\n", __func__); return; } sr_class->enable(sr); } /** * omap_sr_disable() - API to disable SR without resetting the voltage * processor voltage * @voltdm: VDD pointer to which the SR module to be configured belongs to. * * This API is to be called from the kernel in order to disable * a particular smartreflex module. This API will in turn call * into the registered smartreflex class disable API. This API will tell * the smartreflex class disable not to reset the VP voltage after * disabling smartreflex. */ void omap_sr_disable(struct voltagedomain *voltdm) { struct omap_sr *sr = _sr_lookup(voltdm); if (IS_ERR(sr)) { pr_warning("%s: omap_sr struct for voltdm not found\n", __func__); return; } if (!sr->autocomp_active) return; if (!sr_class || !(sr_class->disable)) { dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not" "registered\n", __func__); return; } sr_class->disable(sr, 0); } /** * omap_sr_disable_reset_volt() - API to disable SR and reset the * voltage processor voltage * @voltdm: VDD pointer to which the SR module to be configured belongs to. * * This API is to be called from the kernel in order to disable * a particular smartreflex module. This API will in turn call * into the registered smartreflex class disable API. This API will tell * the smartreflex class disable to reset the VP voltage after * disabling smartreflex. 
*/ void omap_sr_disable_reset_volt(struct voltagedomain *voltdm) { struct omap_sr *sr = _sr_lookup(voltdm); if (IS_ERR(sr)) { pr_warning("%s: omap_sr struct for voltdm not found\n", __func__); return; } if (!sr->autocomp_active) return; if (!sr_class || !(sr_class->disable)) { dev_warn(&sr->pdev->dev, "%s: smartreflex class driver not" "registered\n", __func__); return; } sr_class->disable(sr, 1); } /** * omap_sr_register_pmic() - API to register pmic specific info. * @pmic_data: The structure containing pmic specific data. * * This API is to be called from the PMIC specific code to register with * smartreflex driver pmic specific info. Currently the only info required * is the smartreflex init on the PMIC side. */ void omap_sr_register_pmic(struct omap_sr_pmic_data *pmic_data) { if (!pmic_data) { pr_warning("%s: Trying to register NULL PMIC data structure" "with smartreflex\n", __func__); return; } sr_pmic_data = pmic_data; } /* PM Debug FS entries to enable and disable smartreflex. */ static int omap_sr_autocomp_show(void *data, u64 *val) { struct omap_sr *sr_info = data; if (!sr_info) { pr_warning("%s: omap_sr struct not found\n", __func__); return -EINVAL; } *val = sr_info->autocomp_active; return 0; } static int omap_sr_autocomp_store(void *data, u64 val) { struct omap_sr *sr_info = data; if (!sr_info) { pr_warning("%s: omap_sr struct not found\n", __func__); return -EINVAL; } /* Sanity check */ if (val > 1) { pr_warning("%s: Invalid argument %lld\n", __func__, val); return -EINVAL; } /* control enable/disable only if there is a delta in value */ if (sr_info->autocomp_active != val) { if (!val) sr_stop_vddautocomp(sr_info); else sr_start_vddautocomp(sr_info); } return 0; } DEFINE_SIMPLE_ATTRIBUTE(pm_sr_fops, omap_sr_autocomp_show, omap_sr_autocomp_store, "%llu\n"); static int __init omap_sr_probe(struct platform_device *pdev) { struct omap_sr *sr_info; struct omap_sr_data *pdata = pdev->dev.platform_data; struct resource *mem, *irq; struct dentry *nvalue_dir; 
int i, ret = 0; sr_info = kzalloc(sizeof(struct omap_sr), GFP_KERNEL); if (!sr_info) { dev_err(&pdev->dev, "%s: unable to allocate sr_info\n", __func__); return -ENOMEM; } platform_set_drvdata(pdev, sr_info); if (!pdata) { dev_err(&pdev->dev, "%s: platform data missing\n", __func__); ret = -EINVAL; goto err_free_devinfo; } mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!mem) { dev_err(&pdev->dev, "%s: no mem resource\n", __func__); ret = -ENODEV; goto err_free_devinfo; } mem = request_mem_region(mem->start, resource_size(mem), dev_name(&pdev->dev)); if (!mem) { dev_err(&pdev->dev, "%s: no mem region\n", __func__); ret = -EBUSY; goto err_free_devinfo; } irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0); pm_runtime_enable(&pdev->dev); pm_runtime_irq_safe(&pdev->dev); sr_info->name = kasprintf(GFP_KERNEL, "%s", pdata->name); if (!sr_info->name) { dev_err(&pdev->dev, "%s: Unable to alloc SR instance name\n", __func__); ret = -ENOMEM; goto err_release_region; } sr_info->pdev = pdev; sr_info->srid = pdev->id; sr_info->voltdm = pdata->voltdm; sr_info->nvalue_table = pdata->nvalue_table; sr_info->nvalue_count = pdata->nvalue_count; sr_info->senn_mod = pdata->senn_mod; sr_info->senp_mod = pdata->senp_mod; sr_info->autocomp_active = false; sr_info->ip_type = pdata->ip_type; sr_info->base = ioremap(mem->start, resource_size(mem)); if (!sr_info->base) { dev_err(&pdev->dev, "%s: ioremap fail\n", __func__); ret = -ENOMEM; goto err_release_region; } if (irq) sr_info->irq = irq->start; sr_set_clk_length(sr_info); sr_set_regfields(sr_info); list_add(&sr_info->node, &sr_list); /* * Call into late init to do intializations that require * both sr driver and sr class driver to be initiallized. 
*/ if (sr_class) { ret = sr_late_init(sr_info); if (ret) { pr_warning("%s: Error in SR late init\n", __func__); goto err_iounmap; } } dev_info(&pdev->dev, "%s: SmartReflex driver initialized\n", __func__); if (!sr_dbg_dir) { sr_dbg_dir = debugfs_create_dir("smartreflex", NULL); if (IS_ERR_OR_NULL(sr_dbg_dir)) { ret = PTR_ERR(sr_dbg_dir); pr_err("%s:sr debugfs dir creation failed(%d)\n", __func__, ret); goto err_iounmap; } } sr_info->dbg_dir = debugfs_create_dir(sr_info->name, sr_dbg_dir); if (IS_ERR_OR_NULL(sr_info->dbg_dir)) { dev_err(&pdev->dev, "%s: Unable to create debugfs directory\n", __func__); ret = PTR_ERR(sr_info->dbg_dir); goto err_free_name; } (void) debugfs_create_file("autocomp", S_IRUGO | S_IWUSR, sr_info->dbg_dir, (void *)sr_info, &pm_sr_fops); (void) debugfs_create_x32("errweight", S_IRUGO, sr_info->dbg_dir, &sr_info->err_weight); (void) debugfs_create_x32("errmaxlimit", S_IRUGO, sr_info->dbg_dir, &sr_info->err_maxlimit); nvalue_dir = debugfs_create_dir("nvalue", sr_info->dbg_dir); if (IS_ERR_OR_NULL(nvalue_dir)) { dev_err(&pdev->dev, "%s: Unable to create debugfs directory" "for n-values\n", __func__); ret = PTR_ERR(nvalue_dir); goto err_debugfs; } if (sr_info->nvalue_count == 0 || !sr_info->nvalue_table) { dev_warn(&pdev->dev, "%s: %s: No Voltage table for the corresponding vdd. 
Cannot create debugfs entries for n-values\n", __func__, sr_info->name); ret = -ENODATA; goto err_debugfs; } for (i = 0; i < sr_info->nvalue_count; i++) { char name[NVALUE_NAME_LEN + 1]; snprintf(name, sizeof(name), "volt_%lu", sr_info->nvalue_table[i].volt_nominal); (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir, &(sr_info->nvalue_table[i].nvalue)); snprintf(name, sizeof(name), "errminlimit_%lu", sr_info->nvalue_table[i].volt_nominal); (void) debugfs_create_x32(name, S_IRUGO | S_IWUSR, nvalue_dir, &(sr_info->nvalue_table[i].errminlimit)); } return ret; err_debugfs: debugfs_remove_recursive(sr_info->dbg_dir); err_free_name: kfree(sr_info->name); err_iounmap: list_del(&sr_info->node); iounmap(sr_info->base); err_release_region: release_mem_region(mem->start, resource_size(mem)); err_free_devinfo: kfree(sr_info); return ret; } static int __devexit omap_sr_remove(struct platform_device *pdev) { struct omap_sr_data *pdata = pdev->dev.platform_data; struct omap_sr *sr_info; struct resource *mem; if (!pdata) { dev_err(&pdev->dev, "%s: platform data missing\n", __func__); return -EINVAL; } sr_info = _sr_lookup(pdata->voltdm); if (IS_ERR(sr_info)) { dev_warn(&pdev->dev, "%s: omap_sr struct not found\n", __func__); return PTR_ERR(sr_info); } if (sr_info->autocomp_active) sr_stop_vddautocomp(sr_info); if (sr_info->dbg_dir) debugfs_remove_recursive(sr_info->dbg_dir); list_del(&sr_info->node); iounmap(sr_info->base); kfree(sr_info->name); kfree(sr_info); mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); release_mem_region(mem->start, resource_size(mem)); return 0; } static void __devexit omap_sr_shutdown(struct platform_device *pdev) { struct omap_sr_data *pdata = pdev->dev.platform_data; struct omap_sr *sr_info; if (!pdata) { dev_err(&pdev->dev, "%s: platform data missing\n", __func__); return; } sr_info = _sr_lookup(pdata->voltdm); if (IS_ERR(sr_info)) { dev_warn(&pdev->dev, "%s: omap_sr struct not found\n", __func__); return; } if 
(sr_info->autocomp_active) sr_stop_vddautocomp(sr_info); return; } static struct platform_driver smartreflex_driver = { .remove = __devexit_p(omap_sr_remove), .shutdown = __devexit_p(omap_sr_shutdown), .driver = { .name = "smartreflex", }, }; static int __init sr_init(void) { int ret = 0; /* * sr_init is a late init. If by then a pmic specific API is not * registered either there is no need for anything to be done on * the PMIC side or somebody has forgotten to register a PMIC * handler. Warn for the second condition. */ if (sr_pmic_data && sr_pmic_data->sr_pmic_init) sr_pmic_data->sr_pmic_init(); else pr_warning("%s: No PMIC hook to init smartreflex\n", __func__); ret = platform_driver_probe(&smartreflex_driver, omap_sr_probe); if (ret) { pr_err("%s: platform driver register failed for SR\n", __func__); return ret; } return 0; } late_initcall(sr_init); static void __exit sr_exit(void) { platform_driver_unregister(&smartreflex_driver); } module_exit(sr_exit); MODULE_DESCRIPTION("OMAP Smartreflex Driver"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" DRIVER_NAME); MODULE_AUTHOR("Texas Instruments Inc");
gpl-2.0
coolbho3k/galaxysii_oc
sound/pci/ca0106/ca0106_proc.c
1105
14348
/* * Copyright (c) 2004 James Courtier-Dutton <James@superbug.demon.co.uk> * Driver CA0106 chips. e.g. Sound Blaster Audigy LS and Live 24bit * Version: 0.0.18 * * FEATURES currently supported: * See ca0106_main.c for features. * * Changelog: * Support interrupts per period. * Removed noise from Center/LFE channel when in Analog mode. * Rename and remove mixer controls. * 0.0.6 * Use separate card based DMA buffer for periods table list. * 0.0.7 * Change remove and rename ctrls into lists. * 0.0.8 * Try to fix capture sources. * 0.0.9 * Fix AC3 output. * Enable S32_LE format support. * 0.0.10 * Enable playback 48000 and 96000 rates. (Rates other that these do not work, even with "plug:front".) * 0.0.11 * Add Model name recognition. * 0.0.12 * Correct interrupt timing. interrupt at end of period, instead of in the middle of a playback period. * Remove redundent "voice" handling. * 0.0.13 * Single trigger call for multi channels. * 0.0.14 * Set limits based on what the sound card hardware can do. * playback periods_min=2, periods_max=8 * capture hw constraints require period_size = n * 64 bytes. * playback hw constraints require period_size = n * 64 bytes. * 0.0.15 * Separate ca0106.c into separate functional .c files. * 0.0.16 * Modified Copyright message. * 0.0.17 * Add iec958 file in proc file system to show status of SPDIF in. * 0.0.18 * Implement support for Line-in capture on SB Live 24bit. * * This code was initally based on code from ALSA's emu10k1x.c which is: * Copyright (c) by Francisco Moraes <fmoraes@nc.rr.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include <linux/delay.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/moduleparam.h> #include <sound/core.h> #include <sound/initval.h> #include <sound/pcm.h> #include <sound/ac97_codec.h> #include <sound/info.h> #include <sound/asoundef.h> #include <asm/io.h> #include "ca0106.h" #ifdef CONFIG_PROC_FS struct snd_ca0106_category_str { int val; const char *name; }; static struct snd_ca0106_category_str snd_ca0106_con_category[] = { { IEC958_AES1_CON_DAT, "DAT" }, { IEC958_AES1_CON_VCR, "VCR" }, { IEC958_AES1_CON_MICROPHONE, "microphone" }, { IEC958_AES1_CON_SYNTHESIZER, "synthesizer" }, { IEC958_AES1_CON_RATE_CONVERTER, "rate converter" }, { IEC958_AES1_CON_MIXER, "mixer" }, { IEC958_AES1_CON_SAMPLER, "sampler" }, { IEC958_AES1_CON_PCM_CODER, "PCM coder" }, { IEC958_AES1_CON_IEC908_CD, "CD" }, { IEC958_AES1_CON_NON_IEC908_CD, "non-IEC908 CD" }, { IEC958_AES1_CON_GENERAL, "general" }, }; static void snd_ca0106_proc_dump_iec958( struct snd_info_buffer *buffer, u32 value) { int i; u32 status[4]; status[0] = value & 0xff; status[1] = (value >> 8) & 0xff; status[2] = (value >> 16) & 0xff; status[3] = (value >> 24) & 0xff; if (! 
(status[0] & IEC958_AES0_PROFESSIONAL)) { /* consumer */ snd_iprintf(buffer, "Mode: consumer\n"); snd_iprintf(buffer, "Data: "); if (!(status[0] & IEC958_AES0_NONAUDIO)) { snd_iprintf(buffer, "audio\n"); } else { snd_iprintf(buffer, "non-audio\n"); } snd_iprintf(buffer, "Rate: "); switch (status[3] & IEC958_AES3_CON_FS) { case IEC958_AES3_CON_FS_44100: snd_iprintf(buffer, "44100 Hz\n"); break; case IEC958_AES3_CON_FS_48000: snd_iprintf(buffer, "48000 Hz\n"); break; case IEC958_AES3_CON_FS_32000: snd_iprintf(buffer, "32000 Hz\n"); break; default: snd_iprintf(buffer, "unknown\n"); break; } snd_iprintf(buffer, "Copyright: "); if (status[0] & IEC958_AES0_CON_NOT_COPYRIGHT) { snd_iprintf(buffer, "permitted\n"); } else { snd_iprintf(buffer, "protected\n"); } snd_iprintf(buffer, "Emphasis: "); if ((status[0] & IEC958_AES0_CON_EMPHASIS) != IEC958_AES0_CON_EMPHASIS_5015) { snd_iprintf(buffer, "none\n"); } else { snd_iprintf(buffer, "50/15us\n"); } snd_iprintf(buffer, "Category: "); for (i = 0; i < ARRAY_SIZE(snd_ca0106_con_category); i++) { if ((status[1] & IEC958_AES1_CON_CATEGORY) == snd_ca0106_con_category[i].val) { snd_iprintf(buffer, "%s\n", snd_ca0106_con_category[i].name); break; } } if (i >= ARRAY_SIZE(snd_ca0106_con_category)) { snd_iprintf(buffer, "unknown 0x%x\n", status[1] & IEC958_AES1_CON_CATEGORY); } snd_iprintf(buffer, "Original: "); if (status[1] & IEC958_AES1_CON_ORIGINAL) { snd_iprintf(buffer, "original\n"); } else { snd_iprintf(buffer, "1st generation\n"); } snd_iprintf(buffer, "Clock: "); switch (status[3] & IEC958_AES3_CON_CLOCK) { case IEC958_AES3_CON_CLOCK_1000PPM: snd_iprintf(buffer, "1000 ppm\n"); break; case IEC958_AES3_CON_CLOCK_50PPM: snd_iprintf(buffer, "50 ppm\n"); break; case IEC958_AES3_CON_CLOCK_VARIABLE: snd_iprintf(buffer, "variable pitch\n"); break; default: snd_iprintf(buffer, "unknown\n"); break; } } else { snd_iprintf(buffer, "Mode: professional\n"); snd_iprintf(buffer, "Data: "); if (!(status[0] & IEC958_AES0_NONAUDIO)) { 
snd_iprintf(buffer, "audio\n"); } else { snd_iprintf(buffer, "non-audio\n"); } snd_iprintf(buffer, "Rate: "); switch (status[0] & IEC958_AES0_PRO_FS) { case IEC958_AES0_PRO_FS_44100: snd_iprintf(buffer, "44100 Hz\n"); break; case IEC958_AES0_PRO_FS_48000: snd_iprintf(buffer, "48000 Hz\n"); break; case IEC958_AES0_PRO_FS_32000: snd_iprintf(buffer, "32000 Hz\n"); break; default: snd_iprintf(buffer, "unknown\n"); break; } snd_iprintf(buffer, "Rate Locked: "); if (status[0] & IEC958_AES0_PRO_FREQ_UNLOCKED) snd_iprintf(buffer, "no\n"); else snd_iprintf(buffer, "yes\n"); snd_iprintf(buffer, "Emphasis: "); switch (status[0] & IEC958_AES0_PRO_EMPHASIS) { case IEC958_AES0_PRO_EMPHASIS_CCITT: snd_iprintf(buffer, "CCITT J.17\n"); break; case IEC958_AES0_PRO_EMPHASIS_NONE: snd_iprintf(buffer, "none\n"); break; case IEC958_AES0_PRO_EMPHASIS_5015: snd_iprintf(buffer, "50/15us\n"); break; case IEC958_AES0_PRO_EMPHASIS_NOTID: default: snd_iprintf(buffer, "unknown\n"); break; } snd_iprintf(buffer, "Stereophonic: "); if ((status[1] & IEC958_AES1_PRO_MODE) == IEC958_AES1_PRO_MODE_STEREOPHONIC) { snd_iprintf(buffer, "stereo\n"); } else { snd_iprintf(buffer, "not indicated\n"); } snd_iprintf(buffer, "Userbits: "); switch (status[1] & IEC958_AES1_PRO_USERBITS) { case IEC958_AES1_PRO_USERBITS_192: snd_iprintf(buffer, "192bit\n"); break; case IEC958_AES1_PRO_USERBITS_UDEF: snd_iprintf(buffer, "user-defined\n"); break; default: snd_iprintf(buffer, "unknown\n"); break; } snd_iprintf(buffer, "Sample Bits: "); switch (status[2] & IEC958_AES2_PRO_SBITS) { case IEC958_AES2_PRO_SBITS_20: snd_iprintf(buffer, "20 bit\n"); break; case IEC958_AES2_PRO_SBITS_24: snd_iprintf(buffer, "24 bit\n"); break; case IEC958_AES2_PRO_SBITS_UDEF: snd_iprintf(buffer, "user defined\n"); break; default: snd_iprintf(buffer, "unknown\n"); break; } snd_iprintf(buffer, "Word Length: "); switch (status[2] & IEC958_AES2_PRO_WORDLEN) { case IEC958_AES2_PRO_WORDLEN_22_18: snd_iprintf(buffer, "22 bit or 18 bit\n"); break; 
case IEC958_AES2_PRO_WORDLEN_23_19: snd_iprintf(buffer, "23 bit or 19 bit\n"); break; case IEC958_AES2_PRO_WORDLEN_24_20: snd_iprintf(buffer, "24 bit or 20 bit\n"); break; case IEC958_AES2_PRO_WORDLEN_20_16: snd_iprintf(buffer, "20 bit or 16 bit\n"); break; default: snd_iprintf(buffer, "unknown\n"); break; } } } static void snd_ca0106_proc_iec958(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_ca0106 *emu = entry->private_data; u32 value; value = snd_ca0106_ptr_read(emu, SAMPLE_RATE_TRACKER_STATUS, 0); snd_iprintf(buffer, "Status: %s, %s, %s\n", (value & 0x100000) ? "Rate Locked" : "Not Rate Locked", (value & 0x200000) ? "SPDIF Locked" : "No SPDIF Lock", (value & 0x400000) ? "Audio Valid" : "No valid audio" ); snd_iprintf(buffer, "Estimated sample rate: %u\n", ((value & 0xfffff) * 48000) / 0x8000 ); if (value & 0x200000) { snd_iprintf(buffer, "IEC958/SPDIF input status:\n"); value = snd_ca0106_ptr_read(emu, SPDIF_INPUT_STATUS, 0); snd_ca0106_proc_dump_iec958(buffer, value); } snd_iprintf(buffer, "\n"); } static void snd_ca0106_proc_reg_write32(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_ca0106 *emu = entry->private_data; unsigned long flags; char line[64]; u32 reg, val; while (!snd_info_get_line(buffer, line, sizeof(line))) { if (sscanf(line, "%x %x", &reg, &val) != 2) continue; if (reg < 0x40 && val <= 0xffffffff) { spin_lock_irqsave(&emu->emu_lock, flags); outl(val, emu->port + (reg & 0xfffffffc)); spin_unlock_irqrestore(&emu->emu_lock, flags); } } } static void snd_ca0106_proc_reg_read32(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_ca0106 *emu = entry->private_data; unsigned long value; unsigned long flags; int i; snd_iprintf(buffer, "Registers:\n\n"); for(i = 0; i < 0x20; i+=4) { spin_lock_irqsave(&emu->emu_lock, flags); value = inl(emu->port + i); spin_unlock_irqrestore(&emu->emu_lock, flags); snd_iprintf(buffer, "Register %02X: %08lX\n", i, value); } } static void 
snd_ca0106_proc_reg_read16(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_ca0106 *emu = entry->private_data; unsigned int value; unsigned long flags; int i; snd_iprintf(buffer, "Registers:\n\n"); for(i = 0; i < 0x20; i+=2) { spin_lock_irqsave(&emu->emu_lock, flags); value = inw(emu->port + i); spin_unlock_irqrestore(&emu->emu_lock, flags); snd_iprintf(buffer, "Register %02X: %04X\n", i, value); } } static void snd_ca0106_proc_reg_read8(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_ca0106 *emu = entry->private_data; unsigned int value; unsigned long flags; int i; snd_iprintf(buffer, "Registers:\n\n"); for(i = 0; i < 0x20; i+=1) { spin_lock_irqsave(&emu->emu_lock, flags); value = inb(emu->port + i); spin_unlock_irqrestore(&emu->emu_lock, flags); snd_iprintf(buffer, "Register %02X: %02X\n", i, value); } } static void snd_ca0106_proc_reg_read1(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_ca0106 *emu = entry->private_data; unsigned long value; int i,j; snd_iprintf(buffer, "Registers\n"); for(i = 0; i < 0x40; i++) { snd_iprintf(buffer, "%02X: ",i); for (j = 0; j < 4; j++) { value = snd_ca0106_ptr_read(emu, i, j); snd_iprintf(buffer, "%08lX ", value); } snd_iprintf(buffer, "\n"); } } static void snd_ca0106_proc_reg_read2(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_ca0106 *emu = entry->private_data; unsigned long value; int i,j; snd_iprintf(buffer, "Registers\n"); for(i = 0x40; i < 0x80; i++) { snd_iprintf(buffer, "%02X: ",i); for (j = 0; j < 4; j++) { value = snd_ca0106_ptr_read(emu, i, j); snd_iprintf(buffer, "%08lX ", value); } snd_iprintf(buffer, "\n"); } } static void snd_ca0106_proc_reg_write(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_ca0106 *emu = entry->private_data; char line[64]; unsigned int reg, channel_id , val; while (!snd_info_get_line(buffer, line, sizeof(line))) { if (sscanf(line, "%x %x %x", &reg, 
&channel_id, &val) != 3) continue; if (reg < 0x80 && val <= 0xffffffff && channel_id <= 3) snd_ca0106_ptr_write(emu, reg, channel_id, val); } } static void snd_ca0106_proc_i2c_write(struct snd_info_entry *entry, struct snd_info_buffer *buffer) { struct snd_ca0106 *emu = entry->private_data; char line[64]; unsigned int reg, val; while (!snd_info_get_line(buffer, line, sizeof(line))) { if (sscanf(line, "%x %x", &reg, &val) != 2) continue; if ((reg <= 0x7f) || (val <= 0x1ff)) { snd_ca0106_i2c_write(emu, reg, val); } } } int __devinit snd_ca0106_proc_init(struct snd_ca0106 * emu) { struct snd_info_entry *entry; if(! snd_card_proc_new(emu->card, "iec958", &entry)) snd_info_set_text_ops(entry, emu, snd_ca0106_proc_iec958); if(! snd_card_proc_new(emu->card, "ca0106_reg32", &entry)) { snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read32); entry->c.text.write = snd_ca0106_proc_reg_write32; entry->mode |= S_IWUSR; } if(! snd_card_proc_new(emu->card, "ca0106_reg16", &entry)) snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read16); if(! snd_card_proc_new(emu->card, "ca0106_reg8", &entry)) snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read8); if(! snd_card_proc_new(emu->card, "ca0106_regs1", &entry)) { snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read1); entry->c.text.write = snd_ca0106_proc_reg_write; entry->mode |= S_IWUSR; } if(! snd_card_proc_new(emu->card, "ca0106_i2c", &entry)) { entry->c.text.write = snd_ca0106_proc_i2c_write; entry->private_data = emu; entry->mode |= S_IWUSR; } if(! snd_card_proc_new(emu->card, "ca0106_regs2", &entry)) snd_info_set_text_ops(entry, emu, snd_ca0106_proc_reg_read2); return 0; } #endif /* CONFIG_PROC_FS */
gpl-2.0
luckpizza/n8000-kernel-aufs
drivers/block/umem.c
1873
30193
/* * mm.c - Micro Memory(tm) PCI memory board block device driver - v2.3 * * (C) 2001 San Mehat <nettwerk@valinux.com> * (C) 2001 Johannes Erdfelt <jerdfelt@valinux.com> * (C) 2001 NeilBrown <neilb@cse.unsw.edu.au> * * This driver for the Micro Memory PCI Memory Module with Battery Backup * is Copyright Micro Memory Inc 2001-2002. All rights reserved. * * This driver is released to the public under the terms of the * GNU GENERAL PUBLIC LICENSE version 2 * See the file COPYING for details. * * This driver provides a standard block device interface for Micro Memory(tm) * PCI based RAM boards. * 10/05/01: Phap Nguyen - Rebuilt the driver * 10/22/01: Phap Nguyen - v2.1 Added disk partitioning * 29oct2001:NeilBrown - Use make_request_fn instead of request_fn * - use stand disk partitioning (so fdisk works). * 08nov2001:NeilBrown - change driver name from "mm" to "umem" * - incorporate into main kernel * 08apr2002:NeilBrown - Move some of interrupt handle to tasklet * - use spin_lock_bh instead of _irq * - Never block on make_request. queue * bh's instead. * - unregister umem from devfs at mod unload * - Change version to 2.3 * 07Nov2001:Phap Nguyen - Select pci read command: 06, 12, 15 (Decimal) * 07Jan2002: P. Nguyen - Used PCI Memory Write & Invalidate for DMA * 15May2002:NeilBrown - convert to bio for 2.5 * 17May2002:NeilBrown - remove init_mem initialisation. Instead detect * - a sequence of writes that cover the card, and * - set initialised bit then. 
*/ #undef DEBUG /* #define DEBUG if you want debugging info (pr_debug) */ #include <linux/fs.h> #include <linux/bio.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/mman.h> #include <linux/gfp.h> #include <linux/ioctl.h> #include <linux/module.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/timer.h> #include <linux/pci.h> #include <linux/dma-mapping.h> #include <linux/fcntl.h> /* O_ACCMODE */ #include <linux/hdreg.h> /* HDIO_GETGEO */ #include "umem.h" #include <asm/uaccess.h> #include <asm/io.h> #define MM_MAXCARDS 4 #define MM_RAHEAD 2 /* two sectors */ #define MM_BLKSIZE 1024 /* 1k blocks */ #define MM_HARDSECT 512 /* 512-byte hardware sectors */ #define MM_SHIFT 6 /* max 64 partitions on 4 cards */ /* * Version Information */ #define DRIVER_NAME "umem" #define DRIVER_VERSION "v2.3" #define DRIVER_AUTHOR "San Mehat, Johannes Erdfelt, NeilBrown" #define DRIVER_DESC "Micro Memory(tm) PCI memory board block driver" static int debug; /* #define HW_TRACE(x) writeb(x,cards[0].csr_remap + MEMCTRLSTATUS_MAGIC) */ #define HW_TRACE(x) #define DEBUG_LED_ON_TRANSFER 0x01 #define DEBUG_BATTERY_POLLING 0x02 module_param(debug, int, 0644); MODULE_PARM_DESC(debug, "Debug bitmask"); static int pci_read_cmd = 0x0C; /* Read Multiple */ module_param(pci_read_cmd, int, 0); MODULE_PARM_DESC(pci_read_cmd, "PCI read command"); static int pci_write_cmd = 0x0F; /* Write and Invalidate */ module_param(pci_write_cmd, int, 0); MODULE_PARM_DESC(pci_write_cmd, "PCI write command"); static int pci_cmds; static int major_nr; #include <linux/blkdev.h> #include <linux/blkpg.h> struct cardinfo { struct pci_dev *dev; unsigned char __iomem *csr_remap; unsigned int mm_size; /* size in kbytes */ unsigned int init_size; /* initial segment, in sectors, * that we know to * have been written */ struct bio *bio, *currentbio, **biotail; int current_idx; sector_t current_sector; struct request_queue *queue; struct mm_page { dma_addr_t page_dma; struct mm_dma_desc 
*desc; int cnt, headcnt; struct bio *bio, **biotail; int idx; } mm_pages[2]; #define DESC_PER_PAGE ((PAGE_SIZE*2)/sizeof(struct mm_dma_desc)) int Active, Ready; struct tasklet_struct tasklet; unsigned int dma_status; struct { int good; int warned; unsigned long last_change; } battery[2]; spinlock_t lock; int check_batteries; int flags; }; static struct cardinfo cards[MM_MAXCARDS]; static struct timer_list battery_timer; static int num_cards; static struct gendisk *mm_gendisk[MM_MAXCARDS]; static void check_batteries(struct cardinfo *card); static int get_userbit(struct cardinfo *card, int bit) { unsigned char led; led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL); return led & bit; } static int set_userbit(struct cardinfo *card, int bit, unsigned char state) { unsigned char led; led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL); if (state) led |= bit; else led &= ~bit; writeb(led, card->csr_remap + MEMCTRLCMD_LEDCTRL); return 0; } /* * NOTE: For the power LED, use the LED_POWER_* macros since they differ */ static void set_led(struct cardinfo *card, int shift, unsigned char state) { unsigned char led; led = readb(card->csr_remap + MEMCTRLCMD_LEDCTRL); if (state == LED_FLIP) led ^= (1<<shift); else { led &= ~(0x03 << shift); led |= (state << shift); } writeb(led, card->csr_remap + MEMCTRLCMD_LEDCTRL); } #ifdef MM_DIAG static void dump_regs(struct cardinfo *card) { unsigned char *p; int i, i1; p = card->csr_remap; for (i = 0; i < 8; i++) { printk(KERN_DEBUG "%p ", p); for (i1 = 0; i1 < 16; i1++) printk("%02x ", *p++); printk("\n"); } } #endif static void dump_dmastat(struct cardinfo *card, unsigned int dmastat) { dev_printk(KERN_DEBUG, &card->dev->dev, "DMAstat - "); if (dmastat & DMASCR_ANY_ERR) printk(KERN_CONT "ANY_ERR "); if (dmastat & DMASCR_MBE_ERR) printk(KERN_CONT "MBE_ERR "); if (dmastat & DMASCR_PARITY_ERR_REP) printk(KERN_CONT "PARITY_ERR_REP "); if (dmastat & DMASCR_PARITY_ERR_DET) printk(KERN_CONT "PARITY_ERR_DET "); if (dmastat & DMASCR_SYSTEM_ERR_SIG) 
printk(KERN_CONT "SYSTEM_ERR_SIG "); if (dmastat & DMASCR_TARGET_ABT) printk(KERN_CONT "TARGET_ABT "); if (dmastat & DMASCR_MASTER_ABT) printk(KERN_CONT "MASTER_ABT "); if (dmastat & DMASCR_CHAIN_COMPLETE) printk(KERN_CONT "CHAIN_COMPLETE "); if (dmastat & DMASCR_DMA_COMPLETE) printk(KERN_CONT "DMA_COMPLETE "); printk("\n"); } /* * Theory of request handling * * Each bio is assigned to one mm_dma_desc - which may not be enough FIXME * We have two pages of mm_dma_desc, holding about 64 descriptors * each. These are allocated at init time. * One page is "Ready" and is either full, or can have request added. * The other page might be "Active", which DMA is happening on it. * * Whenever IO on the active page completes, the Ready page is activated * and the ex-Active page is clean out and made Ready. * Otherwise the Ready page is only activated when it becomes full. * * If a request arrives while both pages a full, it is queued, and b_rdev is * overloaded to record whether it was a read or a write. * * The interrupt handler only polls the device to clear the interrupt. * The processing of the result is done in a tasklet. 
*/ static void mm_start_io(struct cardinfo *card) { /* we have the lock, we know there is * no IO active, and we know that card->Active * is set */ struct mm_dma_desc *desc; struct mm_page *page; int offset; /* make the last descriptor end the chain */ page = &card->mm_pages[card->Active]; pr_debug("start_io: %d %d->%d\n", card->Active, page->headcnt, page->cnt - 1); desc = &page->desc[page->cnt-1]; desc->control_bits |= cpu_to_le32(DMASCR_CHAIN_COMP_EN); desc->control_bits &= ~cpu_to_le32(DMASCR_CHAIN_EN); desc->sem_control_bits = desc->control_bits; if (debug & DEBUG_LED_ON_TRANSFER) set_led(card, LED_REMOVE, LED_ON); desc = &page->desc[page->headcnt]; writel(0, card->csr_remap + DMA_PCI_ADDR); writel(0, card->csr_remap + DMA_PCI_ADDR + 4); writel(0, card->csr_remap + DMA_LOCAL_ADDR); writel(0, card->csr_remap + DMA_LOCAL_ADDR + 4); writel(0, card->csr_remap + DMA_TRANSFER_SIZE); writel(0, card->csr_remap + DMA_TRANSFER_SIZE + 4); writel(0, card->csr_remap + DMA_SEMAPHORE_ADDR); writel(0, card->csr_remap + DMA_SEMAPHORE_ADDR + 4); offset = ((char *)desc) - ((char *)page->desc); writel(cpu_to_le32((page->page_dma+offset) & 0xffffffff), card->csr_remap + DMA_DESCRIPTOR_ADDR); /* Force the value to u64 before shifting otherwise >> 32 is undefined C * and on some ports will do nothing ! */ writel(cpu_to_le32(((u64)page->page_dma)>>32), card->csr_remap + DMA_DESCRIPTOR_ADDR + 4); /* Go, go, go */ writel(cpu_to_le32(DMASCR_GO | DMASCR_CHAIN_EN | pci_cmds), card->csr_remap + DMA_STATUS_CTRL); } static int add_bio(struct cardinfo *card); static void activate(struct cardinfo *card) { /* if No page is Active, and Ready is * not empty, then switch Ready page * to active and start IO. 
* Then add any bh's that are available to Ready */ do { while (add_bio(card)) ; if (card->Active == -1 && card->mm_pages[card->Ready].cnt > 0) { card->Active = card->Ready; card->Ready = 1-card->Ready; mm_start_io(card); } } while (card->Active == -1 && add_bio(card)); } static inline void reset_page(struct mm_page *page) { page->cnt = 0; page->headcnt = 0; page->bio = NULL; page->biotail = &page->bio; } /* * If there is room on Ready page, take * one bh off list and add it. * return 1 if there was room, else 0. */ static int add_bio(struct cardinfo *card) { struct mm_page *p; struct mm_dma_desc *desc; dma_addr_t dma_handle; int offset; struct bio *bio; struct bio_vec *vec; int idx; int rw; int len; bio = card->currentbio; if (!bio && card->bio) { card->currentbio = card->bio; card->current_idx = card->bio->bi_idx; card->current_sector = card->bio->bi_sector; card->bio = card->bio->bi_next; if (card->bio == NULL) card->biotail = &card->bio; card->currentbio->bi_next = NULL; return 1; } if (!bio) return 0; idx = card->current_idx; rw = bio_rw(bio); if (card->mm_pages[card->Ready].cnt >= DESC_PER_PAGE) return 0; vec = bio_iovec_idx(bio, idx); len = vec->bv_len; dma_handle = pci_map_page(card->dev, vec->bv_page, vec->bv_offset, len, (rw == READ) ? 
PCI_DMA_FROMDEVICE : PCI_DMA_TODEVICE); p = &card->mm_pages[card->Ready]; desc = &p->desc[p->cnt]; p->cnt++; if (p->bio == NULL) p->idx = idx; if ((p->biotail) != &bio->bi_next) { *(p->biotail) = bio; p->biotail = &(bio->bi_next); bio->bi_next = NULL; } desc->data_dma_handle = dma_handle; desc->pci_addr = cpu_to_le64((u64)desc->data_dma_handle); desc->local_addr = cpu_to_le64(card->current_sector << 9); desc->transfer_size = cpu_to_le32(len); offset = (((char *)&desc->sem_control_bits) - ((char *)p->desc)); desc->sem_addr = cpu_to_le64((u64)(p->page_dma+offset)); desc->zero1 = desc->zero2 = 0; offset = (((char *)(desc+1)) - ((char *)p->desc)); desc->next_desc_addr = cpu_to_le64(p->page_dma+offset); desc->control_bits = cpu_to_le32(DMASCR_GO|DMASCR_ERR_INT_EN| DMASCR_PARITY_INT_EN| DMASCR_CHAIN_EN | DMASCR_SEM_EN | pci_cmds); if (rw == WRITE) desc->control_bits |= cpu_to_le32(DMASCR_TRANSFER_READ); desc->sem_control_bits = desc->control_bits; card->current_sector += (len >> 9); idx++; card->current_idx = idx; if (idx >= bio->bi_vcnt) card->currentbio = NULL; return 1; } static void process_page(unsigned long data) { /* check if any of the requests in the page are DMA_COMPLETE, * and deal with them appropriately. * If we find a descriptor without DMA_COMPLETE in the semaphore, then * dma must have hit an error on that descriptor, so use dma_status * instead and assume that all following descriptors must be re-tried. 
*/ struct mm_page *page; struct bio *return_bio = NULL; struct cardinfo *card = (struct cardinfo *)data; unsigned int dma_status = card->dma_status; spin_lock_bh(&card->lock); if (card->Active < 0) goto out_unlock; page = &card->mm_pages[card->Active]; while (page->headcnt < page->cnt) { struct bio *bio = page->bio; struct mm_dma_desc *desc = &page->desc[page->headcnt]; int control = le32_to_cpu(desc->sem_control_bits); int last = 0; int idx; if (!(control & DMASCR_DMA_COMPLETE)) { control = dma_status; last = 1; } page->headcnt++; idx = page->idx; page->idx++; if (page->idx >= bio->bi_vcnt) { page->bio = bio->bi_next; if (page->bio) page->idx = page->bio->bi_idx; } pci_unmap_page(card->dev, desc->data_dma_handle, bio_iovec_idx(bio, idx)->bv_len, (control & DMASCR_TRANSFER_READ) ? PCI_DMA_TODEVICE : PCI_DMA_FROMDEVICE); if (control & DMASCR_HARD_ERROR) { /* error */ clear_bit(BIO_UPTODATE, &bio->bi_flags); dev_printk(KERN_WARNING, &card->dev->dev, "I/O error on sector %d/%d\n", le32_to_cpu(desc->local_addr)>>9, le32_to_cpu(desc->transfer_size)); dump_dmastat(card, control); } else if ((bio->bi_rw & REQ_WRITE) && le32_to_cpu(desc->local_addr) >> 9 == card->init_size) { card->init_size += le32_to_cpu(desc->transfer_size) >> 9; if (card->init_size >> 1 >= card->mm_size) { dev_printk(KERN_INFO, &card->dev->dev, "memory now initialised\n"); set_userbit(card, MEMORY_INITIALIZED, 1); } } if (bio != page->bio) { bio->bi_next = return_bio; return_bio = bio; } if (last) break; } if (debug & DEBUG_LED_ON_TRANSFER) set_led(card, LED_REMOVE, LED_OFF); if (card->check_batteries) { card->check_batteries = 0; check_batteries(card); } if (page->headcnt >= page->cnt) { reset_page(page); card->Active = -1; activate(card); } else { /* haven't finished with this one yet */ pr_debug("do some more\n"); mm_start_io(card); } out_unlock: spin_unlock_bh(&card->lock); while (return_bio) { struct bio *bio = return_bio; return_bio = bio->bi_next; bio->bi_next = NULL; bio_endio(bio, 0); } } 
static int mm_make_request(struct request_queue *q, struct bio *bio) { struct cardinfo *card = q->queuedata; pr_debug("mm_make_request %llu %u\n", (unsigned long long)bio->bi_sector, bio->bi_size); spin_lock_irq(&card->lock); *card->biotail = bio; bio->bi_next = NULL; card->biotail = &bio->bi_next; spin_unlock_irq(&card->lock); return 0; } static irqreturn_t mm_interrupt(int irq, void *__card) { struct cardinfo *card = (struct cardinfo *) __card; unsigned int dma_status; unsigned short cfg_status; HW_TRACE(0x30); dma_status = le32_to_cpu(readl(card->csr_remap + DMA_STATUS_CTRL)); if (!(dma_status & (DMASCR_ERROR_MASK | DMASCR_CHAIN_COMPLETE))) { /* interrupt wasn't for me ... */ return IRQ_NONE; } /* clear COMPLETION interrupts */ if (card->flags & UM_FLAG_NO_BYTE_STATUS) writel(cpu_to_le32(DMASCR_DMA_COMPLETE|DMASCR_CHAIN_COMPLETE), card->csr_remap + DMA_STATUS_CTRL); else writeb((DMASCR_DMA_COMPLETE|DMASCR_CHAIN_COMPLETE) >> 16, card->csr_remap + DMA_STATUS_CTRL + 2); /* log errors and clear interrupt status */ if (dma_status & DMASCR_ANY_ERR) { unsigned int data_log1, data_log2; unsigned int addr_log1, addr_log2; unsigned char stat, count, syndrome, check; stat = readb(card->csr_remap + MEMCTRLCMD_ERRSTATUS); data_log1 = le32_to_cpu(readl(card->csr_remap + ERROR_DATA_LOG)); data_log2 = le32_to_cpu(readl(card->csr_remap + ERROR_DATA_LOG + 4)); addr_log1 = le32_to_cpu(readl(card->csr_remap + ERROR_ADDR_LOG)); addr_log2 = readb(card->csr_remap + ERROR_ADDR_LOG + 4); count = readb(card->csr_remap + ERROR_COUNT); syndrome = readb(card->csr_remap + ERROR_SYNDROME); check = readb(card->csr_remap + ERROR_CHECK); dump_dmastat(card, dma_status); if (stat & 0x01) dev_printk(KERN_ERR, &card->dev->dev, "Memory access error detected (err count %d)\n", count); if (stat & 0x02) dev_printk(KERN_ERR, &card->dev->dev, "Multi-bit EDC error\n"); dev_printk(KERN_ERR, &card->dev->dev, "Fault Address 0x%02x%08x, Fault Data 0x%08x%08x\n", addr_log2, addr_log1, data_log2, data_log1); 
dev_printk(KERN_ERR, &card->dev->dev, "Fault Check 0x%02x, Fault Syndrome 0x%02x\n", check, syndrome); writeb(0, card->csr_remap + ERROR_COUNT); } if (dma_status & DMASCR_PARITY_ERR_REP) { dev_printk(KERN_ERR, &card->dev->dev, "PARITY ERROR REPORTED\n"); pci_read_config_word(card->dev, PCI_STATUS, &cfg_status); pci_write_config_word(card->dev, PCI_STATUS, cfg_status); } if (dma_status & DMASCR_PARITY_ERR_DET) { dev_printk(KERN_ERR, &card->dev->dev, "PARITY ERROR DETECTED\n"); pci_read_config_word(card->dev, PCI_STATUS, &cfg_status); pci_write_config_word(card->dev, PCI_STATUS, cfg_status); } if (dma_status & DMASCR_SYSTEM_ERR_SIG) { dev_printk(KERN_ERR, &card->dev->dev, "SYSTEM ERROR\n"); pci_read_config_word(card->dev, PCI_STATUS, &cfg_status); pci_write_config_word(card->dev, PCI_STATUS, cfg_status); } if (dma_status & DMASCR_TARGET_ABT) { dev_printk(KERN_ERR, &card->dev->dev, "TARGET ABORT\n"); pci_read_config_word(card->dev, PCI_STATUS, &cfg_status); pci_write_config_word(card->dev, PCI_STATUS, cfg_status); } if (dma_status & DMASCR_MASTER_ABT) { dev_printk(KERN_ERR, &card->dev->dev, "MASTER ABORT\n"); pci_read_config_word(card->dev, PCI_STATUS, &cfg_status); pci_write_config_word(card->dev, PCI_STATUS, cfg_status); } /* and process the DMA descriptors */ card->dma_status = dma_status; tasklet_schedule(&card->tasklet); HW_TRACE(0x36); return IRQ_HANDLED; } /* * If both batteries are good, no LED * If either battery has been warned, solid LED * If both batteries are bad, flash the LED quickly * If either battery is bad, flash the LED semi quickly */ static void set_fault_to_battery_status(struct cardinfo *card) { if (card->battery[0].good && card->battery[1].good) set_led(card, LED_FAULT, LED_OFF); else if (card->battery[0].warned || card->battery[1].warned) set_led(card, LED_FAULT, LED_ON); else if (!card->battery[0].good && !card->battery[1].good) set_led(card, LED_FAULT, LED_FLASH_7_0); else set_led(card, LED_FAULT, LED_FLASH_3_5); } static void 
init_battery_timer(void); static int check_battery(struct cardinfo *card, int battery, int status) { if (status != card->battery[battery].good) { card->battery[battery].good = !card->battery[battery].good; card->battery[battery].last_change = jiffies; if (card->battery[battery].good) { dev_printk(KERN_ERR, &card->dev->dev, "Battery %d now good\n", battery + 1); card->battery[battery].warned = 0; } else dev_printk(KERN_ERR, &card->dev->dev, "Battery %d now FAILED\n", battery + 1); return 1; } else if (!card->battery[battery].good && !card->battery[battery].warned && time_after_eq(jiffies, card->battery[battery].last_change + (HZ * 60 * 60 * 5))) { dev_printk(KERN_ERR, &card->dev->dev, "Battery %d still FAILED after 5 hours\n", battery + 1); card->battery[battery].warned = 1; return 1; } return 0; } static void check_batteries(struct cardinfo *card) { /* NOTE: this must *never* be called while the card * is doing (bus-to-card) DMA, or you will need the * reset switch */ unsigned char status; int ret1, ret2; status = readb(card->csr_remap + MEMCTRLSTATUS_BATTERY); if (debug & DEBUG_BATTERY_POLLING) dev_printk(KERN_DEBUG, &card->dev->dev, "checking battery status, 1 = %s, 2 = %s\n", (status & BATTERY_1_FAILURE) ? "FAILURE" : "OK", (status & BATTERY_2_FAILURE) ? 
"FAILURE" : "OK"); ret1 = check_battery(card, 0, !(status & BATTERY_1_FAILURE)); ret2 = check_battery(card, 1, !(status & BATTERY_2_FAILURE)); if (ret1 || ret2) set_fault_to_battery_status(card); } static void check_all_batteries(unsigned long ptr) { int i; for (i = 0; i < num_cards; i++) if (!(cards[i].flags & UM_FLAG_NO_BATT)) { struct cardinfo *card = &cards[i]; spin_lock_bh(&card->lock); if (card->Active >= 0) card->check_batteries = 1; else check_batteries(card); spin_unlock_bh(&card->lock); } init_battery_timer(); } static void init_battery_timer(void) { init_timer(&battery_timer); battery_timer.function = check_all_batteries; battery_timer.expires = jiffies + (HZ * 60); add_timer(&battery_timer); } static void del_battery_timer(void) { del_timer(&battery_timer); } /* * Note no locks taken out here. In a worst case scenario, we could drop * a chunk of system memory. But that should never happen, since validation * happens at open or mount time, when locks are held. * * That's crap, since doing that while some partitions are opened * or mounted will give you really nasty results. */ static int mm_revalidate(struct gendisk *disk) { struct cardinfo *card = disk->private_data; set_capacity(disk, card->mm_size << 1); return 0; } static int mm_getgeo(struct block_device *bdev, struct hd_geometry *geo) { struct cardinfo *card = bdev->bd_disk->private_data; int size = card->mm_size * (1024 / MM_HARDSECT); /* * get geometry: we have to fake one... trim the size to a * multiple of 2048 (1M): tell we have 32 sectors, 64 heads, * whatever cylinders. 
*/ geo->heads = 64; geo->sectors = 32; geo->cylinders = size / (geo->heads * geo->sectors); return 0; } static const struct block_device_operations mm_fops = { .owner = THIS_MODULE, .getgeo = mm_getgeo, .revalidate_disk = mm_revalidate, }; static int __devinit mm_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { int ret = -ENODEV; struct cardinfo *card = &cards[num_cards]; unsigned char mem_present; unsigned char batt_status; unsigned int saved_bar, data; unsigned long csr_base; unsigned long csr_len; int magic_number; static int printed_version; if (!printed_version++) printk(KERN_INFO DRIVER_VERSION " : " DRIVER_DESC "\n"); ret = pci_enable_device(dev); if (ret) return ret; pci_write_config_byte(dev, PCI_LATENCY_TIMER, 0xF8); pci_set_master(dev); card->dev = dev; csr_base = pci_resource_start(dev, 0); csr_len = pci_resource_len(dev, 0); if (!csr_base || !csr_len) return -ENODEV; dev_printk(KERN_INFO, &dev->dev, "Micro Memory(tm) controller found (PCI Mem Module (Battery Backup))\n"); if (pci_set_dma_mask(dev, DMA_BIT_MASK(64)) && pci_set_dma_mask(dev, DMA_BIT_MASK(32))) { dev_printk(KERN_WARNING, &dev->dev, "NO suitable DMA found\n"); return -ENOMEM; } ret = pci_request_regions(dev, DRIVER_NAME); if (ret) { dev_printk(KERN_ERR, &card->dev->dev, "Unable to request memory region\n"); goto failed_req_csr; } card->csr_remap = ioremap_nocache(csr_base, csr_len); if (!card->csr_remap) { dev_printk(KERN_ERR, &card->dev->dev, "Unable to remap memory region\n"); ret = -ENOMEM; goto failed_remap_csr; } dev_printk(KERN_INFO, &card->dev->dev, "CSR 0x%08lx -> 0x%p (0x%lx)\n", csr_base, card->csr_remap, csr_len); switch (card->dev->device) { case 0x5415: card->flags |= UM_FLAG_NO_BYTE_STATUS | UM_FLAG_NO_BATTREG; magic_number = 0x59; break; case 0x5425: card->flags |= UM_FLAG_NO_BYTE_STATUS; magic_number = 0x5C; break; case 0x6155: card->flags |= UM_FLAG_NO_BYTE_STATUS | UM_FLAG_NO_BATTREG | UM_FLAG_NO_BATT; magic_number = 0x99; break; default: magic_number = 
0x100; break; } if (readb(card->csr_remap + MEMCTRLSTATUS_MAGIC) != magic_number) { dev_printk(KERN_ERR, &card->dev->dev, "Magic number invalid\n"); ret = -ENOMEM; goto failed_magic; } card->mm_pages[0].desc = pci_alloc_consistent(card->dev, PAGE_SIZE * 2, &card->mm_pages[0].page_dma); card->mm_pages[1].desc = pci_alloc_consistent(card->dev, PAGE_SIZE * 2, &card->mm_pages[1].page_dma); if (card->mm_pages[0].desc == NULL || card->mm_pages[1].desc == NULL) { dev_printk(KERN_ERR, &card->dev->dev, "alloc failed\n"); goto failed_alloc; } reset_page(&card->mm_pages[0]); reset_page(&card->mm_pages[1]); card->Ready = 0; /* page 0 is ready */ card->Active = -1; /* no page is active */ card->bio = NULL; card->biotail = &card->bio; card->queue = blk_alloc_queue(GFP_KERNEL); if (!card->queue) goto failed_alloc; blk_queue_make_request(card->queue, mm_make_request); card->queue->queue_lock = &card->lock; card->queue->queuedata = card; tasklet_init(&card->tasklet, process_page, (unsigned long)card); card->check_batteries = 0; mem_present = readb(card->csr_remap + MEMCTRLSTATUS_MEMORY); switch (mem_present) { case MEM_128_MB: card->mm_size = 1024 * 128; break; case MEM_256_MB: card->mm_size = 1024 * 256; break; case MEM_512_MB: card->mm_size = 1024 * 512; break; case MEM_1_GB: card->mm_size = 1024 * 1024; break; case MEM_2_GB: card->mm_size = 1024 * 2048; break; default: card->mm_size = 0; break; } /* Clear the LED's we control */ set_led(card, LED_REMOVE, LED_OFF); set_led(card, LED_FAULT, LED_OFF); batt_status = readb(card->csr_remap + MEMCTRLSTATUS_BATTERY); card->battery[0].good = !(batt_status & BATTERY_1_FAILURE); card->battery[1].good = !(batt_status & BATTERY_2_FAILURE); card->battery[0].last_change = card->battery[1].last_change = jiffies; if (card->flags & UM_FLAG_NO_BATT) dev_printk(KERN_INFO, &card->dev->dev, "Size %d KB\n", card->mm_size); else { dev_printk(KERN_INFO, &card->dev->dev, "Size %d KB, Battery 1 %s (%s), Battery 2 %s (%s)\n", card->mm_size, batt_status & 
BATTERY_1_DISABLED ? "Disabled" : "Enabled", card->battery[0].good ? "OK" : "FAILURE", batt_status & BATTERY_2_DISABLED ? "Disabled" : "Enabled", card->battery[1].good ? "OK" : "FAILURE"); set_fault_to_battery_status(card); } pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &saved_bar); data = 0xffffffff; pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, data); pci_read_config_dword(dev, PCI_BASE_ADDRESS_1, &data); pci_write_config_dword(dev, PCI_BASE_ADDRESS_1, saved_bar); data &= 0xfffffff0; data = ~data; data += 1; if (request_irq(dev->irq, mm_interrupt, IRQF_SHARED, DRIVER_NAME, card)) { dev_printk(KERN_ERR, &card->dev->dev, "Unable to allocate IRQ\n"); ret = -ENODEV; goto failed_req_irq; } dev_printk(KERN_INFO, &card->dev->dev, "Window size %d bytes, IRQ %d\n", data, dev->irq); spin_lock_init(&card->lock); pci_set_drvdata(dev, card); if (pci_write_cmd != 0x0F) /* If not Memory Write & Invalidate */ pci_write_cmd = 0x07; /* then Memory Write command */ if (pci_write_cmd & 0x08) { /* use Memory Write and Invalidate */ unsigned short cfg_command; pci_read_config_word(dev, PCI_COMMAND, &cfg_command); cfg_command |= 0x10; /* Memory Write & Invalidate Enable */ pci_write_config_word(dev, PCI_COMMAND, cfg_command); } pci_cmds = (pci_read_cmd << 28) | (pci_write_cmd << 24); num_cards++; if (!get_userbit(card, MEMORY_INITIALIZED)) { dev_printk(KERN_INFO, &card->dev->dev, "memory NOT initialized. 
Consider over-writing whole device.\n"); card->init_size = 0; } else { dev_printk(KERN_INFO, &card->dev->dev, "memory already initialized\n"); card->init_size = card->mm_size; } /* Enable ECC */ writeb(EDC_STORE_CORRECT, card->csr_remap + MEMCTRLCMD_ERRCTRL); return 0; failed_req_irq: failed_alloc: if (card->mm_pages[0].desc) pci_free_consistent(card->dev, PAGE_SIZE*2, card->mm_pages[0].desc, card->mm_pages[0].page_dma); if (card->mm_pages[1].desc) pci_free_consistent(card->dev, PAGE_SIZE*2, card->mm_pages[1].desc, card->mm_pages[1].page_dma); failed_magic: iounmap(card->csr_remap); failed_remap_csr: pci_release_regions(dev); failed_req_csr: return ret; } static void mm_pci_remove(struct pci_dev *dev) { struct cardinfo *card = pci_get_drvdata(dev); tasklet_kill(&card->tasklet); free_irq(dev->irq, card); iounmap(card->csr_remap); if (card->mm_pages[0].desc) pci_free_consistent(card->dev, PAGE_SIZE*2, card->mm_pages[0].desc, card->mm_pages[0].page_dma); if (card->mm_pages[1].desc) pci_free_consistent(card->dev, PAGE_SIZE*2, card->mm_pages[1].desc, card->mm_pages[1].page_dma); blk_cleanup_queue(card->queue); pci_release_regions(dev); pci_disable_device(dev); } static const struct pci_device_id mm_pci_ids[] = { {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY, PCI_DEVICE_ID_MICRO_MEMORY_5415CN)}, {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY, PCI_DEVICE_ID_MICRO_MEMORY_5425CN)}, {PCI_DEVICE(PCI_VENDOR_ID_MICRO_MEMORY, PCI_DEVICE_ID_MICRO_MEMORY_6155)}, { .vendor = 0x8086, .device = 0xB555, .subvendor = 0x1332, .subdevice = 0x5460, .class = 0x050000, .class_mask = 0, }, { /* end: all zeroes */ } }; MODULE_DEVICE_TABLE(pci, mm_pci_ids); static struct pci_driver mm_pci_driver = { .name = DRIVER_NAME, .id_table = mm_pci_ids, .probe = mm_pci_probe, .remove = mm_pci_remove, }; static int __init mm_init(void) { int retval, i; int err; retval = pci_register_driver(&mm_pci_driver); if (retval) return -ENOMEM; err = major_nr = register_blkdev(0, DRIVER_NAME); if (err < 0) { 
pci_unregister_driver(&mm_pci_driver); return -EIO; } for (i = 0; i < num_cards; i++) { mm_gendisk[i] = alloc_disk(1 << MM_SHIFT); if (!mm_gendisk[i]) goto out; } for (i = 0; i < num_cards; i++) { struct gendisk *disk = mm_gendisk[i]; sprintf(disk->disk_name, "umem%c", 'a'+i); spin_lock_init(&cards[i].lock); disk->major = major_nr; disk->first_minor = i << MM_SHIFT; disk->fops = &mm_fops; disk->private_data = &cards[i]; disk->queue = cards[i].queue; set_capacity(disk, cards[i].mm_size << 1); add_disk(disk); } init_battery_timer(); printk(KERN_INFO "MM: desc_per_page = %ld\n", DESC_PER_PAGE); /* printk("mm_init: Done. 10-19-01 9:00\n"); */ return 0; out: pci_unregister_driver(&mm_pci_driver); unregister_blkdev(major_nr, DRIVER_NAME); while (i--) put_disk(mm_gendisk[i]); return -ENOMEM; } static void __exit mm_cleanup(void) { int i; del_battery_timer(); for (i = 0; i < num_cards ; i++) { del_gendisk(mm_gendisk[i]); put_disk(mm_gendisk[i]); } pci_unregister_driver(&mm_pci_driver); unregister_blkdev(major_nr, DRIVER_NAME); } module_init(mm_init); module_exit(mm_cleanup); MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE("GPL");
gpl-2.0
dovydasvenckus/linux
drivers/video/backlight/jornada720_lcd.c
1873
2938
/*
 * LCD driver for HP Jornada 700 series (710/720/728)
 * Copyright (C) 2006-2009 Kristoffer Ericson <kristoffer.ericson@gmail.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version
 * 2 or any later version as published by the Free Software Foundation.
 */

#include <linux/device.h>
#include <linux/fb.h>
#include <linux/kernel.h>
#include <linux/lcd.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/delay.h>

#include <mach/jornada720.h>
#include <mach/hardware.h>

#include <video/s1d13xxxfb.h>

#define LCD_MAX_CONTRAST	0xff
#define LCD_DEF_CONTRAST	0x80

/*
 * jornada_lcd_get_power - report panel power state
 *
 * The LCD supply is gated by the PPC_LDD2 GPIO line; sample it to
 * decide whether the panel is up.
 */
static int jornada_lcd_get_power(struct lcd_device *ld)
{
	return PPSR & PPC_LDD2 ? FB_BLANK_UNBLANK : FB_BLANK_POWERDOWN;
}

/*
 * jornada_lcd_get_contrast - read current contrast from the MCU
 *
 * Returns the contrast byte on success, 0 when the panel is powered
 * down (contrast is unreadable then), or -ETIMEDOUT if the MCU does
 * not acknowledge the command.
 */
static int jornada_lcd_get_contrast(struct lcd_device *ld)
{
	int ret;

	if (jornada_lcd_get_power(ld) != FB_BLANK_UNBLANK)
		return 0;

	jornada_ssp_start();

	/* the MCU acks a command byte by returning TXDUMMY */
	if (jornada_ssp_byte(GETCONTRAST) == TXDUMMY) {
		ret = jornada_ssp_byte(TXDUMMY);	/* clock out the value */
		goto success;
	}

	/* BUGFIX: message previously said "set" on the get path */
	dev_err(&ld->dev, "failed to get contrast\n");
	ret = -ETIMEDOUT;

success:
	jornada_ssp_end();
	return ret;
}

/*
 * jornada_lcd_set_contrast - push a new contrast value to the MCU
 *
 * Returns 0 on success or -ETIMEDOUT if either the command or the
 * value byte is not acknowledged.
 */
static int jornada_lcd_set_contrast(struct lcd_device *ld, int value)
{
	int ret = 0;

	jornada_ssp_start();

	/* start by sending our set contrast cmd to mcu */
	if (jornada_ssp_byte(SETCONTRAST) == TXDUMMY) {
		/* if successful push the new value */
		if (jornada_ssp_byte(value) == TXDUMMY)
			goto success;
	}

	dev_err(&ld->dev, "failed to set contrast\n");
	ret = -ETIMEDOUT;

success:
	jornada_ssp_end();
	return ret;
}

/*
 * jornada_lcd_set_power - gate the LCD supply via PPC_LDD2
 *
 * Any blank level other than FB_BLANK_UNBLANK powers the panel down.
 */
static int jornada_lcd_set_power(struct lcd_device *ld, int power)
{
	if (power != FB_BLANK_UNBLANK) {
		PPSR &= ~PPC_LDD2;
		PPDR |= PPC_LDD2;
	} else {
		PPSR |= PPC_LDD2;
	}

	return 0;
}

static struct lcd_ops jornada_lcd_props = {
	.get_contrast = jornada_lcd_get_contrast,
	.set_contrast = jornada_lcd_set_contrast,
	.get_power = jornada_lcd_get_power,
	.set_power = jornada_lcd_set_power,
};

/*
 * jornada_lcd_probe - register the LCD device and apply sane defaults
 */
static int jornada_lcd_probe(struct platform_device *pdev)
{
	struct lcd_device *lcd_device;
	int ret;

	lcd_device = devm_lcd_device_register(&pdev->dev, S1D_DEVICENAME,
					      &pdev->dev, NULL,
					      &jornada_lcd_props);

	if (IS_ERR(lcd_device)) {
		ret = PTR_ERR(lcd_device);
		dev_err(&pdev->dev, "failed to register device\n");
		return ret;
	}

	platform_set_drvdata(pdev, lcd_device);

	/* lets set our default values */
	jornada_lcd_set_contrast(lcd_device, LCD_DEF_CONTRAST);
	jornada_lcd_set_power(lcd_device, FB_BLANK_UNBLANK);
	/* give it some time to startup */
	msleep(100);

	return 0;
}

static struct platform_driver jornada_lcd_driver = {
	.probe	= jornada_lcd_probe,
	.driver	= {
		.name	= "jornada_lcd",
	},
};

module_platform_driver(jornada_lcd_driver);

MODULE_AUTHOR("Kristoffer Ericson <kristoffer.ericson@gmail.com>");
MODULE_DESCRIPTION("HP Jornada 710/720/728 LCD driver");
MODULE_LICENSE("GPL");
gpl-2.0
Split-Screen/android_kernel_mediatek_sprout
drivers/regulator/tps6586x-regulator.c
2129
13882
/* * Regulator driver for TI TPS6586x * * Copyright (C) 2010 Compulab Ltd. * Author: Mike Rapoport <mike@compulab.co.il> * * Based on da903x * Copyright (C) 2006-2008 Marvell International Ltd. * Copyright (C) 2008 Compulab Ltd. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/init.h> #include <linux/err.h> #include <linux/of.h> #include <linux/slab.h> #include <linux/platform_device.h> #include <linux/regulator/driver.h> #include <linux/regulator/machine.h> #include <linux/regulator/of_regulator.h> #include <linux/mfd/tps6586x.h> /* supply control and voltage setting */ #define TPS6586X_SUPPLYENA 0x10 #define TPS6586X_SUPPLYENB 0x11 #define TPS6586X_SUPPLYENC 0x12 #define TPS6586X_SUPPLYEND 0x13 #define TPS6586X_SUPPLYENE 0x14 #define TPS6586X_VCC1 0x20 #define TPS6586X_VCC2 0x21 #define TPS6586X_SM1V1 0x23 #define TPS6586X_SM1V2 0x24 #define TPS6586X_SM1SL 0x25 #define TPS6586X_SM0V1 0x26 #define TPS6586X_SM0V2 0x27 #define TPS6586X_SM0SL 0x28 #define TPS6586X_LDO2AV1 0x29 #define TPS6586X_LDO2AV2 0x2A #define TPS6586X_LDO2BV1 0x2F #define TPS6586X_LDO2BV2 0x30 #define TPS6586X_LDO4V1 0x32 #define TPS6586X_LDO4V2 0x33 /* converter settings */ #define TPS6586X_SUPPLYV1 0x41 #define TPS6586X_SUPPLYV2 0x42 #define TPS6586X_SUPPLYV3 0x43 #define TPS6586X_SUPPLYV4 0x44 #define TPS6586X_SUPPLYV5 0x45 #define TPS6586X_SUPPLYV6 0x46 #define TPS6586X_SMODE1 0x47 #define TPS6586X_SMODE2 0x48 struct tps6586x_regulator { struct regulator_desc desc; int enable_bit[2]; int enable_reg[2]; }; static inline struct device *to_tps6586x_dev(struct regulator_dev *rdev) { return rdev_get_dev(rdev)->parent; } static struct regulator_ops tps6586x_regulator_ops = { .list_voltage = regulator_list_voltage_table, .map_voltage = regulator_map_voltage_ascend, .get_voltage_sel = 
regulator_get_voltage_sel_regmap, .set_voltage_sel = regulator_set_voltage_sel_regmap, .is_enabled = regulator_is_enabled_regmap, .enable = regulator_enable_regmap, .disable = regulator_disable_regmap, }; static struct regulator_ops tps6586x_sys_regulator_ops = { }; static const unsigned int tps6586x_ldo0_voltages[] = { 1200000, 1500000, 1800000, 2500000, 2700000, 2850000, 3100000, 3300000, }; static const unsigned int tps6586x_ldo4_voltages[] = { 1700000, 1725000, 1750000, 1775000, 1800000, 1825000, 1850000, 1875000, 1900000, 1925000, 1950000, 1975000, 2000000, 2025000, 2050000, 2075000, 2100000, 2125000, 2150000, 2175000, 2200000, 2225000, 2250000, 2275000, 2300000, 2325000, 2350000, 2375000, 2400000, 2425000, 2450000, 2475000, }; static const unsigned int tps6586x_ldo_voltages[] = { 1250000, 1500000, 1800000, 2500000, 2700000, 2850000, 3100000, 3300000, }; static const unsigned int tps6586x_sm2_voltages[] = { 3000000, 3050000, 3100000, 3150000, 3200000, 3250000, 3300000, 3350000, 3400000, 3450000, 3500000, 3550000, 3600000, 3650000, 3700000, 3750000, 3800000, 3850000, 3900000, 3950000, 4000000, 4050000, 4100000, 4150000, 4200000, 4250000, 4300000, 4350000, 4400000, 4450000, 4500000, 4550000, }; static const unsigned int tps6586x_dvm_voltages[] = { 725000, 750000, 775000, 800000, 825000, 850000, 875000, 900000, 925000, 950000, 975000, 1000000, 1025000, 1050000, 1075000, 1100000, 1125000, 1150000, 1175000, 1200000, 1225000, 1250000, 1275000, 1300000, 1325000, 1350000, 1375000, 1400000, 1425000, 1450000, 1475000, 1500000, }; #define TPS6586X_REGULATOR(_id, _pin_name, vdata, vreg, shift, nbits, \ ereg0, ebit0, ereg1, ebit1, goreg, gobit) \ .desc = { \ .supply_name = _pin_name, \ .name = "REG-" #_id, \ .ops = &tps6586x_regulator_ops, \ .type = REGULATOR_VOLTAGE, \ .id = TPS6586X_ID_##_id, \ .n_voltages = ARRAY_SIZE(tps6586x_##vdata##_voltages), \ .volt_table = tps6586x_##vdata##_voltages, \ .owner = THIS_MODULE, \ .enable_reg = TPS6586X_SUPPLY##ereg0, \ .enable_mask 
= 1 << (ebit0), \ .vsel_reg = TPS6586X_##vreg, \ .vsel_mask = ((1 << (nbits)) - 1) << (shift), \ .apply_reg = (goreg), \ .apply_bit = (gobit), \ }, \ .enable_reg[0] = TPS6586X_SUPPLY##ereg0, \ .enable_bit[0] = (ebit0), \ .enable_reg[1] = TPS6586X_SUPPLY##ereg1, \ .enable_bit[1] = (ebit1), #define TPS6586X_LDO(_id, _pname, vdata, vreg, shift, nbits, \ ereg0, ebit0, ereg1, ebit1) \ { \ TPS6586X_REGULATOR(_id, _pname, vdata, vreg, shift, nbits, \ ereg0, ebit0, ereg1, ebit1, 0, 0) \ } #define TPS6586X_DVM(_id, _pname, vdata, vreg, shift, nbits, \ ereg0, ebit0, ereg1, ebit1, goreg, gobit) \ { \ TPS6586X_REGULATOR(_id, _pname, vdata, vreg, shift, nbits, \ ereg0, ebit0, ereg1, ebit1, goreg, gobit) \ } #define TPS6586X_SYS_REGULATOR() \ { \ .desc = { \ .supply_name = "sys", \ .name = "REG-SYS", \ .ops = &tps6586x_sys_regulator_ops, \ .type = REGULATOR_VOLTAGE, \ .id = TPS6586X_ID_SYS, \ .owner = THIS_MODULE, \ }, \ } static struct tps6586x_regulator tps6586x_regulator[] = { TPS6586X_SYS_REGULATOR(), TPS6586X_LDO(LDO_0, "vinldo01", ldo0, SUPPLYV1, 5, 3, ENC, 0, END, 0), TPS6586X_LDO(LDO_3, "vinldo23", ldo, SUPPLYV4, 0, 3, ENC, 2, END, 2), TPS6586X_LDO(LDO_5, "REG-SYS", ldo, SUPPLYV6, 0, 3, ENE, 6, ENE, 6), TPS6586X_LDO(LDO_6, "vinldo678", ldo, SUPPLYV3, 0, 3, ENC, 4, END, 4), TPS6586X_LDO(LDO_7, "vinldo678", ldo, SUPPLYV3, 3, 3, ENC, 5, END, 5), TPS6586X_LDO(LDO_8, "vinldo678", ldo, SUPPLYV2, 5, 3, ENC, 6, END, 6), TPS6586X_LDO(LDO_9, "vinldo9", ldo, SUPPLYV6, 3, 3, ENE, 7, ENE, 7), TPS6586X_LDO(LDO_RTC, "REG-SYS", ldo, SUPPLYV4, 3, 3, V4, 7, V4, 7), TPS6586X_LDO(LDO_1, "vinldo01", dvm, SUPPLYV1, 0, 5, ENC, 1, END, 1), TPS6586X_LDO(SM_2, "vin-sm2", sm2, SUPPLYV2, 0, 5, ENC, 7, END, 7), TPS6586X_DVM(LDO_2, "vinldo23", dvm, LDO2BV1, 0, 5, ENA, 3, ENB, 3, TPS6586X_VCC2, BIT(6)), TPS6586X_DVM(LDO_4, "vinldo4", ldo4, LDO4V1, 0, 5, ENC, 3, END, 3, TPS6586X_VCC1, BIT(6)), TPS6586X_DVM(SM_0, "vin-sm0", dvm, SM0V1, 0, 5, ENA, 1, ENB, 1, TPS6586X_VCC1, BIT(2)), TPS6586X_DVM(SM_1, 
"vin-sm1", dvm, SM1V1, 0, 5, ENA, 0, ENB, 0, TPS6586X_VCC1, BIT(0)), }; /* * TPS6586X has 2 enable bits that are OR'ed to determine the actual * regulator state. Clearing one of this bits allows switching * regulator on and of with single register write. */ static inline int tps6586x_regulator_preinit(struct device *parent, struct tps6586x_regulator *ri) { uint8_t val1, val2; int ret; if (ri->enable_reg[0] == ri->enable_reg[1] && ri->enable_bit[0] == ri->enable_bit[1]) return 0; ret = tps6586x_read(parent, ri->enable_reg[0], &val1); if (ret) return ret; ret = tps6586x_read(parent, ri->enable_reg[1], &val2); if (ret) return ret; if (!(val2 & (1 << ri->enable_bit[1]))) return 0; /* * The regulator is on, but it's enabled with the bit we don't * want to use, so we switch the enable bits */ if (!(val1 & (1 << ri->enable_bit[0]))) { ret = tps6586x_set_bits(parent, ri->enable_reg[0], 1 << ri->enable_bit[0]); if (ret) return ret; } return tps6586x_clr_bits(parent, ri->enable_reg[1], 1 << ri->enable_bit[1]); } static int tps6586x_regulator_set_slew_rate(struct platform_device *pdev, int id, struct regulator_init_data *p) { struct device *parent = pdev->dev.parent; struct tps6586x_settings *setting = p->driver_data; uint8_t reg; if (setting == NULL) return 0; if (!(setting->slew_rate & TPS6586X_SLEW_RATE_SET)) return 0; /* only SM0 and SM1 can have the slew rate settings */ switch (id) { case TPS6586X_ID_SM_0: reg = TPS6586X_SM0SL; break; case TPS6586X_ID_SM_1: reg = TPS6586X_SM1SL; break; default: dev_err(&pdev->dev, "Only SM0/SM1 can set slew rate\n"); return -EINVAL; } return tps6586x_write(parent, reg, setting->slew_rate & TPS6586X_SLEW_RATE_MASK); } static inline struct tps6586x_regulator *find_regulator_info(int id) { struct tps6586x_regulator *ri; int i; for (i = 0; i < ARRAY_SIZE(tps6586x_regulator); i++) { ri = &tps6586x_regulator[i]; if (ri->desc.id == id) return ri; } return NULL; } #ifdef CONFIG_OF static struct of_regulator_match tps6586x_matches[] = { { .name 
= "sys", .driver_data = (void *)TPS6586X_ID_SYS }, { .name = "sm0", .driver_data = (void *)TPS6586X_ID_SM_0 }, { .name = "sm1", .driver_data = (void *)TPS6586X_ID_SM_1 }, { .name = "sm2", .driver_data = (void *)TPS6586X_ID_SM_2 }, { .name = "ldo0", .driver_data = (void *)TPS6586X_ID_LDO_0 }, { .name = "ldo1", .driver_data = (void *)TPS6586X_ID_LDO_1 }, { .name = "ldo2", .driver_data = (void *)TPS6586X_ID_LDO_2 }, { .name = "ldo3", .driver_data = (void *)TPS6586X_ID_LDO_3 }, { .name = "ldo4", .driver_data = (void *)TPS6586X_ID_LDO_4 }, { .name = "ldo5", .driver_data = (void *)TPS6586X_ID_LDO_5 }, { .name = "ldo6", .driver_data = (void *)TPS6586X_ID_LDO_6 }, { .name = "ldo7", .driver_data = (void *)TPS6586X_ID_LDO_7 }, { .name = "ldo8", .driver_data = (void *)TPS6586X_ID_LDO_8 }, { .name = "ldo9", .driver_data = (void *)TPS6586X_ID_LDO_9 }, { .name = "ldo_rtc", .driver_data = (void *)TPS6586X_ID_LDO_RTC }, }; static struct tps6586x_platform_data *tps6586x_parse_regulator_dt( struct platform_device *pdev, struct of_regulator_match **tps6586x_reg_matches) { const unsigned int num = ARRAY_SIZE(tps6586x_matches); struct device_node *np = pdev->dev.parent->of_node; struct device_node *regs; const char *sys_rail = NULL; unsigned int i; struct tps6586x_platform_data *pdata; int err; regs = of_find_node_by_name(np, "regulators"); if (!regs) { dev_err(&pdev->dev, "regulator node not found\n"); return NULL; } err = of_regulator_match(&pdev->dev, regs, tps6586x_matches, num); of_node_put(regs); if (err < 0) { dev_err(&pdev->dev, "Regulator match failed, e %d\n", err); return NULL; } pdata = devm_kzalloc(&pdev->dev, sizeof(*pdata), GFP_KERNEL); if (!pdata) { dev_err(&pdev->dev, "Memory alloction failed\n"); return NULL; } for (i = 0; i < num; i++) { int id; if (!tps6586x_matches[i].init_data) continue; pdata->reg_init_data[i] = tps6586x_matches[i].init_data; id = (int)tps6586x_matches[i].driver_data; if (id == TPS6586X_ID_SYS) sys_rail = 
pdata->reg_init_data[i]->constraints.name; if ((id == TPS6586X_ID_LDO_5) || (id == TPS6586X_ID_LDO_RTC)) pdata->reg_init_data[i]->supply_regulator = sys_rail; } *tps6586x_reg_matches = tps6586x_matches; return pdata; } #else static struct tps6586x_platform_data *tps6586x_parse_regulator_dt( struct platform_device *pdev, struct of_regulator_match **tps6586x_reg_matches) { *tps6586x_reg_matches = NULL; return NULL; } #endif static int tps6586x_regulator_probe(struct platform_device *pdev) { struct tps6586x_regulator *ri = NULL; struct regulator_config config = { }; struct regulator_dev **rdev; struct regulator_init_data *reg_data; struct tps6586x_platform_data *pdata; struct of_regulator_match *tps6586x_reg_matches = NULL; int id; int err; dev_dbg(&pdev->dev, "Probing regulator\n"); pdata = dev_get_platdata(pdev->dev.parent); if ((!pdata) && (pdev->dev.parent->of_node)) pdata = tps6586x_parse_regulator_dt(pdev, &tps6586x_reg_matches); if (!pdata) { dev_err(&pdev->dev, "Platform data not available, exiting\n"); return -ENODEV; } rdev = devm_kzalloc(&pdev->dev, TPS6586X_ID_MAX_REGULATOR * sizeof(*rdev), GFP_KERNEL); if (!rdev) { dev_err(&pdev->dev, "Mmemory alloc failed\n"); return -ENOMEM; } for (id = 0; id < TPS6586X_ID_MAX_REGULATOR; ++id) { reg_data = pdata->reg_init_data[id]; ri = find_regulator_info(id); if (!ri) { dev_err(&pdev->dev, "invalid regulator ID specified\n"); err = -EINVAL; goto fail; } err = tps6586x_regulator_preinit(pdev->dev.parent, ri); if (err) { dev_err(&pdev->dev, "regulator %d preinit failed, e %d\n", id, err); goto fail; } config.dev = pdev->dev.parent; config.init_data = reg_data; config.driver_data = ri; if (tps6586x_reg_matches) config.of_node = tps6586x_reg_matches[id].of_node; rdev[id] = regulator_register(&ri->desc, &config); if (IS_ERR(rdev[id])) { dev_err(&pdev->dev, "failed to register regulator %s\n", ri->desc.name); err = PTR_ERR(rdev[id]); goto fail; } if (reg_data) { err = tps6586x_regulator_set_slew_rate(pdev, id, reg_data); if 
(err < 0) { dev_err(&pdev->dev, "Slew rate config failed, e %d\n", err); regulator_unregister(rdev[id]); goto fail; } } } platform_set_drvdata(pdev, rdev); return 0; fail: while (--id >= 0) regulator_unregister(rdev[id]); return err; } static int tps6586x_regulator_remove(struct platform_device *pdev) { struct regulator_dev **rdev = platform_get_drvdata(pdev); int id = TPS6586X_ID_MAX_REGULATOR; while (--id >= 0) regulator_unregister(rdev[id]); return 0; } static struct platform_driver tps6586x_regulator_driver = { .driver = { .name = "tps6586x-regulator", .owner = THIS_MODULE, }, .probe = tps6586x_regulator_probe, .remove = tps6586x_regulator_remove, }; static int __init tps6586x_regulator_init(void) { return platform_driver_register(&tps6586x_regulator_driver); } subsys_initcall(tps6586x_regulator_init); static void __exit tps6586x_regulator_exit(void) { platform_driver_unregister(&tps6586x_regulator_driver); } module_exit(tps6586x_regulator_exit); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Mike Rapoport <mike@compulab.co.il>"); MODULE_DESCRIPTION("Regulator Driver for TI TPS6586X PMIC"); MODULE_ALIAS("platform:tps6586x-regulator");
gpl-2.0
davidmueller13/f2fs-backport
drivers/net/ixgbe/ixgbe_dcb_nl.c
2385
23234
/******************************************************************************* Intel 10 Gigabit PCI Express Linux driver Copyright(c) 1999 - 2011 Intel Corporation. This program is free software; you can redistribute it and/or modify it under the terms and conditions of the GNU General Public License, version 2, as published by the Free Software Foundation. This program is distributed in the hope it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. The full GNU General Public License is included in this distribution in the file called "COPYING". Contact Information: Linux NICS <linux.nics@intel.com> e1000-devel Mailing List <e1000-devel@lists.sourceforge.net> Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 *******************************************************************************/ #include "ixgbe.h" #include <linux/dcbnl.h> #include "ixgbe_dcb_82598.h" #include "ixgbe_dcb_82599.h" /* Callbacks for DCB netlink in the kernel */ #define BIT_DCB_MODE 0x01 #define BIT_PFC 0x02 #define BIT_PG_RX 0x04 #define BIT_PG_TX 0x08 #define BIT_APP_UPCHG 0x10 #define BIT_LINKSPEED 0x80 /* Responses for the DCB_C_SET_ALL command */ #define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */ #define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ #define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, struct ixgbe_dcb_config *dst_dcb_cfg, int tc_max) { struct tc_configuration *src_tc_cfg = NULL; struct tc_configuration *dst_tc_cfg = NULL; int i; if (!src_dcb_cfg || !dst_dcb_cfg) return -EINVAL; for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) { src_tc_cfg = &src_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0]; dst_tc_cfg = &dst_dcb_cfg->tc_config[i - DCB_PG_ATTR_TC_0]; dst_tc_cfg->path[DCB_TX_CONFIG].prio_type = src_tc_cfg->path[DCB_TX_CONFIG].prio_type; dst_tc_cfg->path[DCB_TX_CONFIG].bwg_id = src_tc_cfg->path[DCB_TX_CONFIG].bwg_id; dst_tc_cfg->path[DCB_TX_CONFIG].bwg_percent = src_tc_cfg->path[DCB_TX_CONFIG].bwg_percent; dst_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap = src_tc_cfg->path[DCB_TX_CONFIG].up_to_tc_bitmap; dst_tc_cfg->path[DCB_RX_CONFIG].prio_type = src_tc_cfg->path[DCB_RX_CONFIG].prio_type; dst_tc_cfg->path[DCB_RX_CONFIG].bwg_id = src_tc_cfg->path[DCB_RX_CONFIG].bwg_id; dst_tc_cfg->path[DCB_RX_CONFIG].bwg_percent = src_tc_cfg->path[DCB_RX_CONFIG].bwg_percent; dst_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap = src_tc_cfg->path[DCB_RX_CONFIG].up_to_tc_bitmap; } for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) { dst_dcb_cfg->bw_percentage[DCB_TX_CONFIG] [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage 
[DCB_TX_CONFIG][i-DCB_PG_ATTR_BW_ID_0]; dst_dcb_cfg->bw_percentage[DCB_RX_CONFIG] [i-DCB_PG_ATTR_BW_ID_0] = src_dcb_cfg->bw_percentage [DCB_RX_CONFIG][i-DCB_PG_ATTR_BW_ID_0]; } for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) { dst_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc = src_dcb_cfg->tc_config[i - DCB_PFC_UP_ATTR_0].dcb_pfc; } dst_dcb_cfg->pfc_mode_enable = src_dcb_cfg->pfc_mode_enable; return 0; } static u8 ixgbe_dcbnl_get_state(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); return !!(adapter->flags & IXGBE_FLAG_DCB_ENABLED); } static u8 ixgbe_dcbnl_set_state(struct net_device *netdev, u8 state) { u8 err = 0; struct ixgbe_adapter *adapter = netdev_priv(netdev); if (state > 0) { /* Turn on DCB */ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) goto out; if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) { e_err(drv, "Enable failed, needs MSI-X\n"); err = 1; goto out; } if (netif_running(netdev)) netdev->netdev_ops->ndo_stop(netdev); ixgbe_clear_interrupt_scheme(adapter); switch (adapter->hw.mac.type) { case ixgbe_mac_82598EB: adapter->last_lfc_mode = adapter->hw.fc.current_mode; adapter->hw.fc.requested_mode = ixgbe_fc_none; break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; break; default: break; } adapter->flags |= IXGBE_FLAG_DCB_ENABLED; if (!netdev_get_num_tc(netdev)) ixgbe_setup_tc(netdev, MAX_TRAFFIC_CLASS); ixgbe_init_interrupt_scheme(adapter); if (netif_running(netdev)) netdev->netdev_ops->ndo_open(netdev); } else { /* Turn off DCB */ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { if (netif_running(netdev)) netdev->netdev_ops->ndo_stop(netdev); ixgbe_clear_interrupt_scheme(adapter); adapter->hw.fc.requested_mode = adapter->last_lfc_mode; adapter->temp_dcb_cfg.pfc_mode_enable = false; adapter->dcb_cfg.pfc_mode_enable = false; adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; switch (adapter->hw.mac.type) { case 
ixgbe_mac_82599EB: case ixgbe_mac_X540: adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; break; default: break; } ixgbe_setup_tc(netdev, 0); ixgbe_init_interrupt_scheme(adapter); if (netif_running(netdev)) netdev->netdev_ops->ndo_open(netdev); } } out: return err; } static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, u8 *perm_addr) { struct ixgbe_adapter *adapter = netdev_priv(netdev); int i, j; memset(perm_addr, 0xff, MAX_ADDR_LEN); for (i = 0; i < netdev->addr_len; i++) perm_addr[i] = adapter->hw.mac.perm_addr[i]; switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: for (j = 0; j < netdev->addr_len; j++, i++) perm_addr[i] = adapter->hw.mac.san_addr[j]; break; default: break; } } static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 prio, u8 bwg_id, u8 bw_pct, u8 up_map) { struct ixgbe_adapter *adapter = netdev_priv(netdev); if (prio != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio; if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id; if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent = bw_pct; if (up_map != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap = up_map; if ((adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type != adapter->dcb_cfg.tc_config[tc].path[0].prio_type) || (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id != adapter->dcb_cfg.tc_config[tc].path[0].bwg_id) || (adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_percent != adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent) || (adapter->temp_dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap != adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap)) adapter->dcb_set_bitmap |= BIT_PG_TX; } static void ixgbe_dcbnl_set_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, u8 bw_pct) { struct ixgbe_adapter *adapter = netdev_priv(netdev); 
adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] = bw_pct; if (adapter->temp_dcb_cfg.bw_percentage[0][bwg_id] != adapter->dcb_cfg.bw_percentage[0][bwg_id]) adapter->dcb_set_bitmap |= BIT_PG_TX; } static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, u8 prio, u8 bwg_id, u8 bw_pct, u8 up_map) { struct ixgbe_adapter *adapter = netdev_priv(netdev); if (prio != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio; if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id; if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent = bw_pct; if (up_map != DCB_ATTR_VALUE_UNDEFINED) adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap = up_map; if ((adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type != adapter->dcb_cfg.tc_config[tc].path[1].prio_type) || (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id != adapter->dcb_cfg.tc_config[tc].path[1].bwg_id) || (adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_percent != adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent) || (adapter->temp_dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap != adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap)) adapter->dcb_set_bitmap |= BIT_PG_RX; } static void ixgbe_dcbnl_set_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, u8 bw_pct) { struct ixgbe_adapter *adapter = netdev_priv(netdev); adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] = bw_pct; if (adapter->temp_dcb_cfg.bw_percentage[1][bwg_id] != adapter->dcb_cfg.bw_percentage[1][bwg_id]) adapter->dcb_set_bitmap |= BIT_PG_RX; } static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, u8 *prio, u8 *bwg_id, u8 *bw_pct, u8 *up_map) { struct ixgbe_adapter *adapter = netdev_priv(netdev); *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type; *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id; *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent; *up_map = 
adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap; } static void ixgbe_dcbnl_get_pg_bwg_cfg_tx(struct net_device *netdev, int bwg_id, u8 *bw_pct) { struct ixgbe_adapter *adapter = netdev_priv(netdev); *bw_pct = adapter->dcb_cfg.bw_percentage[0][bwg_id]; } static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc, u8 *prio, u8 *bwg_id, u8 *bw_pct, u8 *up_map) { struct ixgbe_adapter *adapter = netdev_priv(netdev); *prio = adapter->dcb_cfg.tc_config[tc].path[1].prio_type; *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id; *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent; *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap; } static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, u8 *bw_pct) { struct ixgbe_adapter *adapter = netdev_priv(netdev); *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id]; } static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, u8 setting) { struct ixgbe_adapter *adapter = netdev_priv(netdev); adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting; if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc != adapter->dcb_cfg.tc_config[priority].dcb_pfc) { adapter->dcb_set_bitmap |= BIT_PFC; adapter->temp_dcb_cfg.pfc_mode_enable = true; } } static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, u8 *setting) { struct ixgbe_adapter *adapter = netdev_priv(netdev); *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc; } static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct dcb_app app = { .selector = DCB_APP_IDTYPE_ETHTYPE, .protocol = ETH_P_FCOE, }; u8 up = dcb_getapp(netdev, &app); int ret; ret = ixgbe_copy_dcb_cfg(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, MAX_TRAFFIC_CLASS); if (ret) return DCB_NO_HW_CHG; /* In IEEE mode app data must be parsed into DCBX format for * hardware routines. 
*/ if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) up = (1 << up); #ifdef IXGBE_FCOE if (up && (up != (1 << adapter->fcoe.up))) adapter->dcb_set_bitmap |= BIT_APP_UPCHG; /* * Only take down the adapter if an app change occurred. FCoE * may shuffle tx rings in this case and this can not be done * without a reset currently. */ if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) usleep_range(1000, 2000); ixgbe_fcoe_setapp(adapter, up); if (netif_running(netdev)) netdev->netdev_ops->ndo_stop(netdev); ixgbe_clear_interrupt_scheme(adapter); } #endif if (adapter->dcb_cfg.pfc_mode_enable) { switch (adapter->hw.mac.type) { case ixgbe_mac_82599EB: case ixgbe_mac_X540: if (adapter->hw.fc.current_mode != ixgbe_fc_pfc) adapter->last_lfc_mode = adapter->hw.fc.current_mode; break; default: break; } adapter->hw.fc.requested_mode = ixgbe_fc_pfc; } else { switch (adapter->hw.mac.type) { case ixgbe_mac_82598EB: adapter->hw.fc.requested_mode = ixgbe_fc_none; break; case ixgbe_mac_82599EB: case ixgbe_mac_X540: adapter->hw.fc.requested_mode = adapter->last_lfc_mode; break; default: break; } } #ifdef IXGBE_FCOE if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { ixgbe_init_interrupt_scheme(adapter); if (netif_running(netdev)) netdev->netdev_ops->ndo_open(netdev); ret = DCB_HW_CHG_RST; } #endif if (adapter->dcb_set_bitmap & BIT_PFC) { u8 pfc_en; ixgbe_dcb_unpack_pfc(&adapter->dcb_cfg, &pfc_en); ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc_en); ret = DCB_HW_CHG; } if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) { u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS]; u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS]; /* Priority to TC mapping in CEE case default to 1:1 */ u8 prio_tc[MAX_TRAFFIC_CLASS] = {0, 1, 2, 3, 4, 5, 6, 7}; int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; #ifdef CONFIG_FCOE if (adapter->netdev->features & NETIF_F_FCOE_MTU) max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); #endif 
ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg, max_frame, DCB_TX_CONFIG); ixgbe_dcb_calculate_tc_credits(&adapter->hw, &adapter->dcb_cfg, max_frame, DCB_RX_CONFIG); ixgbe_dcb_unpack_refill(&adapter->dcb_cfg, DCB_TX_CONFIG, refill); ixgbe_dcb_unpack_max(&adapter->dcb_cfg, max); ixgbe_dcb_unpack_bwgid(&adapter->dcb_cfg, DCB_TX_CONFIG, bwg_id); ixgbe_dcb_unpack_prio(&adapter->dcb_cfg, DCB_TX_CONFIG, prio_type); ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max, bwg_id, prio_type, prio_tc); } if (adapter->dcb_cfg.pfc_mode_enable) adapter->hw.fc.current_mode = ixgbe_fc_pfc; if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) clear_bit(__IXGBE_RESETTING, &adapter->state); adapter->dcb_set_bitmap = 0x00; return ret; } static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) { struct ixgbe_adapter *adapter = netdev_priv(netdev); switch (capid) { case DCB_CAP_ATTR_PG: *cap = true; break; case DCB_CAP_ATTR_PFC: *cap = true; break; case DCB_CAP_ATTR_UP2TC: *cap = false; break; case DCB_CAP_ATTR_PG_TCS: *cap = 0x80; break; case DCB_CAP_ATTR_PFC_TCS: *cap = 0x80; break; case DCB_CAP_ATTR_GSP: *cap = true; break; case DCB_CAP_ATTR_BCN: *cap = false; break; case DCB_CAP_ATTR_DCBX: *cap = adapter->dcbx_cap; break; default: *cap = false; break; } return 0; } static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) { struct ixgbe_adapter *adapter = netdev_priv(netdev); u8 rval = 0; if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { switch (tcid) { case DCB_NUMTCS_ATTR_PG: *num = MAX_TRAFFIC_CLASS; break; case DCB_NUMTCS_ATTR_PFC: *num = MAX_TRAFFIC_CLASS; break; default: rval = -EINVAL; break; } } else { rval = -EINVAL; } return rval; } static u8 ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) { return -EINVAL; } static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev) { struct ixgbe_adapter *adapter = netdev_priv(netdev); return adapter->dcb_cfg.pfc_mode_enable; } static void ixgbe_dcbnl_setpfcstate(struct 
net_device *netdev, u8 state) { struct ixgbe_adapter *adapter = netdev_priv(netdev); adapter->temp_dcb_cfg.pfc_mode_enable = state; if (adapter->temp_dcb_cfg.pfc_mode_enable != adapter->dcb_cfg.pfc_mode_enable) adapter->dcb_set_bitmap |= BIT_PFC; } /** * ixgbe_dcbnl_getapp - retrieve the DCBX application user priority * @netdev : the corresponding netdev * @idtype : identifies the id as ether type or TCP/UDP port number * @id: id is either ether type or TCP/UDP port number * * Returns : on success, returns a non-zero 802.1p user priority bitmap * otherwise returns 0 as the invalid user priority bitmap to indicate an * error. */ static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) { struct ixgbe_adapter *adapter = netdev_priv(netdev); struct dcb_app app = { .selector = idtype, .protocol = id, }; if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) return 0; return dcb_getapp(netdev, &app); } static int ixgbe_dcbnl_ieee_getets(struct net_device *dev, struct ieee_ets *ets) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets; /* No IEEE PFC settings available */ if (!my_ets) return -EINVAL; ets->ets_cap = MAX_TRAFFIC_CLASS; ets->cbs = my_ets->cbs; memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)); memcpy(ets->tc_tsa, my_ets->tc_tsa, sizeof(ets->tc_tsa)); memcpy(ets->prio_tc, my_ets->prio_tc, sizeof(ets->prio_tc)); return 0; } static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, struct ieee_ets *ets) { struct ixgbe_adapter *adapter = netdev_priv(dev); __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS]; __u8 prio_type[IEEE_8021QAZ_MAX_TCS]; int max_frame = dev->mtu + ETH_HLEN + ETH_FCS_LEN; int i, err; __u64 *p = (__u64 *) ets->prio_tc; /* naively give each TC a bwg to map onto CEE hardware */ __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7}; if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) return 
-EINVAL; if (!adapter->ixgbe_ieee_ets) { adapter->ixgbe_ieee_ets = kmalloc(sizeof(struct ieee_ets), GFP_KERNEL); if (!adapter->ixgbe_ieee_ets) return -ENOMEM; } memcpy(adapter->ixgbe_ieee_ets, ets, sizeof(*adapter->ixgbe_ieee_ets)); /* Map TSA onto CEE prio type */ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { switch (ets->tc_tsa[i]) { case IEEE_8021QAZ_TSA_STRICT: prio_type[i] = 2; break; case IEEE_8021QAZ_TSA_ETS: prio_type[i] = 0; break; default: /* Hardware only supports priority strict or * ETS transmission selection algorithms if * we receive some other value from dcbnl * throw an error */ return -EINVAL; } } if (*p) ixgbe_dcbnl_set_state(dev, 1); else ixgbe_dcbnl_set_state(dev, 0); ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame); err = ixgbe_dcb_hw_ets_config(&adapter->hw, refill, max, bwg_id, prio_type, ets->prio_tc); return err; } static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev, struct ieee_pfc *pfc) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc; int i; /* No IEEE PFC settings available */ if (!my_pfc) return -EINVAL; pfc->pfc_cap = MAX_TRAFFIC_CLASS; pfc->pfc_en = my_pfc->pfc_en; pfc->mbc = my_pfc->mbc; pfc->delay = my_pfc->delay; for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { pfc->requests[i] = adapter->stats.pxoffrxc[i]; pfc->indications[i] = adapter->stats.pxofftxc[i]; } return 0; } static int ixgbe_dcbnl_ieee_setpfc(struct net_device *dev, struct ieee_pfc *pfc) { struct ixgbe_adapter *adapter = netdev_priv(dev); int err; if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) return -EINVAL; if (!adapter->ixgbe_ieee_pfc) { adapter->ixgbe_ieee_pfc = kmalloc(sizeof(struct ieee_pfc), GFP_KERNEL); if (!adapter->ixgbe_ieee_pfc) return -ENOMEM; } memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc)); err = ixgbe_dcb_hw_pfc_config(&adapter->hw, pfc->pfc_en); return err; } static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, struct dcb_app *app) { struct ixgbe_adapter 
*adapter = netdev_priv(dev); if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE)) return -EINVAL; dcb_setapp(dev, app); #ifdef IXGBE_FCOE if (app->selector == 1 && app->protocol == ETH_P_FCOE && adapter->fcoe.tc == app->priority) ixgbe_dcbnl_set_all(dev); #endif return 0; } static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev) { struct ixgbe_adapter *adapter = netdev_priv(dev); return adapter->dcbx_cap; } static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) { struct ixgbe_adapter *adapter = netdev_priv(dev); struct ieee_ets ets = {0}; struct ieee_pfc pfc = {0}; /* no support for LLD_MANAGED modes or CEE+IEEE */ if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || ((mode & DCB_CAP_DCBX_VER_IEEE) && (mode & DCB_CAP_DCBX_VER_CEE)) || !(mode & DCB_CAP_DCBX_HOST)) return 1; if (mode == adapter->dcbx_cap) return 0; adapter->dcbx_cap = mode; /* ETS and PFC defaults */ ets.ets_cap = 8; pfc.pfc_cap = 8; if (mode & DCB_CAP_DCBX_VER_IEEE) { ixgbe_dcbnl_ieee_setets(dev, &ets); ixgbe_dcbnl_ieee_setpfc(dev, &pfc); } else if (mode & DCB_CAP_DCBX_VER_CEE) { adapter->dcb_set_bitmap |= (BIT_PFC & BIT_PG_TX & BIT_PG_RX); ixgbe_dcbnl_set_all(dev); } else { /* Drop into single TC mode strict priority as this * indicates CEE and IEEE versions are disabled */ ixgbe_dcbnl_ieee_setets(dev, &ets); ixgbe_dcbnl_ieee_setpfc(dev, &pfc); ixgbe_dcbnl_set_state(dev, 0); } return 0; } const struct dcbnl_rtnl_ops dcbnl_ops = { .ieee_getets = ixgbe_dcbnl_ieee_getets, .ieee_setets = ixgbe_dcbnl_ieee_setets, .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc, .ieee_setpfc = ixgbe_dcbnl_ieee_setpfc, .ieee_setapp = ixgbe_dcbnl_ieee_setapp, .getstate = ixgbe_dcbnl_get_state, .setstate = ixgbe_dcbnl_set_state, .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr, .setpgtccfgtx = ixgbe_dcbnl_set_pg_tc_cfg_tx, .setpgbwgcfgtx = ixgbe_dcbnl_set_pg_bwg_cfg_tx, .setpgtccfgrx = ixgbe_dcbnl_set_pg_tc_cfg_rx, .setpgbwgcfgrx = ixgbe_dcbnl_set_pg_bwg_cfg_rx, .getpgtccfgtx = ixgbe_dcbnl_get_pg_tc_cfg_tx, .getpgbwgcfgtx = 
ixgbe_dcbnl_get_pg_bwg_cfg_tx, .getpgtccfgrx = ixgbe_dcbnl_get_pg_tc_cfg_rx, .getpgbwgcfgrx = ixgbe_dcbnl_get_pg_bwg_cfg_rx, .setpfccfg = ixgbe_dcbnl_set_pfc_cfg, .getpfccfg = ixgbe_dcbnl_get_pfc_cfg, .setall = ixgbe_dcbnl_set_all, .getcap = ixgbe_dcbnl_getcap, .getnumtcs = ixgbe_dcbnl_getnumtcs, .setnumtcs = ixgbe_dcbnl_setnumtcs, .getpfcstate = ixgbe_dcbnl_getpfcstate, .setpfcstate = ixgbe_dcbnl_setpfcstate, .getapp = ixgbe_dcbnl_getapp, .getdcbx = ixgbe_dcbnl_getdcbx, .setdcbx = ixgbe_dcbnl_setdcbx, };
gpl-2.0
attn1/android_kernel_pantech_p8010
drivers/media/video/pxa_camera.c
2385
51705
/* * V4L2 Driver for PXA camera host * * Copyright (C) 2006, Sascha Hauer, Pengutronix * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/init.h> #include <linux/module.h> #include <linux/io.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/moduleparam.h> #include <linux/time.h> #include <linux/version.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/clk.h> #include <linux/sched.h> #include <linux/slab.h> #include <media/v4l2-common.h> #include <media/v4l2-dev.h> #include <media/videobuf-dma-sg.h> #include <media/soc_camera.h> #include <media/soc_mediabus.h> #include <linux/videodev2.h> #include <mach/dma.h> #include <mach/camera.h> #define PXA_CAM_VERSION_CODE KERNEL_VERSION(0, 0, 5) #define PXA_CAM_DRV_NAME "pxa27x-camera" /* Camera Interface */ #define CICR0 0x0000 #define CICR1 0x0004 #define CICR2 0x0008 #define CICR3 0x000C #define CICR4 0x0010 #define CISR 0x0014 #define CIFR 0x0018 #define CITOR 0x001C #define CIBR0 0x0028 #define CIBR1 0x0030 #define CIBR2 0x0038 #define CICR0_DMAEN (1 << 31) /* DMA request enable */ #define CICR0_PAR_EN (1 << 30) /* Parity enable */ #define CICR0_SL_CAP_EN (1 << 29) /* Capture enable for slave mode */ #define CICR0_ENB (1 << 28) /* Camera interface enable */ #define CICR0_DIS (1 << 27) /* Camera interface disable */ #define CICR0_SIM (0x7 << 24) /* Sensor interface mode mask */ #define CICR0_TOM (1 << 9) /* Time-out mask */ #define CICR0_RDAVM (1 << 8) /* Receive-data-available mask */ #define CICR0_FEM (1 << 7) /* FIFO-empty mask */ #define CICR0_EOLM (1 << 6) /* 
End-of-line mask */ #define CICR0_PERRM (1 << 5) /* Parity-error mask */ #define CICR0_QDM (1 << 4) /* Quick-disable mask */ #define CICR0_CDM (1 << 3) /* Disable-done mask */ #define CICR0_SOFM (1 << 2) /* Start-of-frame mask */ #define CICR0_EOFM (1 << 1) /* End-of-frame mask */ #define CICR0_FOM (1 << 0) /* FIFO-overrun mask */ #define CICR1_TBIT (1 << 31) /* Transparency bit */ #define CICR1_RGBT_CONV (0x3 << 29) /* RGBT conversion mask */ #define CICR1_PPL (0x7ff << 15) /* Pixels per line mask */ #define CICR1_RGB_CONV (0x7 << 12) /* RGB conversion mask */ #define CICR1_RGB_F (1 << 11) /* RGB format */ #define CICR1_YCBCR_F (1 << 10) /* YCbCr format */ #define CICR1_RGB_BPP (0x7 << 7) /* RGB bis per pixel mask */ #define CICR1_RAW_BPP (0x3 << 5) /* Raw bis per pixel mask */ #define CICR1_COLOR_SP (0x3 << 3) /* Color space mask */ #define CICR1_DW (0x7 << 0) /* Data width mask */ #define CICR2_BLW (0xff << 24) /* Beginning-of-line pixel clock wait count mask */ #define CICR2_ELW (0xff << 16) /* End-of-line pixel clock wait count mask */ #define CICR2_HSW (0x3f << 10) /* Horizontal sync pulse width mask */ #define CICR2_BFPW (0x3f << 3) /* Beginning-of-frame pixel clock wait count mask */ #define CICR2_FSW (0x7 << 0) /* Frame stabilization wait count mask */ #define CICR3_BFW (0xff << 24) /* Beginning-of-frame line clock wait count mask */ #define CICR3_EFW (0xff << 16) /* End-of-frame line clock wait count mask */ #define CICR3_VSW (0x3f << 10) /* Vertical sync pulse width mask */ #define CICR3_BFPW (0x3f << 3) /* Beginning-of-frame pixel clock wait count mask */ #define CICR3_LPF (0x7ff << 0) /* Lines per frame mask */ #define CICR4_MCLK_DLY (0x3 << 24) /* MCLK Data Capture Delay mask */ #define CICR4_PCLK_EN (1 << 23) /* Pixel clock enable */ #define CICR4_PCP (1 << 22) /* Pixel clock polarity */ #define CICR4_HSP (1 << 21) /* Horizontal sync polarity */ #define CICR4_VSP (1 << 20) /* Vertical sync polarity */ #define CICR4_MCLK_EN (1 << 19) /* MCLK enable */ 
#define CICR4_FR_RATE (0x7 << 8) /* Frame rate mask */ #define CICR4_DIV (0xff << 0) /* Clock divisor mask */ #define CISR_FTO (1 << 15) /* FIFO time-out */ #define CISR_RDAV_2 (1 << 14) /* Channel 2 receive data available */ #define CISR_RDAV_1 (1 << 13) /* Channel 1 receive data available */ #define CISR_RDAV_0 (1 << 12) /* Channel 0 receive data available */ #define CISR_FEMPTY_2 (1 << 11) /* Channel 2 FIFO empty */ #define CISR_FEMPTY_1 (1 << 10) /* Channel 1 FIFO empty */ #define CISR_FEMPTY_0 (1 << 9) /* Channel 0 FIFO empty */ #define CISR_EOL (1 << 8) /* End of line */ #define CISR_PAR_ERR (1 << 7) /* Parity error */ #define CISR_CQD (1 << 6) /* Camera interface quick disable */ #define CISR_CDD (1 << 5) /* Camera interface disable done */ #define CISR_SOF (1 << 4) /* Start of frame */ #define CISR_EOF (1 << 3) /* End of frame */ #define CISR_IFO_2 (1 << 2) /* FIFO overrun for Channel 2 */ #define CISR_IFO_1 (1 << 1) /* FIFO overrun for Channel 1 */ #define CISR_IFO_0 (1 << 0) /* FIFO overrun for Channel 0 */ #define CIFR_FLVL2 (0x7f << 23) /* FIFO 2 level mask */ #define CIFR_FLVL1 (0x7f << 16) /* FIFO 1 level mask */ #define CIFR_FLVL0 (0xff << 8) /* FIFO 0 level mask */ #define CIFR_THL_0 (0x3 << 4) /* Threshold Level for Channel 0 FIFO */ #define CIFR_RESET_F (1 << 3) /* Reset input FIFOs */ #define CIFR_FEN2 (1 << 2) /* FIFO enable for channel 2 */ #define CIFR_FEN1 (1 << 1) /* FIFO enable for channel 1 */ #define CIFR_FEN0 (1 << 0) /* FIFO enable for channel 0 */ #define CICR0_SIM_MP (0 << 24) #define CICR0_SIM_SP (1 << 24) #define CICR0_SIM_MS (2 << 24) #define CICR0_SIM_EP (3 << 24) #define CICR0_SIM_ES (4 << 24) #define CICR1_DW_VAL(x) ((x) & CICR1_DW) /* Data bus width */ #define CICR1_PPL_VAL(x) (((x) << 15) & CICR1_PPL) /* Pixels per line */ #define CICR1_COLOR_SP_VAL(x) (((x) << 3) & CICR1_COLOR_SP) /* color space */ #define CICR1_RGB_BPP_VAL(x) (((x) << 7) & CICR1_RGB_BPP) /* bpp for rgb */ #define CICR1_RGBT_CONV_VAL(x) (((x) << 29) & 
CICR1_RGBT_CONV) /* rgbt conv */ #define CICR2_BLW_VAL(x) (((x) << 24) & CICR2_BLW) /* Beginning-of-line pixel clock wait count */ #define CICR2_ELW_VAL(x) (((x) << 16) & CICR2_ELW) /* End-of-line pixel clock wait count */ #define CICR2_HSW_VAL(x) (((x) << 10) & CICR2_HSW) /* Horizontal sync pulse width */ #define CICR2_BFPW_VAL(x) (((x) << 3) & CICR2_BFPW) /* Beginning-of-frame pixel clock wait count */ #define CICR2_FSW_VAL(x) (((x) << 0) & CICR2_FSW) /* Frame stabilization wait count */ #define CICR3_BFW_VAL(x) (((x) << 24) & CICR3_BFW) /* Beginning-of-frame line clock wait count */ #define CICR3_EFW_VAL(x) (((x) << 16) & CICR3_EFW) /* End-of-frame line clock wait count */ #define CICR3_VSW_VAL(x) (((x) << 11) & CICR3_VSW) /* Vertical sync pulse width */ #define CICR3_LPF_VAL(x) (((x) << 0) & CICR3_LPF) /* Lines per frame */ #define CICR0_IRQ_MASK (CICR0_TOM | CICR0_RDAVM | CICR0_FEM | CICR0_EOLM | \ CICR0_PERRM | CICR0_QDM | CICR0_CDM | CICR0_SOFM | \ CICR0_EOFM | CICR0_FOM) /* * Structures */ enum pxa_camera_active_dma { DMA_Y = 0x1, DMA_U = 0x2, DMA_V = 0x4, }; /* descriptor needed for the PXA DMA engine */ struct pxa_cam_dma { dma_addr_t sg_dma; struct pxa_dma_desc *sg_cpu; size_t sg_size; int sglen; }; /* buffer for one video frame */ struct pxa_buffer { /* common v4l buffer stuff -- must be first */ struct videobuf_buffer vb; enum v4l2_mbus_pixelcode code; /* our descriptor lists for Y, U and V channels */ struct pxa_cam_dma dmas[3]; int inwork; enum pxa_camera_active_dma active_dma; }; struct pxa_camera_dev { struct soc_camera_host soc_host; /* * PXA27x is only supposed to handle one camera on its Quick Capture * interface. 
If anyone ever builds hardware to enable more than * one camera, they will have to modify this driver too */ struct soc_camera_device *icd; struct clk *clk; unsigned int irq; void __iomem *base; int channels; unsigned int dma_chans[3]; struct pxacamera_platform_data *pdata; struct resource *res; unsigned long platform_flags; unsigned long ciclk; unsigned long mclk; u32 mclk_divisor; struct list_head capture; spinlock_t lock; struct pxa_buffer *active; struct pxa_dma_desc *sg_tail[3]; u32 save_cicr[5]; }; struct pxa_cam { unsigned long flags; }; static const char *pxa_cam_driver_description = "PXA_Camera"; static unsigned int vid_limit = 16; /* Video memory limit, in Mb */ /* * Videobuf operations */ static int pxa_videobuf_setup(struct videobuf_queue *vq, unsigned int *count, unsigned int *size) { struct soc_camera_device *icd = vq->priv_data; int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width, icd->current_fmt->host_fmt); if (bytes_per_line < 0) return bytes_per_line; dev_dbg(icd->dev.parent, "count=%d, size=%d\n", *count, *size); *size = bytes_per_line * icd->user_height; if (0 == *count) *count = 32; if (*size * *count > vid_limit * 1024 * 1024) *count = (vid_limit * 1024 * 1024) / *size; return 0; } static void free_buffer(struct videobuf_queue *vq, struct pxa_buffer *buf) { struct soc_camera_device *icd = vq->priv_data; struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct videobuf_dmabuf *dma = videobuf_to_dma(&buf->vb); int i; BUG_ON(in_interrupt()); dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d\n", __func__, &buf->vb, buf->vb.baddr, buf->vb.bsize); /* * This waits until this buffer is out of danger, i.e., until it is no * longer in STATE_QUEUED or STATE_ACTIVE */ videobuf_waiton(vq, &buf->vb, 0, 0); videobuf_dma_unmap(vq->dev, dma); videobuf_dma_free(dma); for (i = 0; i < ARRAY_SIZE(buf->dmas); i++) { if (buf->dmas[i].sg_cpu) dma_free_coherent(ici->v4l2_dev.dev, buf->dmas[i].sg_size, buf->dmas[i].sg_cpu, 
buf->dmas[i].sg_dma); buf->dmas[i].sg_cpu = NULL; } buf->vb.state = VIDEOBUF_NEEDS_INIT; } static int calculate_dma_sglen(struct scatterlist *sglist, int sglen, int sg_first_ofs, int size) { int i, offset, dma_len, xfer_len; struct scatterlist *sg; offset = sg_first_ofs; for_each_sg(sglist, sg, sglen, i) { dma_len = sg_dma_len(sg); /* PXA27x Developer's Manual 27.4.4.1: round up to 8 bytes */ xfer_len = roundup(min(dma_len - offset, size), 8); size = max(0, size - xfer_len); offset = 0; if (size == 0) break; } BUG_ON(size != 0); return i + 1; } /** * pxa_init_dma_channel - init dma descriptors * @pcdev: pxa camera device * @buf: pxa buffer to find pxa dma channel * @dma: dma video buffer * @channel: dma channel (0 => 'Y', 1 => 'U', 2 => 'V') * @cibr: camera Receive Buffer Register * @size: bytes to transfer * @sg_first: first element of sg_list * @sg_first_ofs: offset in first element of sg_list * * Prepares the pxa dma descriptors to transfer one camera channel. * Beware sg_first and sg_first_ofs are both input and output parameters. 
* * Returns 0 or -ENOMEM if no coherent memory is available */ static int pxa_init_dma_channel(struct pxa_camera_dev *pcdev, struct pxa_buffer *buf, struct videobuf_dmabuf *dma, int channel, int cibr, int size, struct scatterlist **sg_first, int *sg_first_ofs) { struct pxa_cam_dma *pxa_dma = &buf->dmas[channel]; struct device *dev = pcdev->soc_host.v4l2_dev.dev; struct scatterlist *sg; int i, offset, sglen; int dma_len = 0, xfer_len = 0; if (pxa_dma->sg_cpu) dma_free_coherent(dev, pxa_dma->sg_size, pxa_dma->sg_cpu, pxa_dma->sg_dma); sglen = calculate_dma_sglen(*sg_first, dma->sglen, *sg_first_ofs, size); pxa_dma->sg_size = (sglen + 1) * sizeof(struct pxa_dma_desc); pxa_dma->sg_cpu = dma_alloc_coherent(dev, pxa_dma->sg_size, &pxa_dma->sg_dma, GFP_KERNEL); if (!pxa_dma->sg_cpu) return -ENOMEM; pxa_dma->sglen = sglen; offset = *sg_first_ofs; dev_dbg(dev, "DMA: sg_first=%p, sglen=%d, ofs=%d, dma.desc=%x\n", *sg_first, sglen, *sg_first_ofs, pxa_dma->sg_dma); for_each_sg(*sg_first, sg, sglen, i) { dma_len = sg_dma_len(sg); /* PXA27x Developer's Manual 27.4.4.1: round up to 8 bytes */ xfer_len = roundup(min(dma_len - offset, size), 8); size = max(0, size - xfer_len); pxa_dma->sg_cpu[i].dsadr = pcdev->res->start + cibr; pxa_dma->sg_cpu[i].dtadr = sg_dma_address(sg) + offset; pxa_dma->sg_cpu[i].dcmd = DCMD_FLOWSRC | DCMD_BURST8 | DCMD_INCTRGADDR | xfer_len; #ifdef DEBUG if (!i) pxa_dma->sg_cpu[i].dcmd |= DCMD_STARTIRQEN; #endif pxa_dma->sg_cpu[i].ddadr = pxa_dma->sg_dma + (i + 1) * sizeof(struct pxa_dma_desc); dev_vdbg(dev, "DMA: desc.%08x->@phys=0x%08x, len=%d\n", pxa_dma->sg_dma + i * sizeof(struct pxa_dma_desc), sg_dma_address(sg) + offset, xfer_len); offset = 0; if (size == 0) break; } pxa_dma->sg_cpu[sglen].ddadr = DDADR_STOP; pxa_dma->sg_cpu[sglen].dcmd = DCMD_FLOWSRC | DCMD_BURST8 | DCMD_ENDIRQEN; /* * Handle 1 special case : * - in 3 planes (YUV422P format), we might finish with xfer_len equal * to dma_len (end on PAGE boundary). 
In this case, the sg element * for next plane should be the next after the last used to store the * last scatter gather RAM page */ if (xfer_len >= dma_len) { *sg_first_ofs = xfer_len - dma_len; *sg_first = sg_next(sg); } else { *sg_first_ofs = xfer_len; *sg_first = sg; } return 0; } static void pxa_videobuf_set_actdma(struct pxa_camera_dev *pcdev, struct pxa_buffer *buf) { buf->active_dma = DMA_Y; if (pcdev->channels == 3) buf->active_dma |= DMA_U | DMA_V; } /* * Please check the DMA prepared buffer structure in : * Documentation/video4linux/pxa_camera.txt * Please check also in pxa_camera_check_link_miss() to understand why DMA chain * modification while DMA chain is running will work anyway. */ static int pxa_videobuf_prepare(struct videobuf_queue *vq, struct videobuf_buffer *vb, enum v4l2_field field) { struct soc_camera_device *icd = vq->priv_data; struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct pxa_camera_dev *pcdev = ici->priv; struct device *dev = pcdev->soc_host.v4l2_dev.dev; struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb); int ret; int size_y, size_u = 0, size_v = 0; int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width, icd->current_fmt->host_fmt); if (bytes_per_line < 0) return bytes_per_line; dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, vb, vb->baddr, vb->bsize); /* Added list head initialization on alloc */ WARN_ON(!list_empty(&vb->queue)); #ifdef DEBUG /* * This can be useful if you want to see if we actually fill * the buffer with something */ memset((void *)vb->baddr, 0xaa, vb->bsize); #endif BUG_ON(NULL == icd->current_fmt); /* * I think, in buf_prepare you only have to protect global data, * the actual buffer is yours */ buf->inwork = 1; if (buf->code != icd->current_fmt->code || vb->width != icd->user_width || vb->height != icd->user_height || vb->field != field) { buf->code = icd->current_fmt->code; vb->width = icd->user_width; vb->height = icd->user_height; vb->field = field; vb->state = 
VIDEOBUF_NEEDS_INIT; } vb->size = bytes_per_line * vb->height; if (0 != vb->baddr && vb->bsize < vb->size) { ret = -EINVAL; goto out; } if (vb->state == VIDEOBUF_NEEDS_INIT) { int size = vb->size; int next_ofs = 0; struct videobuf_dmabuf *dma = videobuf_to_dma(vb); struct scatterlist *sg; ret = videobuf_iolock(vq, vb, NULL); if (ret) goto fail; if (pcdev->channels == 3) { size_y = size / 2; size_u = size_v = size / 4; } else { size_y = size; } sg = dma->sglist; /* init DMA for Y channel */ ret = pxa_init_dma_channel(pcdev, buf, dma, 0, CIBR0, size_y, &sg, &next_ofs); if (ret) { dev_err(dev, "DMA initialization for Y/RGB failed\n"); goto fail; } /* init DMA for U channel */ if (size_u) ret = pxa_init_dma_channel(pcdev, buf, dma, 1, CIBR1, size_u, &sg, &next_ofs); if (ret) { dev_err(dev, "DMA initialization for U failed\n"); goto fail_u; } /* init DMA for V channel */ if (size_v) ret = pxa_init_dma_channel(pcdev, buf, dma, 2, CIBR2, size_v, &sg, &next_ofs); if (ret) { dev_err(dev, "DMA initialization for V failed\n"); goto fail_v; } vb->state = VIDEOBUF_PREPARED; } buf->inwork = 0; pxa_videobuf_set_actdma(pcdev, buf); return 0; fail_v: dma_free_coherent(dev, buf->dmas[1].sg_size, buf->dmas[1].sg_cpu, buf->dmas[1].sg_dma); fail_u: dma_free_coherent(dev, buf->dmas[0].sg_size, buf->dmas[0].sg_cpu, buf->dmas[0].sg_dma); fail: free_buffer(vq, buf); out: buf->inwork = 0; return ret; } /** * pxa_dma_start_channels - start DMA channel for active buffer * @pcdev: pxa camera device * * Initialize DMA channels to the beginning of the active video buffer, and * start these channels. 
*/ static void pxa_dma_start_channels(struct pxa_camera_dev *pcdev) { int i; struct pxa_buffer *active; active = pcdev->active; for (i = 0; i < pcdev->channels; i++) { dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s (channel=%d) ddadr=%08x\n", __func__, i, active->dmas[i].sg_dma); DDADR(pcdev->dma_chans[i]) = active->dmas[i].sg_dma; DCSR(pcdev->dma_chans[i]) = DCSR_RUN; } } static void pxa_dma_stop_channels(struct pxa_camera_dev *pcdev) { int i; for (i = 0; i < pcdev->channels; i++) { dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s (channel=%d)\n", __func__, i); DCSR(pcdev->dma_chans[i]) = 0; } } static void pxa_dma_add_tail_buf(struct pxa_camera_dev *pcdev, struct pxa_buffer *buf) { int i; struct pxa_dma_desc *buf_last_desc; for (i = 0; i < pcdev->channels; i++) { buf_last_desc = buf->dmas[i].sg_cpu + buf->dmas[i].sglen; buf_last_desc->ddadr = DDADR_STOP; if (pcdev->sg_tail[i]) /* Link the new buffer to the old tail */ pcdev->sg_tail[i]->ddadr = buf->dmas[i].sg_dma; /* Update the channel tail */ pcdev->sg_tail[i] = buf_last_desc; } } /** * pxa_camera_start_capture - start video capturing * @pcdev: camera device * * Launch capturing. DMA channels should not be active yet. They should get * activated at the end of frame interrupt, to capture only whole frames, and * never begin the capture of a partial frame. */ static void pxa_camera_start_capture(struct pxa_camera_dev *pcdev) { unsigned long cicr0; dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s\n", __func__); /* Enable End-Of-Frame Interrupt */ cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_ENB; cicr0 &= ~CICR0_EOFM; __raw_writel(cicr0, pcdev->base + CICR0); } static void pxa_camera_stop_capture(struct pxa_camera_dev *pcdev) { unsigned long cicr0; pxa_dma_stop_channels(pcdev); cicr0 = __raw_readl(pcdev->base + CICR0) & ~CICR0_ENB; __raw_writel(cicr0, pcdev->base + CICR0); pcdev->active = NULL; dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s\n", __func__); } /* Called under spinlock_irqsave(&pcdev->lock, ...) 
*/ static void pxa_videobuf_queue(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct soc_camera_device *icd = vq->priv_data; struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct pxa_camera_dev *pcdev = ici->priv; struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb); dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%08lx %d active=%p\n", __func__, vb, vb->baddr, vb->bsize, pcdev->active); list_add_tail(&vb->queue, &pcdev->capture); vb->state = VIDEOBUF_ACTIVE; pxa_dma_add_tail_buf(pcdev, buf); if (!pcdev->active) pxa_camera_start_capture(pcdev); } static void pxa_videobuf_release(struct videobuf_queue *vq, struct videobuf_buffer *vb) { struct pxa_buffer *buf = container_of(vb, struct pxa_buffer, vb); #ifdef DEBUG struct soc_camera_device *icd = vq->priv_data; struct device *dev = icd->dev.parent; dev_dbg(dev, "%s (vb=0x%p) 0x%08lx %d\n", __func__, vb, vb->baddr, vb->bsize); switch (vb->state) { case VIDEOBUF_ACTIVE: dev_dbg(dev, "%s (active)\n", __func__); break; case VIDEOBUF_QUEUED: dev_dbg(dev, "%s (queued)\n", __func__); break; case VIDEOBUF_PREPARED: dev_dbg(dev, "%s (prepared)\n", __func__); break; default: dev_dbg(dev, "%s (unknown)\n", __func__); break; } #endif free_buffer(vq, buf); } static void pxa_camera_wakeup(struct pxa_camera_dev *pcdev, struct videobuf_buffer *vb, struct pxa_buffer *buf) { int i; /* _init is used to debug races, see comment in pxa_camera_reqbufs() */ list_del_init(&vb->queue); vb->state = VIDEOBUF_DONE; do_gettimeofday(&vb->ts); vb->field_count++; wake_up(&vb->done); dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s dequeud buffer (vb=0x%p)\n", __func__, vb); if (list_empty(&pcdev->capture)) { pxa_camera_stop_capture(pcdev); for (i = 0; i < pcdev->channels; i++) pcdev->sg_tail[i] = NULL; return; } pcdev->active = list_entry(pcdev->capture.next, struct pxa_buffer, vb.queue); } /** * pxa_camera_check_link_miss - check missed DMA linking * @pcdev: camera device * * The DMA chaining is done with DMA running. 
This means a tiny temporal window * remains, where a buffer is queued on the chain, while the chain is already * stopped. This means the tailed buffer would never be transferred by DMA. * This function restarts the capture for this corner case, where : * - DADR() == DADDR_STOP * - a videobuffer is queued on the pcdev->capture list * * Please check the "DMA hot chaining timeslice issue" in * Documentation/video4linux/pxa_camera.txt * * Context: should only be called within the dma irq handler */ static void pxa_camera_check_link_miss(struct pxa_camera_dev *pcdev) { int i, is_dma_stopped = 1; for (i = 0; i < pcdev->channels; i++) if (DDADR(pcdev->dma_chans[i]) != DDADR_STOP) is_dma_stopped = 0; dev_dbg(pcdev->soc_host.v4l2_dev.dev, "%s : top queued buffer=%p, dma_stopped=%d\n", __func__, pcdev->active, is_dma_stopped); if (pcdev->active && is_dma_stopped) pxa_camera_start_capture(pcdev); } static void pxa_camera_dma_irq(int channel, struct pxa_camera_dev *pcdev, enum pxa_camera_active_dma act_dma) { struct device *dev = pcdev->soc_host.v4l2_dev.dev; struct pxa_buffer *buf; unsigned long flags; u32 status, camera_status, overrun; struct videobuf_buffer *vb; spin_lock_irqsave(&pcdev->lock, flags); status = DCSR(channel); DCSR(channel) = status; camera_status = __raw_readl(pcdev->base + CISR); overrun = CISR_IFO_0; if (pcdev->channels == 3) overrun |= CISR_IFO_1 | CISR_IFO_2; if (status & DCSR_BUSERR) { dev_err(dev, "DMA Bus Error IRQ!\n"); goto out; } if (!(status & (DCSR_ENDINTR | DCSR_STARTINTR))) { dev_err(dev, "Unknown DMA IRQ source, status: 0x%08x\n", status); goto out; } /* * pcdev->active should not be NULL in DMA irq handler. * * But there is one corner case : if capture was stopped due to an * overrun of channel 1, and at that same channel 2 was completed. * * When handling the overrun in DMA irq for channel 1, we'll stop the * capture and restart it (and thus set pcdev->active to NULL). But the * DMA irq handler will already be pending for channel 2. 
So on entering * the DMA irq handler for channel 2 there will be no active buffer, yet * that is normal. */ if (!pcdev->active) goto out; vb = &pcdev->active->vb; buf = container_of(vb, struct pxa_buffer, vb); WARN_ON(buf->inwork || list_empty(&vb->queue)); dev_dbg(dev, "%s channel=%d %s%s(vb=0x%p) dma.desc=%x\n", __func__, channel, status & DCSR_STARTINTR ? "SOF " : "", status & DCSR_ENDINTR ? "EOF " : "", vb, DDADR(channel)); if (status & DCSR_ENDINTR) { /* * It's normal if the last frame creates an overrun, as there * are no more DMA descriptors to fetch from QCI fifos */ if (camera_status & overrun && !list_is_last(pcdev->capture.next, &pcdev->capture)) { dev_dbg(dev, "FIFO overrun! CISR: %x\n", camera_status); pxa_camera_stop_capture(pcdev); pxa_camera_start_capture(pcdev); goto out; } buf->active_dma &= ~act_dma; if (!buf->active_dma) { pxa_camera_wakeup(pcdev, vb, buf); pxa_camera_check_link_miss(pcdev); } } out: spin_unlock_irqrestore(&pcdev->lock, flags); } static void pxa_camera_dma_irq_y(int channel, void *data) { struct pxa_camera_dev *pcdev = data; pxa_camera_dma_irq(channel, pcdev, DMA_Y); } static void pxa_camera_dma_irq_u(int channel, void *data) { struct pxa_camera_dev *pcdev = data; pxa_camera_dma_irq(channel, pcdev, DMA_U); } static void pxa_camera_dma_irq_v(int channel, void *data) { struct pxa_camera_dev *pcdev = data; pxa_camera_dma_irq(channel, pcdev, DMA_V); } static struct videobuf_queue_ops pxa_videobuf_ops = { .buf_setup = pxa_videobuf_setup, .buf_prepare = pxa_videobuf_prepare, .buf_queue = pxa_videobuf_queue, .buf_release = pxa_videobuf_release, }; static void pxa_camera_init_videobuf(struct videobuf_queue *q, struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct pxa_camera_dev *pcdev = ici->priv; /* * We must pass NULL as dev pointer, then all pci_* dma operations * transform to normal dma_* ones. 
*/ videobuf_queue_sg_init(q, &pxa_videobuf_ops, NULL, &pcdev->lock, V4L2_BUF_TYPE_VIDEO_CAPTURE, V4L2_FIELD_NONE, sizeof(struct pxa_buffer), icd, &icd->video_lock); } static u32 mclk_get_divisor(struct platform_device *pdev, struct pxa_camera_dev *pcdev) { unsigned long mclk = pcdev->mclk; struct device *dev = &pdev->dev; u32 div; unsigned long lcdclk; lcdclk = clk_get_rate(pcdev->clk); pcdev->ciclk = lcdclk; /* mclk <= ciclk / 4 (27.4.2) */ if (mclk > lcdclk / 4) { mclk = lcdclk / 4; dev_warn(dev, "Limiting master clock to %lu\n", mclk); } /* We verify mclk != 0, so if anyone breaks it, here comes their Oops */ div = (lcdclk + 2 * mclk - 1) / (2 * mclk) - 1; /* If we're not supplying MCLK, leave it at 0 */ if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN) pcdev->mclk = lcdclk / (2 * (div + 1)); dev_dbg(dev, "LCD clock %luHz, target freq %luHz, divisor %u\n", lcdclk, mclk, div); return div; } static void recalculate_fifo_timeout(struct pxa_camera_dev *pcdev, unsigned long pclk) { /* We want a timeout > 1 pixel time, not ">=" */ u32 ciclk_per_pixel = pcdev->ciclk / pclk + 1; __raw_writel(ciclk_per_pixel, pcdev->base + CITOR); } static void pxa_camera_activate(struct pxa_camera_dev *pcdev) { u32 cicr4 = 0; /* disable all interrupts */ __raw_writel(0x3ff, pcdev->base + CICR0); if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN) cicr4 |= CICR4_PCLK_EN; if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN) cicr4 |= CICR4_MCLK_EN; if (pcdev->platform_flags & PXA_CAMERA_PCP) cicr4 |= CICR4_PCP; if (pcdev->platform_flags & PXA_CAMERA_HSP) cicr4 |= CICR4_HSP; if (pcdev->platform_flags & PXA_CAMERA_VSP) cicr4 |= CICR4_VSP; __raw_writel(pcdev->mclk_divisor | cicr4, pcdev->base + CICR4); if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN) /* Initialise the timeout under the assumption pclk = mclk */ recalculate_fifo_timeout(pcdev, pcdev->mclk); else /* "Safe default" - 13MHz */ recalculate_fifo_timeout(pcdev, 13000000); clk_enable(pcdev->clk); } static void pxa_camera_deactivate(struct 
pxa_camera_dev *pcdev) { clk_disable(pcdev->clk); } static irqreturn_t pxa_camera_irq(int irq, void *data) { struct pxa_camera_dev *pcdev = data; unsigned long status, cifr, cicr0; struct pxa_buffer *buf; struct videobuf_buffer *vb; status = __raw_readl(pcdev->base + CISR); dev_dbg(pcdev->soc_host.v4l2_dev.dev, "Camera interrupt status 0x%lx\n", status); if (!status) return IRQ_NONE; __raw_writel(status, pcdev->base + CISR); if (status & CISR_EOF) { /* Reset the FIFOs */ cifr = __raw_readl(pcdev->base + CIFR) | CIFR_RESET_F; __raw_writel(cifr, pcdev->base + CIFR); pcdev->active = list_first_entry(&pcdev->capture, struct pxa_buffer, vb.queue); vb = &pcdev->active->vb; buf = container_of(vb, struct pxa_buffer, vb); pxa_videobuf_set_actdma(pcdev, buf); pxa_dma_start_channels(pcdev); cicr0 = __raw_readl(pcdev->base + CICR0) | CICR0_EOFM; __raw_writel(cicr0, pcdev->base + CICR0); } return IRQ_HANDLED; } /* * The following two functions absolutely depend on the fact, that * there can be only one camera on PXA quick capture interface * Called with .video_lock held */ static int pxa_camera_add_device(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct pxa_camera_dev *pcdev = ici->priv; if (pcdev->icd) return -EBUSY; pxa_camera_activate(pcdev); pcdev->icd = icd; dev_info(icd->dev.parent, "PXA Camera driver attached to camera %d\n", icd->devnum); return 0; } /* Called with .video_lock held */ static void pxa_camera_remove_device(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct pxa_camera_dev *pcdev = ici->priv; BUG_ON(icd != pcdev->icd); dev_info(icd->dev.parent, "PXA Camera driver detached from camera %d\n", icd->devnum); /* disable capture, disable interrupts */ __raw_writel(0x3ff, pcdev->base + CICR0); /* Stop DMA engine */ DCSR(pcdev->dma_chans[0]) = 0; DCSR(pcdev->dma_chans[1]) = 0; DCSR(pcdev->dma_chans[2]) = 0; pxa_camera_deactivate(pcdev); pcdev->icd = 
NULL; } static int test_platform_param(struct pxa_camera_dev *pcdev, unsigned char buswidth, unsigned long *flags) { /* * Platform specified synchronization and pixel clock polarities are * only a recommendation and are only used during probing. The PXA270 * quick capture interface supports both. */ *flags = (pcdev->platform_flags & PXA_CAMERA_MASTER ? SOCAM_MASTER : SOCAM_SLAVE) | SOCAM_HSYNC_ACTIVE_HIGH | SOCAM_HSYNC_ACTIVE_LOW | SOCAM_VSYNC_ACTIVE_HIGH | SOCAM_VSYNC_ACTIVE_LOW | SOCAM_DATA_ACTIVE_HIGH | SOCAM_PCLK_SAMPLE_RISING | SOCAM_PCLK_SAMPLE_FALLING; /* If requested data width is supported by the platform, use it */ switch (buswidth) { case 10: if (!(pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_10)) return -EINVAL; *flags |= SOCAM_DATAWIDTH_10; break; case 9: if (!(pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_9)) return -EINVAL; *flags |= SOCAM_DATAWIDTH_9; break; case 8: if (!(pcdev->platform_flags & PXA_CAMERA_DATAWIDTH_8)) return -EINVAL; *flags |= SOCAM_DATAWIDTH_8; break; default: return -EINVAL; } return 0; } static void pxa_camera_setup_cicr(struct soc_camera_device *icd, unsigned long flags, __u32 pixfmt) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct pxa_camera_dev *pcdev = ici->priv; struct v4l2_subdev *sd = soc_camera_to_subdev(icd); unsigned long dw, bpp; u32 cicr0, cicr1, cicr2, cicr3, cicr4 = 0, y_skip_top; int ret = v4l2_subdev_call(sd, sensor, g_skip_top_lines, &y_skip_top); if (ret < 0) y_skip_top = 0; /* * Datawidth is now guaranteed to be equal to one of the three values. * We fix bit-per-pixel equal to data-width... 
*/ switch (flags & SOCAM_DATAWIDTH_MASK) { case SOCAM_DATAWIDTH_10: dw = 4; bpp = 0x40; break; case SOCAM_DATAWIDTH_9: dw = 3; bpp = 0x20; break; default: /* * Actually it can only be 8 now, * default is just to silence compiler warnings */ case SOCAM_DATAWIDTH_8: dw = 2; bpp = 0; } if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN) cicr4 |= CICR4_PCLK_EN; if (pcdev->platform_flags & PXA_CAMERA_MCLK_EN) cicr4 |= CICR4_MCLK_EN; if (flags & SOCAM_PCLK_SAMPLE_FALLING) cicr4 |= CICR4_PCP; if (flags & SOCAM_HSYNC_ACTIVE_LOW) cicr4 |= CICR4_HSP; if (flags & SOCAM_VSYNC_ACTIVE_LOW) cicr4 |= CICR4_VSP; cicr0 = __raw_readl(pcdev->base + CICR0); if (cicr0 & CICR0_ENB) __raw_writel(cicr0 & ~CICR0_ENB, pcdev->base + CICR0); cicr1 = CICR1_PPL_VAL(icd->user_width - 1) | bpp | dw; switch (pixfmt) { case V4L2_PIX_FMT_YUV422P: pcdev->channels = 3; cicr1 |= CICR1_YCBCR_F; /* * Normally, pxa bus wants as input UYVY format. We allow all * reorderings of the YUV422 format, as no processing is done, * and the YUV stream is just passed through without any * transformation. Note that UYVY is the only format that * should be used if pxa framebuffer Overlay2 is used. */ case V4L2_PIX_FMT_UYVY: case V4L2_PIX_FMT_VYUY: case V4L2_PIX_FMT_YUYV: case V4L2_PIX_FMT_YVYU: cicr1 |= CICR1_COLOR_SP_VAL(2); break; case V4L2_PIX_FMT_RGB555: cicr1 |= CICR1_RGB_BPP_VAL(1) | CICR1_RGBT_CONV_VAL(2) | CICR1_TBIT | CICR1_COLOR_SP_VAL(1); break; case V4L2_PIX_FMT_RGB565: cicr1 |= CICR1_COLOR_SP_VAL(1) | CICR1_RGB_BPP_VAL(2); break; } cicr2 = 0; cicr3 = CICR3_LPF_VAL(icd->user_height - 1) | CICR3_BFW_VAL(min((u32)255, y_skip_top)); cicr4 |= pcdev->mclk_divisor; __raw_writel(cicr1, pcdev->base + CICR1); __raw_writel(cicr2, pcdev->base + CICR2); __raw_writel(cicr3, pcdev->base + CICR3); __raw_writel(cicr4, pcdev->base + CICR4); /* CIF interrupts are not used, only DMA */ cicr0 = (cicr0 & CICR0_ENB) | (pcdev->platform_flags & PXA_CAMERA_MASTER ? 
CICR0_SIM_MP : (CICR0_SL_CAP_EN | CICR0_SIM_SP)); cicr0 |= CICR0_DMAEN | CICR0_IRQ_MASK; __raw_writel(cicr0, pcdev->base + CICR0); } static int pxa_camera_set_bus_param(struct soc_camera_device *icd, __u32 pixfmt) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct pxa_camera_dev *pcdev = ici->priv; unsigned long bus_flags, camera_flags, common_flags; int ret; struct pxa_cam *cam = icd->host_priv; ret = test_platform_param(pcdev, icd->current_fmt->host_fmt->bits_per_sample, &bus_flags); if (ret < 0) return ret; camera_flags = icd->ops->query_bus_param(icd); common_flags = soc_camera_bus_param_compatible(camera_flags, bus_flags); if (!common_flags) return -EINVAL; pcdev->channels = 1; /* Make choises, based on platform preferences */ if ((common_flags & SOCAM_HSYNC_ACTIVE_HIGH) && (common_flags & SOCAM_HSYNC_ACTIVE_LOW)) { if (pcdev->platform_flags & PXA_CAMERA_HSP) common_flags &= ~SOCAM_HSYNC_ACTIVE_HIGH; else common_flags &= ~SOCAM_HSYNC_ACTIVE_LOW; } if ((common_flags & SOCAM_VSYNC_ACTIVE_HIGH) && (common_flags & SOCAM_VSYNC_ACTIVE_LOW)) { if (pcdev->platform_flags & PXA_CAMERA_VSP) common_flags &= ~SOCAM_VSYNC_ACTIVE_HIGH; else common_flags &= ~SOCAM_VSYNC_ACTIVE_LOW; } if ((common_flags & SOCAM_PCLK_SAMPLE_RISING) && (common_flags & SOCAM_PCLK_SAMPLE_FALLING)) { if (pcdev->platform_flags & PXA_CAMERA_PCP) common_flags &= ~SOCAM_PCLK_SAMPLE_RISING; else common_flags &= ~SOCAM_PCLK_SAMPLE_FALLING; } cam->flags = common_flags; ret = icd->ops->set_bus_param(icd, common_flags); if (ret < 0) return ret; pxa_camera_setup_cicr(icd, common_flags, pixfmt); return 0; } static int pxa_camera_try_bus_param(struct soc_camera_device *icd, unsigned char buswidth) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct pxa_camera_dev *pcdev = ici->priv; unsigned long bus_flags, camera_flags; int ret = test_platform_param(pcdev, buswidth, &bus_flags); if (ret < 0) return ret; camera_flags = icd->ops->query_bus_param(icd); return 
soc_camera_bus_param_compatible(camera_flags, bus_flags) ? 0 : -EINVAL; } static const struct soc_mbus_pixelfmt pxa_camera_formats[] = { { .fourcc = V4L2_PIX_FMT_YUV422P, .name = "Planar YUV422 16 bit", .bits_per_sample = 8, .packing = SOC_MBUS_PACKING_2X8_PADHI, .order = SOC_MBUS_ORDER_LE, }, }; /* This will be corrected as we get more formats */ static bool pxa_camera_packing_supported(const struct soc_mbus_pixelfmt *fmt) { return fmt->packing == SOC_MBUS_PACKING_NONE || (fmt->bits_per_sample == 8 && fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) || (fmt->bits_per_sample > 8 && fmt->packing == SOC_MBUS_PACKING_EXTEND16); } static int pxa_camera_get_formats(struct soc_camera_device *icd, unsigned int idx, struct soc_camera_format_xlate *xlate) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct device *dev = icd->dev.parent; int formats = 0, ret; struct pxa_cam *cam; enum v4l2_mbus_pixelcode code; const struct soc_mbus_pixelfmt *fmt; ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code); if (ret < 0) /* No more formats */ return 0; fmt = soc_mbus_get_fmtdesc(code); if (!fmt) { dev_err(dev, "Invalid format code #%u: %d\n", idx, code); return 0; } /* This also checks support for the requested bits-per-sample */ ret = pxa_camera_try_bus_param(icd, fmt->bits_per_sample); if (ret < 0) return 0; if (!icd->host_priv) { cam = kzalloc(sizeof(*cam), GFP_KERNEL); if (!cam) return -ENOMEM; icd->host_priv = cam; } else { cam = icd->host_priv; } switch (code) { case V4L2_MBUS_FMT_UYVY8_2X8: formats++; if (xlate) { xlate->host_fmt = &pxa_camera_formats[0]; xlate->code = code; xlate++; dev_dbg(dev, "Providing format %s using code %d\n", pxa_camera_formats[0].name, code); } case V4L2_MBUS_FMT_VYUY8_2X8: case V4L2_MBUS_FMT_YUYV8_2X8: case V4L2_MBUS_FMT_YVYU8_2X8: case V4L2_MBUS_FMT_RGB565_2X8_LE: case V4L2_MBUS_FMT_RGB555_2X8_PADHI_LE: if (xlate) dev_dbg(dev, "Providing format %s packed\n", fmt->name); break; default: if (!pxa_camera_packing_supported(fmt)) return 0; 
if (xlate) dev_dbg(dev, "Providing format %s in pass-through mode\n", fmt->name); } /* Generic pass-through */ formats++; if (xlate) { xlate->host_fmt = fmt; xlate->code = code; xlate++; } return formats; } static void pxa_camera_put_formats(struct soc_camera_device *icd) { kfree(icd->host_priv); icd->host_priv = NULL; } static int pxa_camera_check_frame(u32 width, u32 height) { /* limit to pxa hardware capabilities */ return height < 32 || height > 2048 || width < 48 || width > 2048 || (width & 0x01); } static int pxa_camera_set_crop(struct soc_camera_device *icd, struct v4l2_crop *a) { struct v4l2_rect *rect = &a->c; struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct pxa_camera_dev *pcdev = ici->priv; struct device *dev = icd->dev.parent; struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct soc_camera_sense sense = { .master_clock = pcdev->mclk, .pixel_clock_max = pcdev->ciclk / 4, }; struct v4l2_mbus_framefmt mf; struct pxa_cam *cam = icd->host_priv; u32 fourcc = icd->current_fmt->host_fmt->fourcc; int ret; /* If PCLK is used to latch data from the sensor, check sense */ if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN) icd->sense = &sense; ret = v4l2_subdev_call(sd, video, s_crop, a); icd->sense = NULL; if (ret < 0) { dev_warn(dev, "Failed to crop to %ux%u@%u:%u\n", rect->width, rect->height, rect->left, rect->top); return ret; } ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf); if (ret < 0) return ret; if (pxa_camera_check_frame(mf.width, mf.height)) { /* * Camera cropping produced a frame beyond our capabilities. * FIXME: just extract a subframe, that we can process. */ v4l_bound_align_image(&mf.width, 48, 2048, 1, &mf.height, 32, 2048, 0, fourcc == V4L2_PIX_FMT_YUV422P ? 4 : 0); ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf); if (ret < 0) return ret; if (pxa_camera_check_frame(mf.width, mf.height)) { dev_warn(icd->dev.parent, "Inconsistent state. 
Use S_FMT to repair\n"); return -EINVAL; } } if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) { if (sense.pixel_clock > sense.pixel_clock_max) { dev_err(dev, "pixel clock %lu set by the camera too high!", sense.pixel_clock); return -EIO; } recalculate_fifo_timeout(pcdev, sense.pixel_clock); } icd->user_width = mf.width; icd->user_height = mf.height; pxa_camera_setup_cicr(icd, cam->flags, fourcc); return ret; } static int pxa_camera_set_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct pxa_camera_dev *pcdev = ici->priv; struct device *dev = icd->dev.parent; struct v4l2_subdev *sd = soc_camera_to_subdev(icd); const struct soc_camera_format_xlate *xlate = NULL; struct soc_camera_sense sense = { .master_clock = pcdev->mclk, .pixel_clock_max = pcdev->ciclk / 4, }; struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_mbus_framefmt mf; int ret; xlate = soc_camera_xlate_by_fourcc(icd, pix->pixelformat); if (!xlate) { dev_warn(dev, "Format %x not found\n", pix->pixelformat); return -EINVAL; } /* If PCLK is used to latch data from the sensor, check sense */ if (pcdev->platform_flags & PXA_CAMERA_PCLK_EN) /* The caller holds a mutex. 
*/ icd->sense = &sense; mf.width = pix->width; mf.height = pix->height; mf.field = pix->field; mf.colorspace = pix->colorspace; mf.code = xlate->code; ret = v4l2_subdev_call(sd, video, s_mbus_fmt, &mf); if (mf.code != xlate->code) return -EINVAL; icd->sense = NULL; if (ret < 0) { dev_warn(dev, "Failed to configure for format %x\n", pix->pixelformat); } else if (pxa_camera_check_frame(mf.width, mf.height)) { dev_warn(dev, "Camera driver produced an unsupported frame %dx%d\n", mf.width, mf.height); ret = -EINVAL; } else if (sense.flags & SOCAM_SENSE_PCLK_CHANGED) { if (sense.pixel_clock > sense.pixel_clock_max) { dev_err(dev, "pixel clock %lu set by the camera too high!", sense.pixel_clock); return -EIO; } recalculate_fifo_timeout(pcdev, sense.pixel_clock); } if (ret < 0) return ret; pix->width = mf.width; pix->height = mf.height; pix->field = mf.field; pix->colorspace = mf.colorspace; icd->current_fmt = xlate; return ret; } static int pxa_camera_try_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); const struct soc_camera_format_xlate *xlate; struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_mbus_framefmt mf; __u32 pixfmt = pix->pixelformat; int ret; xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); if (!xlate) { dev_warn(icd->dev.parent, "Format %x not found\n", pixfmt); return -EINVAL; } /* * Limit to pxa hardware capabilities. YUV422P planar format requires * images size to be a multiple of 16 bytes. If not, zeros will be * inserted between Y and U planes, and U and V planes, which violates * the YUV422P standard. */ v4l_bound_align_image(&pix->width, 48, 2048, 1, &pix->height, 32, 2048, 0, pixfmt == V4L2_PIX_FMT_YUV422P ? 
4 : 0); pix->bytesperline = soc_mbus_bytes_per_line(pix->width, xlate->host_fmt); if (pix->bytesperline < 0) return pix->bytesperline; pix->sizeimage = pix->height * pix->bytesperline; /* limit to sensor capabilities */ mf.width = pix->width; mf.height = pix->height; mf.field = pix->field; mf.colorspace = pix->colorspace; mf.code = xlate->code; ret = v4l2_subdev_call(sd, video, try_mbus_fmt, &mf); if (ret < 0) return ret; pix->width = mf.width; pix->height = mf.height; pix->colorspace = mf.colorspace; switch (mf.field) { case V4L2_FIELD_ANY: case V4L2_FIELD_NONE: pix->field = V4L2_FIELD_NONE; break; default: /* TODO: support interlaced at least in pass-through mode */ dev_err(icd->dev.parent, "Field type %d unsupported.\n", mf.field); return -EINVAL; } return ret; } static int pxa_camera_reqbufs(struct soc_camera_device *icd, struct v4l2_requestbuffers *p) { int i; /* * This is for locking debugging only. I removed spinlocks and now I * check whether .prepare is ever called on a linked buffer, or whether * a dma IRQ can occur for an in-work or unlinked buffer. 
Until now * it hadn't triggered */ for (i = 0; i < p->count; i++) { struct pxa_buffer *buf = container_of(icd->vb_vidq.bufs[i], struct pxa_buffer, vb); buf->inwork = 0; INIT_LIST_HEAD(&buf->vb.queue); } return 0; } static unsigned int pxa_camera_poll(struct file *file, poll_table *pt) { struct soc_camera_device *icd = file->private_data; struct pxa_buffer *buf; buf = list_entry(icd->vb_vidq.stream.next, struct pxa_buffer, vb.stream); poll_wait(file, &buf->vb.done, pt); if (buf->vb.state == VIDEOBUF_DONE || buf->vb.state == VIDEOBUF_ERROR) return POLLIN|POLLRDNORM; return 0; } static int pxa_camera_querycap(struct soc_camera_host *ici, struct v4l2_capability *cap) { /* cap->name is set by the firendly caller:-> */ strlcpy(cap->card, pxa_cam_driver_description, sizeof(cap->card)); cap->version = PXA_CAM_VERSION_CODE; cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; return 0; } static int pxa_camera_suspend(struct soc_camera_device *icd, pm_message_t state) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct pxa_camera_dev *pcdev = ici->priv; int i = 0, ret = 0; pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR0); pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR1); pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR2); pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR3); pcdev->save_cicr[i++] = __raw_readl(pcdev->base + CICR4); if ((pcdev->icd) && (pcdev->icd->ops->suspend)) ret = pcdev->icd->ops->suspend(pcdev->icd, state); return ret; } static int pxa_camera_resume(struct soc_camera_device *icd) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct pxa_camera_dev *pcdev = ici->priv; int i = 0, ret = 0; DRCMR(68) = pcdev->dma_chans[0] | DRCMR_MAPVLD; DRCMR(69) = pcdev->dma_chans[1] | DRCMR_MAPVLD; DRCMR(70) = pcdev->dma_chans[2] | DRCMR_MAPVLD; __raw_writel(pcdev->save_cicr[i++] & ~CICR0_ENB, pcdev->base + CICR0); __raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR1); 
__raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR2); __raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR3); __raw_writel(pcdev->save_cicr[i++], pcdev->base + CICR4); if ((pcdev->icd) && (pcdev->icd->ops->resume)) ret = pcdev->icd->ops->resume(pcdev->icd); /* Restart frame capture if active buffer exists */ if (!ret && pcdev->active) pxa_camera_start_capture(pcdev); return ret; } static struct soc_camera_host_ops pxa_soc_camera_host_ops = { .owner = THIS_MODULE, .add = pxa_camera_add_device, .remove = pxa_camera_remove_device, .suspend = pxa_camera_suspend, .resume = pxa_camera_resume, .set_crop = pxa_camera_set_crop, .get_formats = pxa_camera_get_formats, .put_formats = pxa_camera_put_formats, .set_fmt = pxa_camera_set_fmt, .try_fmt = pxa_camera_try_fmt, .init_videobuf = pxa_camera_init_videobuf, .reqbufs = pxa_camera_reqbufs, .poll = pxa_camera_poll, .querycap = pxa_camera_querycap, .set_bus_param = pxa_camera_set_bus_param, }; static int __devinit pxa_camera_probe(struct platform_device *pdev) { struct pxa_camera_dev *pcdev; struct resource *res; void __iomem *base; int irq; int err = 0; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!res || irq < 0) { err = -ENODEV; goto exit; } pcdev = kzalloc(sizeof(*pcdev), GFP_KERNEL); if (!pcdev) { dev_err(&pdev->dev, "Could not allocate pcdev\n"); err = -ENOMEM; goto exit; } pcdev->clk = clk_get(&pdev->dev, NULL); if (IS_ERR(pcdev->clk)) { err = PTR_ERR(pcdev->clk); goto exit_kfree; } pcdev->res = res; pcdev->pdata = pdev->dev.platform_data; pcdev->platform_flags = pcdev->pdata->flags; if (!(pcdev->platform_flags & (PXA_CAMERA_DATAWIDTH_8 | PXA_CAMERA_DATAWIDTH_9 | PXA_CAMERA_DATAWIDTH_10))) { /* * Platform hasn't set available data widths. This is bad. * Warn and use a default. */ dev_warn(&pdev->dev, "WARNING! 
Platform hasn't set available " "data widths, using default 10 bit\n"); pcdev->platform_flags |= PXA_CAMERA_DATAWIDTH_10; } pcdev->mclk = pcdev->pdata->mclk_10khz * 10000; if (!pcdev->mclk) { dev_warn(&pdev->dev, "mclk == 0! Please, fix your platform data. " "Using default 20MHz\n"); pcdev->mclk = 20000000; } pcdev->mclk_divisor = mclk_get_divisor(pdev, pcdev); INIT_LIST_HEAD(&pcdev->capture); spin_lock_init(&pcdev->lock); /* * Request the regions. */ if (!request_mem_region(res->start, resource_size(res), PXA_CAM_DRV_NAME)) { err = -EBUSY; goto exit_clk; } base = ioremap(res->start, resource_size(res)); if (!base) { err = -ENOMEM; goto exit_release; } pcdev->irq = irq; pcdev->base = base; /* request dma */ err = pxa_request_dma("CI_Y", DMA_PRIO_HIGH, pxa_camera_dma_irq_y, pcdev); if (err < 0) { dev_err(&pdev->dev, "Can't request DMA for Y\n"); goto exit_iounmap; } pcdev->dma_chans[0] = err; dev_dbg(&pdev->dev, "got DMA channel %d\n", pcdev->dma_chans[0]); err = pxa_request_dma("CI_U", DMA_PRIO_HIGH, pxa_camera_dma_irq_u, pcdev); if (err < 0) { dev_err(&pdev->dev, "Can't request DMA for U\n"); goto exit_free_dma_y; } pcdev->dma_chans[1] = err; dev_dbg(&pdev->dev, "got DMA channel (U) %d\n", pcdev->dma_chans[1]); err = pxa_request_dma("CI_V", DMA_PRIO_HIGH, pxa_camera_dma_irq_v, pcdev); if (err < 0) { dev_err(&pdev->dev, "Can't request DMA for V\n"); goto exit_free_dma_u; } pcdev->dma_chans[2] = err; dev_dbg(&pdev->dev, "got DMA channel (V) %d\n", pcdev->dma_chans[2]); DRCMR(68) = pcdev->dma_chans[0] | DRCMR_MAPVLD; DRCMR(69) = pcdev->dma_chans[1] | DRCMR_MAPVLD; DRCMR(70) = pcdev->dma_chans[2] | DRCMR_MAPVLD; /* request irq */ err = request_irq(pcdev->irq, pxa_camera_irq, 0, PXA_CAM_DRV_NAME, pcdev); if (err) { dev_err(&pdev->dev, "Camera interrupt register failed \n"); goto exit_free_dma; } pcdev->soc_host.drv_name = PXA_CAM_DRV_NAME; pcdev->soc_host.ops = &pxa_soc_camera_host_ops; pcdev->soc_host.priv = pcdev; pcdev->soc_host.v4l2_dev.dev = &pdev->dev; 
pcdev->soc_host.nr = pdev->id; err = soc_camera_host_register(&pcdev->soc_host); if (err) goto exit_free_irq; return 0; exit_free_irq: free_irq(pcdev->irq, pcdev); exit_free_dma: pxa_free_dma(pcdev->dma_chans[2]); exit_free_dma_u: pxa_free_dma(pcdev->dma_chans[1]); exit_free_dma_y: pxa_free_dma(pcdev->dma_chans[0]); exit_iounmap: iounmap(base); exit_release: release_mem_region(res->start, resource_size(res)); exit_clk: clk_put(pcdev->clk); exit_kfree: kfree(pcdev); exit: return err; } static int __devexit pxa_camera_remove(struct platform_device *pdev) { struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev); struct pxa_camera_dev *pcdev = container_of(soc_host, struct pxa_camera_dev, soc_host); struct resource *res; clk_put(pcdev->clk); pxa_free_dma(pcdev->dma_chans[0]); pxa_free_dma(pcdev->dma_chans[1]); pxa_free_dma(pcdev->dma_chans[2]); free_irq(pcdev->irq, pcdev); soc_camera_host_unregister(soc_host); iounmap(pcdev->base); res = pcdev->res; release_mem_region(res->start, resource_size(res)); kfree(pcdev); dev_info(&pdev->dev, "PXA Camera driver unloaded\n"); return 0; } static struct platform_driver pxa_camera_driver = { .driver = { .name = PXA_CAM_DRV_NAME, }, .probe = pxa_camera_probe, .remove = __devexit_p(pxa_camera_remove), }; static int __init pxa_camera_init(void) { return platform_driver_register(&pxa_camera_driver); } static void __exit pxa_camera_exit(void) { platform_driver_unregister(&pxa_camera_driver); } module_init(pxa_camera_init); module_exit(pxa_camera_exit); MODULE_DESCRIPTION("PXA27x SoC Camera Host driver"); MODULE_AUTHOR("Guennadi Liakhovetski <kernel@pengutronix.de>"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:" PXA_CAM_DRV_NAME);
gpl-2.0
AICP/kernel_samsung_tuna
drivers/media/video/usbvision/usbvision-video.c
2385
48005
/* * USB USBVISION Video device driver 0.9.10 * * * * Copyright (c) 1999-2005 Joerg Heckenbach <joerg@heckenbach-aw.de> * * This module is part of usbvision driver project. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * * Let's call the version 0.... until compression decoding is completely * implemented. * * This driver is written by Jose Ignacio Gijon and Joerg Heckenbach. * It was based on USB CPiA driver written by Peter Pregler, * Scott J. Bertin and Johannes Erdfelt * Ideas are taken from bttv driver by Ralph Metzler, Marcus Metzler & * Gerd Knorr and zoran 36120/36125 driver by Pauline Middelink * Updates to driver completed by Dwaine P. Garden * * * TODO: * - use submit_urb for all setup packets * - Fix memory settings for nt1004. It is 4 times as big as the * nt1003 memory. * - Add audio on endpoint 3 for nt1004 chip. * Seems impossible, needs a codec interface. Which one? * - Clean up the driver. * - optimization for performance. * - Add Videotext capability (VBI). Working on it..... 
* - Check audio for other devices * */ #include <linux/version.h> #include <linux/kernel.h> #include <linux/list.h> #include <linux/timer.h> #include <linux/slab.h> #include <linux/mm.h> #include <linux/highmem.h> #include <linux/vmalloc.h> #include <linux/module.h> #include <linux/init.h> #include <linux/spinlock.h> #include <linux/io.h> #include <linux/videodev2.h> #include <linux/i2c.h> #include <media/saa7115.h> #include <media/v4l2-common.h> #include <media/v4l2-ioctl.h> #include <media/tuner.h> #include <linux/workqueue.h> #include "usbvision.h" #include "usbvision-cards.h" #define DRIVER_AUTHOR \ "Joerg Heckenbach <joerg@heckenbach-aw.de>, " \ "Dwaine Garden <DwaineGarden@rogers.com>" #define DRIVER_NAME "usbvision" #define DRIVER_ALIAS "USBVision" #define DRIVER_DESC "USBVision USB Video Device Driver for Linux" #define DRIVER_LICENSE "GPL" #define USBVISION_DRIVER_VERSION_MAJOR 0 #define USBVISION_DRIVER_VERSION_MINOR 9 #define USBVISION_DRIVER_VERSION_PATCHLEVEL 10 #define USBVISION_DRIVER_VERSION KERNEL_VERSION(USBVISION_DRIVER_VERSION_MAJOR,\ USBVISION_DRIVER_VERSION_MINOR,\ USBVISION_DRIVER_VERSION_PATCHLEVEL) #define USBVISION_VERSION_STRING __stringify(USBVISION_DRIVER_VERSION_MAJOR) \ "." __stringify(USBVISION_DRIVER_VERSION_MINOR) \ "." __stringify(USBVISION_DRIVER_VERSION_PATCHLEVEL) #define ENABLE_HEXDUMP 0 /* Enable if you need it */ #ifdef USBVISION_DEBUG #define PDEBUG(level, fmt, args...) { \ if (video_debug & (level)) \ printk(KERN_INFO KBUILD_MODNAME ":[%s:%d] " fmt, \ __func__, __LINE__ , ## args); \ } #else #define PDEBUG(level, fmt, args...) 
do {} while (0) #endif #define DBG_IO (1 << 1) #define DBG_PROBE (1 << 2) #define DBG_MMAP (1 << 3) /* String operations */ #define rmspace(str) while (*str == ' ') str++; #define goto2next(str) while (*str != ' ') str++; while (*str == ' ') str++; /* sequential number of usbvision device */ static int usbvision_nr; static struct usbvision_v4l2_format_st usbvision_v4l2_format[] = { { 1, 1, 8, V4L2_PIX_FMT_GREY , "GREY" }, { 1, 2, 16, V4L2_PIX_FMT_RGB565 , "RGB565" }, { 1, 3, 24, V4L2_PIX_FMT_RGB24 , "RGB24" }, { 1, 4, 32, V4L2_PIX_FMT_RGB32 , "RGB32" }, { 1, 2, 16, V4L2_PIX_FMT_RGB555 , "RGB555" }, { 1, 2, 16, V4L2_PIX_FMT_YUYV , "YUV422" }, { 1, 2, 12, V4L2_PIX_FMT_YVU420 , "YUV420P" }, /* 1.5 ! */ { 1, 2, 16, V4L2_PIX_FMT_YUV422P , "YUV422P" } }; /* Function prototypes */ static void usbvision_release(struct usb_usbvision *usbvision); /* Default initialization of device driver parameters */ /* Set the default format for ISOC endpoint */ static int isoc_mode = ISOC_MODE_COMPRESS; /* Set the default Debug Mode of the device driver */ static int video_debug; /* Set the default device to power on at startup */ static int power_on_at_open = 1; /* Sequential Number of Video Device */ static int video_nr = -1; /* Sequential Number of Radio Device */ static int radio_nr = -1; /* Grab parameters for the device driver */ /* Showing parameters under SYSFS */ module_param(isoc_mode, int, 0444); module_param(video_debug, int, 0444); module_param(power_on_at_open, int, 0444); module_param(video_nr, int, 0444); module_param(radio_nr, int, 0444); MODULE_PARM_DESC(isoc_mode, " Set the default format for ISOC endpoint. Default: 0x60 (Compression On)"); MODULE_PARM_DESC(video_debug, " Set the default Debug Mode of the device driver. Default: 0 (Off)"); MODULE_PARM_DESC(power_on_at_open, " Set the default device to power on when device is opened. Default: 1 (On)"); MODULE_PARM_DESC(video_nr, "Set video device number (/dev/videoX). 
Default: -1 (autodetect)"); MODULE_PARM_DESC(radio_nr, "Set radio device number (/dev/radioX). Default: -1 (autodetect)"); /* Misc stuff */ MODULE_AUTHOR(DRIVER_AUTHOR); MODULE_DESCRIPTION(DRIVER_DESC); MODULE_LICENSE(DRIVER_LICENSE); MODULE_VERSION(USBVISION_VERSION_STRING); MODULE_ALIAS(DRIVER_ALIAS); /*****************************************************************************/ /* SYSFS Code - Copied from the stv680.c usb module. */ /* Device information is located at /sys/class/video4linux/video0 */ /* Device parameters information is located at /sys/module/usbvision */ /* Device USB Information is located at */ /* /sys/bus/usb/drivers/USBVision Video Grabber */ /*****************************************************************************/ #define YES_NO(x) ((x) ? "Yes" : "No") static inline struct usb_usbvision *cd_to_usbvision(struct device *cd) { struct video_device *vdev = container_of(cd, struct video_device, dev); return video_get_drvdata(vdev); } static ssize_t show_version(struct device *cd, struct device_attribute *attr, char *buf) { return sprintf(buf, "%s\n", USBVISION_VERSION_STRING); } static DEVICE_ATTR(version, S_IRUGO, show_version, NULL); static ssize_t show_model(struct device *cd, struct device_attribute *attr, char *buf) { struct video_device *vdev = container_of(cd, struct video_device, dev); struct usb_usbvision *usbvision = video_get_drvdata(vdev); return sprintf(buf, "%s\n", usbvision_device_data[usbvision->dev_model].model_string); } static DEVICE_ATTR(model, S_IRUGO, show_model, NULL); static ssize_t show_hue(struct device *cd, struct device_attribute *attr, char *buf) { struct video_device *vdev = container_of(cd, struct video_device, dev); struct usb_usbvision *usbvision = video_get_drvdata(vdev); struct v4l2_control ctrl; ctrl.id = V4L2_CID_HUE; ctrl.value = 0; if (usbvision->user) call_all(usbvision, core, g_ctrl, &ctrl); return sprintf(buf, "%d\n", ctrl.value); } static DEVICE_ATTR(hue, S_IRUGO, show_hue, NULL); static ssize_t 
show_contrast(struct device *cd, struct device_attribute *attr, char *buf) { struct video_device *vdev = container_of(cd, struct video_device, dev); struct usb_usbvision *usbvision = video_get_drvdata(vdev); struct v4l2_control ctrl; ctrl.id = V4L2_CID_CONTRAST; ctrl.value = 0; if (usbvision->user) call_all(usbvision, core, g_ctrl, &ctrl); return sprintf(buf, "%d\n", ctrl.value); } static DEVICE_ATTR(contrast, S_IRUGO, show_contrast, NULL); static ssize_t show_brightness(struct device *cd, struct device_attribute *attr, char *buf) { struct video_device *vdev = container_of(cd, struct video_device, dev); struct usb_usbvision *usbvision = video_get_drvdata(vdev); struct v4l2_control ctrl; ctrl.id = V4L2_CID_BRIGHTNESS; ctrl.value = 0; if (usbvision->user) call_all(usbvision, core, g_ctrl, &ctrl); return sprintf(buf, "%d\n", ctrl.value); } static DEVICE_ATTR(brightness, S_IRUGO, show_brightness, NULL); static ssize_t show_saturation(struct device *cd, struct device_attribute *attr, char *buf) { struct video_device *vdev = container_of(cd, struct video_device, dev); struct usb_usbvision *usbvision = video_get_drvdata(vdev); struct v4l2_control ctrl; ctrl.id = V4L2_CID_SATURATION; ctrl.value = 0; if (usbvision->user) call_all(usbvision, core, g_ctrl, &ctrl); return sprintf(buf, "%d\n", ctrl.value); } static DEVICE_ATTR(saturation, S_IRUGO, show_saturation, NULL); static ssize_t show_streaming(struct device *cd, struct device_attribute *attr, char *buf) { struct video_device *vdev = container_of(cd, struct video_device, dev); struct usb_usbvision *usbvision = video_get_drvdata(vdev); return sprintf(buf, "%s\n", YES_NO(usbvision->streaming == stream_on ? 
1 : 0)); } static DEVICE_ATTR(streaming, S_IRUGO, show_streaming, NULL); static ssize_t show_compression(struct device *cd, struct device_attribute *attr, char *buf) { struct video_device *vdev = container_of(cd, struct video_device, dev); struct usb_usbvision *usbvision = video_get_drvdata(vdev); return sprintf(buf, "%s\n", YES_NO(usbvision->isoc_mode == ISOC_MODE_COMPRESS)); } static DEVICE_ATTR(compression, S_IRUGO, show_compression, NULL); static ssize_t show_device_bridge(struct device *cd, struct device_attribute *attr, char *buf) { struct video_device *vdev = container_of(cd, struct video_device, dev); struct usb_usbvision *usbvision = video_get_drvdata(vdev); return sprintf(buf, "%d\n", usbvision->bridge_type); } static DEVICE_ATTR(bridge, S_IRUGO, show_device_bridge, NULL); static void usbvision_create_sysfs(struct video_device *vdev) { int res; if (!vdev) return; do { res = device_create_file(&vdev->dev, &dev_attr_version); if (res < 0) break; res = device_create_file(&vdev->dev, &dev_attr_model); if (res < 0) break; res = device_create_file(&vdev->dev, &dev_attr_hue); if (res < 0) break; res = device_create_file(&vdev->dev, &dev_attr_contrast); if (res < 0) break; res = device_create_file(&vdev->dev, &dev_attr_brightness); if (res < 0) break; res = device_create_file(&vdev->dev, &dev_attr_saturation); if (res < 0) break; res = device_create_file(&vdev->dev, &dev_attr_streaming); if (res < 0) break; res = device_create_file(&vdev->dev, &dev_attr_compression); if (res < 0) break; res = device_create_file(&vdev->dev, &dev_attr_bridge); if (res >= 0) return; } while (0); dev_err(&vdev->dev, "%s error: %d\n", __func__, res); } static void usbvision_remove_sysfs(struct video_device *vdev) { if (vdev) { device_remove_file(&vdev->dev, &dev_attr_version); device_remove_file(&vdev->dev, &dev_attr_model); device_remove_file(&vdev->dev, &dev_attr_hue); device_remove_file(&vdev->dev, &dev_attr_contrast); device_remove_file(&vdev->dev, &dev_attr_brightness); 
device_remove_file(&vdev->dev, &dev_attr_saturation); device_remove_file(&vdev->dev, &dev_attr_streaming); device_remove_file(&vdev->dev, &dev_attr_compression); device_remove_file(&vdev->dev, &dev_attr_bridge); } } /* * usbvision_open() * * This is part of Video 4 Linux API. The driver can be opened by one * client only (checks internal counter 'usbvision->user'). The procedure * then allocates buffers needed for video processing. * */ static int usbvision_v4l2_open(struct file *file) { struct usb_usbvision *usbvision = video_drvdata(file); int err_code = 0; PDEBUG(DBG_IO, "open"); usbvision_reset_power_off_timer(usbvision); if (usbvision->user) err_code = -EBUSY; else { /* Allocate memory for the scratch ring buffer */ err_code = usbvision_scratch_alloc(usbvision); if (isoc_mode == ISOC_MODE_COMPRESS) { /* Allocate intermediate decompression buffers only if needed */ err_code = usbvision_decompress_alloc(usbvision); } if (err_code) { /* Deallocate all buffers if trouble */ usbvision_scratch_free(usbvision); usbvision_decompress_free(usbvision); } } /* If so far no errors then we shall start the camera */ if (!err_code) { if (usbvision->power == 0) { usbvision_power_on(usbvision); usbvision_i2c_register(usbvision); } /* Send init sequence only once, it's large! */ if (!usbvision->initialized) { int setup_ok = 0; setup_ok = usbvision_setup(usbvision, isoc_mode); if (setup_ok) usbvision->initialized = 1; else err_code = -EBUSY; } if (!err_code) { usbvision_begin_streaming(usbvision); err_code = usbvision_init_isoc(usbvision); /* device must be initialized before isoc transfer */ usbvision_muxsel(usbvision, 0); usbvision->user++; } else { if (power_on_at_open) { usbvision_i2c_unregister(usbvision); usbvision_power_off(usbvision); usbvision->initialized = 0; } } } /* prepare queues */ usbvision_empty_framequeues(usbvision); PDEBUG(DBG_IO, "success"); return err_code; } /* * usbvision_v4l2_close() * * This is part of Video 4 Linux API. 
The procedure * stops streaming and deallocates all buffers that were earlier * allocated in usbvision_v4l2_open(). * */ static int usbvision_v4l2_close(struct file *file) { struct usb_usbvision *usbvision = video_drvdata(file); PDEBUG(DBG_IO, "close"); usbvision_audio_off(usbvision); usbvision_restart_isoc(usbvision); usbvision_stop_isoc(usbvision); usbvision_decompress_free(usbvision); usbvision_frames_free(usbvision); usbvision_empty_framequeues(usbvision); usbvision_scratch_free(usbvision); usbvision->user--; if (power_on_at_open) { /* power off in a little while to avoid off/on every close/open short sequences */ usbvision_set_power_off_timer(usbvision); usbvision->initialized = 0; } if (usbvision->remove_pending) { printk(KERN_INFO "%s: Final disconnect\n", __func__); usbvision_release(usbvision); } PDEBUG(DBG_IO, "success"); return 0; } /* * usbvision_ioctl() * * This is part of Video 4 Linux API. The procedure handles ioctl() calls. * */ #ifdef CONFIG_VIDEO_ADV_DEBUG static int vidioc_g_register(struct file *file, void *priv, struct v4l2_dbg_register *reg) { struct usb_usbvision *usbvision = video_drvdata(file); int err_code; if (!v4l2_chip_match_host(&reg->match)) return -EINVAL; /* NT100x has a 8-bit register space */ err_code = usbvision_read_reg(usbvision, reg->reg&0xff); if (err_code < 0) { dev_err(&usbvision->vdev->dev, "%s: VIDIOC_DBG_G_REGISTER failed: error %d\n", __func__, err_code); return err_code; } reg->val = err_code; reg->size = 1; return 0; } static int vidioc_s_register(struct file *file, void *priv, struct v4l2_dbg_register *reg) { struct usb_usbvision *usbvision = video_drvdata(file); int err_code; if (!v4l2_chip_match_host(&reg->match)) return -EINVAL; /* NT100x has a 8-bit register space */ err_code = usbvision_write_reg(usbvision, reg->reg & 0xff, reg->val); if (err_code < 0) { dev_err(&usbvision->vdev->dev, "%s: VIDIOC_DBG_S_REGISTER failed: error %d\n", __func__, err_code); return err_code; } return 0; } #endif static int 
vidioc_querycap(struct file *file, void *priv, struct v4l2_capability *vc) { struct usb_usbvision *usbvision = video_drvdata(file); strlcpy(vc->driver, "USBVision", sizeof(vc->driver)); strlcpy(vc->card, usbvision_device_data[usbvision->dev_model].model_string, sizeof(vc->card)); usb_make_path(usbvision->dev, vc->bus_info, sizeof(vc->bus_info)); vc->version = USBVISION_DRIVER_VERSION; vc->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_AUDIO | V4L2_CAP_READWRITE | V4L2_CAP_STREAMING | (usbvision->have_tuner ? V4L2_CAP_TUNER : 0); return 0; } static int vidioc_enum_input(struct file *file, void *priv, struct v4l2_input *vi) { struct usb_usbvision *usbvision = video_drvdata(file); int chan; if (vi->index >= usbvision->video_inputs) return -EINVAL; if (usbvision->have_tuner) chan = vi->index; else chan = vi->index + 1; /* skip Television string*/ /* Determine the requested input characteristics specific for each usbvision card model */ switch (chan) { case 0: if (usbvision_device_data[usbvision->dev_model].video_channels == 4) { strcpy(vi->name, "White Video Input"); } else { strcpy(vi->name, "Television"); vi->type = V4L2_INPUT_TYPE_TUNER; vi->audioset = 1; vi->tuner = chan; vi->std = USBVISION_NORMS; } break; case 1: vi->type = V4L2_INPUT_TYPE_CAMERA; if (usbvision_device_data[usbvision->dev_model].video_channels == 4) strcpy(vi->name, "Green Video Input"); else strcpy(vi->name, "Composite Video Input"); vi->std = V4L2_STD_PAL; break; case 2: vi->type = V4L2_INPUT_TYPE_CAMERA; if (usbvision_device_data[usbvision->dev_model].video_channels == 4) strcpy(vi->name, "Yellow Video Input"); else strcpy(vi->name, "S-Video Input"); vi->std = V4L2_STD_PAL; break; case 3: vi->type = V4L2_INPUT_TYPE_CAMERA; strcpy(vi->name, "Red Video Input"); vi->std = V4L2_STD_PAL; break; } return 0; } static int vidioc_g_input(struct file *file, void *priv, unsigned int *input) { struct usb_usbvision *usbvision = video_drvdata(file); *input = usbvision->ctl_input; return 0; } static int 
vidioc_s_input(struct file *file, void *priv, unsigned int input) { struct usb_usbvision *usbvision = video_drvdata(file); if (input >= usbvision->video_inputs) return -EINVAL; usbvision_muxsel(usbvision, input); usbvision_set_input(usbvision); usbvision_set_output(usbvision, usbvision->curwidth, usbvision->curheight); return 0; } static int vidioc_s_std(struct file *file, void *priv, v4l2_std_id *id) { struct usb_usbvision *usbvision = video_drvdata(file); usbvision->tvnorm_id = *id; call_all(usbvision, core, s_std, usbvision->tvnorm_id); /* propagate the change to the decoder */ usbvision_muxsel(usbvision, usbvision->ctl_input); return 0; } static int vidioc_g_tuner(struct file *file, void *priv, struct v4l2_tuner *vt) { struct usb_usbvision *usbvision = video_drvdata(file); if (!usbvision->have_tuner || vt->index) /* Only tuner 0 */ return -EINVAL; if (usbvision->radio) { strcpy(vt->name, "Radio"); vt->type = V4L2_TUNER_RADIO; } else { strcpy(vt->name, "Television"); } /* Let clients fill in the remainder of this struct */ call_all(usbvision, tuner, g_tuner, vt); return 0; } static int vidioc_s_tuner(struct file *file, void *priv, struct v4l2_tuner *vt) { struct usb_usbvision *usbvision = video_drvdata(file); /* Only no or one tuner for now */ if (!usbvision->have_tuner || vt->index) return -EINVAL; /* let clients handle this */ call_all(usbvision, tuner, s_tuner, vt); return 0; } static int vidioc_g_frequency(struct file *file, void *priv, struct v4l2_frequency *freq) { struct usb_usbvision *usbvision = video_drvdata(file); freq->tuner = 0; /* Only one tuner */ if (usbvision->radio) freq->type = V4L2_TUNER_RADIO; else freq->type = V4L2_TUNER_ANALOG_TV; freq->frequency = usbvision->freq; return 0; } static int vidioc_s_frequency(struct file *file, void *priv, struct v4l2_frequency *freq) { struct usb_usbvision *usbvision = video_drvdata(file); /* Only no or one tuner for now */ if (!usbvision->have_tuner || freq->tuner) return -EINVAL; usbvision->freq = 
freq->frequency; call_all(usbvision, tuner, s_frequency, freq); return 0; } static int vidioc_g_audio(struct file *file, void *priv, struct v4l2_audio *a) { struct usb_usbvision *usbvision = video_drvdata(file); if (usbvision->radio) strcpy(a->name, "Radio"); else strcpy(a->name, "TV"); return 0; } static int vidioc_s_audio(struct file *file, void *fh, struct v4l2_audio *a) { if (a->index) return -EINVAL; return 0; } static int vidioc_queryctrl(struct file *file, void *priv, struct v4l2_queryctrl *ctrl) { struct usb_usbvision *usbvision = video_drvdata(file); call_all(usbvision, core, queryctrl, ctrl); if (!ctrl->type) return -EINVAL; return 0; } static int vidioc_g_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct usb_usbvision *usbvision = video_drvdata(file); call_all(usbvision, core, g_ctrl, ctrl); return 0; } static int vidioc_s_ctrl(struct file *file, void *priv, struct v4l2_control *ctrl) { struct usb_usbvision *usbvision = video_drvdata(file); call_all(usbvision, core, s_ctrl, ctrl); return 0; } static int vidioc_reqbufs(struct file *file, void *priv, struct v4l2_requestbuffers *vr) { struct usb_usbvision *usbvision = video_drvdata(file); int ret; RESTRICT_TO_RANGE(vr->count, 1, USBVISION_NUMFRAMES); /* Check input validity: the user must do a VIDEO CAPTURE and MMAP method. 
*/ if (vr->memory != V4L2_MEMORY_MMAP) return -EINVAL; if (usbvision->streaming == stream_on) { ret = usbvision_stream_interrupt(usbvision); if (ret) return ret; } usbvision_frames_free(usbvision); usbvision_empty_framequeues(usbvision); vr->count = usbvision_frames_alloc(usbvision, vr->count); usbvision->cur_frame = NULL; return 0; } static int vidioc_querybuf(struct file *file, void *priv, struct v4l2_buffer *vb) { struct usb_usbvision *usbvision = video_drvdata(file); struct usbvision_frame *frame; /* FIXME : must control that buffers are mapped (VIDIOC_REQBUFS has been called) */ if (vb->index >= usbvision->num_frames) return -EINVAL; /* Updating the corresponding frame state */ vb->flags = 0; frame = &usbvision->frame[vb->index]; if (frame->grabstate >= frame_state_ready) vb->flags |= V4L2_BUF_FLAG_QUEUED; if (frame->grabstate >= frame_state_done) vb->flags |= V4L2_BUF_FLAG_DONE; if (frame->grabstate == frame_state_unused) vb->flags |= V4L2_BUF_FLAG_MAPPED; vb->memory = V4L2_MEMORY_MMAP; vb->m.offset = vb->index * PAGE_ALIGN(usbvision->max_frame_size); vb->memory = V4L2_MEMORY_MMAP; vb->field = V4L2_FIELD_NONE; vb->length = usbvision->curwidth * usbvision->curheight * usbvision->palette.bytes_per_pixel; vb->timestamp = usbvision->frame[vb->index].timestamp; vb->sequence = usbvision->frame[vb->index].sequence; return 0; } static int vidioc_qbuf(struct file *file, void *priv, struct v4l2_buffer *vb) { struct usb_usbvision *usbvision = video_drvdata(file); struct usbvision_frame *frame; unsigned long lock_flags; /* FIXME : works only on VIDEO_CAPTURE MODE, MMAP. 
*/ if (vb->index >= usbvision->num_frames) return -EINVAL; frame = &usbvision->frame[vb->index]; if (frame->grabstate != frame_state_unused) return -EAGAIN; /* Mark it as ready and enqueue frame */ frame->grabstate = frame_state_ready; frame->scanstate = scan_state_scanning; frame->scanlength = 0; /* Accumulated in usbvision_parse_data() */ vb->flags &= ~V4L2_BUF_FLAG_DONE; /* set v4l2_format index */ frame->v4l2_format = usbvision->palette; spin_lock_irqsave(&usbvision->queue_lock, lock_flags); list_add_tail(&usbvision->frame[vb->index].frame, &usbvision->inqueue); spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags); return 0; } static int vidioc_dqbuf(struct file *file, void *priv, struct v4l2_buffer *vb) { struct usb_usbvision *usbvision = video_drvdata(file); int ret; struct usbvision_frame *f; unsigned long lock_flags; if (list_empty(&(usbvision->outqueue))) { if (usbvision->streaming == stream_idle) return -EINVAL; ret = wait_event_interruptible (usbvision->wait_frame, !list_empty(&(usbvision->outqueue))); if (ret) return ret; } spin_lock_irqsave(&usbvision->queue_lock, lock_flags); f = list_entry(usbvision->outqueue.next, struct usbvision_frame, frame); list_del(usbvision->outqueue.next); spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags); f->grabstate = frame_state_unused; vb->memory = V4L2_MEMORY_MMAP; vb->flags = V4L2_BUF_FLAG_MAPPED | V4L2_BUF_FLAG_QUEUED | V4L2_BUF_FLAG_DONE; vb->index = f->index; vb->sequence = f->sequence; vb->timestamp = f->timestamp; vb->field = V4L2_FIELD_NONE; vb->bytesused = f->scanlength; return 0; } static int vidioc_streamon(struct file *file, void *priv, enum v4l2_buf_type i) { struct usb_usbvision *usbvision = video_drvdata(file); usbvision->streaming = stream_on; call_all(usbvision, video, s_stream, 1); return 0; } static int vidioc_streamoff(struct file *file, void *priv, enum v4l2_buf_type type) { struct usb_usbvision *usbvision = video_drvdata(file); if (type != V4L2_BUF_TYPE_VIDEO_CAPTURE) return -EINVAL; 
if (usbvision->streaming == stream_on) { usbvision_stream_interrupt(usbvision); /* Stop all video streamings */ call_all(usbvision, video, s_stream, 0); } usbvision_empty_framequeues(usbvision); return 0; } static int vidioc_enum_fmt_vid_cap(struct file *file, void *priv, struct v4l2_fmtdesc *vfd) { if (vfd->index >= USBVISION_SUPPORTED_PALETTES - 1) return -EINVAL; strcpy(vfd->description, usbvision_v4l2_format[vfd->index].desc); vfd->pixelformat = usbvision_v4l2_format[vfd->index].format; return 0; } static int vidioc_g_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *vf) { struct usb_usbvision *usbvision = video_drvdata(file); vf->fmt.pix.width = usbvision->curwidth; vf->fmt.pix.height = usbvision->curheight; vf->fmt.pix.pixelformat = usbvision->palette.format; vf->fmt.pix.bytesperline = usbvision->curwidth * usbvision->palette.bytes_per_pixel; vf->fmt.pix.sizeimage = vf->fmt.pix.bytesperline * usbvision->curheight; vf->fmt.pix.colorspace = V4L2_COLORSPACE_SMPTE170M; vf->fmt.pix.field = V4L2_FIELD_NONE; /* Always progressive image */ return 0; } static int vidioc_try_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *vf) { struct usb_usbvision *usbvision = video_drvdata(file); int format_idx; /* Find requested format in available ones */ for (format_idx = 0; format_idx < USBVISION_SUPPORTED_PALETTES; format_idx++) { if (vf->fmt.pix.pixelformat == usbvision_v4l2_format[format_idx].format) { usbvision->palette = usbvision_v4l2_format[format_idx]; break; } } /* robustness */ if (format_idx == USBVISION_SUPPORTED_PALETTES) return -EINVAL; RESTRICT_TO_RANGE(vf->fmt.pix.width, MIN_FRAME_WIDTH, MAX_FRAME_WIDTH); RESTRICT_TO_RANGE(vf->fmt.pix.height, MIN_FRAME_HEIGHT, MAX_FRAME_HEIGHT); vf->fmt.pix.bytesperline = vf->fmt.pix.width* usbvision->palette.bytes_per_pixel; vf->fmt.pix.sizeimage = vf->fmt.pix.bytesperline*vf->fmt.pix.height; return 0; } static int vidioc_s_fmt_vid_cap(struct file *file, void *priv, struct v4l2_format *vf) { struct 
usb_usbvision *usbvision = video_drvdata(file); int ret; ret = vidioc_try_fmt_vid_cap(file, priv, vf); if (ret) return ret; /* stop io in case it is already in progress */ if (usbvision->streaming == stream_on) { ret = usbvision_stream_interrupt(usbvision); if (ret) return ret; } usbvision_frames_free(usbvision); usbvision_empty_framequeues(usbvision); usbvision->cur_frame = NULL; /* by now we are committed to the new data... */ usbvision_set_output(usbvision, vf->fmt.pix.width, vf->fmt.pix.height); return 0; } static ssize_t usbvision_v4l2_read(struct file *file, char __user *buf, size_t count, loff_t *ppos) { struct usb_usbvision *usbvision = video_drvdata(file); int noblock = file->f_flags & O_NONBLOCK; unsigned long lock_flags; int ret, i; struct usbvision_frame *frame; PDEBUG(DBG_IO, "%s: %ld bytes, noblock=%d", __func__, (unsigned long)count, noblock); if (!USBVISION_IS_OPERATIONAL(usbvision) || (buf == NULL)) return -EFAULT; /* This entry point is compatible with the mmap routines so that a user can do either VIDIOC_QBUF/VIDIOC_DQBUF to get frames or call read on the device. */ if (!usbvision->num_frames) { /* First, allocate some frames to work with if this has not been done with VIDIOC_REQBUF */ usbvision_frames_free(usbvision); usbvision_empty_framequeues(usbvision); usbvision_frames_alloc(usbvision, USBVISION_NUMFRAMES); } if (usbvision->streaming != stream_on) { /* no stream is running, make it running ! 
*/ usbvision->streaming = stream_on; call_all(usbvision, video, s_stream, 1); } /* Then, enqueue as many frames as possible (like a user of VIDIOC_QBUF would do) */ for (i = 0; i < usbvision->num_frames; i++) { frame = &usbvision->frame[i]; if (frame->grabstate == frame_state_unused) { /* Mark it as ready and enqueue frame */ frame->grabstate = frame_state_ready; frame->scanstate = scan_state_scanning; /* Accumulated in usbvision_parse_data() */ frame->scanlength = 0; /* set v4l2_format index */ frame->v4l2_format = usbvision->palette; spin_lock_irqsave(&usbvision->queue_lock, lock_flags); list_add_tail(&frame->frame, &usbvision->inqueue); spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags); } } /* Then try to steal a frame (like a VIDIOC_DQBUF would do) */ if (list_empty(&(usbvision->outqueue))) { if (noblock) return -EAGAIN; ret = wait_event_interruptible (usbvision->wait_frame, !list_empty(&(usbvision->outqueue))); if (ret) return ret; } spin_lock_irqsave(&usbvision->queue_lock, lock_flags); frame = list_entry(usbvision->outqueue.next, struct usbvision_frame, frame); list_del(usbvision->outqueue.next); spin_unlock_irqrestore(&usbvision->queue_lock, lock_flags); /* An error returns an empty frame */ if (frame->grabstate == frame_state_error) { frame->bytes_read = 0; return 0; } PDEBUG(DBG_IO, "%s: frmx=%d, bytes_read=%ld, scanlength=%ld", __func__, frame->index, frame->bytes_read, frame->scanlength); /* copy bytes to user space; we allow for partials reads */ if ((count + frame->bytes_read) > (unsigned long)frame->scanlength) count = frame->scanlength - frame->bytes_read; if (copy_to_user(buf, frame->data + frame->bytes_read, count)) return -EFAULT; frame->bytes_read += count; PDEBUG(DBG_IO, "%s: {copy} count used=%ld, new bytes_read=%ld", __func__, (unsigned long)count, frame->bytes_read); /* For now, forget the frame if it has not been read in one shot. 
*/ /* if (frame->bytes_read >= frame->scanlength) {*/ /* All data has been read */ frame->bytes_read = 0; /* Mark it as available to be used again. */ frame->grabstate = frame_state_unused; /* } */ return count; } static int usbvision_v4l2_mmap(struct file *file, struct vm_area_struct *vma) { unsigned long size = vma->vm_end - vma->vm_start, start = vma->vm_start; void *pos; u32 i; struct usb_usbvision *usbvision = video_drvdata(file); PDEBUG(DBG_MMAP, "mmap"); if (!USBVISION_IS_OPERATIONAL(usbvision)) return -EFAULT; if (!(vma->vm_flags & VM_WRITE) || size != PAGE_ALIGN(usbvision->max_frame_size)) { return -EINVAL; } for (i = 0; i < usbvision->num_frames; i++) { if (((PAGE_ALIGN(usbvision->max_frame_size)*i) >> PAGE_SHIFT) == vma->vm_pgoff) break; } if (i == usbvision->num_frames) { PDEBUG(DBG_MMAP, "mmap: user supplied mapping address is out of range"); return -EINVAL; } /* VM_IO is eventually going to replace PageReserved altogether */ vma->vm_flags |= VM_IO; vma->vm_flags |= VM_RESERVED; /* avoid to swap out this VMA */ pos = usbvision->frame[i].data; while (size > 0) { if (vm_insert_page(vma, start, vmalloc_to_page(pos))) { PDEBUG(DBG_MMAP, "mmap: vm_insert_page failed"); return -EAGAIN; } start += PAGE_SIZE; pos += PAGE_SIZE; size -= PAGE_SIZE; } return 0; } /* * Here comes the stuff for radio on usbvision based devices * */ static int usbvision_radio_open(struct file *file) { struct usb_usbvision *usbvision = video_drvdata(file); int err_code = 0; PDEBUG(DBG_IO, "%s:", __func__); if (usbvision->user) { dev_err(&usbvision->rdev->dev, "%s: Someone tried to open an already opened USBVision Radio!\n", __func__); err_code = -EBUSY; } else { if (power_on_at_open) { usbvision_reset_power_off_timer(usbvision); if (usbvision->power == 0) { usbvision_power_on(usbvision); usbvision_i2c_register(usbvision); } } /* Alternate interface 1 is is the biggest frame size */ err_code = usbvision_set_alternate(usbvision); if (err_code < 0) { usbvision->last_error = err_code; 
err_code = -EBUSY; goto out; } /* If so far no errors then we shall start the radio */ usbvision->radio = 1; call_all(usbvision, tuner, s_radio); usbvision_set_audio(usbvision, USBVISION_AUDIO_RADIO); usbvision->user++; } if (err_code) { if (power_on_at_open) { usbvision_i2c_unregister(usbvision); usbvision_power_off(usbvision); usbvision->initialized = 0; } } out: return err_code; } static int usbvision_radio_close(struct file *file) { struct usb_usbvision *usbvision = video_drvdata(file); int err_code = 0; PDEBUG(DBG_IO, ""); /* Set packet size to 0 */ usbvision->iface_alt = 0; err_code = usb_set_interface(usbvision->dev, usbvision->iface, usbvision->iface_alt); usbvision_audio_off(usbvision); usbvision->radio = 0; usbvision->user--; if (power_on_at_open) { usbvision_set_power_off_timer(usbvision); usbvision->initialized = 0; } if (usbvision->remove_pending) { printk(KERN_INFO "%s: Final disconnect\n", __func__); usbvision_release(usbvision); } PDEBUG(DBG_IO, "success"); return err_code; } /* Video registration stuff */ /* Video template */ static const struct v4l2_file_operations usbvision_fops = { .owner = THIS_MODULE, .open = usbvision_v4l2_open, .release = usbvision_v4l2_close, .read = usbvision_v4l2_read, .mmap = usbvision_v4l2_mmap, .unlocked_ioctl = video_ioctl2, /* .poll = video_poll, */ }; static const struct v4l2_ioctl_ops usbvision_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_fmt_vid_cap = vidioc_enum_fmt_vid_cap, .vidioc_g_fmt_vid_cap = vidioc_g_fmt_vid_cap, .vidioc_try_fmt_vid_cap = vidioc_try_fmt_vid_cap, .vidioc_s_fmt_vid_cap = vidioc_s_fmt_vid_cap, .vidioc_reqbufs = vidioc_reqbufs, .vidioc_querybuf = vidioc_querybuf, .vidioc_qbuf = vidioc_qbuf, .vidioc_dqbuf = vidioc_dqbuf, .vidioc_s_std = vidioc_s_std, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_queryctrl = vidioc_queryctrl, .vidioc_g_audio = vidioc_g_audio, .vidioc_s_audio = vidioc_s_audio, .vidioc_g_ctrl = 
vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, .vidioc_streamon = vidioc_streamon, .vidioc_streamoff = vidioc_streamoff, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, #ifdef CONFIG_VIDEO_ADV_DEBUG .vidioc_g_register = vidioc_g_register, .vidioc_s_register = vidioc_s_register, #endif }; static struct video_device usbvision_video_template = { .fops = &usbvision_fops, .ioctl_ops = &usbvision_ioctl_ops, .name = "usbvision-video", .release = video_device_release, .tvnorms = USBVISION_NORMS, .current_norm = V4L2_STD_PAL }; /* Radio template */ static const struct v4l2_file_operations usbvision_radio_fops = { .owner = THIS_MODULE, .open = usbvision_radio_open, .release = usbvision_radio_close, .unlocked_ioctl = video_ioctl2, }; static const struct v4l2_ioctl_ops usbvision_radio_ioctl_ops = { .vidioc_querycap = vidioc_querycap, .vidioc_enum_input = vidioc_enum_input, .vidioc_g_input = vidioc_g_input, .vidioc_s_input = vidioc_s_input, .vidioc_queryctrl = vidioc_queryctrl, .vidioc_g_audio = vidioc_g_audio, .vidioc_s_audio = vidioc_s_audio, .vidioc_g_ctrl = vidioc_g_ctrl, .vidioc_s_ctrl = vidioc_s_ctrl, .vidioc_g_tuner = vidioc_g_tuner, .vidioc_s_tuner = vidioc_s_tuner, .vidioc_g_frequency = vidioc_g_frequency, .vidioc_s_frequency = vidioc_s_frequency, }; static struct video_device usbvision_radio_template = { .fops = &usbvision_radio_fops, .name = "usbvision-radio", .release = video_device_release, .ioctl_ops = &usbvision_radio_ioctl_ops, .tvnorms = USBVISION_NORMS, .current_norm = V4L2_STD_PAL }; static struct video_device *usbvision_vdev_init(struct usb_usbvision *usbvision, struct video_device *vdev_template, char *name) { struct usb_device *usb_dev = usbvision->dev; struct video_device *vdev; if (usb_dev == NULL) { dev_err(&usbvision->dev->dev, "%s: usbvision->dev is not set\n", __func__); return NULL; } vdev = video_device_alloc(); if (NULL == vdev) return NULL; 
*vdev = *vdev_template; vdev->lock = &usbvision->v4l2_lock; vdev->v4l2_dev = &usbvision->v4l2_dev; snprintf(vdev->name, sizeof(vdev->name), "%s", name); video_set_drvdata(vdev, usbvision); return vdev; } /* unregister video4linux devices */ static void usbvision_unregister_video(struct usb_usbvision *usbvision) { /* Radio Device: */ if (usbvision->rdev) { PDEBUG(DBG_PROBE, "unregister %s [v4l2]", video_device_node_name(usbvision->rdev)); if (video_is_registered(usbvision->rdev)) video_unregister_device(usbvision->rdev); else video_device_release(usbvision->rdev); usbvision->rdev = NULL; } /* Video Device: */ if (usbvision->vdev) { PDEBUG(DBG_PROBE, "unregister %s [v4l2]", video_device_node_name(usbvision->vdev)); if (video_is_registered(usbvision->vdev)) video_unregister_device(usbvision->vdev); else video_device_release(usbvision->vdev); usbvision->vdev = NULL; } } /* register video4linux devices */ static int __devinit usbvision_register_video(struct usb_usbvision *usbvision) { /* Video Device: */ usbvision->vdev = usbvision_vdev_init(usbvision, &usbvision_video_template, "USBVision Video"); if (usbvision->vdev == NULL) goto err_exit; if (video_register_device(usbvision->vdev, VFL_TYPE_GRABBER, video_nr) < 0) goto err_exit; printk(KERN_INFO "USBVision[%d]: registered USBVision Video device %s [v4l2]\n", usbvision->nr, video_device_node_name(usbvision->vdev)); /* Radio Device: */ if (usbvision_device_data[usbvision->dev_model].radio) { /* usbvision has radio */ usbvision->rdev = usbvision_vdev_init(usbvision, &usbvision_radio_template, "USBVision Radio"); if (usbvision->rdev == NULL) goto err_exit; if (video_register_device(usbvision->rdev, VFL_TYPE_RADIO, radio_nr) < 0) goto err_exit; printk(KERN_INFO "USBVision[%d]: registered USBVision Radio device %s [v4l2]\n", usbvision->nr, video_device_node_name(usbvision->rdev)); } /* all done */ return 0; err_exit: dev_err(&usbvision->dev->dev, "USBVision[%d]: video_register_device() failed\n", usbvision->nr); 
usbvision_unregister_video(usbvision); return -1; } /* * usbvision_alloc() * * This code allocates the struct usb_usbvision. * It is filled with default values. * * Returns NULL on error, a pointer to usb_usbvision else. * */ static struct usb_usbvision *usbvision_alloc(struct usb_device *dev, struct usb_interface *intf) { struct usb_usbvision *usbvision; usbvision = kzalloc(sizeof(struct usb_usbvision), GFP_KERNEL); if (usbvision == NULL) return NULL; usbvision->dev = dev; if (v4l2_device_register(&intf->dev, &usbvision->v4l2_dev)) goto err_free; mutex_init(&usbvision->v4l2_lock); /* prepare control urb for control messages during interrupts */ usbvision->ctrl_urb = usb_alloc_urb(USBVISION_URB_FRAMES, GFP_KERNEL); if (usbvision->ctrl_urb == NULL) goto err_unreg; init_waitqueue_head(&usbvision->ctrl_urb_wq); usbvision_init_power_off_timer(usbvision); return usbvision; err_unreg: v4l2_device_unregister(&usbvision->v4l2_dev); err_free: kfree(usbvision); return NULL; } /* * usbvision_release() * * This code does final release of struct usb_usbvision. This happens * after the device is disconnected -and- all clients closed their files. 
* */ static void usbvision_release(struct usb_usbvision *usbvision) { PDEBUG(DBG_PROBE, ""); usbvision_reset_power_off_timer(usbvision); usbvision->initialized = 0; usbvision_remove_sysfs(usbvision->vdev); usbvision_unregister_video(usbvision); usb_free_urb(usbvision->ctrl_urb); v4l2_device_unregister(&usbvision->v4l2_dev); kfree(usbvision); PDEBUG(DBG_PROBE, "success"); } /*********************** usb interface **********************************/ static void usbvision_configure_video(struct usb_usbvision *usbvision) { int model; if (usbvision == NULL) return; model = usbvision->dev_model; usbvision->palette = usbvision_v4l2_format[2]; /* V4L2_PIX_FMT_RGB24; */ if (usbvision_device_data[usbvision->dev_model].vin_reg2_override) { usbvision->vin_reg2_preset = usbvision_device_data[usbvision->dev_model].vin_reg2; } else { usbvision->vin_reg2_preset = 0; } usbvision->tvnorm_id = usbvision_device_data[model].video_norm; usbvision->video_inputs = usbvision_device_data[model].video_channels; usbvision->ctl_input = 0; /* This should be here to make i2c clients to be able to register */ /* first switch off audio */ if (usbvision_device_data[model].audio_channels > 0) usbvision_audio_off(usbvision); if (!power_on_at_open) { /* and then power up the noisy tuner */ usbvision_power_on(usbvision); usbvision_i2c_register(usbvision); } } /* * usbvision_probe() * * This procedure queries device descriptor and accepts the interface * if it looks like USBVISION video device * */ static int __devinit usbvision_probe(struct usb_interface *intf, const struct usb_device_id *devid) { struct usb_device *dev = usb_get_dev(interface_to_usbdev(intf)); struct usb_interface *uif; __u8 ifnum = intf->altsetting->desc.bInterfaceNumber; const struct usb_host_interface *interface; struct usb_usbvision *usbvision = NULL; const struct usb_endpoint_descriptor *endpoint; int model, i; PDEBUG(DBG_PROBE, "VID=%#04x, PID=%#04x, ifnum=%u", dev->descriptor.idVendor, dev->descriptor.idProduct, ifnum); model = 
devid->driver_info; if (model < 0 || model >= usbvision_device_data_size) { PDEBUG(DBG_PROBE, "model out of bounds %d", model); return -ENODEV; } printk(KERN_INFO "%s: %s found\n", __func__, usbvision_device_data[model].model_string); if (usbvision_device_data[model].interface >= 0) interface = &dev->actconfig->interface[usbvision_device_data[model].interface]->altsetting[0]; else interface = &dev->actconfig->interface[ifnum]->altsetting[0]; endpoint = &interface->endpoint[1].desc; if (!usb_endpoint_xfer_isoc(endpoint)) { dev_err(&intf->dev, "%s: interface %d. has non-ISO endpoint!\n", __func__, ifnum); dev_err(&intf->dev, "%s: Endpoint attributes %d", __func__, endpoint->bmAttributes); return -ENODEV; } if (usb_endpoint_dir_out(endpoint)) { dev_err(&intf->dev, "%s: interface %d. has ISO OUT endpoint!\n", __func__, ifnum); return -ENODEV; } usbvision = usbvision_alloc(dev, intf); if (usbvision == NULL) { dev_err(&intf->dev, "%s: couldn't allocate USBVision struct\n", __func__); return -ENOMEM; } if (dev->descriptor.bNumConfigurations > 1) usbvision->bridge_type = BRIDGE_NT1004; else if (model == DAZZLE_DVC_90_REV_1_SECAM) usbvision->bridge_type = BRIDGE_NT1005; else usbvision->bridge_type = BRIDGE_NT1003; PDEBUG(DBG_PROBE, "bridge_type %d", usbvision->bridge_type); /* compute alternate max packet sizes */ uif = dev->actconfig->interface[0]; usbvision->num_alt = uif->num_altsetting; PDEBUG(DBG_PROBE, "Alternate settings: %i", usbvision->num_alt); usbvision->alt_max_pkt_size = kmalloc(32 * usbvision->num_alt, GFP_KERNEL); if (usbvision->alt_max_pkt_size == NULL) { dev_err(&intf->dev, "usbvision: out of memory!\n"); return -ENOMEM; } for (i = 0; i < usbvision->num_alt; i++) { u16 tmp = le16_to_cpu(uif->altsetting[i].endpoint[1].desc. 
wMaxPacketSize); usbvision->alt_max_pkt_size[i] = (tmp & 0x07ff) * (((tmp & 0x1800) >> 11) + 1); PDEBUG(DBG_PROBE, "Alternate setting %i, max size= %i", i, usbvision->alt_max_pkt_size[i]); } usbvision->nr = usbvision_nr++; usbvision->have_tuner = usbvision_device_data[model].tuner; if (usbvision->have_tuner) usbvision->tuner_type = usbvision_device_data[model].tuner_type; usbvision->dev_model = model; usbvision->remove_pending = 0; usbvision->iface = ifnum; usbvision->iface_alt = 0; usbvision->video_endp = endpoint->bEndpointAddress; usbvision->isoc_packet_size = 0; usbvision->usb_bandwidth = 0; usbvision->user = 0; usbvision->streaming = stream_off; usbvision_configure_video(usbvision); usbvision_register_video(usbvision); usbvision_create_sysfs(usbvision->vdev); PDEBUG(DBG_PROBE, "success"); return 0; } /* * usbvision_disconnect() * * This procedure stops all driver activity, deallocates interface-private * structure (pointed by 'ptr') and after that driver should be removable * with no ill consequences. 
* */ static void __devexit usbvision_disconnect(struct usb_interface *intf) { struct usb_usbvision *usbvision = to_usbvision(usb_get_intfdata(intf)); PDEBUG(DBG_PROBE, ""); if (usbvision == NULL) { pr_err("%s: usb_get_intfdata() failed\n", __func__); return; } mutex_lock(&usbvision->v4l2_lock); /* At this time we ask to cancel outstanding URBs */ usbvision_stop_isoc(usbvision); v4l2_device_disconnect(&usbvision->v4l2_dev); if (usbvision->power) { usbvision_i2c_unregister(usbvision); usbvision_power_off(usbvision); } usbvision->remove_pending = 1; /* Now all ISO data will be ignored */ usb_put_dev(usbvision->dev); usbvision->dev = NULL; /* USB device is no more */ mutex_unlock(&usbvision->v4l2_lock); if (usbvision->user) { printk(KERN_INFO "%s: In use, disconnect pending\n", __func__); wake_up_interruptible(&usbvision->wait_frame); wake_up_interruptible(&usbvision->wait_stream); } else { usbvision_release(usbvision); } PDEBUG(DBG_PROBE, "success"); } static struct usb_driver usbvision_driver = { .name = "usbvision", .id_table = usbvision_table, .probe = usbvision_probe, .disconnect = __devexit_p(usbvision_disconnect), }; /* * usbvision_init() * * This code is run to initialize the driver. 
* */ static int __init usbvision_init(void) { int err_code; PDEBUG(DBG_PROBE, ""); PDEBUG(DBG_IO, "IO debugging is enabled [video]"); PDEBUG(DBG_PROBE, "PROBE debugging is enabled [video]"); PDEBUG(DBG_MMAP, "MMAP debugging is enabled [video]"); /* disable planar mode support unless compression enabled */ if (isoc_mode != ISOC_MODE_COMPRESS) { /* FIXME : not the right way to set supported flag */ usbvision_v4l2_format[6].supported = 0; /* V4L2_PIX_FMT_YVU420 */ usbvision_v4l2_format[7].supported = 0; /* V4L2_PIX_FMT_YUV422P */ } err_code = usb_register(&usbvision_driver); if (err_code == 0) { printk(KERN_INFO DRIVER_DESC " : " USBVISION_VERSION_STRING "\n"); PDEBUG(DBG_PROBE, "success"); } return err_code; } static void __exit usbvision_exit(void) { PDEBUG(DBG_PROBE, ""); usb_deregister(&usbvision_driver); PDEBUG(DBG_PROBE, "success"); } module_init(usbvision_init); module_exit(usbvision_exit); /* * Overrides for Emacs so that we follow Linus's tabbing style. * --------------------------------------------------------------------------- * Local variables: * c-basic-offset: 8 * End: */
gpl-2.0
pawo99/stock_mm_dpgw
drivers/scsi/vmw_pvscsi.c
2385
39402
/* * Linux driver for VMware's para-virtualized SCSI HBA. * * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; version 2 of the License and no later version. * * This program is distributed in the hope that it will be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or * NON INFRINGEMENT. See the GNU General Public License for more * details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Maintained by: Arvind Kumar <arvindkumar@vmware.com> * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/workqueue.h> #include <linux/pci.h> #include <scsi/scsi.h> #include <scsi/scsi_host.h> #include <scsi/scsi_cmnd.h> #include <scsi/scsi_device.h> #include "vmw_pvscsi.h" #define PVSCSI_LINUX_DRIVER_DESC "VMware PVSCSI driver" MODULE_DESCRIPTION(PVSCSI_LINUX_DRIVER_DESC); MODULE_AUTHOR("VMware, Inc."); MODULE_LICENSE("GPL"); MODULE_VERSION(PVSCSI_DRIVER_VERSION_STRING); #define PVSCSI_DEFAULT_NUM_PAGES_PER_RING 8 #define PVSCSI_DEFAULT_NUM_PAGES_MSG_RING 1 #define PVSCSI_DEFAULT_QUEUE_DEPTH 64 #define SGL_SIZE PAGE_SIZE struct pvscsi_sg_list { struct PVSCSISGElement sge[PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT]; }; struct pvscsi_ctx { /* * The index of the context in cmd_map serves as the context ID for a * 1-to-1 mapping completions back to requests. 
*/ struct scsi_cmnd *cmd; struct pvscsi_sg_list *sgl; struct list_head list; dma_addr_t dataPA; dma_addr_t sensePA; dma_addr_t sglPA; }; struct pvscsi_adapter { char *mmioBase; unsigned int irq; u8 rev; bool use_msi; bool use_msix; bool use_msg; spinlock_t hw_lock; struct workqueue_struct *workqueue; struct work_struct work; struct PVSCSIRingReqDesc *req_ring; unsigned req_pages; unsigned req_depth; dma_addr_t reqRingPA; struct PVSCSIRingCmpDesc *cmp_ring; unsigned cmp_pages; dma_addr_t cmpRingPA; struct PVSCSIRingMsgDesc *msg_ring; unsigned msg_pages; dma_addr_t msgRingPA; struct PVSCSIRingsState *rings_state; dma_addr_t ringStatePA; struct pci_dev *dev; struct Scsi_Host *host; struct list_head cmd_pool; struct pvscsi_ctx *cmd_map; }; /* Command line parameters */ static int pvscsi_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_PER_RING; static int pvscsi_msg_ring_pages = PVSCSI_DEFAULT_NUM_PAGES_MSG_RING; static int pvscsi_cmd_per_lun = PVSCSI_DEFAULT_QUEUE_DEPTH; static bool pvscsi_disable_msi; static bool pvscsi_disable_msix; static bool pvscsi_use_msg = true; #define PVSCSI_RW (S_IRUSR | S_IWUSR) module_param_named(ring_pages, pvscsi_ring_pages, int, PVSCSI_RW); MODULE_PARM_DESC(ring_pages, "Number of pages per req/cmp ring - (default=" __stringify(PVSCSI_DEFAULT_NUM_PAGES_PER_RING) ")"); module_param_named(msg_ring_pages, pvscsi_msg_ring_pages, int, PVSCSI_RW); MODULE_PARM_DESC(msg_ring_pages, "Number of pages for the msg ring - (default=" __stringify(PVSCSI_DEFAULT_NUM_PAGES_MSG_RING) ")"); module_param_named(cmd_per_lun, pvscsi_cmd_per_lun, int, PVSCSI_RW); MODULE_PARM_DESC(cmd_per_lun, "Maximum commands per lun - (default=" __stringify(PVSCSI_MAX_REQ_QUEUE_DEPTH) ")"); module_param_named(disable_msi, pvscsi_disable_msi, bool, PVSCSI_RW); MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)"); module_param_named(disable_msix, pvscsi_disable_msix, bool, PVSCSI_RW); MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)"); 
module_param_named(use_msg, pvscsi_use_msg, bool, PVSCSI_RW);
MODULE_PARM_DESC(use_msg, "Use msg ring when available - (default=1)");

static const struct pci_device_id pvscsi_pci_tbl[] = {
	{ PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_PVSCSI) },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, pvscsi_pci_tbl);

/* Return the generic struct device for this adapter's PCI function. */
static struct device *pvscsi_dev(const struct pvscsi_adapter *adapter)
{
	return &(adapter->dev->dev);
}

/*
 * Linear search of cmd_map for the context owning @cmd.
 * Returns NULL if the command is not currently in flight.
 */
static struct pvscsi_ctx *
pvscsi_find_context(const struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
{
	struct pvscsi_ctx *ctx, *end;

	end = &adapter->cmd_map[adapter->req_depth];
	for (ctx = adapter->cmd_map; ctx < end; ctx++)
		if (ctx->cmd == cmd)
			return ctx;

	return NULL;
}

/*
 * Take a free context off the pool and bind it to @cmd.
 * Returns NULL when all contexts are in use (ring is full).
 * Caller must hold hw_lock.
 */
static struct pvscsi_ctx *
pvscsi_acquire_context(struct pvscsi_adapter *adapter, struct scsi_cmnd *cmd)
{
	struct pvscsi_ctx *ctx;

	if (list_empty(&adapter->cmd_pool))
		return NULL;

	ctx = list_first_entry(&adapter->cmd_pool, struct pvscsi_ctx, list);
	ctx->cmd = cmd;
	list_del(&ctx->list);

	return ctx;
}

/* Unbind a context from its command and return it to the free pool. */
static void pvscsi_release_context(struct pvscsi_adapter *adapter,
				   struct pvscsi_ctx *ctx)
{
	ctx->cmd = NULL;
	list_add(&ctx->list, &adapter->cmd_pool);
}

/*
 * Map a pvscsi_ctx struct to a context ID field value; we map to a simple
 * non-zero integer. ctx always points to an entry in cmd_map array, hence
 * the return value is always >=1.
 */
static u64 pvscsi_map_context(const struct pvscsi_adapter *adapter,
			      const struct pvscsi_ctx *ctx)
{
	return ctx - adapter->cmd_map + 1;
}

/* Inverse of pvscsi_map_context(): context ID -> cmd_map entry. */
static struct pvscsi_ctx *
pvscsi_get_context(const struct pvscsi_adapter *adapter, u64 context)
{
	return &adapter->cmd_map[context - 1];
}

/* MMIO register accessors for the device's BAR. */
static void pvscsi_reg_write(const struct pvscsi_adapter *adapter,
			     u32 offset, u32 val)
{
	writel(val, adapter->mmioBase + offset);
}

static u32 pvscsi_reg_read(const struct pvscsi_adapter *adapter, u32 offset)
{
	return readl(adapter->mmioBase + offset);
}

static u32 pvscsi_read_intr_status(const struct pvscsi_adapter *adapter)
{
	return pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_INTR_STATUS);
}

/* Writing the status register acknowledges (clears) the given bits. */
static void pvscsi_write_intr_status(const struct pvscsi_adapter *adapter,
				     u32 val)
{
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_STATUS, val);
}

/* Enable completion interrupts, plus msg-ring interrupts when in use. */
static void pvscsi_unmask_intr(const struct pvscsi_adapter *adapter)
{
	u32 intr_bits;

	intr_bits = PVSCSI_INTR_CMPL_MASK;
	if (adapter->use_msg)
		intr_bits |= PVSCSI_INTR_MSG_MASK;

	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, intr_bits);
}

/* Disable all device interrupts. */
static void pvscsi_mask_intr(const struct pvscsi_adapter *adapter)
{
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_INTR_MASK, 0);
}

/*
 * Issue a command to the device: write the opcode to the COMMAND register,
 * then stream the descriptor into COMMAND_DATA one u32 at a time.
 * @len is in bytes and is assumed to be a multiple of sizeof(u32).
 */
static void pvscsi_write_cmd_desc(const struct pvscsi_adapter *adapter,
				  u32 cmd, const void *desc, size_t len)
{
	const u32 *ptr = desc;
	size_t i;

	len /= sizeof(*ptr);
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND, cmd);
	for (i = 0; i < len; i++)
		pvscsi_reg_write(adapter,
				 PVSCSI_REG_OFFSET_COMMAND_DATA, ptr[i]);
}

/* Ask the device to abort the request identified by @ctx's context ID. */
static void pvscsi_abort_cmd(const struct pvscsi_adapter *adapter,
			     const struct pvscsi_ctx *ctx)
{
	struct PVSCSICmdDescAbortCmd cmd = { 0 };

	cmd.target = ctx->cmd->device->id;
	cmd.context = pvscsi_map_context(adapter, ctx);

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ABORT_CMD, &cmd, sizeof(cmd));
}

/* Doorbell: tell the device new READ/WRITE requests are on the ring. */
static void pvscsi_kick_rw_io(const struct pvscsi_adapter *adapter)
{
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_RW_IO, 0);
}

/* Doorbell for non-R/W requests; the device processes the ring synchronously. */
static void
pvscsi_process_request_ring(const struct pvscsi_adapter *adapter)
{
	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_KICK_NON_RW_IO, 0);
}

/* True when @op is one of the plain READ/WRITE CDB opcodes. */
static int scsi_is_rw(unsigned char op)
{
	return op == READ_6  || op == WRITE_6  ||
	       op == READ_10 || op == WRITE_10 ||
	       op == READ_12 || op == WRITE_12 ||
	       op == READ_16 || op == WRITE_16;
}

/* Ring the doorbell appropriate to the opcode just queued. */
static void pvscsi_kick_io(const struct pvscsi_adapter *adapter,
			   unsigned char op)
{
	if (scsi_is_rw(op))
		pvscsi_kick_rw_io(adapter);
	else
		pvscsi_process_request_ring(adapter);
}

/* Full adapter reset (emulation-level); aborts everything outstanding. */
static void ll_adapter_reset(const struct pvscsi_adapter *adapter)
{
	dev_dbg(pvscsi_dev(adapter), "Adapter Reset on %p\n", adapter);

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_ADAPTER_RESET, NULL, 0);
}

static void ll_bus_reset(const struct pvscsi_adapter *adapter)
{
	dev_dbg(pvscsi_dev(adapter), "Resetting bus on %p\n", adapter);

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_BUS, NULL, 0);
}

static void ll_device_reset(const struct pvscsi_adapter *adapter, u32 target)
{
	struct PVSCSICmdDescResetDevice cmd = { 0 };

	dev_dbg(pvscsi_dev(adapter), "Resetting device: target=%u\n", target);

	cmd.target = target;

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_RESET_DEVICE,
			      &cmd, sizeof(cmd));
}

/*
 * Copy an already DMA-mapped scatterlist into the context's device-visible
 * S/G element page. Caller guarantees count fits in one page (BUG otherwise).
 */
static void pvscsi_create_sg(struct pvscsi_ctx *ctx,
			     struct scatterlist *sg, unsigned count)
{
	unsigned i;
	struct PVSCSISGElement *sge;

	BUG_ON(count > PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT);

	sge = &ctx->sgl->sge[0];
	for (i = 0; i < count; i++, sg++) {
		sge[i].addr   = sg_dma_address(sg);
		sge[i].length = sg_dma_len(sg);
		sge[i].flags  = 0;
	}
}

/*
 * Map all data buffers for a command into PCI space and
 * setup the scatter/gather list if needed.
 */
static void pvscsi_map_buffers(struct pvscsi_adapter *adapter,
			       struct pvscsi_ctx *ctx,
			       struct scsi_cmnd *cmd,
			       struct PVSCSIRingReqDesc *e)
{
	unsigned count;
	unsigned bufflen = scsi_bufflen(cmd);
	struct scatterlist *sg;

	e->dataLen = bufflen;
	e->dataAddr = 0;
	if (bufflen == 0)
		return;

	sg = scsi_sglist(cmd);
	count = scsi_sg_count(cmd);
	if (count != 0) {
		/*
		 * NOTE(review): scsi_dma_map() can return 0 or a negative
		 * error; neither case is checked here before falling into
		 * the single-segment path — confirm against the mainline
		 * driver, which later added explicit error handling.
		 */
		int segs = scsi_dma_map(cmd);
		if (segs > 1) {
			/* Multiple segments: hand the device an S/G list. */
			pvscsi_create_sg(ctx, sg, segs);

			e->flags |= PVSCSI_FLAG_CMD_WITH_SG_LIST;
			/* pci_map_single() result is not checked for DMA
			 * mapping failure — TODO confirm acceptable here. */
			ctx->sglPA = pci_map_single(adapter->dev, ctx->sgl,
						    SGL_SIZE, PCI_DMA_TODEVICE);
			e->dataAddr = ctx->sglPA;
		} else
			e->dataAddr = sg_dma_address(sg);
	} else {
		/*
		 * In case there is no S/G list, scsi_sglist points
		 * directly to the buffer.
		 */
		ctx->dataPA = pci_map_single(adapter->dev, sg, bufflen,
					     cmd->sc_data_direction);
		e->dataAddr = ctx->dataPA;
	}
}

/* Undo every DMA mapping pvscsi_map_buffers()/pvscsi_queue_ring() created. */
static void pvscsi_unmap_buffers(const struct pvscsi_adapter *adapter,
				 struct pvscsi_ctx *ctx)
{
	struct scsi_cmnd *cmd;
	unsigned bufflen;

	cmd = ctx->cmd;
	bufflen = scsi_bufflen(cmd);

	if (bufflen != 0) {
		unsigned count = scsi_sg_count(cmd);

		if (count != 0) {
			scsi_dma_unmap(cmd);
			if (ctx->sglPA) {
				pci_unmap_single(adapter->dev, ctx->sglPA,
						 SGL_SIZE, PCI_DMA_TODEVICE);
				ctx->sglPA = 0;
			}
		} else
			pci_unmap_single(adapter->dev, ctx->dataPA, bufflen,
					 cmd->sc_data_direction);
	}
	if (cmd->sense_buffer)
		pci_unmap_single(adapter->dev, ctx->sensePA,
				 SCSI_SENSE_BUFFERSIZE, PCI_DMA_FROMDEVICE);
}

/*
 * Allocate the DMA-coherent rings state page, request ring, completion ring,
 * and (when msg is enabled) message ring. Returns -ENOMEM on any failure;
 * partially allocated rings are freed later by pvscsi_release_resources().
 */
static int pvscsi_allocate_rings(struct pvscsi_adapter *adapter)
{
	adapter->rings_state = pci_alloc_consistent(adapter->dev, PAGE_SIZE,
						    &adapter->ringStatePA);
	if (!adapter->rings_state)
		return -ENOMEM;

	adapter->req_pages = min(PVSCSI_MAX_NUM_PAGES_REQ_RING,
				 pvscsi_ring_pages);
	adapter->req_depth = adapter->req_pages
					* PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE;
	adapter->req_ring = pci_alloc_consistent(adapter->dev,
						 adapter->req_pages * PAGE_SIZE,
						 &adapter->reqRingPA);
	if (!adapter->req_ring)
		return -ENOMEM;

	adapter->cmp_pages = min(PVSCSI_MAX_NUM_PAGES_CMP_RING,
				 pvscsi_ring_pages);
	adapter->cmp_ring = pci_alloc_consistent(adapter->dev,
						 adapter->cmp_pages * PAGE_SIZE,
						 &adapter->cmpRingPA);
	if (!adapter->cmp_ring)
		return -ENOMEM;

	/* The device consumes page frame numbers, so all rings must be
	 * page-aligned. */
	BUG_ON(!IS_ALIGNED(adapter->ringStatePA, PAGE_SIZE));
	BUG_ON(!IS_ALIGNED(adapter->reqRingPA, PAGE_SIZE));
	BUG_ON(!IS_ALIGNED(adapter->cmpRingPA, PAGE_SIZE));

	if (!adapter->use_msg)
		return 0;

	adapter->msg_pages = min(PVSCSI_MAX_NUM_PAGES_MSG_RING,
				 pvscsi_msg_ring_pages);
	adapter->msg_ring = pci_alloc_consistent(adapter->dev,
						 adapter->msg_pages * PAGE_SIZE,
						 &adapter->msgRingPA);
	if (!adapter->msg_ring)
		return -ENOMEM;
	BUG_ON(!IS_ALIGNED(adapter->msgRingPA, PAGE_SIZE));

	return 0;
}

/*
 * Describe every ring page to the device (as PPNs) via SETUP_RINGS /
 * SETUP_MSG_RING, zeroing the rings first. Also used after a host reset
 * to re-establish the rings.
 */
static void pvscsi_setup_all_rings(const struct pvscsi_adapter *adapter)
{
	struct PVSCSICmdDescSetupRings cmd = { 0 };
	dma_addr_t base;
	unsigned i;

	cmd.ringsStatePPN   = adapter->ringStatePA >> PAGE_SHIFT;
	cmd.reqRingNumPages = adapter->req_pages;
	cmd.cmpRingNumPages = adapter->cmp_pages;

	base = adapter->reqRingPA;
	for (i = 0; i < adapter->req_pages; i++) {
		cmd.reqRingPPNs[i] = base >> PAGE_SHIFT;
		base += PAGE_SIZE;
	}

	base = adapter->cmpRingPA;
	for (i = 0; i < adapter->cmp_pages; i++) {
		cmd.cmpRingPPNs[i] = base >> PAGE_SHIFT;
		base += PAGE_SIZE;
	}

	memset(adapter->rings_state, 0, PAGE_SIZE);
	memset(adapter->req_ring, 0, adapter->req_pages * PAGE_SIZE);
	memset(adapter->cmp_ring, 0, adapter->cmp_pages * PAGE_SIZE);

	pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_RINGS,
			      &cmd, sizeof(cmd));

	if (adapter->use_msg) {
		struct PVSCSICmdDescSetupMsgRing cmd_msg = { 0 };

		cmd_msg.numPages = adapter->msg_pages;

		base = adapter->msgRingPA;
		for (i = 0; i < adapter->msg_pages; i++) {
			cmd_msg.ringPPNs[i] = base >> PAGE_SHIFT;
			base += PAGE_SIZE;
		}
		memset(adapter->msg_ring, 0,
		       adapter->msg_pages * PAGE_SIZE);

		pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_SETUP_MSG_RING,
				      &cmd_msg, sizeof(cmd_msg));
	}
}

/*
 * Pull a completion descriptor off and pass the completion back
 * to the SCSI mid layer.
 */
static void pvscsi_complete_request(struct pvscsi_adapter *adapter,
				    const struct PVSCSIRingCmpDesc *e)
{
	struct pvscsi_ctx *ctx;
	struct scsi_cmnd *cmd;
	u32 btstat = e->hostStatus;	/* host/transport-level status */
	u32 sdstat = e->scsiStatus;	/* target's SCSI status byte   */

	ctx = pvscsi_get_context(adapter, e->context);
	cmd = ctx->cmd;
	pvscsi_unmap_buffers(adapter, ctx);
	pvscsi_release_context(adapter, ctx);
	cmd->result = 0;

	if (sdstat != SAM_STAT_GOOD &&
	    (btstat == BTSTAT_SUCCESS ||
	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED ||
	     btstat == BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG)) {
		/* Transport OK but the target returned a non-GOOD status. */
		cmd->result = (DID_OK << 16) | sdstat;
		if (sdstat == SAM_STAT_CHECK_CONDITION && cmd->sense_buffer)
			cmd->result |= (DRIVER_SENSE << 24);
	} else
		switch (btstat) {
		case BTSTAT_SUCCESS:
		case BTSTAT_LINKED_COMMAND_COMPLETED:
		case BTSTAT_LINKED_COMMAND_COMPLETED_WITH_FLAG:
			/* If everything went fine, let's move on..  */
			cmd->result = (DID_OK << 16);
			break;

		case BTSTAT_DATARUN:
		case BTSTAT_DATA_UNDERRUN:
			/* Report residual data in underruns */
			scsi_set_resid(cmd, scsi_bufflen(cmd) - e->dataLen);
			cmd->result = (DID_ERROR << 16);
			break;

		case BTSTAT_SELTIMEO:
			/* Our emulation returns this for non-connected devs */
			cmd->result = (DID_BAD_TARGET << 16);
			break;

		case BTSTAT_LUNMISMATCH:
		case BTSTAT_TAGREJECT:
		case BTSTAT_BADMSG:
			cmd->result = (DRIVER_INVALID << 24);
			/* fall through */

		case BTSTAT_HAHARDWARE:
		case BTSTAT_INVPHASE:
		case BTSTAT_HATIMEOUT:
		case BTSTAT_NORESPONSE:
		case BTSTAT_DISCONNECT:
		case BTSTAT_HASOFTWARE:
		case BTSTAT_BUSFREE:
		case BTSTAT_SENSFAILED:
			cmd->result |= (DID_ERROR << 16);
			break;

		case BTSTAT_SENTRST:
		case BTSTAT_RECVRST:
		case BTSTAT_BUSRESET:
			cmd->result = (DID_RESET << 16);
			break;

		case BTSTAT_ABORTQUEUE:
			cmd->result = (DID_ABORT << 16);
			break;

		case BTSTAT_SCSIPARITY:
			cmd->result = (DID_PARITY << 16);
			break;

		default:
			cmd->result = (DID_ERROR << 16);
			scmd_printk(KERN_DEBUG, cmd,
				    "Unknown completion status: 0x%x\n",
				    btstat);
	}

	dev_dbg(&cmd->device->sdev_gendev,
		"cmd=%p %x ctx=%p result=0x%x status=0x%x,%x\n",
		cmd, cmd->cmnd[0], ctx, cmd->result, btstat, sdstat);

	cmd->scsi_done(cmd);
}

/*
 * barrier usage : Since the PVSCSI device is emulated, there could be cases
 * where we may want to serialize some accesses between the driver and the
 * emulation layer.  We use compiler barriers instead of the more expensive
 * memory barriers because PVSCSI is only supported on X86 which has strong
 * memory access ordering.
 */
static void pvscsi_process_completion_ring(struct pvscsi_adapter *adapter)
{
	struct PVSCSIRingsState *s = adapter->rings_state;
	struct PVSCSIRingCmpDesc *ring = adapter->cmp_ring;
	u32 cmp_entries = s->cmpNumEntriesLog2;

	while (s->cmpConsIdx != s->cmpProdIdx) {
		struct PVSCSIRingCmpDesc *e = ring + (s->cmpConsIdx &
						      MASK(cmp_entries));
		/*
		 * This barrier() ensures that *e is not dereferenced while
		 * the device emulation still writes data into the slot.
		 * Since the device emulation advances s->cmpProdIdx only after
		 * updating the slot we want to check it first.
		 */
		barrier();
		pvscsi_complete_request(adapter, e);
		/*
		 * This barrier() ensures that compiler doesn't reorder write
		 * to s->cmpConsIdx before the read of (*e) inside
		 * pvscsi_complete_request. Otherwise, device emulation may
		 * overwrite *e before we had a chance to read it.
		 */
		barrier();
		s->cmpConsIdx++;
	}
}

/*
 * Translate a Linux SCSI request into a request ring entry.
 */
static int pvscsi_queue_ring(struct pvscsi_adapter *adapter,
			     struct pvscsi_ctx *ctx, struct scsi_cmnd *cmd)
{
	struct PVSCSIRingsState *s;
	struct PVSCSIRingReqDesc *e;
	struct scsi_device *sdev;
	u32 req_entries;

	s = adapter->rings_state;
	sdev = cmd->device;
	req_entries = s->reqNumEntriesLog2;

	/*
	 * If this condition holds, we might have room on the request ring, but
	 * we might not have room on the completion ring for the response.
	 * However, we have already ruled out this possibility - we would not
	 * have successfully allocated a context if it were true, since we only
	 * have one context per request entry.  Check for it anyway, since it
	 * would be a serious bug.
	 */
	if (s->reqProdIdx - s->cmpConsIdx >= 1 << req_entries) {
		scmd_printk(KERN_ERR, cmd, "vmw_pvscsi: "
			    "ring full: reqProdIdx=%d cmpConsIdx=%d\n",
			    s->reqProdIdx, s->cmpConsIdx);
		return -1;
	}

	e = adapter->req_ring + (s->reqProdIdx & MASK(req_entries));

	e->bus    = sdev->channel;
	e->target = sdev->id;
	memset(e->lun, 0, sizeof(e->lun));
	e->lun[1] = sdev->lun;

	if (cmd->sense_buffer) {
		/* NOTE(review): pci_map_single() return value is not checked
		 * for DMA-mapping failure here — TODO confirm. */
		ctx->sensePA = pci_map_single(adapter->dev, cmd->sense_buffer,
					      SCSI_SENSE_BUFFERSIZE,
					      PCI_DMA_FROMDEVICE);
		e->senseAddr = ctx->sensePA;
		e->senseLen = SCSI_SENSE_BUFFERSIZE;
	} else {
		e->senseLen  = 0;
		e->senseAddr = 0;
	}

	e->cdbLen   = cmd->cmd_len;
	e->vcpuHint = smp_processor_id();
	memcpy(e->cdb, cmd->cmnd, e->cdbLen);

	e->tag = SIMPLE_QUEUE_TAG;
	if (sdev->tagged_supported &&
	    (cmd->tag == HEAD_OF_QUEUE_TAG ||
	     cmd->tag == ORDERED_QUEUE_TAG))
		e->tag = cmd->tag;

	if (cmd->sc_data_direction == DMA_FROM_DEVICE)
		e->flags = PVSCSI_FLAG_CMD_DIR_TOHOST;
	else if (cmd->sc_data_direction == DMA_TO_DEVICE)
		e->flags = PVSCSI_FLAG_CMD_DIR_TODEVICE;
	else if (cmd->sc_data_direction == DMA_NONE)
		e->flags = PVSCSI_FLAG_CMD_DIR_NONE;
	else
		e->flags = 0;

	pvscsi_map_buffers(adapter, ctx, cmd, e);

	e->context = pvscsi_map_context(adapter, ctx);

	/* Publish the descriptor before making it visible via reqProdIdx. */
	barrier();

	s->reqProdIdx++;

	return 0;
}

/*
 * queuecommand entry (locked variant, wrapped by DEF_SCSI_QCMD below).
 * Acquires a context and a ring slot under hw_lock, then rings the
 * doorbell outside the lock.
 */
static int pvscsi_queue_lck(struct scsi_cmnd *cmd,
			    void (*done)(struct scsi_cmnd *))
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	struct pvscsi_ctx *ctx;
	unsigned long flags;

	spin_lock_irqsave(&adapter->hw_lock, flags);

	ctx = pvscsi_acquire_context(adapter, cmd);
	if (!ctx || pvscsi_queue_ring(adapter, ctx, cmd) != 0) {
		if (ctx)
			pvscsi_release_context(adapter, ctx);
		spin_unlock_irqrestore(&adapter->hw_lock, flags);
		return SCSI_MLQUEUE_HOST_BUSY;
	}

	cmd->scsi_done = done;

	dev_dbg(&cmd->device->sdev_gendev,
		"queued cmd %p, ctx %p, op=%x\n", cmd, ctx, cmd->cmnd[0]);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	pvscsi_kick_io(adapter, cmd->cmnd[0]);

	return 0;
}

static DEF_SCSI_QCMD(pvscsi_queue)

/*
 * eh_abort_handler: drain the completion ring first (the command may
 * already be done), then ask the device to abort and drain again.
 * Always reports SUCCESS.
 */
static int pvscsi_abort(struct scsi_cmnd *cmd)
{
	struct pvscsi_adapter *adapter = shost_priv(cmd->device->host);
	struct pvscsi_ctx *ctx;
	unsigned long flags;

	scmd_printk(KERN_DEBUG, cmd, "task abort on host %u, %p\n",
		    adapter->host->host_no, cmd);

	spin_lock_irqsave(&adapter->hw_lock, flags);

	/*
	 * Poll the completion ring first - we might be trying to abort
	 * a command that is waiting to be dispatched in the completion ring.
	 */
	pvscsi_process_completion_ring(adapter);

	/*
	 * If there is no context for the command, it either already succeeded
	 * or else was never properly issued.  Not our problem.
	 */
	ctx = pvscsi_find_context(adapter, cmd);
	if (!ctx) {
		scmd_printk(KERN_DEBUG, cmd, "Failed to abort cmd %p\n", cmd);
		goto out;
	}

	pvscsi_abort_cmd(adapter, ctx);

	pvscsi_process_completion_ring(adapter);

out:
	spin_unlock_irqrestore(&adapter->hw_lock, flags);
	return SUCCESS;
}

/*
 * Abort all outstanding requests.  This is only safe to use if the completion
 * ring will never be walked again or the device has been reset, because it
 * destroys the 1-1 mapping between context field passed to emulation and our
 * request structure.
 */
static void pvscsi_reset_all(struct pvscsi_adapter *adapter)
{
	unsigned i;

	for (i = 0; i < adapter->req_depth; i++) {
		struct pvscsi_ctx *ctx = &adapter->cmd_map[i];
		struct scsi_cmnd *cmd = ctx->cmd;
		if (cmd) {
			scmd_printk(KERN_ERR, cmd,
				    "Forced reset on cmd %p\n", cmd);
			pvscsi_unmap_buffers(adapter, ctx);
			pvscsi_release_context(adapter, ctx);
			cmd->result = (DID_RESET << 16);
			cmd->scsi_done(cmd);
		}
	}
}

/*
 * eh_host_reset_handler: quiesce the msg workqueue, flush in-flight work,
 * reset the adapter, complete/fail everything outstanding, then rebuild
 * the rings and re-enable interrupts.
 */
static int pvscsi_host_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	unsigned long flags;
	bool use_msg;

	scmd_printk(KERN_INFO, cmd, "SCSI Host reset\n");

	spin_lock_irqsave(&adapter->hw_lock, flags);

	use_msg = adapter->use_msg;

	if (use_msg) {
		/* Temporarily disable msg so the ISR stops queueing work,
		 * then flush with the lock dropped (flush may sleep). */
		adapter->use_msg = 0;
		spin_unlock_irqrestore(&adapter->hw_lock, flags);

		/*
		 * Now that we know that the ISR won't add more work on the
		 * workqueue we can safely flush any outstanding work.
		 */
		flush_workqueue(adapter->workqueue);
		spin_lock_irqsave(&adapter->hw_lock, flags);
	}

	/*
	 * We're going to tear down the entire ring structure and set it back
	 * up, so stalling new requests until all completions are flushed and
	 * the rings are back in place.
	 */

	pvscsi_process_request_ring(adapter);

	ll_adapter_reset(adapter);

	/*
	 * Now process any completions.  Note we do this AFTER adapter reset,
	 * which is strange, but stops races where completions get posted
	 * between processing the ring and issuing the reset.  The backend will
	 * not touch the ring memory after reset, so the immediately pre-reset
	 * completion ring state is still valid.
	 */
	pvscsi_process_completion_ring(adapter);

	pvscsi_reset_all(adapter);

	adapter->use_msg = use_msg;

	pvscsi_setup_all_rings(adapter);

	pvscsi_unmask_intr(adapter);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	return SUCCESS;
}

/* eh_bus_reset_handler: flush requests, reset the bus, reap completions. */
static int pvscsi_bus_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	unsigned long flags;

	scmd_printk(KERN_INFO, cmd, "SCSI Bus reset\n");

	/*
	 * We don't want to queue new requests for this bus after
	 * flushing all pending requests to emulation, since new
	 * requests could then sneak in during this bus reset phase,
	 * so take the lock now.
	 */
	spin_lock_irqsave(&adapter->hw_lock, flags);

	pvscsi_process_request_ring(adapter);

	ll_bus_reset(adapter);

	pvscsi_process_completion_ring(adapter);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	return SUCCESS;
}

/* eh_device_reset_handler: same pattern as bus reset, scoped to one target. */
static int pvscsi_device_reset(struct scsi_cmnd *cmd)
{
	struct Scsi_Host *host = cmd->device->host;
	struct pvscsi_adapter *adapter = shost_priv(host);
	unsigned long flags;

	scmd_printk(KERN_INFO, cmd, "SCSI device reset on scsi%u:%u\n",
		    host->host_no, cmd->device->id);

	/*
	 * We don't want to queue new requests for this device after flushing
	 * all pending requests to emulation, since new requests could then
	 * sneak in during this device reset phase, so take the lock now.
	 */
	spin_lock_irqsave(&adapter->hw_lock, flags);

	pvscsi_process_request_ring(adapter);

	ll_device_reset(adapter, cmd->device->id);

	pvscsi_process_completion_ring(adapter);

	spin_unlock_irqrestore(&adapter->hw_lock, flags);

	return SUCCESS;
}

static struct scsi_host_template pvscsi_template;

/*
 * Host info string. Note: uses a static buffer, so concurrent callers
 * would race — this matches the historical behavior of this driver.
 */
static const char *pvscsi_info(struct Scsi_Host *host)
{
	struct pvscsi_adapter *adapter = shost_priv(host);
	static char buf[256];

	sprintf(buf, "VMware PVSCSI storage adapter rev %d, req/cmp/msg rings: "
		"%u/%u/%u pages, cmd_per_lun=%u", adapter->rev,
		adapter->req_pages, adapter->cmp_pages, adapter->msg_pages,
		pvscsi_template.cmd_per_lun);

	return buf;
}

static struct scsi_host_template pvscsi_template = {
	.module				= THIS_MODULE,
	.name				= "VMware PVSCSI Host Adapter",
	.proc_name			= "vmw_pvscsi",
	.info				= pvscsi_info,
	.queuecommand			= pvscsi_queue,
	.this_id			= -1,
	.sg_tablesize			= PVSCSI_MAX_NUM_SG_ENTRIES_PER_SEGMENT,
	.dma_boundary			= UINT_MAX,
	.max_sectors			= 0xffff,
	.use_clustering			= ENABLE_CLUSTERING,
	.eh_abort_handler		= pvscsi_abort,
	.eh_device_reset_handler	= pvscsi_device_reset,
	.eh_bus_reset_handler		= pvscsi_bus_reset,
	.eh_host_reset_handler		= pvscsi_host_reset,
};

/*
 * Handle one message-ring event: hot add/remove of a device, reported by
 * the hypervisor. Runs from the msg workqueue, so it may sleep.
 */
static void pvscsi_process_msg(const struct pvscsi_adapter *adapter,
			       const struct PVSCSIRingMsgDesc *e)
{
	struct PVSCSIRingsState *s = adapter->rings_state;
	struct Scsi_Host *host = adapter->host;
	struct scsi_device *sdev;

	printk(KERN_INFO "vmw_pvscsi: msg type: 0x%x - MSG RING: %u/%u (%u) \n",
	       e->type, s->msgProdIdx, s->msgConsIdx, s->msgNumEntriesLog2);

	/* Guard against new message types being added without handling. */
	BUILD_BUG_ON(PVSCSI_MSG_LAST != 2);

	if (e->type == PVSCSI_MSG_DEV_ADDED) {
		struct PVSCSIMsgDescDevStatusChanged *desc;
		desc = (struct PVSCSIMsgDescDevStatusChanged *)e;

		printk(KERN_INFO
		       "vmw_pvscsi: msg: device added at scsi%u:%u:%u\n",
		       desc->bus, desc->target, desc->lun[1]);

		if (!scsi_host_get(host))
			return;

		sdev = scsi_device_lookup(host, desc->bus, desc->target,
					  desc->lun[1]);
		if (sdev) {
			printk(KERN_INFO "vmw_pvscsi: device already exists\n");
			scsi_device_put(sdev);
		} else
			scsi_add_device(adapter->host, desc->bus,
					desc->target, desc->lun[1]);

		scsi_host_put(host);
	} else if (e->type == PVSCSI_MSG_DEV_REMOVED) {
		struct PVSCSIMsgDescDevStatusChanged *desc;
		desc = (struct PVSCSIMsgDescDevStatusChanged *)e;

		printk(KERN_INFO
		       "vmw_pvscsi: msg: device removed at scsi%u:%u:%u\n",
		       desc->bus, desc->target, desc->lun[1]);

		if (!scsi_host_get(host))
			return;

		sdev = scsi_device_lookup(host, desc->bus, desc->target,
					  desc->lun[1]);
		if (sdev) {
			scsi_remove_device(sdev);
			scsi_device_put(sdev);
		} else
			printk(KERN_INFO
			       "vmw_pvscsi: failed to lookup scsi%u:%u:%u\n",
			       desc->bus, desc->target, desc->lun[1]);

		scsi_host_put(host);
	}
}

/* True when the device has published messages we have not consumed yet. */
static int pvscsi_msg_pending(const struct pvscsi_adapter *adapter)
{
	struct PVSCSIRingsState *s = adapter->rings_state;

	return s->msgProdIdx != s->msgConsIdx;
}

/* Drain the message ring; barrier discipline mirrors the completion ring. */
static void pvscsi_process_msg_ring(const struct pvscsi_adapter *adapter)
{
	struct PVSCSIRingsState *s = adapter->rings_state;
	struct PVSCSIRingMsgDesc *ring = adapter->msg_ring;
	u32 msg_entries = s->msgNumEntriesLog2;

	while (pvscsi_msg_pending(adapter)) {
		struct PVSCSIRingMsgDesc *e = ring + (s->msgConsIdx &
						      MASK(msg_entries));

		barrier();
		pvscsi_process_msg(adapter, e);
		barrier();
		s->msgConsIdx++;
	}
}

/* Workqueue entry point: deferred (sleepable) message-ring processing. */
static void pvscsi_msg_workqueue_handler(struct work_struct *data)
{
	struct pvscsi_adapter *adapter;

	adapter = container_of(data, struct pvscsi_adapter, work);

	pvscsi_process_msg_ring(adapter);
}

/*
 * Probe whether the device supports a msg ring (SETUP_MSG_RING command
 * status != -1) and, if so, create the single-threaded workqueue that
 * will service it. Returns 1 when msg support is enabled, 0 otherwise.
 */
static int pvscsi_setup_msg_workqueue(struct pvscsi_adapter *adapter)
{
	char name[32];

	if (!pvscsi_use_msg)
		return 0;

	pvscsi_reg_write(adapter, PVSCSI_REG_OFFSET_COMMAND,
			 PVSCSI_CMD_SETUP_MSG_RING);

	if (pvscsi_reg_read(adapter, PVSCSI_REG_OFFSET_COMMAND_STATUS) == -1)
		return 0;

	snprintf(name, sizeof(name),
		 "vmw_pvscsi_wq_%u", adapter->host->host_no);

	adapter->workqueue = create_singlethread_workqueue(name);
	if (!adapter->workqueue) {
		printk(KERN_ERR "vmw_pvscsi: failed to create work queue\n");
		return 0;
	}
	INIT_WORK(&adapter->work, pvscsi_msg_workqueue_handler);

	return 1;
}
static irqreturn_t pvscsi_isr(int irq, void *devp) { struct pvscsi_adapter *adapter = devp; int handled; if (adapter->use_msi || adapter->use_msix) handled = true; else { u32 val = pvscsi_read_intr_status(adapter); handled = (val & PVSCSI_INTR_ALL_SUPPORTED) != 0; if (handled) pvscsi_write_intr_status(devp, val); } if (handled) { unsigned long flags; spin_lock_irqsave(&adapter->hw_lock, flags); pvscsi_process_completion_ring(adapter); if (adapter->use_msg && pvscsi_msg_pending(adapter)) queue_work(adapter->workqueue, &adapter->work); spin_unlock_irqrestore(&adapter->hw_lock, flags); } return IRQ_RETVAL(handled); } static void pvscsi_free_sgls(const struct pvscsi_adapter *adapter) { struct pvscsi_ctx *ctx = adapter->cmd_map; unsigned i; for (i = 0; i < adapter->req_depth; ++i, ++ctx) free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE)); } static int pvscsi_setup_msix(const struct pvscsi_adapter *adapter, unsigned int *irq) { struct msix_entry entry = { 0, PVSCSI_VECTOR_COMPLETION }; int ret; ret = pci_enable_msix(adapter->dev, &entry, 1); if (ret) return ret; *irq = entry.vector; return 0; } static void pvscsi_shutdown_intr(struct pvscsi_adapter *adapter) { if (adapter->irq) { free_irq(adapter->irq, adapter); adapter->irq = 0; } if (adapter->use_msi) { pci_disable_msi(adapter->dev); adapter->use_msi = 0; } else if (adapter->use_msix) { pci_disable_msix(adapter->dev); adapter->use_msix = 0; } } static void pvscsi_release_resources(struct pvscsi_adapter *adapter) { pvscsi_shutdown_intr(adapter); if (adapter->workqueue) destroy_workqueue(adapter->workqueue); if (adapter->mmioBase) pci_iounmap(adapter->dev, adapter->mmioBase); pci_release_regions(adapter->dev); if (adapter->cmd_map) { pvscsi_free_sgls(adapter); kfree(adapter->cmd_map); } if (adapter->rings_state) pci_free_consistent(adapter->dev, PAGE_SIZE, adapter->rings_state, adapter->ringStatePA); if (adapter->req_ring) pci_free_consistent(adapter->dev, adapter->req_pages * PAGE_SIZE, adapter->req_ring, 
adapter->reqRingPA); if (adapter->cmp_ring) pci_free_consistent(adapter->dev, adapter->cmp_pages * PAGE_SIZE, adapter->cmp_ring, adapter->cmpRingPA); if (adapter->msg_ring) pci_free_consistent(adapter->dev, adapter->msg_pages * PAGE_SIZE, adapter->msg_ring, adapter->msgRingPA); } /* * Allocate scatter gather lists. * * These are statically allocated. Trying to be clever was not worth it. * * Dynamic allocation can fail, and we can't go deep into the memory * allocator, since we're a SCSI driver, and trying too hard to allocate * memory might generate disk I/O. We also don't want to fail disk I/O * in that case because we can't get an allocation - the I/O could be * trying to swap out data to free memory. Since that is pathological, * just use a statically allocated scatter list. * */ static int pvscsi_allocate_sg(struct pvscsi_adapter *adapter) { struct pvscsi_ctx *ctx; int i; ctx = adapter->cmd_map; BUILD_BUG_ON(sizeof(struct pvscsi_sg_list) > SGL_SIZE); for (i = 0; i < adapter->req_depth; ++i, ++ctx) { ctx->sgl = (void *)__get_free_pages(GFP_KERNEL, get_order(SGL_SIZE)); ctx->sglPA = 0; BUG_ON(!IS_ALIGNED(((unsigned long)ctx->sgl), PAGE_SIZE)); if (!ctx->sgl) { for (; i >= 0; --i, --ctx) { free_pages((unsigned long)ctx->sgl, get_order(SGL_SIZE)); ctx->sgl = NULL; } return -ENOMEM; } } return 0; } /* * Query the device, fetch the config info and return the * maximum number of targets on the adapter. In case of * failure due to any reason return default i.e. 16. */ static u32 pvscsi_get_max_targets(struct pvscsi_adapter *adapter) { struct PVSCSICmdDescConfigCmd cmd; struct PVSCSIConfigPageHeader *header; struct device *dev; dma_addr_t configPagePA; void *config_page; u32 numPhys = 16; dev = pvscsi_dev(adapter); config_page = pci_alloc_consistent(adapter->dev, PAGE_SIZE, &configPagePA); if (!config_page) { dev_warn(dev, "vmw_pvscsi: failed to allocate memory for config page\n"); goto exit; } BUG_ON(configPagePA & ~PAGE_MASK); /* Fetch config info from the device. 
*/ cmd.configPageAddress = ((u64)PVSCSI_CONFIG_CONTROLLER_ADDRESS) << 32; cmd.configPageNum = PVSCSI_CONFIG_PAGE_CONTROLLER; cmd.cmpAddr = configPagePA; cmd._pad = 0; /* * Mark the completion page header with error values. If the device * completes the command successfully, it sets the status values to * indicate success. */ header = config_page; memset(header, 0, sizeof *header); header->hostStatus = BTSTAT_INVPARAM; header->scsiStatus = SDSTAT_CHECK; pvscsi_write_cmd_desc(adapter, PVSCSI_CMD_CONFIG, &cmd, sizeof cmd); if (header->hostStatus == BTSTAT_SUCCESS && header->scsiStatus == SDSTAT_GOOD) { struct PVSCSIConfigPageController *config; config = config_page; numPhys = config->numPhys; } else dev_warn(dev, "vmw_pvscsi: PVSCSI_CMD_CONFIG failed. hostStatus = 0x%x, scsiStatus = 0x%x\n", header->hostStatus, header->scsiStatus); pci_free_consistent(adapter->dev, PAGE_SIZE, config_page, configPagePA); exit: return numPhys; } static int pvscsi_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct pvscsi_adapter *adapter; struct Scsi_Host *host; struct device *dev; unsigned int i; unsigned long flags = 0; int error; error = -ENODEV; if (pci_enable_device(pdev)) return error; if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0 && pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) { printk(KERN_INFO "vmw_pvscsi: using 64bit dma\n"); } else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) == 0 && pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32)) == 0) { printk(KERN_INFO "vmw_pvscsi: using 32bit dma\n"); } else { printk(KERN_ERR "vmw_pvscsi: failed to set DMA mask\n"); goto out_disable_device; } pvscsi_template.can_queue = min(PVSCSI_MAX_NUM_PAGES_REQ_RING, pvscsi_ring_pages) * PVSCSI_MAX_NUM_REQ_ENTRIES_PER_PAGE; pvscsi_template.cmd_per_lun = min(pvscsi_template.can_queue, pvscsi_cmd_per_lun); host = scsi_host_alloc(&pvscsi_template, sizeof(struct pvscsi_adapter)); if (!host) { printk(KERN_ERR "vmw_pvscsi: failed to allocate host\n"); goto 
out_disable_device; } adapter = shost_priv(host); memset(adapter, 0, sizeof(*adapter)); adapter->dev = pdev; adapter->host = host; spin_lock_init(&adapter->hw_lock); host->max_channel = 0; host->max_id = 16; host->max_lun = 1; host->max_cmd_len = 16; adapter->rev = pdev->revision; if (pci_request_regions(pdev, "vmw_pvscsi")) { printk(KERN_ERR "vmw_pvscsi: pci memory selection failed\n"); goto out_free_host; } for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { if ((pci_resource_flags(pdev, i) & PCI_BASE_ADDRESS_SPACE_IO)) continue; if (pci_resource_len(pdev, i) < PVSCSI_MEM_SPACE_SIZE) continue; break; } if (i == DEVICE_COUNT_RESOURCE) { printk(KERN_ERR "vmw_pvscsi: adapter has no suitable MMIO region\n"); goto out_release_resources; } adapter->mmioBase = pci_iomap(pdev, i, PVSCSI_MEM_SPACE_SIZE); if (!adapter->mmioBase) { printk(KERN_ERR "vmw_pvscsi: can't iomap for BAR %d memsize %lu\n", i, PVSCSI_MEM_SPACE_SIZE); goto out_release_resources; } pci_set_master(pdev); pci_set_drvdata(pdev, host); ll_adapter_reset(adapter); adapter->use_msg = pvscsi_setup_msg_workqueue(adapter); error = pvscsi_allocate_rings(adapter); if (error) { printk(KERN_ERR "vmw_pvscsi: unable to allocate ring memory\n"); goto out_release_resources; } /* * Ask the device for max number of targets. */ host->max_id = pvscsi_get_max_targets(adapter); dev = pvscsi_dev(adapter); dev_info(dev, "vmw_pvscsi: host->max_id: %u\n", host->max_id); /* * From this point on we should reset the adapter if anything goes * wrong. 
*/ pvscsi_setup_all_rings(adapter); adapter->cmd_map = kcalloc(adapter->req_depth, sizeof(struct pvscsi_ctx), GFP_KERNEL); if (!adapter->cmd_map) { printk(KERN_ERR "vmw_pvscsi: failed to allocate memory.\n"); error = -ENOMEM; goto out_reset_adapter; } INIT_LIST_HEAD(&adapter->cmd_pool); for (i = 0; i < adapter->req_depth; i++) { struct pvscsi_ctx *ctx = adapter->cmd_map + i; list_add(&ctx->list, &adapter->cmd_pool); } error = pvscsi_allocate_sg(adapter); if (error) { printk(KERN_ERR "vmw_pvscsi: unable to allocate s/g table\n"); goto out_reset_adapter; } if (!pvscsi_disable_msix && pvscsi_setup_msix(adapter, &adapter->irq) == 0) { printk(KERN_INFO "vmw_pvscsi: using MSI-X\n"); adapter->use_msix = 1; } else if (!pvscsi_disable_msi && pci_enable_msi(pdev) == 0) { printk(KERN_INFO "vmw_pvscsi: using MSI\n"); adapter->use_msi = 1; adapter->irq = pdev->irq; } else { printk(KERN_INFO "vmw_pvscsi: using INTx\n"); adapter->irq = pdev->irq; flags = IRQF_SHARED; } error = request_irq(adapter->irq, pvscsi_isr, flags, "vmw_pvscsi", adapter); if (error) { printk(KERN_ERR "vmw_pvscsi: unable to request IRQ: %d\n", error); adapter->irq = 0; goto out_reset_adapter; } error = scsi_add_host(host, &pdev->dev); if (error) { printk(KERN_ERR "vmw_pvscsi: scsi_add_host failed: %d\n", error); goto out_reset_adapter; } dev_info(&pdev->dev, "VMware PVSCSI rev %d host #%u\n", adapter->rev, host->host_no); pvscsi_unmask_intr(adapter); scsi_scan_host(host); return 0; out_reset_adapter: ll_adapter_reset(adapter); out_release_resources: pvscsi_release_resources(adapter); out_free_host: scsi_host_put(host); out_disable_device: pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); return error; } static void __pvscsi_shutdown(struct pvscsi_adapter *adapter) { pvscsi_mask_intr(adapter); if (adapter->workqueue) flush_workqueue(adapter->workqueue); pvscsi_shutdown_intr(adapter); pvscsi_process_request_ring(adapter); pvscsi_process_completion_ring(adapter); ll_adapter_reset(adapter); } static void 
pvscsi_shutdown(struct pci_dev *dev) { struct Scsi_Host *host = pci_get_drvdata(dev); struct pvscsi_adapter *adapter = shost_priv(host); __pvscsi_shutdown(adapter); } static void pvscsi_remove(struct pci_dev *pdev) { struct Scsi_Host *host = pci_get_drvdata(pdev); struct pvscsi_adapter *adapter = shost_priv(host); scsi_remove_host(host); __pvscsi_shutdown(adapter); pvscsi_release_resources(adapter); scsi_host_put(host); pci_set_drvdata(pdev, NULL); pci_disable_device(pdev); } static struct pci_driver pvscsi_pci_driver = { .name = "vmw_pvscsi", .id_table = pvscsi_pci_tbl, .probe = pvscsi_probe, .remove = pvscsi_remove, .shutdown = pvscsi_shutdown, }; static int __init pvscsi_init(void) { pr_info("%s - version %s\n", PVSCSI_LINUX_DRIVER_DESC, PVSCSI_DRIVER_VERSION_STRING); return pci_register_driver(&pvscsi_pci_driver); } static void __exit pvscsi_exit(void) { pci_unregister_driver(&pvscsi_pci_driver); } module_init(pvscsi_init); module_exit(pvscsi_exit);
gpl-2.0
ariafan/S5501-3.10
arch/arm/mach-omap2/cclock2430_data.c
2641
55513
/* * OMAP2430 clock data * * Copyright (C) 2005-2009, 2012 Texas Instruments, Inc. * Copyright (C) 2004-2011 Nokia Corporation * * Contacts: * Richard Woodruff <r-woodruff2@ti.com> * Paul Walmsley * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. */ #include <linux/kernel.h> #include <linux/clk.h> #include <linux/clk-private.h> #include <linux/list.h> #include "soc.h" #include "iomap.h" #include "clock.h" #include "clock2xxx.h" #include "opp2xxx.h" #include "cm2xxx.h" #include "prm2xxx.h" #include "prm-regbits-24xx.h" #include "cm-regbits-24xx.h" #include "sdrc.h" #include "control.h" #define OMAP_CM_REGADDR OMAP2430_CM_REGADDR /* * 2430 clock tree. * * NOTE:In many cases here we are assigning a 'default' parent. In * many cases the parent is selectable. The set parent calls will * also switch sources. * * Several sources are given initial rates which may be wrong, this will * be fixed up in the init func. * * Things are broadly separated below by clock domains. It is * noteworthy that most peripherals have dependencies on multiple clock * domains. Many get their interface clocks from the L4 domain, but get * functional clocks from fixed sources or other core domain derived * clocks. 
*/ DEFINE_CLK_FIXED_RATE(alt_ck, CLK_IS_ROOT, 54000000, 0x0); DEFINE_CLK_FIXED_RATE(func_32k_ck, CLK_IS_ROOT, 32768, 0x0); DEFINE_CLK_FIXED_RATE(mcbsp_clks, CLK_IS_ROOT, 0x0, 0x0); static struct clk osc_ck; static const struct clk_ops osc_ck_ops = { .enable = &omap2_enable_osc_ck, .disable = omap2_disable_osc_ck, .recalc_rate = &omap2_osc_clk_recalc, }; static struct clk_hw_omap osc_ck_hw = { .hw = { .clk = &osc_ck, }, }; static struct clk osc_ck = { .name = "osc_ck", .ops = &osc_ck_ops, .hw = &osc_ck_hw.hw, .flags = CLK_IS_ROOT, }; DEFINE_CLK_FIXED_RATE(secure_32k_ck, CLK_IS_ROOT, 32768, 0x0); static struct clk sys_ck; static const char *sys_ck_parent_names[] = { "osc_ck", }; static const struct clk_ops sys_ck_ops = { .init = &omap2_init_clk_clkdm, .recalc_rate = &omap2xxx_sys_clk_recalc, }; DEFINE_STRUCT_CLK_HW_OMAP(sys_ck, "wkup_clkdm"); DEFINE_STRUCT_CLK(sys_ck, sys_ck_parent_names, sys_ck_ops); static struct dpll_data dpll_dd = { .mult_div1_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1), .mult_mask = OMAP24XX_DPLL_MULT_MASK, .div1_mask = OMAP24XX_DPLL_DIV_MASK, .clk_bypass = &sys_ck, .clk_ref = &sys_ck, .control_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN), .enable_mask = OMAP24XX_EN_DPLL_MASK, .max_multiplier = 1023, .min_divider = 1, .max_divider = 16, }; static struct clk dpll_ck; static const char *dpll_ck_parent_names[] = { "sys_ck", }; static const struct clk_ops dpll_ck_ops = { .init = &omap2_init_clk_clkdm, .get_parent = &omap2_init_dpll_parent, .recalc_rate = &omap2_dpllcore_recalc, .round_rate = &omap2_dpll_round_rate, .set_rate = &omap2_reprogram_dpllcore, }; static struct clk_hw_omap dpll_ck_hw = { .hw = { .clk = &dpll_ck, }, .ops = &clkhwops_omap2xxx_dpll, .dpll_data = &dpll_dd, .clkdm_name = "wkup_clkdm", }; DEFINE_STRUCT_CLK(dpll_ck, dpll_ck_parent_names, dpll_ck_ops); static struct clk core_ck; static const char *core_ck_parent_names[] = { "dpll_ck", }; static const struct clk_ops core_ck_ops = { .init = &omap2_init_clk_clkdm, }; 
DEFINE_STRUCT_CLK_HW_OMAP(core_ck, "wkup_clkdm"); DEFINE_STRUCT_CLK(core_ck, core_ck_parent_names, core_ck_ops); DEFINE_CLK_DIVIDER(core_l3_ck, "core_ck", &core_ck, 0x0, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1), OMAP24XX_CLKSEL_L3_SHIFT, OMAP24XX_CLKSEL_L3_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); DEFINE_CLK_DIVIDER(l4_ck, "core_l3_ck", &core_l3_ck, 0x0, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1), OMAP24XX_CLKSEL_L4_SHIFT, OMAP24XX_CLKSEL_L4_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); static struct clk aes_ick; static const char *aes_ick_parent_names[] = { "l4_ck", }; static const struct clk_ops aes_ick_ops = { .init = &omap2_init_clk_clkdm, .enable = &omap2_dflt_clk_enable, .disable = &omap2_dflt_clk_disable, .is_enabled = &omap2_dflt_clk_is_enabled, }; static struct clk_hw_omap aes_ick_hw = { .hw = { .clk = &aes_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4), .enable_bit = OMAP24XX_EN_AES_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(aes_ick, aes_ick_parent_names, aes_ick_ops); static struct clk apll54_ck; static const struct clk_ops apll54_ck_ops = { .init = &omap2_init_clk_clkdm, .enable = &omap2_clk_apll54_enable, .disable = &omap2_clk_apll54_disable, .recalc_rate = &omap2_clk_apll54_recalc, }; static struct clk_hw_omap apll54_ck_hw = { .hw = { .clk = &apll54_ck, }, .ops = &clkhwops_apll54, .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN), .enable_bit = OMAP24XX_EN_54M_PLL_SHIFT, .flags = ENABLE_ON_INIT, .clkdm_name = "wkup_clkdm", }; DEFINE_STRUCT_CLK(apll54_ck, dpll_ck_parent_names, apll54_ck_ops); static struct clk apll96_ck; static const struct clk_ops apll96_ck_ops = { .init = &omap2_init_clk_clkdm, .enable = &omap2_clk_apll96_enable, .disable = &omap2_clk_apll96_disable, .recalc_rate = &omap2_clk_apll96_recalc, }; static struct clk_hw_omap apll96_ck_hw = { .hw = { .clk = &apll96_ck, }, .ops = &clkhwops_apll96, .enable_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKEN), .enable_bit = OMAP24XX_EN_96M_PLL_SHIFT, .flags = 
ENABLE_ON_INIT, .clkdm_name = "wkup_clkdm", }; DEFINE_STRUCT_CLK(apll96_ck, dpll_ck_parent_names, apll96_ck_ops); static const char *func_96m_ck_parent_names[] = { "apll96_ck", "alt_ck", }; DEFINE_CLK_MUX(func_96m_ck, func_96m_ck_parent_names, NULL, 0x0, OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1), OMAP2430_96M_SOURCE_SHIFT, OMAP2430_96M_SOURCE_WIDTH, 0x0, NULL); static struct clk cam_fck; static const char *cam_fck_parent_names[] = { "func_96m_ck", }; static struct clk_hw_omap cam_fck_hw = { .hw = { .clk = &cam_fck, }, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), .enable_bit = OMAP24XX_EN_CAM_SHIFT, .clkdm_name = "core_l3_clkdm", }; DEFINE_STRUCT_CLK(cam_fck, cam_fck_parent_names, aes_ick_ops); static struct clk cam_ick; static struct clk_hw_omap cam_ick_hw = { .hw = { .clk = &cam_ick, }, .ops = &clkhwops_iclk, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_CAM_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(cam_ick, aes_ick_parent_names, aes_ick_ops); static struct clk des_ick; static struct clk_hw_omap des_ick_hw = { .hw = { .clk = &des_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4), .enable_bit = OMAP24XX_EN_DES_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(des_ick, aes_ick_parent_names, aes_ick_ops); static const struct clksel_rate dsp_fck_core_rates[] = { { .div = 1, .val = 1, .flags = RATE_IN_24XX }, { .div = 2, .val = 2, .flags = RATE_IN_24XX }, { .div = 3, .val = 3, .flags = RATE_IN_24XX }, { .div = 4, .val = 4, .flags = RATE_IN_24XX }, { .div = 0 } }; static const struct clksel dsp_fck_clksel[] = { { .parent = &core_ck, .rates = dsp_fck_core_rates }, { .parent = NULL }, }; static const char *dsp_fck_parent_names[] = { "core_ck", }; static struct clk dsp_fck; static const struct clk_ops dsp_fck_ops = { .init = &omap2_init_clk_clkdm, .enable = &omap2_dflt_clk_enable, .disable = &omap2_dflt_clk_disable, .is_enabled = &omap2_dflt_clk_is_enabled, 
.recalc_rate = &omap2_clksel_recalc, .set_rate = &omap2_clksel_set_rate, .round_rate = &omap2_clksel_round_rate, }; DEFINE_CLK_OMAP_MUX_GATE(dsp_fck, "dsp_clkdm", dsp_fck_clksel, OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_CLKSEL), OMAP24XX_CLKSEL_DSP_MASK, OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_FCLKEN), OMAP24XX_CM_FCLKEN_DSP_EN_DSP_SHIFT, &clkhwops_wait, dsp_fck_parent_names, dsp_fck_ops); static const struct clksel_rate dss1_fck_sys_rates[] = { { .div = 1, .val = 0, .flags = RATE_IN_24XX }, { .div = 0 } }; static const struct clksel_rate dss1_fck_core_rates[] = { { .div = 1, .val = 1, .flags = RATE_IN_24XX }, { .div = 2, .val = 2, .flags = RATE_IN_24XX }, { .div = 3, .val = 3, .flags = RATE_IN_24XX }, { .div = 4, .val = 4, .flags = RATE_IN_24XX }, { .div = 5, .val = 5, .flags = RATE_IN_24XX }, { .div = 6, .val = 6, .flags = RATE_IN_24XX }, { .div = 8, .val = 8, .flags = RATE_IN_24XX }, { .div = 9, .val = 9, .flags = RATE_IN_24XX }, { .div = 12, .val = 12, .flags = RATE_IN_24XX }, { .div = 16, .val = 16, .flags = RATE_IN_24XX }, { .div = 0 } }; static const struct clksel dss1_fck_clksel[] = { { .parent = &sys_ck, .rates = dss1_fck_sys_rates }, { .parent = &core_ck, .rates = dss1_fck_core_rates }, { .parent = NULL }, }; static const char *dss1_fck_parent_names[] = { "sys_ck", "core_ck", }; static const struct clk_ops dss1_fck_ops = { .init = &omap2_init_clk_clkdm, .enable = &omap2_dflt_clk_enable, .disable = &omap2_dflt_clk_disable, .is_enabled = &omap2_dflt_clk_is_enabled, .recalc_rate = &omap2_clksel_recalc, .get_parent = &omap2_clksel_find_parent_index, .set_parent = &omap2_clksel_set_parent, }; DEFINE_CLK_OMAP_MUX_GATE(dss1_fck, "dss_clkdm", dss1_fck_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1), OMAP24XX_CLKSEL_DSS1_MASK, OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), OMAP24XX_EN_DSS1_SHIFT, NULL, dss1_fck_parent_names, dss1_fck_ops); static const struct clksel_rate dss2_fck_sys_rates[] = { { .div = 1, .val = 0, .flags = RATE_IN_24XX }, { .div = 0 } }; static const struct 
clksel_rate dss2_fck_48m_rates[] = { { .div = 1, .val = 1, .flags = RATE_IN_24XX }, { .div = 0 } }; static const struct clksel_rate func_48m_apll96_rates[] = { { .div = 2, .val = 0, .flags = RATE_IN_24XX }, { .div = 0 } }; static const struct clksel_rate func_48m_alt_rates[] = { { .div = 1, .val = 1, .flags = RATE_IN_24XX }, { .div = 0 } }; static const struct clksel func_48m_clksel[] = { { .parent = &apll96_ck, .rates = func_48m_apll96_rates }, { .parent = &alt_ck, .rates = func_48m_alt_rates }, { .parent = NULL }, }; static const char *func_48m_ck_parent_names[] = { "apll96_ck", "alt_ck", }; static struct clk func_48m_ck; static const struct clk_ops func_48m_ck_ops = { .init = &omap2_init_clk_clkdm, .recalc_rate = &omap2_clksel_recalc, .set_rate = &omap2_clksel_set_rate, .round_rate = &omap2_clksel_round_rate, .get_parent = &omap2_clksel_find_parent_index, .set_parent = &omap2_clksel_set_parent, }; static struct clk_hw_omap func_48m_ck_hw = { .hw = { .clk = &func_48m_ck, }, .clksel = func_48m_clksel, .clksel_reg = OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1), .clksel_mask = OMAP24XX_48M_SOURCE_MASK, .clkdm_name = "wkup_clkdm", }; DEFINE_STRUCT_CLK(func_48m_ck, func_48m_ck_parent_names, func_48m_ck_ops); static const struct clksel dss2_fck_clksel[] = { { .parent = &sys_ck, .rates = dss2_fck_sys_rates }, { .parent = &func_48m_ck, .rates = dss2_fck_48m_rates }, { .parent = NULL }, }; static const char *dss2_fck_parent_names[] = { "sys_ck", "func_48m_ck", }; DEFINE_CLK_OMAP_MUX_GATE(dss2_fck, "dss_clkdm", dss2_fck_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1), OMAP24XX_CLKSEL_DSS2_MASK, OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), OMAP24XX_EN_DSS2_SHIFT, NULL, dss2_fck_parent_names, dss1_fck_ops); static const char *func_54m_ck_parent_names[] = { "apll54_ck", "alt_ck", }; DEFINE_CLK_MUX(func_54m_ck, func_54m_ck_parent_names, NULL, 0x0, OMAP_CM_REGADDR(PLL_MOD, CM_CLKSEL1), OMAP24XX_54M_SOURCE_SHIFT, OMAP24XX_54M_SOURCE_WIDTH, 0x0, NULL); static struct clk dss_54m_fck; static const 
char *dss_54m_fck_parent_names[] = { "func_54m_ck", }; static struct clk_hw_omap dss_54m_fck_hw = { .hw = { .clk = &dss_54m_fck, }, .ops = &clkhwops_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), .enable_bit = OMAP24XX_EN_TV_SHIFT, .clkdm_name = "dss_clkdm", }; DEFINE_STRUCT_CLK(dss_54m_fck, dss_54m_fck_parent_names, aes_ick_ops); static struct clk dss_ick; static struct clk_hw_omap dss_ick_hw = { .hw = { .clk = &dss_ick, }, .ops = &clkhwops_iclk, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_DSS1_SHIFT, .clkdm_name = "dss_clkdm", }; DEFINE_STRUCT_CLK(dss_ick, aes_ick_parent_names, aes_ick_ops); static struct clk emul_ck; static struct clk_hw_omap emul_ck_hw = { .hw = { .clk = &emul_ck, }, .enable_reg = OMAP2430_PRCM_CLKEMUL_CTRL, .enable_bit = OMAP24XX_EMULATION_EN_SHIFT, .clkdm_name = "wkup_clkdm", }; DEFINE_STRUCT_CLK(emul_ck, dss_54m_fck_parent_names, aes_ick_ops); DEFINE_CLK_FIXED_FACTOR(func_12m_ck, "func_48m_ck", &func_48m_ck, 0x0, 1, 4); static struct clk fac_fck; static const char *fac_fck_parent_names[] = { "func_12m_ck", }; static struct clk_hw_omap fac_fck_hw = { .hw = { .clk = &fac_fck, }, .ops = &clkhwops_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), .enable_bit = OMAP24XX_EN_FAC_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(fac_fck, fac_fck_parent_names, aes_ick_ops); static struct clk fac_ick; static struct clk_hw_omap fac_ick_hw = { .hw = { .clk = &fac_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_FAC_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(fac_ick, aes_ick_parent_names, aes_ick_ops); static const struct clksel gfx_fck_clksel[] = { { .parent = &core_l3_ck, .rates = gfx_l3_rates }, { .parent = NULL }, }; static const char *gfx_2d_fck_parent_names[] = { "core_l3_ck", }; DEFINE_CLK_OMAP_MUX_GATE(gfx_2d_fck, "gfx_clkdm", gfx_fck_clksel, OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL), OMAP_CLKSEL_GFX_MASK, 
OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN), OMAP24XX_EN_2D_SHIFT, &clkhwops_wait, gfx_2d_fck_parent_names, dsp_fck_ops); DEFINE_CLK_OMAP_MUX_GATE(gfx_3d_fck, "gfx_clkdm", gfx_fck_clksel, OMAP_CM_REGADDR(GFX_MOD, CM_CLKSEL), OMAP_CLKSEL_GFX_MASK, OMAP_CM_REGADDR(GFX_MOD, CM_FCLKEN), OMAP24XX_EN_3D_SHIFT, &clkhwops_wait, gfx_2d_fck_parent_names, dsp_fck_ops); static struct clk gfx_ick; static const char *gfx_ick_parent_names[] = { "core_l3_ck", }; static struct clk_hw_omap gfx_ick_hw = { .hw = { .clk = &gfx_ick, }, .ops = &clkhwops_wait, .enable_reg = OMAP_CM_REGADDR(GFX_MOD, CM_ICLKEN), .enable_bit = OMAP_EN_GFX_SHIFT, .clkdm_name = "gfx_clkdm", }; DEFINE_STRUCT_CLK(gfx_ick, gfx_ick_parent_names, aes_ick_ops); static struct clk gpio5_fck; static const char *gpio5_fck_parent_names[] = { "func_32k_ck", }; static struct clk_hw_omap gpio5_fck_hw = { .hw = { .clk = &gpio5_fck, }, .ops = &clkhwops_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2), .enable_bit = OMAP2430_EN_GPIO5_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(gpio5_fck, gpio5_fck_parent_names, aes_ick_ops); static struct clk gpio5_ick; static struct clk_hw_omap gpio5_ick_hw = { .hw = { .clk = &gpio5_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2), .enable_bit = OMAP2430_EN_GPIO5_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(gpio5_ick, aes_ick_parent_names, aes_ick_ops); static struct clk gpios_fck; static struct clk_hw_omap gpios_fck_hw = { .hw = { .clk = &gpios_fck, }, .ops = &clkhwops_wait, .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN), .enable_bit = OMAP24XX_EN_GPIOS_SHIFT, .clkdm_name = "wkup_clkdm", }; DEFINE_STRUCT_CLK(gpios_fck, gpio5_fck_parent_names, aes_ick_ops); static struct clk gpios_ick; static const char *gpios_ick_parent_names[] = { "sys_ck", }; static struct clk_hw_omap gpios_ick_hw = { .hw = { .clk = &gpios_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN), .enable_bit = 
OMAP24XX_EN_GPIOS_SHIFT, .clkdm_name = "wkup_clkdm", }; DEFINE_STRUCT_CLK(gpios_ick, gpios_ick_parent_names, aes_ick_ops); static struct clk gpmc_fck; static struct clk_hw_omap gpmc_fck_hw = { .hw = { .clk = &gpmc_fck, }, .ops = &clkhwops_iclk, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3), .enable_bit = OMAP24XX_AUTO_GPMC_SHIFT, .flags = ENABLE_ON_INIT, .clkdm_name = "core_l3_clkdm", }; DEFINE_STRUCT_CLK(gpmc_fck, gfx_ick_parent_names, core_ck_ops); static const struct clksel_rate gpt_alt_rates[] = { { .div = 1, .val = 2, .flags = RATE_IN_24XX }, { .div = 0 } }; static const struct clksel omap24xx_gpt_clksel[] = { { .parent = &func_32k_ck, .rates = gpt_32k_rates }, { .parent = &sys_ck, .rates = gpt_sys_rates }, { .parent = &alt_ck, .rates = gpt_alt_rates }, { .parent = NULL }, }; static const char *gpt10_fck_parent_names[] = { "func_32k_ck", "sys_ck", "alt_ck", }; DEFINE_CLK_OMAP_MUX_GATE(gpt10_fck, "core_l4_clkdm", omap24xx_gpt_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2), OMAP24XX_CLKSEL_GPT10_MASK, OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), OMAP24XX_EN_GPT10_SHIFT, &clkhwops_wait, gpt10_fck_parent_names, dss1_fck_ops); static struct clk gpt10_ick; static struct clk_hw_omap gpt10_ick_hw = { .hw = { .clk = &gpt10_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_GPT10_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(gpt10_ick, aes_ick_parent_names, aes_ick_ops); DEFINE_CLK_OMAP_MUX_GATE(gpt11_fck, "core_l4_clkdm", omap24xx_gpt_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2), OMAP24XX_CLKSEL_GPT11_MASK, OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), OMAP24XX_EN_GPT11_SHIFT, &clkhwops_wait, gpt10_fck_parent_names, dss1_fck_ops); static struct clk gpt11_ick; static struct clk_hw_omap gpt11_ick_hw = { .hw = { .clk = &gpt11_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_GPT11_SHIFT, .clkdm_name = "core_l4_clkdm", }; 
DEFINE_STRUCT_CLK(gpt11_ick, aes_ick_parent_names, aes_ick_ops); DEFINE_CLK_OMAP_MUX_GATE(gpt12_fck, "core_l4_clkdm", omap24xx_gpt_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2), OMAP24XX_CLKSEL_GPT12_MASK, OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), OMAP24XX_EN_GPT12_SHIFT, &clkhwops_wait, gpt10_fck_parent_names, dss1_fck_ops); static struct clk gpt12_ick; static struct clk_hw_omap gpt12_ick_hw = { .hw = { .clk = &gpt12_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_GPT12_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(gpt12_ick, aes_ick_parent_names, aes_ick_ops); static const struct clk_ops gpt1_fck_ops = { .init = &omap2_init_clk_clkdm, .enable = &omap2_dflt_clk_enable, .disable = &omap2_dflt_clk_disable, .is_enabled = &omap2_dflt_clk_is_enabled, .recalc_rate = &omap2_clksel_recalc, .set_rate = &omap2_clksel_set_rate, .round_rate = &omap2_clksel_round_rate, .get_parent = &omap2_clksel_find_parent_index, .set_parent = &omap2_clksel_set_parent, }; DEFINE_CLK_OMAP_MUX_GATE(gpt1_fck, "core_l4_clkdm", omap24xx_gpt_clksel, OMAP_CM_REGADDR(WKUP_MOD, CM_CLKSEL1), OMAP24XX_CLKSEL_GPT1_MASK, OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN), OMAP24XX_EN_GPT1_SHIFT, &clkhwops_wait, gpt10_fck_parent_names, gpt1_fck_ops); static struct clk gpt1_ick; static struct clk_hw_omap gpt1_ick_hw = { .hw = { .clk = &gpt1_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN), .enable_bit = OMAP24XX_EN_GPT1_SHIFT, .clkdm_name = "wkup_clkdm", }; DEFINE_STRUCT_CLK(gpt1_ick, gpios_ick_parent_names, aes_ick_ops); DEFINE_CLK_OMAP_MUX_GATE(gpt2_fck, "core_l4_clkdm", omap24xx_gpt_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2), OMAP24XX_CLKSEL_GPT2_MASK, OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), OMAP24XX_EN_GPT2_SHIFT, &clkhwops_wait, gpt10_fck_parent_names, dss1_fck_ops); static struct clk gpt2_ick; static struct clk_hw_omap gpt2_ick_hw = { .hw = { .clk = &gpt2_ick, }, .ops = &clkhwops_iclk_wait, 
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_GPT2_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(gpt2_ick, aes_ick_parent_names, aes_ick_ops); DEFINE_CLK_OMAP_MUX_GATE(gpt3_fck, "core_l4_clkdm", omap24xx_gpt_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2), OMAP24XX_CLKSEL_GPT3_MASK, OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), OMAP24XX_EN_GPT3_SHIFT, &clkhwops_wait, gpt10_fck_parent_names, dss1_fck_ops); static struct clk gpt3_ick; static struct clk_hw_omap gpt3_ick_hw = { .hw = { .clk = &gpt3_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_GPT3_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(gpt3_ick, aes_ick_parent_names, aes_ick_ops); DEFINE_CLK_OMAP_MUX_GATE(gpt4_fck, "core_l4_clkdm", omap24xx_gpt_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2), OMAP24XX_CLKSEL_GPT4_MASK, OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), OMAP24XX_EN_GPT4_SHIFT, &clkhwops_wait, gpt10_fck_parent_names, dss1_fck_ops); static struct clk gpt4_ick; static struct clk_hw_omap gpt4_ick_hw = { .hw = { .clk = &gpt4_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_GPT4_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(gpt4_ick, aes_ick_parent_names, aes_ick_ops); DEFINE_CLK_OMAP_MUX_GATE(gpt5_fck, "core_l4_clkdm", omap24xx_gpt_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2), OMAP24XX_CLKSEL_GPT5_MASK, OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), OMAP24XX_EN_GPT5_SHIFT, &clkhwops_wait, gpt10_fck_parent_names, dss1_fck_ops); static struct clk gpt5_ick; static struct clk_hw_omap gpt5_ick_hw = { .hw = { .clk = &gpt5_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_GPT5_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(gpt5_ick, aes_ick_parent_names, aes_ick_ops); DEFINE_CLK_OMAP_MUX_GATE(gpt6_fck, "core_l4_clkdm", omap24xx_gpt_clksel, 
OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2), OMAP24XX_CLKSEL_GPT6_MASK, OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), OMAP24XX_EN_GPT6_SHIFT, &clkhwops_wait, gpt10_fck_parent_names, dss1_fck_ops); static struct clk gpt6_ick; static struct clk_hw_omap gpt6_ick_hw = { .hw = { .clk = &gpt6_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_GPT6_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(gpt6_ick, aes_ick_parent_names, aes_ick_ops); DEFINE_CLK_OMAP_MUX_GATE(gpt7_fck, "core_l4_clkdm", omap24xx_gpt_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2), OMAP24XX_CLKSEL_GPT7_MASK, OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), OMAP24XX_EN_GPT7_SHIFT, &clkhwops_wait, gpt10_fck_parent_names, dss1_fck_ops); static struct clk gpt7_ick; static struct clk_hw_omap gpt7_ick_hw = { .hw = { .clk = &gpt7_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_GPT7_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(gpt7_ick, aes_ick_parent_names, aes_ick_ops); static struct clk gpt8_fck; DEFINE_CLK_OMAP_MUX_GATE(gpt8_fck, "core_l4_clkdm", omap24xx_gpt_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2), OMAP24XX_CLKSEL_GPT8_MASK, OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), OMAP24XX_EN_GPT8_SHIFT, &clkhwops_wait, gpt10_fck_parent_names, dss1_fck_ops); static struct clk gpt8_ick; static struct clk_hw_omap gpt8_ick_hw = { .hw = { .clk = &gpt8_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_GPT8_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(gpt8_ick, aes_ick_parent_names, aes_ick_ops); DEFINE_CLK_OMAP_MUX_GATE(gpt9_fck, "core_l4_clkdm", omap24xx_gpt_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL2), OMAP24XX_CLKSEL_GPT9_MASK, OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), OMAP24XX_EN_GPT9_SHIFT, &clkhwops_wait, gpt10_fck_parent_names, dss1_fck_ops); static struct clk gpt9_ick; static struct clk_hw_omap gpt9_ick_hw = 
{ .hw = { .clk = &gpt9_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_GPT9_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(gpt9_ick, aes_ick_parent_names, aes_ick_ops); static struct clk hdq_fck; static struct clk_hw_omap hdq_fck_hw = { .hw = { .clk = &hdq_fck, }, .ops = &clkhwops_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), .enable_bit = OMAP24XX_EN_HDQ_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(hdq_fck, fac_fck_parent_names, aes_ick_ops); static struct clk hdq_ick; static struct clk_hw_omap hdq_ick_hw = { .hw = { .clk = &hdq_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_HDQ_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(hdq_ick, aes_ick_parent_names, aes_ick_ops); static struct clk i2c1_ick; static struct clk_hw_omap i2c1_ick_hw = { .hw = { .clk = &i2c1_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP2420_EN_I2C1_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(i2c1_ick, aes_ick_parent_names, aes_ick_ops); static struct clk i2c2_ick; static struct clk_hw_omap i2c2_ick_hw = { .hw = { .clk = &i2c2_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP2420_EN_I2C2_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(i2c2_ick, aes_ick_parent_names, aes_ick_ops); static struct clk i2chs1_fck; static struct clk_hw_omap i2chs1_fck_hw = { .hw = { .clk = &i2chs1_fck, }, .ops = &clkhwops_omap2430_i2chs_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2), .enable_bit = OMAP2430_EN_I2CHS1_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(i2chs1_fck, cam_fck_parent_names, aes_ick_ops); static struct clk i2chs2_fck; static struct clk_hw_omap i2chs2_fck_hw = { .hw = { .clk = &i2chs2_fck, }, .ops = &clkhwops_omap2430_i2chs_wait, .enable_reg = 
OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2), .enable_bit = OMAP2430_EN_I2CHS2_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(i2chs2_fck, cam_fck_parent_names, aes_ick_ops); static struct clk icr_ick; static struct clk_hw_omap icr_ick_hw = { .hw = { .clk = &icr_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN), .enable_bit = OMAP2430_EN_ICR_SHIFT, .clkdm_name = "wkup_clkdm", }; DEFINE_STRUCT_CLK(icr_ick, gpios_ick_parent_names, aes_ick_ops); static const struct clksel dsp_ick_clksel[] = { { .parent = &dsp_fck, .rates = dsp_ick_rates }, { .parent = NULL }, }; static const char *iva2_1_ick_parent_names[] = { "dsp_fck", }; DEFINE_CLK_OMAP_MUX_GATE(iva2_1_ick, "dsp_clkdm", dsp_ick_clksel, OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_CLKSEL), OMAP24XX_CLKSEL_DSP_IF_MASK, OMAP_CM_REGADDR(OMAP24XX_DSP_MOD, CM_FCLKEN), OMAP24XX_CM_FCLKEN_DSP_EN_DSP_SHIFT, &clkhwops_wait, iva2_1_ick_parent_names, dsp_fck_ops); static struct clk mailboxes_ick; static struct clk_hw_omap mailboxes_ick_hw = { .hw = { .clk = &mailboxes_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_MAILBOXES_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(mailboxes_ick, aes_ick_parent_names, aes_ick_ops); static const struct clksel_rate common_mcbsp_96m_rates[] = { { .div = 1, .val = 0, .flags = RATE_IN_24XX }, { .div = 0 } }; static const struct clksel_rate common_mcbsp_mcbsp_rates[] = { { .div = 1, .val = 1, .flags = RATE_IN_24XX }, { .div = 0 } }; static const struct clksel mcbsp_fck_clksel[] = { { .parent = &func_96m_ck, .rates = common_mcbsp_96m_rates }, { .parent = &mcbsp_clks, .rates = common_mcbsp_mcbsp_rates }, { .parent = NULL }, }; static const char *mcbsp1_fck_parent_names[] = { "func_96m_ck", "mcbsp_clks", }; DEFINE_CLK_OMAP_MUX_GATE(mcbsp1_fck, "core_l4_clkdm", mcbsp_fck_clksel, OMAP243X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0), OMAP2_MCBSP1_CLKS_MASK, OMAP_CM_REGADDR(CORE_MOD, 
CM_FCLKEN1), OMAP24XX_EN_MCBSP1_SHIFT, &clkhwops_wait, mcbsp1_fck_parent_names, dss1_fck_ops); static struct clk mcbsp1_ick; static struct clk_hw_omap mcbsp1_ick_hw = { .hw = { .clk = &mcbsp1_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_MCBSP1_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(mcbsp1_ick, aes_ick_parent_names, aes_ick_ops); DEFINE_CLK_OMAP_MUX_GATE(mcbsp2_fck, "core_l4_clkdm", mcbsp_fck_clksel, OMAP243X_CTRL_REGADDR(OMAP2_CONTROL_DEVCONF0), OMAP2_MCBSP2_CLKS_MASK, OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), OMAP24XX_EN_MCBSP2_SHIFT, &clkhwops_wait, mcbsp1_fck_parent_names, dss1_fck_ops); static struct clk mcbsp2_ick; static struct clk_hw_omap mcbsp2_ick_hw = { .hw = { .clk = &mcbsp2_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_MCBSP2_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(mcbsp2_ick, aes_ick_parent_names, aes_ick_ops); DEFINE_CLK_OMAP_MUX_GATE(mcbsp3_fck, "core_l4_clkdm", mcbsp_fck_clksel, OMAP243X_CTRL_REGADDR(OMAP243X_CONTROL_DEVCONF1), OMAP2_MCBSP3_CLKS_MASK, OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2), OMAP2430_EN_MCBSP3_SHIFT, &clkhwops_wait, mcbsp1_fck_parent_names, dss1_fck_ops); static struct clk mcbsp3_ick; static struct clk_hw_omap mcbsp3_ick_hw = { .hw = { .clk = &mcbsp3_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2), .enable_bit = OMAP2430_EN_MCBSP3_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(mcbsp3_ick, aes_ick_parent_names, aes_ick_ops); DEFINE_CLK_OMAP_MUX_GATE(mcbsp4_fck, "core_l4_clkdm", mcbsp_fck_clksel, OMAP243X_CTRL_REGADDR(OMAP243X_CONTROL_DEVCONF1), OMAP2_MCBSP4_CLKS_MASK, OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2), OMAP2430_EN_MCBSP4_SHIFT, &clkhwops_wait, mcbsp1_fck_parent_names, dss1_fck_ops); static struct clk mcbsp4_ick; static struct clk_hw_omap mcbsp4_ick_hw = { .hw = { .clk = &mcbsp4_ick, }, .ops 
= &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2), .enable_bit = OMAP2430_EN_MCBSP4_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(mcbsp4_ick, aes_ick_parent_names, aes_ick_ops); DEFINE_CLK_OMAP_MUX_GATE(mcbsp5_fck, "core_l4_clkdm", mcbsp_fck_clksel, OMAP243X_CTRL_REGADDR(OMAP243X_CONTROL_DEVCONF1), OMAP2_MCBSP5_CLKS_MASK, OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2), OMAP2430_EN_MCBSP5_SHIFT, &clkhwops_wait, mcbsp1_fck_parent_names, dss1_fck_ops); static struct clk mcbsp5_ick; static struct clk_hw_omap mcbsp5_ick_hw = { .hw = { .clk = &mcbsp5_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2), .enable_bit = OMAP2430_EN_MCBSP5_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(mcbsp5_ick, aes_ick_parent_names, aes_ick_ops); static struct clk mcspi1_fck; static const char *mcspi1_fck_parent_names[] = { "func_48m_ck", }; static struct clk_hw_omap mcspi1_fck_hw = { .hw = { .clk = &mcspi1_fck, }, .ops = &clkhwops_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), .enable_bit = OMAP24XX_EN_MCSPI1_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(mcspi1_fck, mcspi1_fck_parent_names, aes_ick_ops); static struct clk mcspi1_ick; static struct clk_hw_omap mcspi1_ick_hw = { .hw = { .clk = &mcspi1_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_MCSPI1_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(mcspi1_ick, aes_ick_parent_names, aes_ick_ops); static struct clk mcspi2_fck; static struct clk_hw_omap mcspi2_fck_hw = { .hw = { .clk = &mcspi2_fck, }, .ops = &clkhwops_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), .enable_bit = OMAP24XX_EN_MCSPI2_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(mcspi2_fck, mcspi1_fck_parent_names, aes_ick_ops); static struct clk mcspi2_ick; static struct clk_hw_omap mcspi2_ick_hw = { .hw = { .clk = &mcspi2_ick, }, .ops = &clkhwops_iclk_wait, 
.enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_MCSPI2_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(mcspi2_ick, aes_ick_parent_names, aes_ick_ops); static struct clk mcspi3_fck; static struct clk_hw_omap mcspi3_fck_hw = { .hw = { .clk = &mcspi3_fck, }, .ops = &clkhwops_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2), .enable_bit = OMAP2430_EN_MCSPI3_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(mcspi3_fck, mcspi1_fck_parent_names, aes_ick_ops); static struct clk mcspi3_ick; static struct clk_hw_omap mcspi3_ick_hw = { .hw = { .clk = &mcspi3_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2), .enable_bit = OMAP2430_EN_MCSPI3_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(mcspi3_ick, aes_ick_parent_names, aes_ick_ops); static const struct clksel_rate mdm_ick_core_rates[] = { { .div = 1, .val = 1, .flags = RATE_IN_243X }, { .div = 4, .val = 4, .flags = RATE_IN_243X }, { .div = 6, .val = 6, .flags = RATE_IN_243X }, { .div = 9, .val = 9, .flags = RATE_IN_243X }, { .div = 0 } }; static const struct clksel mdm_ick_clksel[] = { { .parent = &core_ck, .rates = mdm_ick_core_rates }, { .parent = NULL }, }; static const char *mdm_ick_parent_names[] = { "core_ck", }; DEFINE_CLK_OMAP_MUX_GATE(mdm_ick, "mdm_clkdm", mdm_ick_clksel, OMAP_CM_REGADDR(OMAP2430_MDM_MOD, CM_CLKSEL), OMAP2430_CLKSEL_MDM_MASK, OMAP_CM_REGADDR(OMAP2430_MDM_MOD, CM_ICLKEN), OMAP2430_CM_ICLKEN_MDM_EN_MDM_SHIFT, &clkhwops_iclk_wait, mdm_ick_parent_names, dsp_fck_ops); static struct clk mdm_intc_ick; static struct clk_hw_omap mdm_intc_ick_hw = { .hw = { .clk = &mdm_intc_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2), .enable_bit = OMAP2430_EN_MDM_INTC_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(mdm_intc_ick, aes_ick_parent_names, aes_ick_ops); static struct clk mdm_osc_ck; static struct clk_hw_omap mdm_osc_ck_hw = { .hw = { .clk = 
&mdm_osc_ck, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(OMAP2430_MDM_MOD, CM_FCLKEN), .enable_bit = OMAP2430_EN_OSC_SHIFT, .clkdm_name = "mdm_clkdm", }; DEFINE_STRUCT_CLK(mdm_osc_ck, sys_ck_parent_names, aes_ick_ops); static struct clk mmchs1_fck; static struct clk_hw_omap mmchs1_fck_hw = { .hw = { .clk = &mmchs1_fck, }, .ops = &clkhwops_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2), .enable_bit = OMAP2430_EN_MMCHS1_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(mmchs1_fck, cam_fck_parent_names, aes_ick_ops); static struct clk mmchs1_ick; static struct clk_hw_omap mmchs1_ick_hw = { .hw = { .clk = &mmchs1_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2), .enable_bit = OMAP2430_EN_MMCHS1_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(mmchs1_ick, aes_ick_parent_names, aes_ick_ops); static struct clk mmchs2_fck; static struct clk_hw_omap mmchs2_fck_hw = { .hw = { .clk = &mmchs2_fck, }, .ops = &clkhwops_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2), .enable_bit = OMAP2430_EN_MMCHS2_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(mmchs2_fck, cam_fck_parent_names, aes_ick_ops); static struct clk mmchs2_ick; static struct clk_hw_omap mmchs2_ick_hw = { .hw = { .clk = &mmchs2_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2), .enable_bit = OMAP2430_EN_MMCHS2_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(mmchs2_ick, aes_ick_parent_names, aes_ick_ops); static struct clk mmchsdb1_fck; static struct clk_hw_omap mmchsdb1_fck_hw = { .hw = { .clk = &mmchsdb1_fck, }, .ops = &clkhwops_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2), .enable_bit = OMAP2430_EN_MMCHSDB1_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(mmchsdb1_fck, gpio5_fck_parent_names, aes_ick_ops); static struct clk mmchsdb2_fck; static struct clk_hw_omap mmchsdb2_fck_hw = { .hw = { .clk = &mmchsdb2_fck, 
}, .ops = &clkhwops_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2), .enable_bit = OMAP2430_EN_MMCHSDB2_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(mmchsdb2_fck, gpio5_fck_parent_names, aes_ick_ops); DEFINE_CLK_DIVIDER(mpu_ck, "core_ck", &core_ck, 0x0, OMAP_CM_REGADDR(MPU_MOD, CM_CLKSEL), OMAP24XX_CLKSEL_MPU_SHIFT, OMAP24XX_CLKSEL_MPU_WIDTH, CLK_DIVIDER_ONE_BASED, NULL); static struct clk mpu_wdt_fck; static struct clk_hw_omap mpu_wdt_fck_hw = { .hw = { .clk = &mpu_wdt_fck, }, .ops = &clkhwops_wait, .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_FCLKEN), .enable_bit = OMAP24XX_EN_MPU_WDT_SHIFT, .clkdm_name = "wkup_clkdm", }; DEFINE_STRUCT_CLK(mpu_wdt_fck, gpio5_fck_parent_names, aes_ick_ops); static struct clk mpu_wdt_ick; static struct clk_hw_omap mpu_wdt_ick_hw = { .hw = { .clk = &mpu_wdt_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN), .enable_bit = OMAP24XX_EN_MPU_WDT_SHIFT, .clkdm_name = "wkup_clkdm", }; DEFINE_STRUCT_CLK(mpu_wdt_ick, gpios_ick_parent_names, aes_ick_ops); static struct clk mspro_fck; static struct clk_hw_omap mspro_fck_hw = { .hw = { .clk = &mspro_fck, }, .ops = &clkhwops_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), .enable_bit = OMAP24XX_EN_MSPRO_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(mspro_fck, cam_fck_parent_names, aes_ick_ops); static struct clk mspro_ick; static struct clk_hw_omap mspro_ick_hw = { .hw = { .clk = &mspro_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_MSPRO_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(mspro_ick, aes_ick_parent_names, aes_ick_ops); static struct clk omapctrl_ick; static struct clk_hw_omap omapctrl_ick_hw = { .hw = { .clk = &omapctrl_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN), .enable_bit = OMAP24XX_EN_OMAPCTRL_SHIFT, .flags = ENABLE_ON_INIT, .clkdm_name = "wkup_clkdm", }; 
DEFINE_STRUCT_CLK(omapctrl_ick, gpios_ick_parent_names, aes_ick_ops); static struct clk pka_ick; static struct clk_hw_omap pka_ick_hw = { .hw = { .clk = &pka_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4), .enable_bit = OMAP24XX_EN_PKA_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(pka_ick, aes_ick_parent_names, aes_ick_ops); static struct clk rng_ick; static struct clk_hw_omap rng_ick_hw = { .hw = { .clk = &rng_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4), .enable_bit = OMAP24XX_EN_RNG_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(rng_ick, aes_ick_parent_names, aes_ick_ops); static struct clk sdma_fck; DEFINE_STRUCT_CLK_HW_OMAP(sdma_fck, "core_l3_clkdm"); DEFINE_STRUCT_CLK(sdma_fck, gfx_ick_parent_names, core_ck_ops); static struct clk sdma_ick; static struct clk_hw_omap sdma_ick_hw = { .hw = { .clk = &sdma_ick, }, .ops = &clkhwops_iclk, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3), .enable_bit = OMAP24XX_AUTO_SDMA_SHIFT, .clkdm_name = "core_l3_clkdm", }; DEFINE_STRUCT_CLK(sdma_ick, gfx_ick_parent_names, core_ck_ops); static struct clk sdrc_ick; static struct clk_hw_omap sdrc_ick_hw = { .hw = { .clk = &sdrc_ick, }, .ops = &clkhwops_iclk, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN3), .enable_bit = OMAP2430_EN_SDRC_SHIFT, .flags = ENABLE_ON_INIT, .clkdm_name = "core_l3_clkdm", }; DEFINE_STRUCT_CLK(sdrc_ick, gfx_ick_parent_names, core_ck_ops); static struct clk sha_ick; static struct clk_hw_omap sha_ick_hw = { .hw = { .clk = &sha_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_ICLKEN4), .enable_bit = OMAP24XX_EN_SHA_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(sha_ick, aes_ick_parent_names, aes_ick_ops); static struct clk ssi_l4_ick; static struct clk_hw_omap ssi_l4_ick_hw = { .hw = { .clk = &ssi_l4_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, 
CM_ICLKEN2), .enable_bit = OMAP24XX_EN_SSI_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(ssi_l4_ick, aes_ick_parent_names, aes_ick_ops); static const struct clksel_rate ssi_ssr_sst_fck_core_rates[] = { { .div = 1, .val = 1, .flags = RATE_IN_24XX }, { .div = 2, .val = 2, .flags = RATE_IN_24XX }, { .div = 3, .val = 3, .flags = RATE_IN_24XX }, { .div = 4, .val = 4, .flags = RATE_IN_24XX }, { .div = 5, .val = 5, .flags = RATE_IN_243X }, { .div = 0 } }; static const struct clksel ssi_ssr_sst_fck_clksel[] = { { .parent = &core_ck, .rates = ssi_ssr_sst_fck_core_rates }, { .parent = NULL }, }; static const char *ssi_ssr_sst_fck_parent_names[] = { "core_ck", }; DEFINE_CLK_OMAP_MUX_GATE(ssi_ssr_sst_fck, "core_l3_clkdm", ssi_ssr_sst_fck_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1), OMAP24XX_CLKSEL_SSI_MASK, OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2), OMAP24XX_EN_SSI_SHIFT, &clkhwops_wait, ssi_ssr_sst_fck_parent_names, dsp_fck_ops); static struct clk sync_32k_ick; static struct clk_hw_omap sync_32k_ick_hw = { .hw = { .clk = &sync_32k_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN), .enable_bit = OMAP24XX_EN_32KSYNC_SHIFT, .flags = ENABLE_ON_INIT, .clkdm_name = "wkup_clkdm", }; DEFINE_STRUCT_CLK(sync_32k_ick, gpios_ick_parent_names, aes_ick_ops); static const struct clksel_rate common_clkout_src_core_rates[] = { { .div = 1, .val = 0, .flags = RATE_IN_24XX }, { .div = 0 } }; static const struct clksel_rate common_clkout_src_sys_rates[] = { { .div = 1, .val = 1, .flags = RATE_IN_24XX }, { .div = 0 } }; static const struct clksel_rate common_clkout_src_96m_rates[] = { { .div = 1, .val = 2, .flags = RATE_IN_24XX }, { .div = 0 } }; static const struct clksel_rate common_clkout_src_54m_rates[] = { { .div = 1, .val = 3, .flags = RATE_IN_24XX }, { .div = 0 } }; static const struct clksel common_clkout_src_clksel[] = { { .parent = &core_ck, .rates = common_clkout_src_core_rates }, { .parent = &sys_ck, .rates = 
common_clkout_src_sys_rates }, { .parent = &func_96m_ck, .rates = common_clkout_src_96m_rates }, { .parent = &func_54m_ck, .rates = common_clkout_src_54m_rates }, { .parent = NULL }, }; static const char *sys_clkout_src_parent_names[] = { "core_ck", "sys_ck", "func_96m_ck", "func_54m_ck", }; DEFINE_CLK_OMAP_MUX_GATE(sys_clkout_src, "wkup_clkdm", common_clkout_src_clksel, OMAP2430_PRCM_CLKOUT_CTRL, OMAP24XX_CLKOUT_SOURCE_MASK, OMAP2430_PRCM_CLKOUT_CTRL, OMAP24XX_CLKOUT_EN_SHIFT, NULL, sys_clkout_src_parent_names, gpt1_fck_ops); DEFINE_CLK_DIVIDER(sys_clkout, "sys_clkout_src", &sys_clkout_src, 0x0, OMAP2430_PRCM_CLKOUT_CTRL, OMAP24XX_CLKOUT_DIV_SHIFT, OMAP24XX_CLKOUT_DIV_WIDTH, CLK_DIVIDER_POWER_OF_TWO, NULL); static struct clk uart1_fck; static struct clk_hw_omap uart1_fck_hw = { .hw = { .clk = &uart1_fck, }, .ops = &clkhwops_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), .enable_bit = OMAP24XX_EN_UART1_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(uart1_fck, mcspi1_fck_parent_names, aes_ick_ops); static struct clk uart1_ick; static struct clk_hw_omap uart1_ick_hw = { .hw = { .clk = &uart1_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_UART1_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(uart1_ick, aes_ick_parent_names, aes_ick_ops); static struct clk uart2_fck; static struct clk_hw_omap uart2_fck_hw = { .hw = { .clk = &uart2_fck, }, .ops = &clkhwops_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), .enable_bit = OMAP24XX_EN_UART2_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(uart2_fck, mcspi1_fck_parent_names, aes_ick_ops); static struct clk uart2_ick; static struct clk_hw_omap uart2_ick_hw = { .hw = { .clk = &uart2_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_UART2_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(uart2_ick, aes_ick_parent_names, aes_ick_ops); 
static struct clk uart3_fck; static struct clk_hw_omap uart3_fck_hw = { .hw = { .clk = &uart3_fck, }, .ops = &clkhwops_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2), .enable_bit = OMAP24XX_EN_UART3_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(uart3_fck, mcspi1_fck_parent_names, aes_ick_ops); static struct clk uart3_ick; static struct clk_hw_omap uart3_ick_hw = { .hw = { .clk = &uart3_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2), .enable_bit = OMAP24XX_EN_UART3_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(uart3_ick, aes_ick_parent_names, aes_ick_ops); static struct clk usb_fck; static struct clk_hw_omap usb_fck_hw = { .hw = { .clk = &usb_fck, }, .ops = &clkhwops_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, OMAP24XX_CM_FCLKEN2), .enable_bit = OMAP24XX_EN_USB_SHIFT, .clkdm_name = "core_l3_clkdm", }; DEFINE_STRUCT_CLK(usb_fck, mcspi1_fck_parent_names, aes_ick_ops); static const struct clksel_rate usb_l4_ick_core_l3_rates[] = { { .div = 1, .val = 1, .flags = RATE_IN_24XX }, { .div = 2, .val = 2, .flags = RATE_IN_24XX }, { .div = 4, .val = 4, .flags = RATE_IN_24XX }, { .div = 0 } }; static const struct clksel usb_l4_ick_clksel[] = { { .parent = &core_l3_ck, .rates = usb_l4_ick_core_l3_rates }, { .parent = NULL }, }; static const char *usb_l4_ick_parent_names[] = { "core_l3_ck", }; DEFINE_CLK_OMAP_MUX_GATE(usb_l4_ick, "core_l4_clkdm", usb_l4_ick_clksel, OMAP_CM_REGADDR(CORE_MOD, CM_CLKSEL1), OMAP24XX_CLKSEL_USB_MASK, OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2), OMAP24XX_EN_USB_SHIFT, &clkhwops_iclk_wait, usb_l4_ick_parent_names, dsp_fck_ops); static struct clk usbhs_ick; static struct clk_hw_omap usbhs_ick_hw = { .hw = { .clk = &usbhs_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN2), .enable_bit = OMAP2430_EN_USBHS_SHIFT, .clkdm_name = "core_l3_clkdm", }; DEFINE_STRUCT_CLK(usbhs_ick, gfx_ick_parent_names, aes_ick_ops); static struct clk 
virt_prcm_set; static const char *virt_prcm_set_parent_names[] = { "mpu_ck", }; static const struct clk_ops virt_prcm_set_ops = { .recalc_rate = &omap2_table_mpu_recalc, .set_rate = &omap2_select_table_rate, .round_rate = &omap2_round_to_table_rate, }; DEFINE_STRUCT_CLK_HW_OMAP(virt_prcm_set, NULL); DEFINE_STRUCT_CLK(virt_prcm_set, virt_prcm_set_parent_names, virt_prcm_set_ops); static struct clk wdt1_ick; static struct clk_hw_omap wdt1_ick_hw = { .hw = { .clk = &wdt1_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(WKUP_MOD, CM_ICLKEN), .enable_bit = OMAP24XX_EN_WDT1_SHIFT, .clkdm_name = "wkup_clkdm", }; DEFINE_STRUCT_CLK(wdt1_ick, gpios_ick_parent_names, aes_ick_ops); static struct clk wdt4_fck; static struct clk_hw_omap wdt4_fck_hw = { .hw = { .clk = &wdt4_fck, }, .ops = &clkhwops_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_FCLKEN1), .enable_bit = OMAP24XX_EN_WDT4_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(wdt4_fck, gpio5_fck_parent_names, aes_ick_ops); static struct clk wdt4_ick; static struct clk_hw_omap wdt4_ick_hw = { .hw = { .clk = &wdt4_ick, }, .ops = &clkhwops_iclk_wait, .enable_reg = OMAP_CM_REGADDR(CORE_MOD, CM_ICLKEN1), .enable_bit = OMAP24XX_EN_WDT4_SHIFT, .clkdm_name = "core_l4_clkdm", }; DEFINE_STRUCT_CLK(wdt4_ick, aes_ick_parent_names, aes_ick_ops); /* * clkdev integration */ static struct omap_clk omap2430_clks[] = { /* external root sources */ CLK(NULL, "func_32k_ck", &func_32k_ck), CLK(NULL, "secure_32k_ck", &secure_32k_ck), CLK(NULL, "osc_ck", &osc_ck), CLK("twl", "fck", &osc_ck), CLK(NULL, "sys_ck", &sys_ck), CLK(NULL, "alt_ck", &alt_ck), CLK(NULL, "mcbsp_clks", &mcbsp_clks), /* internal analog sources */ CLK(NULL, "dpll_ck", &dpll_ck), CLK(NULL, "apll96_ck", &apll96_ck), CLK(NULL, "apll54_ck", &apll54_ck), /* internal prcm root sources */ CLK(NULL, "func_54m_ck", &func_54m_ck), CLK(NULL, "core_ck", &core_ck), CLK(NULL, "func_96m_ck", &func_96m_ck), CLK(NULL, "func_48m_ck", &func_48m_ck), CLK(NULL, 
"func_12m_ck", &func_12m_ck), CLK(NULL, "sys_clkout_src", &sys_clkout_src), CLK(NULL, "sys_clkout", &sys_clkout), CLK(NULL, "emul_ck", &emul_ck), /* mpu domain clocks */ CLK(NULL, "mpu_ck", &mpu_ck), /* dsp domain clocks */ CLK(NULL, "dsp_fck", &dsp_fck), CLK(NULL, "iva2_1_ick", &iva2_1_ick), /* GFX domain clocks */ CLK(NULL, "gfx_3d_fck", &gfx_3d_fck), CLK(NULL, "gfx_2d_fck", &gfx_2d_fck), CLK(NULL, "gfx_ick", &gfx_ick), /* Modem domain clocks */ CLK(NULL, "mdm_ick", &mdm_ick), CLK(NULL, "mdm_osc_ck", &mdm_osc_ck), /* DSS domain clocks */ CLK("omapdss_dss", "ick", &dss_ick), CLK(NULL, "dss_ick", &dss_ick), CLK(NULL, "dss1_fck", &dss1_fck), CLK(NULL, "dss2_fck", &dss2_fck), CLK(NULL, "dss_54m_fck", &dss_54m_fck), /* L3 domain clocks */ CLK(NULL, "core_l3_ck", &core_l3_ck), CLK(NULL, "ssi_fck", &ssi_ssr_sst_fck), CLK(NULL, "usb_l4_ick", &usb_l4_ick), /* L4 domain clocks */ CLK(NULL, "l4_ck", &l4_ck), CLK(NULL, "ssi_l4_ick", &ssi_l4_ick), /* virtual meta-group clock */ CLK(NULL, "virt_prcm_set", &virt_prcm_set), /* general l4 interface ck, multi-parent functional clk */ CLK(NULL, "gpt1_ick", &gpt1_ick), CLK(NULL, "gpt1_fck", &gpt1_fck), CLK(NULL, "gpt2_ick", &gpt2_ick), CLK(NULL, "gpt2_fck", &gpt2_fck), CLK(NULL, "gpt3_ick", &gpt3_ick), CLK(NULL, "gpt3_fck", &gpt3_fck), CLK(NULL, "gpt4_ick", &gpt4_ick), CLK(NULL, "gpt4_fck", &gpt4_fck), CLK(NULL, "gpt5_ick", &gpt5_ick), CLK(NULL, "gpt5_fck", &gpt5_fck), CLK(NULL, "gpt6_ick", &gpt6_ick), CLK(NULL, "gpt6_fck", &gpt6_fck), CLK(NULL, "gpt7_ick", &gpt7_ick), CLK(NULL, "gpt7_fck", &gpt7_fck), CLK(NULL, "gpt8_ick", &gpt8_ick), CLK(NULL, "gpt8_fck", &gpt8_fck), CLK(NULL, "gpt9_ick", &gpt9_ick), CLK(NULL, "gpt9_fck", &gpt9_fck), CLK(NULL, "gpt10_ick", &gpt10_ick), CLK(NULL, "gpt10_fck", &gpt10_fck), CLK(NULL, "gpt11_ick", &gpt11_ick), CLK(NULL, "gpt11_fck", &gpt11_fck), CLK(NULL, "gpt12_ick", &gpt12_ick), CLK(NULL, "gpt12_fck", &gpt12_fck), CLK("omap-mcbsp.1", "ick", &mcbsp1_ick), CLK(NULL, "mcbsp1_ick", &mcbsp1_ick), 
CLK(NULL, "mcbsp1_fck", &mcbsp1_fck), CLK("omap-mcbsp.2", "ick", &mcbsp2_ick), CLK(NULL, "mcbsp2_ick", &mcbsp2_ick), CLK(NULL, "mcbsp2_fck", &mcbsp2_fck), CLK("omap-mcbsp.3", "ick", &mcbsp3_ick), CLK(NULL, "mcbsp3_ick", &mcbsp3_ick), CLK(NULL, "mcbsp3_fck", &mcbsp3_fck), CLK("omap-mcbsp.4", "ick", &mcbsp4_ick), CLK(NULL, "mcbsp4_ick", &mcbsp4_ick), CLK(NULL, "mcbsp4_fck", &mcbsp4_fck), CLK("omap-mcbsp.5", "ick", &mcbsp5_ick), CLK(NULL, "mcbsp5_ick", &mcbsp5_ick), CLK(NULL, "mcbsp5_fck", &mcbsp5_fck), CLK("omap2_mcspi.1", "ick", &mcspi1_ick), CLK(NULL, "mcspi1_ick", &mcspi1_ick), CLK(NULL, "mcspi1_fck", &mcspi1_fck), CLK("omap2_mcspi.2", "ick", &mcspi2_ick), CLK(NULL, "mcspi2_ick", &mcspi2_ick), CLK(NULL, "mcspi2_fck", &mcspi2_fck), CLK("omap2_mcspi.3", "ick", &mcspi3_ick), CLK(NULL, "mcspi3_ick", &mcspi3_ick), CLK(NULL, "mcspi3_fck", &mcspi3_fck), CLK(NULL, "uart1_ick", &uart1_ick), CLK(NULL, "uart1_fck", &uart1_fck), CLK(NULL, "uart2_ick", &uart2_ick), CLK(NULL, "uart2_fck", &uart2_fck), CLK(NULL, "uart3_ick", &uart3_ick), CLK(NULL, "uart3_fck", &uart3_fck), CLK(NULL, "gpios_ick", &gpios_ick), CLK(NULL, "gpios_fck", &gpios_fck), CLK("omap_wdt", "ick", &mpu_wdt_ick), CLK(NULL, "mpu_wdt_ick", &mpu_wdt_ick), CLK(NULL, "mpu_wdt_fck", &mpu_wdt_fck), CLK(NULL, "sync_32k_ick", &sync_32k_ick), CLK(NULL, "wdt1_ick", &wdt1_ick), CLK(NULL, "omapctrl_ick", &omapctrl_ick), CLK(NULL, "icr_ick", &icr_ick), CLK("omap24xxcam", "fck", &cam_fck), CLK(NULL, "cam_fck", &cam_fck), CLK("omap24xxcam", "ick", &cam_ick), CLK(NULL, "cam_ick", &cam_ick), CLK(NULL, "mailboxes_ick", &mailboxes_ick), CLK(NULL, "wdt4_ick", &wdt4_ick), CLK(NULL, "wdt4_fck", &wdt4_fck), CLK(NULL, "mspro_ick", &mspro_ick), CLK(NULL, "mspro_fck", &mspro_fck), CLK(NULL, "fac_ick", &fac_ick), CLK(NULL, "fac_fck", &fac_fck), CLK("omap_hdq.0", "ick", &hdq_ick), CLK(NULL, "hdq_ick", &hdq_ick), CLK("omap_hdq.1", "fck", &hdq_fck), CLK(NULL, "hdq_fck", &hdq_fck), CLK("omap_i2c.1", "ick", &i2c1_ick), CLK(NULL, "i2c1_ick", 
&i2c1_ick), CLK(NULL, "i2chs1_fck", &i2chs1_fck), CLK("omap_i2c.2", "ick", &i2c2_ick), CLK(NULL, "i2c2_ick", &i2c2_ick), CLK(NULL, "i2chs2_fck", &i2chs2_fck), CLK(NULL, "gpmc_fck", &gpmc_fck), CLK(NULL, "sdma_fck", &sdma_fck), CLK(NULL, "sdma_ick", &sdma_ick), CLK(NULL, "sdrc_ick", &sdrc_ick), CLK(NULL, "des_ick", &des_ick), CLK("omap-sham", "ick", &sha_ick), CLK(NULL, "sha_ick", &sha_ick), CLK("omap_rng", "ick", &rng_ick), CLK(NULL, "rng_ick", &rng_ick), CLK("omap-aes", "ick", &aes_ick), CLK(NULL, "aes_ick", &aes_ick), CLK(NULL, "pka_ick", &pka_ick), CLK(NULL, "usb_fck", &usb_fck), CLK("musb-omap2430", "ick", &usbhs_ick), CLK(NULL, "usbhs_ick", &usbhs_ick), CLK("omap_hsmmc.0", "ick", &mmchs1_ick), CLK(NULL, "mmchs1_ick", &mmchs1_ick), CLK(NULL, "mmchs1_fck", &mmchs1_fck), CLK("omap_hsmmc.1", "ick", &mmchs2_ick), CLK(NULL, "mmchs2_ick", &mmchs2_ick), CLK(NULL, "mmchs2_fck", &mmchs2_fck), CLK(NULL, "gpio5_ick", &gpio5_ick), CLK(NULL, "gpio5_fck", &gpio5_fck), CLK(NULL, "mdm_intc_ick", &mdm_intc_ick), CLK("omap_hsmmc.0", "mmchsdb_fck", &mmchsdb1_fck), CLK(NULL, "mmchsdb1_fck", &mmchsdb1_fck), CLK("omap_hsmmc.1", "mmchsdb_fck", &mmchsdb2_fck), CLK(NULL, "mmchsdb2_fck", &mmchsdb2_fck), CLK(NULL, "timer_32k_ck", &func_32k_ck), CLK(NULL, "timer_sys_ck", &sys_ck), CLK(NULL, "timer_ext_ck", &alt_ck), CLK(NULL, "cpufreq_ck", &virt_prcm_set), }; static const char *enable_init_clks[] = { "apll96_ck", "apll54_ck", "sync_32k_ick", "omapctrl_ick", "gpmc_fck", "sdrc_ick", }; /* * init code */ int __init omap2430_clk_init(void) { prcm_clksrc_ctrl = OMAP2430_PRCM_CLKSRC_CTRL; cpu_mask = RATE_IN_243X; rate_table = omap2430_rate_table; omap2xxx_clkt_dpllcore_init(&dpll_ck_hw.hw); omap2xxx_clkt_vps_check_bootloader_rates(); omap_clocks_register(omap2430_clks, ARRAY_SIZE(omap2430_clks)); omap2xxx_clkt_vps_late_init(); omap2_clk_disable_autoidle_all(); omap2_clk_enable_init_clocks(enable_init_clks, ARRAY_SIZE(enable_init_clks)); pr_info("Clocking rate (Crystal/DPLL/MPU): 
%ld.%01ld/%ld/%ld MHz\n", (clk_get_rate(&sys_ck) / 1000000), (clk_get_rate(&sys_ck) / 100000) % 10, (clk_get_rate(&dpll_ck) / 1000000), (clk_get_rate(&mpu_ck) / 1000000)); return 0; }
gpl-2.0
jdlfg/Mecha-kernel-jdlfg
arch/sh/kernel/unwinder.c
3921
4293
/* * Copyright (C) 2009 Matt Fleming * * Based, in part, on kernel/time/clocksource.c. * * This file provides arbitration code for stack unwinders. * * Multiple stack unwinders can be available on a system, usually with * the most accurate unwinder being the currently active one. */ #include <linux/errno.h> #include <linux/list.h> #include <linux/spinlock.h> #include <linux/module.h> #include <asm/unwinder.h> #include <asm/atomic.h> /* * This is the most basic stack unwinder an architecture can * provide. For architectures without reliable frame pointers, e.g. * RISC CPUs, it can be implemented by looking through the stack for * addresses that lie within the kernel text section. * * Other CPUs, e.g. x86, can use their frame pointer register to * construct more accurate stack traces. */ static struct list_head unwinder_list; static struct unwinder stack_reader = { .name = "stack-reader", .dump = stack_reader_dump, .rating = 50, .list = { .next = &unwinder_list, .prev = &unwinder_list, }, }; /* * "curr_unwinder" points to the stack unwinder currently in use. This * is the unwinder with the highest rating. * * "unwinder_list" is a linked-list of all available unwinders, sorted * by rating. * * All modifications of "curr_unwinder" and "unwinder_list" must be * performed whilst holding "unwinder_lock". */ static struct unwinder *curr_unwinder = &stack_reader; static struct list_head unwinder_list = { .next = &stack_reader.list, .prev = &stack_reader.list, }; static DEFINE_SPINLOCK(unwinder_lock); /** * select_unwinder - Select the best registered stack unwinder. * * Private function. Must hold unwinder_lock when called. * * Select the stack unwinder with the best rating. This is useful for * setting up curr_unwinder. 
*/ static struct unwinder *select_unwinder(void) { struct unwinder *best; if (list_empty(&unwinder_list)) return NULL; best = list_entry(unwinder_list.next, struct unwinder, list); if (best == curr_unwinder) return NULL; return best; } /* * Enqueue the stack unwinder sorted by rating. */ static int unwinder_enqueue(struct unwinder *ops) { struct list_head *tmp, *entry = &unwinder_list; list_for_each(tmp, &unwinder_list) { struct unwinder *o; o = list_entry(tmp, struct unwinder, list); if (o == ops) return -EBUSY; /* Keep track of the place, where to insert */ if (o->rating >= ops->rating) entry = tmp; } list_add(&ops->list, entry); return 0; } /** * unwinder_register - Used to install new stack unwinder * @u: unwinder to be registered * * Install the new stack unwinder on the unwinder list, which is sorted * by rating. * * Returns -EBUSY if registration fails, zero otherwise. */ int unwinder_register(struct unwinder *u) { unsigned long flags; int ret; spin_lock_irqsave(&unwinder_lock, flags); ret = unwinder_enqueue(u); if (!ret) curr_unwinder = select_unwinder(); spin_unlock_irqrestore(&unwinder_lock, flags); return ret; } int unwinder_faulted = 0; /* * Unwind the call stack and pass information to the stacktrace_ops * functions. Also handle the case where we need to switch to a new * stack dumper because the current one faulted unexpectedly. */ void unwind_stack(struct task_struct *task, struct pt_regs *regs, unsigned long *sp, const struct stacktrace_ops *ops, void *data) { unsigned long flags; /* * The problem with unwinders with high ratings is that they are * inherently more complicated than the simple ones with lower * ratings. We are therefore more likely to fault in the * complicated ones, e.g. hitting BUG()s. If we fault in the * code for the current stack unwinder we try to downgrade to * one with a lower rating. * * Hopefully this will give us a semi-reliable stacktrace so we * can diagnose why curr_unwinder->dump() faulted. 
*/ if (unwinder_faulted) { spin_lock_irqsave(&unwinder_lock, flags); /* Make sure no one beat us to changing the unwinder */ if (unwinder_faulted && !list_is_singular(&unwinder_list)) { list_del(&curr_unwinder->list); curr_unwinder = select_unwinder(); unwinder_faulted = 0; } spin_unlock_irqrestore(&unwinder_lock, flags); } curr_unwinder->dump(task, regs, sp, ops, data); } EXPORT_SYMBOL_GPL(unwind_stack);
gpl-2.0
Vegaviet-DevTeam/android_kernel_pantech_ef50l
net/dns_resolver/dns_key.c
4433
8163
/* Key type used to cache DNS lookups made by the kernel * * See Documentation/networking/dns_resolver.txt * * Copyright (c) 2007 Igor Mammedov * Author(s): Igor Mammedov (niallain@gmail.com) * Steve French (sfrench@us.ibm.com) * Wang Lei (wang840925@gmail.com) * David Howells (dhowells@redhat.com) * * This library is free software; you can redistribute it and/or modify * it under the terms of the GNU Lesser General Public License as published * by the Free Software Foundation; either version 2.1 of the License, or * (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See * the GNU Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public License * along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include <linux/string.h> #include <linux/kernel.h> #include <linux/keyctl.h> #include <linux/err.h> #include <linux/seq_file.h> #include <keys/dns_resolver-type.h> #include <keys/user-type.h> #include "internal.h" MODULE_DESCRIPTION("DNS Resolver"); MODULE_AUTHOR("Wang Lei"); MODULE_LICENSE("GPL"); unsigned dns_resolver_debug; module_param_named(debug, dns_resolver_debug, uint, S_IWUSR | S_IRUGO); MODULE_PARM_DESC(debug, "DNS Resolver debugging mask"); const struct cred *dns_resolver_cache; #define DNS_ERRORNO_OPTION "dnserror" /* * Instantiate a user defined key for dns_resolver. * * The data must be a NUL-terminated string, with the NUL char accounted in * datalen. * * If the data contains a '#' characters, then we take the clause after each * one to be an option of the form 'key=value'. The actual data of interest is * the string leading up to the first '#'. 
For instance: * * "ip1,ip2,...#foo=bar" */ static int dns_resolver_instantiate(struct key *key, const void *_data, size_t datalen) { struct user_key_payload *upayload; unsigned long derrno; int ret; size_t result_len = 0; const char *data = _data, *end, *opt; kenter("%%%d,%s,'%*.*s',%zu", key->serial, key->description, (int)datalen, (int)datalen, data, datalen); if (datalen <= 1 || !data || data[datalen - 1] != '\0') return -EINVAL; datalen--; /* deal with any options embedded in the data */ end = data + datalen; opt = memchr(data, '#', datalen); if (!opt) { /* no options: the entire data is the result */ kdebug("no options"); result_len = datalen; } else { const char *next_opt; result_len = opt - data; opt++; kdebug("options: '%s'", opt); do { const char *eq; int opt_len, opt_nlen, opt_vlen, tmp; next_opt = memchr(opt, '#', end - opt) ?: end; opt_len = next_opt - opt; if (!opt_len) { printk(KERN_WARNING "Empty option to dns_resolver key %d\n", key->serial); return -EINVAL; } eq = memchr(opt, '=', opt_len) ?: end; opt_nlen = eq - opt; eq++; opt_vlen = next_opt - eq; /* will be -1 if no value */ tmp = opt_vlen >= 0 ? opt_vlen : 0; kdebug("option '%*.*s' val '%*.*s'", opt_nlen, opt_nlen, opt, tmp, tmp, eq); /* see if it's an error number representing a DNS error * that's to be recorded as the result in this key */ if (opt_nlen == sizeof(DNS_ERRORNO_OPTION) - 1 && memcmp(opt, DNS_ERRORNO_OPTION, opt_nlen) == 0) { kdebug("dns error number option"); if (opt_vlen <= 0) goto bad_option_value; ret = strict_strtoul(eq, 10, &derrno); if (ret < 0) goto bad_option_value; if (derrno < 1 || derrno > 511) goto bad_option_value; kdebug("dns error no. 
= %lu", derrno); key->type_data.x[0] = -derrno; continue; } bad_option_value: printk(KERN_WARNING "Option '%*.*s' to dns_resolver key %d:" " bad/missing value\n", opt_nlen, opt_nlen, opt, key->serial); return -EINVAL; } while (opt = next_opt + 1, opt < end); } /* don't cache the result if we're caching an error saying there's no * result */ if (key->type_data.x[0]) { kleave(" = 0 [h_error %ld]", key->type_data.x[0]); return 0; } kdebug("store result"); ret = key_payload_reserve(key, result_len); if (ret < 0) return -EINVAL; upayload = kmalloc(sizeof(*upayload) + result_len + 1, GFP_KERNEL); if (!upayload) { kleave(" = -ENOMEM"); return -ENOMEM; } upayload->datalen = result_len; memcpy(upayload->data, data, result_len); upayload->data[result_len] = '\0'; rcu_assign_pointer(key->payload.data, upayload); kleave(" = 0"); return 0; } /* * The description is of the form "[<type>:]<domain_name>" * * The domain name may be a simple name or an absolute domain name (which * should end with a period). The domain name is case-independent. 
*/ static int dns_resolver_match(const struct key *key, const void *description) { int slen, dlen, ret = 0; const char *src = key->description, *dsp = description; kenter("%s,%s", src, dsp); if (!src || !dsp) goto no_match; if (strcasecmp(src, dsp) == 0) goto matched; slen = strlen(src); dlen = strlen(dsp); if (slen <= 0 || dlen <= 0) goto no_match; if (src[slen - 1] == '.') slen--; if (dsp[dlen - 1] == '.') dlen--; if (slen != dlen || strncasecmp(src, dsp, slen) != 0) goto no_match; matched: ret = 1; no_match: kleave(" = %d", ret); return ret; } /* * Describe a DNS key */ static void dns_resolver_describe(const struct key *key, struct seq_file *m) { int err = key->type_data.x[0]; seq_puts(m, key->description); if (key_is_instantiated(key)) { if (err) seq_printf(m, ": %d", err); else seq_printf(m, ": %u", key->datalen); } } /* * read the DNS data * - the key's semaphore is read-locked */ static long dns_resolver_read(const struct key *key, char __user *buffer, size_t buflen) { if (key->type_data.x[0]) return key->type_data.x[0]; return user_read(key, buffer, buflen); } struct key_type key_type_dns_resolver = { .name = "dns_resolver", .instantiate = dns_resolver_instantiate, .match = dns_resolver_match, .revoke = user_revoke, .destroy = user_destroy, .describe = dns_resolver_describe, .read = dns_resolver_read, }; static int __init init_dns_resolver(void) { struct cred *cred; struct key *keyring; int ret; printk(KERN_NOTICE "Registering the %s key type\n", key_type_dns_resolver.name); /* create an override credential set with a special thread keyring in * which DNS requests are cached * * this is used to prevent malicious redirections from being installed * with add_key(). 
*/ cred = prepare_kernel_cred(NULL); if (!cred) return -ENOMEM; keyring = key_alloc(&key_type_keyring, ".dns_resolver", 0, 0, cred, (KEY_POS_ALL & ~KEY_POS_SETATTR) | KEY_USR_VIEW | KEY_USR_READ, KEY_ALLOC_NOT_IN_QUOTA); if (IS_ERR(keyring)) { ret = PTR_ERR(keyring); goto failed_put_cred; } ret = key_instantiate_and_link(keyring, NULL, 0, NULL, NULL); if (ret < 0) goto failed_put_key; ret = register_key_type(&key_type_dns_resolver); if (ret < 0) goto failed_put_key; /* instruct request_key() to use this special keyring as a cache for * the results it looks up */ set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags); cred->thread_keyring = keyring; cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING; dns_resolver_cache = cred; kdebug("DNS resolver keyring: %d\n", key_serial(keyring)); return 0; failed_put_key: key_put(keyring); failed_put_cred: put_cred(cred); return ret; } static void __exit exit_dns_resolver(void) { key_revoke(dns_resolver_cache->thread_keyring); unregister_key_type(&key_type_dns_resolver); put_cred(dns_resolver_cache); printk(KERN_NOTICE "Unregistered %s key type\n", key_type_dns_resolver.name); } module_init(init_dns_resolver) module_exit(exit_dns_resolver) MODULE_LICENSE("GPL");
gpl-2.0
Envious-Data/shinano-sirius_msm8974abpro-511
drivers/gpu/drm/nouveau/nouveau_dma.c
4945
7763
/* * Copyright (C) 2007 Ben Skeggs. * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining * a copy of this software and associated documentation files (the * "Software"), to deal in the Software without restriction, including * without limitation the rights to use, copy, modify, merge, publish, * distribute, sublicense, and/or sell copies of the Software, and to * permit persons to whom the Software is furnished to do so, subject to * the following conditions: * * The above copyright notice and this permission notice (including the * next paragraph) shall be included in all copies or substantial * portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. * IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
* */ #include "drmP.h" #include "drm.h" #include "nouveau_drv.h" #include "nouveau_dma.h" #include "nouveau_ramht.h" void nouveau_dma_init(struct nouveau_channel *chan) { struct drm_nouveau_private *dev_priv = chan->dev->dev_private; struct nouveau_bo *pushbuf = chan->pushbuf_bo; if (dev_priv->card_type >= NV_50) { const int ib_size = pushbuf->bo.mem.size / 2; chan->dma.ib_base = (pushbuf->bo.mem.size - ib_size) >> 2; chan->dma.ib_max = (ib_size / 8) - 1; chan->dma.ib_put = 0; chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put; chan->dma.max = (pushbuf->bo.mem.size - ib_size) >> 2; } else { chan->dma.max = (pushbuf->bo.mem.size >> 2) - 2; } chan->dma.put = 0; chan->dma.cur = chan->dma.put; chan->dma.free = chan->dma.max - chan->dma.cur; } void OUT_RINGp(struct nouveau_channel *chan, const void *data, unsigned nr_dwords) { bool is_iomem; u32 *mem = ttm_kmap_obj_virtual(&chan->pushbuf_bo->kmap, &is_iomem); mem = &mem[chan->dma.cur]; if (is_iomem) memcpy_toio((void __force __iomem *)mem, data, nr_dwords * 4); else memcpy(mem, data, nr_dwords * 4); chan->dma.cur += nr_dwords; } /* Fetch and adjust GPU GET pointer * * Returns: * value >= 0, the adjusted GET pointer * -EINVAL if GET pointer currently outside main push buffer * -EBUSY if timeout exceeded */ static inline int READ_GET(struct nouveau_channel *chan, uint64_t *prev_get, int *timeout) { uint64_t val; val = nvchan_rd32(chan, chan->user_get); if (chan->user_get_hi) val |= (uint64_t)nvchan_rd32(chan, chan->user_get_hi) << 32; /* reset counter as long as GET is still advancing, this is * to avoid misdetecting a GPU lockup if the GPU happens to * just be processing an operation that takes a long time */ if (val != *prev_get) { *prev_get = val; *timeout = 0; } if ((++*timeout & 0xff) == 0) { DRM_UDELAY(1); if (*timeout > 100000) return -EBUSY; } if (val < chan->pushbuf_base || val > chan->pushbuf_base + (chan->dma.max << 2)) return -EINVAL; return (val - chan->pushbuf_base) >> 2; } void nv50_dma_push(struct 
nouveau_channel *chan, struct nouveau_bo *bo, int delta, int length) { struct nouveau_bo *pb = chan->pushbuf_bo; struct nouveau_vma *vma; int ip = (chan->dma.ib_put * 2) + chan->dma.ib_base; u64 offset; vma = nouveau_bo_vma_find(bo, chan->vm); BUG_ON(!vma); offset = vma->offset + delta; BUG_ON(chan->dma.ib_free < 1); nouveau_bo_wr32(pb, ip++, lower_32_bits(offset)); nouveau_bo_wr32(pb, ip++, upper_32_bits(offset) | length << 8); chan->dma.ib_put = (chan->dma.ib_put + 1) & chan->dma.ib_max; DRM_MEMORYBARRIER(); /* Flush writes. */ nouveau_bo_rd32(pb, 0); nvchan_wr32(chan, 0x8c, chan->dma.ib_put); chan->dma.ib_free--; } static int nv50_dma_push_wait(struct nouveau_channel *chan, int count) { uint32_t cnt = 0, prev_get = 0; while (chan->dma.ib_free < count) { uint32_t get = nvchan_rd32(chan, 0x88); if (get != prev_get) { prev_get = get; cnt = 0; } if ((++cnt & 0xff) == 0) { DRM_UDELAY(1); if (cnt > 100000) return -EBUSY; } chan->dma.ib_free = get - chan->dma.ib_put; if (chan->dma.ib_free <= 0) chan->dma.ib_free += chan->dma.ib_max; } return 0; } static int nv50_dma_wait(struct nouveau_channel *chan, int slots, int count) { uint64_t prev_get = 0; int ret, cnt = 0; ret = nv50_dma_push_wait(chan, slots + 1); if (unlikely(ret)) return ret; while (chan->dma.free < count) { int get = READ_GET(chan, &prev_get, &cnt); if (unlikely(get < 0)) { if (get == -EINVAL) continue; return get; } if (get <= chan->dma.cur) { chan->dma.free = chan->dma.max - chan->dma.cur; if (chan->dma.free >= count) break; FIRE_RING(chan); do { get = READ_GET(chan, &prev_get, &cnt); if (unlikely(get < 0)) { if (get == -EINVAL) continue; return get; } } while (get == 0); chan->dma.cur = 0; chan->dma.put = 0; } chan->dma.free = get - chan->dma.cur - 1; } return 0; } int nouveau_dma_wait(struct nouveau_channel *chan, int slots, int size) { uint64_t prev_get = 0; int cnt = 0, get; if (chan->dma.ib_max) return nv50_dma_wait(chan, slots, size); while (chan->dma.free < size) { get = READ_GET(chan, &prev_get, 
&cnt); if (unlikely(get == -EBUSY)) return -EBUSY; /* loop until we have a usable GET pointer. the value * we read from the GPU may be outside the main ring if * PFIFO is processing a buffer called from the main ring, * discard these values until something sensible is seen. * * the other case we discard GET is while the GPU is fetching * from the SKIPS area, so the code below doesn't have to deal * with some fun corner cases. */ if (unlikely(get == -EINVAL) || get < NOUVEAU_DMA_SKIPS) continue; if (get <= chan->dma.cur) { /* engine is fetching behind us, or is completely * idle (GET == PUT) so we have free space up until * the end of the push buffer * * we can only hit that path once per call due to * looping back to the beginning of the push buffer, * we'll hit the fetching-ahead-of-us path from that * point on. * * the *one* exception to that rule is if we read * GET==PUT, in which case the below conditional will * always succeed and break us out of the wait loop. */ chan->dma.free = chan->dma.max - chan->dma.cur; if (chan->dma.free >= size) break; /* not enough space left at the end of the push buffer, * instruct the GPU to jump back to the start right * after processing the currently pending commands. */ OUT_RING(chan, chan->pushbuf_base | 0x20000000); /* wait for GET to depart from the skips area. * prevents writing GET==PUT and causing a race * condition that causes us to think the GPU is * idle when it's not. */ do { get = READ_GET(chan, &prev_get, &cnt); if (unlikely(get == -EBUSY)) return -EBUSY; if (unlikely(get == -EINVAL)) continue; } while (get <= NOUVEAU_DMA_SKIPS); WRITE_PUT(NOUVEAU_DMA_SKIPS); /* we're now submitting commands at the start of * the push buffer. */ chan->dma.cur = chan->dma.put = NOUVEAU_DMA_SKIPS; } /* engine fetching ahead of us, we have space up until the * current GET pointer. the "- 1" is to ensure there's * space left to emit a jump back to the beginning of the * push buffer if we require it. 
we can never get GET == PUT * here, so this is safe. */ chan->dma.free = get - chan->dma.cur - 1; } return 0; }
gpl-2.0
hyuh/kernel-dlx
drivers/media/video/ks0127.c
4945
21037
/* * Video Capture Driver (Video for Linux 1/2) * for the Matrox Marvel G200,G400 and Rainbow Runner-G series * * This module is an interface to the KS0127 video decoder chip. * * Copyright (C) 1999 Ryan Drake <stiletto@mediaone.net> * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. * ***************************************************************************** * * Modified and extended by * Mike Bernson <mike@mlb.org> * Gerard v.d. Horst * Leon van Stuivenberg <l.vanstuivenberg@chello.nl> * Gernot Ziegler <gz@lysator.liu.se> * * Version History: * V1.0 Ryan Drake Initial version by Ryan Drake * V1.1 Gerard v.d. 
Horst Added some debugoutput, reset the video-standard */ #include <linux/init.h> #include <linux/module.h> #include <linux/delay.h> #include <linux/errno.h> #include <linux/kernel.h> #include <linux/i2c.h> #include <linux/videodev2.h> #include <linux/slab.h> #include <media/v4l2-device.h> #include <media/v4l2-chip-ident.h> #include "ks0127.h" MODULE_DESCRIPTION("KS0127 video decoder driver"); MODULE_AUTHOR("Ryan Drake"); MODULE_LICENSE("GPL"); /* Addresses */ #define I2C_KS0127_ADDON 0xD8 #define I2C_KS0127_ONBOARD 0xDA /* ks0127 control registers */ #define KS_STAT 0x00 #define KS_CMDA 0x01 #define KS_CMDB 0x02 #define KS_CMDC 0x03 #define KS_CMDD 0x04 #define KS_HAVB 0x05 #define KS_HAVE 0x06 #define KS_HS1B 0x07 #define KS_HS1E 0x08 #define KS_HS2B 0x09 #define KS_HS2E 0x0a #define KS_AGC 0x0b #define KS_HXTRA 0x0c #define KS_CDEM 0x0d #define KS_PORTAB 0x0e #define KS_LUMA 0x0f #define KS_CON 0x10 #define KS_BRT 0x11 #define KS_CHROMA 0x12 #define KS_CHROMB 0x13 #define KS_DEMOD 0x14 #define KS_SAT 0x15 #define KS_HUE 0x16 #define KS_VERTIA 0x17 #define KS_VERTIB 0x18 #define KS_VERTIC 0x19 #define KS_HSCLL 0x1a #define KS_HSCLH 0x1b #define KS_VSCLL 0x1c #define KS_VSCLH 0x1d #define KS_OFMTA 0x1e #define KS_OFMTB 0x1f #define KS_VBICTL 0x20 #define KS_CCDAT2 0x21 #define KS_CCDAT1 0x22 #define KS_VBIL30 0x23 #define KS_VBIL74 0x24 #define KS_VBIL118 0x25 #define KS_VBIL1512 0x26 #define KS_TTFRAM 0x27 #define KS_TESTA 0x28 #define KS_UVOFFH 0x29 #define KS_UVOFFL 0x2a #define KS_UGAIN 0x2b #define KS_VGAIN 0x2c #define KS_VAVB 0x2d #define KS_VAVE 0x2e #define KS_CTRACK 0x2f #define KS_POLCTL 0x30 #define KS_REFCOD 0x31 #define KS_INVALY 0x32 #define KS_INVALU 0x33 #define KS_INVALV 0x34 #define KS_UNUSEY 0x35 #define KS_UNUSEU 0x36 #define KS_UNUSEV 0x37 #define KS_USRSAV 0x38 #define KS_USREAV 0x39 #define KS_SHS1A 0x3a #define KS_SHS1B 0x3b #define KS_SHS1C 0x3c #define KS_CMDE 0x3d #define KS_VSDEL 0x3e #define KS_CMDF 0x3f #define KS_GAMMA0 0x40 #define 
KS_GAMMA1 0x41 #define KS_GAMMA2 0x42 #define KS_GAMMA3 0x43 #define KS_GAMMA4 0x44 #define KS_GAMMA5 0x45 #define KS_GAMMA6 0x46 #define KS_GAMMA7 0x47 #define KS_GAMMA8 0x48 #define KS_GAMMA9 0x49 #define KS_GAMMA10 0x4a #define KS_GAMMA11 0x4b #define KS_GAMMA12 0x4c #define KS_GAMMA13 0x4d #define KS_GAMMA14 0x4e #define KS_GAMMA15 0x4f #define KS_GAMMA16 0x50 #define KS_GAMMA17 0x51 #define KS_GAMMA18 0x52 #define KS_GAMMA19 0x53 #define KS_GAMMA20 0x54 #define KS_GAMMA21 0x55 #define KS_GAMMA22 0x56 #define KS_GAMMA23 0x57 #define KS_GAMMA24 0x58 #define KS_GAMMA25 0x59 #define KS_GAMMA26 0x5a #define KS_GAMMA27 0x5b #define KS_GAMMA28 0x5c #define KS_GAMMA29 0x5d #define KS_GAMMA30 0x5e #define KS_GAMMA31 0x5f #define KS_GAMMAD0 0x60 #define KS_GAMMAD1 0x61 #define KS_GAMMAD2 0x62 #define KS_GAMMAD3 0x63 #define KS_GAMMAD4 0x64 #define KS_GAMMAD5 0x65 #define KS_GAMMAD6 0x66 #define KS_GAMMAD7 0x67 #define KS_GAMMAD8 0x68 #define KS_GAMMAD9 0x69 #define KS_GAMMAD10 0x6a #define KS_GAMMAD11 0x6b #define KS_GAMMAD12 0x6c #define KS_GAMMAD13 0x6d #define KS_GAMMAD14 0x6e #define KS_GAMMAD15 0x6f #define KS_GAMMAD16 0x70 #define KS_GAMMAD17 0x71 #define KS_GAMMAD18 0x72 #define KS_GAMMAD19 0x73 #define KS_GAMMAD20 0x74 #define KS_GAMMAD21 0x75 #define KS_GAMMAD22 0x76 #define KS_GAMMAD23 0x77 #define KS_GAMMAD24 0x78 #define KS_GAMMAD25 0x79 #define KS_GAMMAD26 0x7a #define KS_GAMMAD27 0x7b #define KS_GAMMAD28 0x7c #define KS_GAMMAD29 0x7d #define KS_GAMMAD30 0x7e #define KS_GAMMAD31 0x7f /**************************************************************************** * mga_dev : represents one ks0127 chip. 
****************************************************************************/ struct adjust { int contrast; int bright; int hue; int ugain; int vgain; }; struct ks0127 { struct v4l2_subdev sd; v4l2_std_id norm; int ident; u8 regs[256]; }; static inline struct ks0127 *to_ks0127(struct v4l2_subdev *sd) { return container_of(sd, struct ks0127, sd); } static int debug; /* insmod parameter */ module_param(debug, int, 0); MODULE_PARM_DESC(debug, "Debug output"); static u8 reg_defaults[64]; static void init_reg_defaults(void) { static int initialized; u8 *table = reg_defaults; if (initialized) return; initialized = 1; table[KS_CMDA] = 0x2c; /* VSE=0, CCIR 601, autodetect standard */ table[KS_CMDB] = 0x12; /* VALIGN=0, AGC control and input */ table[KS_CMDC] = 0x00; /* Test options */ /* clock & input select, write 1 to PORTA */ table[KS_CMDD] = 0x01; table[KS_HAVB] = 0x00; /* HAV Start Control */ table[KS_HAVE] = 0x00; /* HAV End Control */ table[KS_HS1B] = 0x10; /* HS1 Start Control */ table[KS_HS1E] = 0x00; /* HS1 End Control */ table[KS_HS2B] = 0x00; /* HS2 Start Control */ table[KS_HS2E] = 0x00; /* HS2 End Control */ table[KS_AGC] = 0x53; /* Manual setting for AGC */ table[KS_HXTRA] = 0x00; /* Extra Bits for HAV and HS1/2 */ table[KS_CDEM] = 0x00; /* Chroma Demodulation Control */ table[KS_PORTAB] = 0x0f; /* port B is input, port A output GPPORT */ table[KS_LUMA] = 0x01; /* Luma control */ table[KS_CON] = 0x00; /* Contrast Control */ table[KS_BRT] = 0x00; /* Brightness Control */ table[KS_CHROMA] = 0x2a; /* Chroma control A */ table[KS_CHROMB] = 0x90; /* Chroma control B */ table[KS_DEMOD] = 0x00; /* Chroma Demodulation Control & Status */ table[KS_SAT] = 0x00; /* Color Saturation Control*/ table[KS_HUE] = 0x00; /* Hue Control */ table[KS_VERTIA] = 0x00; /* Vertical Processing Control A */ /* Vertical Processing Control B, luma 1 line delayed */ table[KS_VERTIB] = 0x12; table[KS_VERTIC] = 0x0b; /* Vertical Processing Control C */ table[KS_HSCLL] = 0x00; /* Horizontal 
Scaling Ratio Low */ table[KS_HSCLH] = 0x00; /* Horizontal Scaling Ratio High */ table[KS_VSCLL] = 0x00; /* Vertical Scaling Ratio Low */ table[KS_VSCLH] = 0x00; /* Vertical Scaling Ratio High */ /* 16 bit YCbCr 4:2:2 output; I can't make the bt866 like 8 bit /Sam */ table[KS_OFMTA] = 0x30; table[KS_OFMTB] = 0x00; /* Output Control B */ /* VBI Decoder Control; 4bit fmt: avoid Y overflow */ table[KS_VBICTL] = 0x5d; table[KS_CCDAT2] = 0x00; /* Read Only register */ table[KS_CCDAT1] = 0x00; /* Read Only register */ table[KS_VBIL30] = 0xa8; /* VBI data decoding options */ table[KS_VBIL74] = 0xaa; /* VBI data decoding options */ table[KS_VBIL118] = 0x2a; /* VBI data decoding options */ table[KS_VBIL1512] = 0x00; /* VBI data decoding options */ table[KS_TTFRAM] = 0x00; /* Teletext frame alignment pattern */ table[KS_TESTA] = 0x00; /* test register, shouldn't be written */ table[KS_UVOFFH] = 0x00; /* UV Offset Adjustment High */ table[KS_UVOFFL] = 0x00; /* UV Offset Adjustment Low */ table[KS_UGAIN] = 0x00; /* U Component Gain Adjustment */ table[KS_VGAIN] = 0x00; /* V Component Gain Adjustment */ table[KS_VAVB] = 0x07; /* VAV Begin */ table[KS_VAVE] = 0x00; /* VAV End */ table[KS_CTRACK] = 0x00; /* Chroma Tracking Control */ table[KS_POLCTL] = 0x41; /* Timing Signal Polarity Control */ table[KS_REFCOD] = 0x80; /* Reference Code Insertion Control */ table[KS_INVALY] = 0x10; /* Invalid Y Code */ table[KS_INVALU] = 0x80; /* Invalid U Code */ table[KS_INVALV] = 0x80; /* Invalid V Code */ table[KS_UNUSEY] = 0x10; /* Unused Y Code */ table[KS_UNUSEU] = 0x80; /* Unused U Code */ table[KS_UNUSEV] = 0x80; /* Unused V Code */ table[KS_USRSAV] = 0x00; /* reserved */ table[KS_USREAV] = 0x00; /* reserved */ table[KS_SHS1A] = 0x00; /* User Defined SHS1 A */ /* User Defined SHS1 B, ALT656=1 on 0127B */ table[KS_SHS1B] = 0x80; table[KS_SHS1C] = 0x00; /* User Defined SHS1 C */ table[KS_CMDE] = 0x00; /* Command Register E */ table[KS_VSDEL] = 0x00; /* VS Delay Control */ /* Command 
Register F, update -immediately- */ /* (there might come no vsync)*/ table[KS_CMDF] = 0x02; } /* We need to manually read because of a bug in the KS0127 chip. * * An explanation from kayork@mail.utexas.edu: * * During I2C reads, the KS0127 only samples for a stop condition * during the place where the acknowledge bit should be. Any standard * I2C implementation (correctly) throws in another clock transition * at the 9th bit, and the KS0127 will not recognize the stop condition * and will continue to clock out data. * * So we have to do the read ourself. Big deal. * workaround in i2c-algo-bit */ static u8 ks0127_read(struct v4l2_subdev *sd, u8 reg) { struct i2c_client *client = v4l2_get_subdevdata(sd); char val = 0; struct i2c_msg msgs[] = { { client->addr, 0, sizeof(reg), &reg }, { client->addr, I2C_M_RD | I2C_M_NO_RD_ACK, sizeof(val), &val } }; int ret; ret = i2c_transfer(client->adapter, msgs, ARRAY_SIZE(msgs)); if (ret != ARRAY_SIZE(msgs)) v4l2_dbg(1, debug, sd, "read error\n"); return val; } static void ks0127_write(struct v4l2_subdev *sd, u8 reg, u8 val) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ks0127 *ks = to_ks0127(sd); char msg[] = { reg, val }; if (i2c_master_send(client, msg, sizeof(msg)) != sizeof(msg)) v4l2_dbg(1, debug, sd, "write error\n"); ks->regs[reg] = val; } /* generic bit-twiddling */ static void ks0127_and_or(struct v4l2_subdev *sd, u8 reg, u8 and_v, u8 or_v) { struct ks0127 *ks = to_ks0127(sd); u8 val = ks->regs[reg]; val = (val & and_v) | or_v; ks0127_write(sd, reg, val); } /**************************************************************************** * ks0127 private api ****************************************************************************/ static void ks0127_init(struct v4l2_subdev *sd) { struct ks0127 *ks = to_ks0127(sd); u8 *table = reg_defaults; int i; ks->ident = V4L2_IDENT_KS0127; v4l2_dbg(1, debug, sd, "reset\n"); msleep(1); /* initialize all registers to known values */ /* (except STAT, 0x21, 0x22, TEST and 
0x38,0x39) */ for (i = 1; i < 33; i++) ks0127_write(sd, i, table[i]); for (i = 35; i < 40; i++) ks0127_write(sd, i, table[i]); for (i = 41; i < 56; i++) ks0127_write(sd, i, table[i]); for (i = 58; i < 64; i++) ks0127_write(sd, i, table[i]); if ((ks0127_read(sd, KS_STAT) & 0x80) == 0) { ks->ident = V4L2_IDENT_KS0122S; v4l2_dbg(1, debug, sd, "ks0122s found\n"); return; } switch (ks0127_read(sd, KS_CMDE) & 0x0f) { case 0: v4l2_dbg(1, debug, sd, "ks0127 found\n"); break; case 9: ks->ident = V4L2_IDENT_KS0127B; v4l2_dbg(1, debug, sd, "ks0127B Revision A found\n"); break; default: v4l2_dbg(1, debug, sd, "unknown revision\n"); break; } } static int ks0127_s_routing(struct v4l2_subdev *sd, u32 input, u32 output, u32 config) { struct ks0127 *ks = to_ks0127(sd); switch (input) { case KS_INPUT_COMPOSITE_1: case KS_INPUT_COMPOSITE_2: case KS_INPUT_COMPOSITE_3: case KS_INPUT_COMPOSITE_4: case KS_INPUT_COMPOSITE_5: case KS_INPUT_COMPOSITE_6: v4l2_dbg(1, debug, sd, "s_routing %d: Composite\n", input); /* autodetect 50/60 Hz */ ks0127_and_or(sd, KS_CMDA, 0xfc, 0x00); /* VSE=0 */ ks0127_and_or(sd, KS_CMDA, ~0x40, 0x00); /* set input line */ ks0127_and_or(sd, KS_CMDB, 0xb0, input); /* non-freerunning mode */ ks0127_and_or(sd, KS_CMDC, 0x70, 0x0a); /* analog input */ ks0127_and_or(sd, KS_CMDD, 0x03, 0x00); /* enable chroma demodulation */ ks0127_and_or(sd, KS_CTRACK, 0xcf, 0x00); /* chroma trap, HYBWR=1 */ ks0127_and_or(sd, KS_LUMA, 0x00, (reg_defaults[KS_LUMA])|0x0c); /* scaler fullbw, luma comb off */ ks0127_and_or(sd, KS_VERTIA, 0x08, 0x81); /* manual chroma comb .25 .5 .25 */ ks0127_and_or(sd, KS_VERTIC, 0x0f, 0x90); /* chroma path delay */ ks0127_and_or(sd, KS_CHROMB, 0x0f, 0x90); ks0127_write(sd, KS_UGAIN, reg_defaults[KS_UGAIN]); ks0127_write(sd, KS_VGAIN, reg_defaults[KS_VGAIN]); ks0127_write(sd, KS_UVOFFH, reg_defaults[KS_UVOFFH]); ks0127_write(sd, KS_UVOFFL, reg_defaults[KS_UVOFFL]); break; case KS_INPUT_SVIDEO_1: case KS_INPUT_SVIDEO_2: case KS_INPUT_SVIDEO_3: v4l2_dbg(1, 
debug, sd, "s_routing %d: S-Video\n", input); /* autodetect 50/60 Hz */ ks0127_and_or(sd, KS_CMDA, 0xfc, 0x00); /* VSE=0 */ ks0127_and_or(sd, KS_CMDA, ~0x40, 0x00); /* set input line */ ks0127_and_or(sd, KS_CMDB, 0xb0, input); /* non-freerunning mode */ ks0127_and_or(sd, KS_CMDC, 0x70, 0x0a); /* analog input */ ks0127_and_or(sd, KS_CMDD, 0x03, 0x00); /* enable chroma demodulation */ ks0127_and_or(sd, KS_CTRACK, 0xcf, 0x00); ks0127_and_or(sd, KS_LUMA, 0x00, reg_defaults[KS_LUMA]); /* disable luma comb */ ks0127_and_or(sd, KS_VERTIA, 0x08, (reg_defaults[KS_VERTIA]&0xf0)|0x01); ks0127_and_or(sd, KS_VERTIC, 0x0f, reg_defaults[KS_VERTIC]&0xf0); ks0127_and_or(sd, KS_CHROMB, 0x0f, reg_defaults[KS_CHROMB]&0xf0); ks0127_write(sd, KS_UGAIN, reg_defaults[KS_UGAIN]); ks0127_write(sd, KS_VGAIN, reg_defaults[KS_VGAIN]); ks0127_write(sd, KS_UVOFFH, reg_defaults[KS_UVOFFH]); ks0127_write(sd, KS_UVOFFL, reg_defaults[KS_UVOFFL]); break; case KS_INPUT_YUV656: v4l2_dbg(1, debug, sd, "s_routing 15: YUV656\n"); if (ks->norm & V4L2_STD_525_60) /* force 60 Hz */ ks0127_and_or(sd, KS_CMDA, 0xfc, 0x03); else /* force 50 Hz */ ks0127_and_or(sd, KS_CMDA, 0xfc, 0x02); ks0127_and_or(sd, KS_CMDA, 0xff, 0x40); /* VSE=1 */ /* set input line and VALIGN */ ks0127_and_or(sd, KS_CMDB, 0xb0, (input | 0x40)); /* freerunning mode, */ /* TSTGEN = 1 TSTGFR=11 TSTGPH=0 TSTGPK=0 VMEM=1*/ ks0127_and_or(sd, KS_CMDC, 0x70, 0x87); /* digital input, SYNDIR = 0 INPSL=01 CLKDIR=0 EAV=0 */ ks0127_and_or(sd, KS_CMDD, 0x03, 0x08); /* disable chroma demodulation */ ks0127_and_or(sd, KS_CTRACK, 0xcf, 0x30); /* HYPK =01 CTRAP = 0 HYBWR=0 PED=1 RGBH=1 UNIT=1 */ ks0127_and_or(sd, KS_LUMA, 0x00, 0x71); ks0127_and_or(sd, KS_VERTIC, 0x0f, reg_defaults[KS_VERTIC]&0xf0); /* scaler fullbw, luma comb off */ ks0127_and_or(sd, KS_VERTIA, 0x08, 0x81); ks0127_and_or(sd, KS_CHROMB, 0x0f, reg_defaults[KS_CHROMB]&0xf0); ks0127_and_or(sd, KS_CON, 0x00, 0x00); ks0127_and_or(sd, KS_BRT, 0x00, 32); /* spec: 34 */ /* spec: 229 (e5) */ 
ks0127_and_or(sd, KS_SAT, 0x00, 0xe8); ks0127_and_or(sd, KS_HUE, 0x00, 0); ks0127_and_or(sd, KS_UGAIN, 0x00, 238); ks0127_and_or(sd, KS_VGAIN, 0x00, 0x00); /*UOFF:0x30, VOFF:0x30, TSTCGN=1 */ ks0127_and_or(sd, KS_UVOFFH, 0x00, 0x4f); ks0127_and_or(sd, KS_UVOFFL, 0x00, 0x00); break; default: v4l2_dbg(1, debug, sd, "s_routing: Unknown input %d\n", input); break; } /* hack: CDMLPF sometimes spontaneously switches on; */ /* force back off */ ks0127_write(sd, KS_DEMOD, reg_defaults[KS_DEMOD]); return 0; } static int ks0127_s_std(struct v4l2_subdev *sd, v4l2_std_id std) { struct ks0127 *ks = to_ks0127(sd); /* Set to automatic SECAM/Fsc mode */ ks0127_and_or(sd, KS_DEMOD, 0xf0, 0x00); ks->norm = std; if (std & V4L2_STD_NTSC) { v4l2_dbg(1, debug, sd, "s_std: NTSC_M\n"); ks0127_and_or(sd, KS_CHROMA, 0x9f, 0x20); } else if (std & V4L2_STD_PAL_N) { v4l2_dbg(1, debug, sd, "s_std: NTSC_N (fixme)\n"); ks0127_and_or(sd, KS_CHROMA, 0x9f, 0x40); } else if (std & V4L2_STD_PAL) { v4l2_dbg(1, debug, sd, "s_std: PAL_N\n"); ks0127_and_or(sd, KS_CHROMA, 0x9f, 0x20); } else if (std & V4L2_STD_PAL_M) { v4l2_dbg(1, debug, sd, "s_std: PAL_M (fixme)\n"); ks0127_and_or(sd, KS_CHROMA, 0x9f, 0x40); } else if (std & V4L2_STD_SECAM) { v4l2_dbg(1, debug, sd, "s_std: SECAM\n"); /* set to secam autodetection */ ks0127_and_or(sd, KS_CHROMA, 0xdf, 0x20); ks0127_and_or(sd, KS_DEMOD, 0xf0, 0x00); schedule_timeout_interruptible(HZ/10+1); /* did it autodetect? 
*/ if (!(ks0127_read(sd, KS_DEMOD) & 0x40)) /* force to secam mode */ ks0127_and_or(sd, KS_DEMOD, 0xf0, 0x0f); } else { v4l2_dbg(1, debug, sd, "s_std: Unknown norm %llx\n", (unsigned long long)std); } return 0; } static int ks0127_s_stream(struct v4l2_subdev *sd, int enable) { v4l2_dbg(1, debug, sd, "s_stream(%d)\n", enable); if (enable) { /* All output pins on */ ks0127_and_or(sd, KS_OFMTA, 0xcf, 0x30); /* Obey the OEN pin */ ks0127_and_or(sd, KS_CDEM, 0x7f, 0x00); } else { /* Video output pins off */ ks0127_and_or(sd, KS_OFMTA, 0xcf, 0x00); /* Ignore the OEN pin */ ks0127_and_or(sd, KS_CDEM, 0x7f, 0x80); } return 0; } static int ks0127_status(struct v4l2_subdev *sd, u32 *pstatus, v4l2_std_id *pstd) { int stat = V4L2_IN_ST_NO_SIGNAL; u8 status; v4l2_std_id std = V4L2_STD_ALL; status = ks0127_read(sd, KS_STAT); if (!(status & 0x20)) /* NOVID not set */ stat = 0; if (!(status & 0x01)) /* CLOCK set */ stat |= V4L2_IN_ST_NO_COLOR; if ((status & 0x08)) /* PALDET set */ std = V4L2_STD_PAL; else std = V4L2_STD_NTSC; if (pstd) *pstd = std; if (pstatus) *pstatus = stat; return 0; } static int ks0127_querystd(struct v4l2_subdev *sd, v4l2_std_id *std) { v4l2_dbg(1, debug, sd, "querystd\n"); return ks0127_status(sd, NULL, std); } static int ks0127_g_input_status(struct v4l2_subdev *sd, u32 *status) { v4l2_dbg(1, debug, sd, "g_input_status\n"); return ks0127_status(sd, status, NULL); } static int ks0127_g_chip_ident(struct v4l2_subdev *sd, struct v4l2_dbg_chip_ident *chip) { struct i2c_client *client = v4l2_get_subdevdata(sd); struct ks0127 *ks = to_ks0127(sd); return v4l2_chip_ident_i2c_client(client, chip, ks->ident, 0); } /* ----------------------------------------------------------------------- */ static const struct v4l2_subdev_core_ops ks0127_core_ops = { .g_chip_ident = ks0127_g_chip_ident, .s_std = ks0127_s_std, }; static const struct v4l2_subdev_video_ops ks0127_video_ops = { .s_routing = ks0127_s_routing, .s_stream = ks0127_s_stream, .querystd = ks0127_querystd, 
.g_input_status = ks0127_g_input_status, }; static const struct v4l2_subdev_ops ks0127_ops = { .core = &ks0127_core_ops, .video = &ks0127_video_ops, }; /* ----------------------------------------------------------------------- */ static int ks0127_probe(struct i2c_client *client, const struct i2c_device_id *id) { struct ks0127 *ks; struct v4l2_subdev *sd; v4l_info(client, "%s chip found @ 0x%x (%s)\n", client->addr == (I2C_KS0127_ADDON >> 1) ? "addon" : "on-board", client->addr << 1, client->adapter->name); ks = kzalloc(sizeof(*ks), GFP_KERNEL); if (ks == NULL) return -ENOMEM; sd = &ks->sd; v4l2_i2c_subdev_init(sd, client, &ks0127_ops); /* power up */ init_reg_defaults(); ks0127_write(sd, KS_CMDA, 0x2c); mdelay(10); /* reset the device */ ks0127_init(sd); return 0; } static int ks0127_remove(struct i2c_client *client) { struct v4l2_subdev *sd = i2c_get_clientdata(client); v4l2_device_unregister_subdev(sd); ks0127_write(sd, KS_OFMTA, 0x20); /* tristate */ ks0127_write(sd, KS_CMDA, 0x2c | 0x80); /* power down */ kfree(to_ks0127(sd)); return 0; } static const struct i2c_device_id ks0127_id[] = { { "ks0127", 0 }, { "ks0127b", 0 }, { "ks0122s", 0 }, { } }; MODULE_DEVICE_TABLE(i2c, ks0127_id); static struct i2c_driver ks0127_driver = { .driver = { .owner = THIS_MODULE, .name = "ks0127", }, .probe = ks0127_probe, .remove = ks0127_remove, .id_table = ks0127_id, }; module_i2c_driver(ks0127_driver);
gpl-2.0
aicjofs/android_kernel_lge_v500_20d
arch/mips/jz4740/irq.c
4945
4022
/*
 * Copyright (C) 2009-2010, Lars-Peter Clausen <lars@metafoo.de>
 * JZ4740 platform IRQ support
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; either version 2 of the License, or (at your
 * option) any later version.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 675 Mass Ave, Cambridge, MA 02139, USA.
 *
 */

#include <linux/errno.h>
#include <linux/init.h>
#include <linux/types.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/timex.h>
#include <linux/slab.h>
#include <linux/delay.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include <asm/io.h>
#include <asm/mipsregs.h>
#include <asm/irq_cpu.h>

#include <asm/mach-jz4740/base.h>

/* ioremap()'d base of the on-chip interrupt controller; set in arch_init_irq(). */
static void __iomem *jz_intc_base;

/* INTC register offsets, relative to jz_intc_base. */
#define JZ_REG_INTC_STATUS	0x00
#define JZ_REG_INTC_MASK	0x04
#define JZ_REG_INTC_SET_MASK	0x08	/* write 1s to mask (disable) sources */
#define JZ_REG_INTC_CLEAR_MASK	0x0c	/* write 1s to unmask (enable) sources */
#define JZ_REG_INTC_PENDING	0x10

/*
 * Cascade handler hooked on CPU IRQ 2: read the INTC pending register and
 * dispatch the highest-numbered pending source (one source per invocation).
 */
static irqreturn_t jz4740_cascade(int irq, void *data)
{
	uint32_t irq_reg;

	irq_reg = readl(jz_intc_base + JZ_REG_INTC_PENDING);

	if (irq_reg)
		generic_handle_irq(__fls(irq_reg) + JZ4740_IRQ_BASE);

	return IRQ_HANDLED;
}

/*
 * Program the full 32-bit INTC mask in one go: bits set in 'mask' are
 * enabled (written to the clear-mask/enable register), all other bits are
 * disabled (written to the set-mask/disable register).
 */
static void jz4740_irq_set_mask(struct irq_chip_generic *gc, uint32_t mask)
{
	struct irq_chip_regs *regs = &gc->chip_types->regs;

	writel(mask, gc->reg_base + regs->enable);
	writel(~mask, gc->reg_base + regs->disable);
}

/* On suspend, leave only the wakeup-enabled sources unmasked. */
void jz4740_irq_suspend(struct irq_data *data)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
	jz4740_irq_set_mask(gc, gc->wake_active);
}

/* On resume, restore the mask state cached by the generic irq chip. */
void jz4740_irq_resume(struct irq_data *data)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(data);
	jz4740_irq_set_mask(gc, gc->mask_cache);
}

static struct irqaction jz4740_cascade_action = {
	.handler = jz4740_cascade,
	.name = "JZ4740 cascade interrupt",
};

/*
 * Platform IRQ bring-up: initialise the MIPS CPU irqs, map the INTC
 * registers, mask everything, wire up a generic irq chip for the 32 INTC
 * sources, and install the cascade handler on CPU IRQ 2.
 */
void __init arch_init_irq(void)
{
	struct irq_chip_generic *gc;
	struct irq_chip_type *ct;

	mips_cpu_irq_init();

	jz_intc_base = ioremap(JZ4740_INTC_BASE_ADDR, 0x14);

	/* Mask all irqs */
	writel(0xffffffff, jz_intc_base + JZ_REG_INTC_SET_MASK);

	gc = irq_alloc_generic_chip("INTC", 1, JZ4740_IRQ_BASE, jz_intc_base,
		handle_level_irq);

	/* All 32 sources may be used as wakeup sources. */
	gc->wake_enabled = IRQ_MSK(32);

	ct = gc->chip_types;
	/* enable = clear-mask register, disable = set-mask register (see above) */
	ct->regs.enable = JZ_REG_INTC_CLEAR_MASK;
	ct->regs.disable = JZ_REG_INTC_SET_MASK;
	ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
	ct->chip.irq_mask = irq_gc_mask_disable_reg;
	ct->chip.irq_mask_ack = irq_gc_mask_disable_reg;
	ct->chip.irq_set_wake = irq_gc_set_wake;
	ct->chip.irq_suspend = jz4740_irq_suspend;
	ct->chip.irq_resume = jz4740_irq_resume;

	irq_setup_generic_chip(gc, IRQ_MSK(32), 0, 0,
		IRQ_NOPROBE | IRQ_LEVEL);

	setup_irq(2, &jz4740_cascade_action);
}

/*
 * Top-level MIPS interrupt dispatch: IP2 is the INTC cascade handled above;
 * IP3 is dispatched directly as CPU irq 3. Anything else is spurious.
 */
asmlinkage void plat_irq_dispatch(void)
{
	unsigned int pending = read_c0_status() & read_c0_cause() & ST0_IM;
	if (pending & STATUSF_IP2)
		do_IRQ(2);
	else if (pending & STATUSF_IP3)
		do_IRQ(3);
	else
		spurious_interrupt();
}

#ifdef CONFIG_DEBUG_FS

/* Print one named INTC register value into the seq_file. */
static inline void intc_seq_reg(struct seq_file *s, const char *name,
	unsigned int reg)
{
	seq_printf(s, "%s:\t\t%08x\n", name, readl(jz_intc_base + reg));
}

/* debugfs show callback: dump the status, mask and pending registers. */
static int intc_regs_show(struct seq_file *s, void *unused)
{
	intc_seq_reg(s, "Status", JZ_REG_INTC_STATUS);
	intc_seq_reg(s, "Mask", JZ_REG_INTC_MASK);
	intc_seq_reg(s, "Pending", JZ_REG_INTC_PENDING);

	return 0;
}

static int intc_regs_open(struct inode *inode, struct file *file)
{
	return single_open(file, intc_regs_show, NULL);
}

static const struct file_operations intc_regs_operations = {
	.open		= intc_regs_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

/* Expose the register dump as /sys/kernel/debug/jz_regs_intc (read-only). */
static int __init intc_debugfs_init(void)
{
	(void) debugfs_create_file("jz_regs_intc", S_IFREG | S_IRUGO,
			NULL, NULL, &intc_regs_operations);
	return 0;
}
subsys_initcall(intc_debugfs_init);

#endif
gpl-2.0
1119553797/linux-sunxi
arch/sh/kernel/cpu/sh4a/setup-sh7770.c
7505
19247
/* * SH7770 Setup * * Copyright (C) 2006 - 2008 Paul Mundt * * This file is subject to the terms and conditions of the GNU General Public * License. See the file "COPYING" in the main directory of this archive * for more details. */ #include <linux/platform_device.h> #include <linux/init.h> #include <linux/serial.h> #include <linux/serial_sci.h> #include <linux/sh_timer.h> #include <linux/io.h> static struct plat_sci_port scif0_platform_data = { .mapbase = 0xff923000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 61, 61, 61, 61 }, }; static struct platform_device scif0_device = { .name = "sh-sci", .id = 0, .dev = { .platform_data = &scif0_platform_data, }, }; static struct plat_sci_port scif1_platform_data = { .mapbase = 0xff924000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 62, 62, 62, 62 }, }; static struct platform_device scif1_device = { .name = "sh-sci", .id = 1, .dev = { .platform_data = &scif1_platform_data, }, }; static struct plat_sci_port scif2_platform_data = { .mapbase = 0xff925000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 63, 63, 63, 63 }, }; static struct platform_device scif2_device = { .name = "sh-sci", .id = 2, .dev = { .platform_data = &scif2_platform_data, }, }; static struct plat_sci_port scif3_platform_data = { .mapbase = 0xff926000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 64, 64, 64, 64 }, }; static struct platform_device scif3_device = { .name = "sh-sci", .id = 3, .dev = { .platform_data = &scif3_platform_data, }, }; static struct plat_sci_port scif4_platform_data = { .mapbase = 0xff927000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | 
SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 65, 65, 65, 65 }, }; static struct platform_device scif4_device = { .name = "sh-sci", .id = 4, .dev = { .platform_data = &scif4_platform_data, }, }; static struct plat_sci_port scif5_platform_data = { .mapbase = 0xff928000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 66, 66, 66, 66 }, }; static struct platform_device scif5_device = { .name = "sh-sci", .id = 5, .dev = { .platform_data = &scif5_platform_data, }, }; static struct plat_sci_port scif6_platform_data = { .mapbase = 0xff929000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 67, 67, 67, 67 }, }; static struct platform_device scif6_device = { .name = "sh-sci", .id = 6, .dev = { .platform_data = &scif6_platform_data, }, }; static struct plat_sci_port scif7_platform_data = { .mapbase = 0xff92a000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 68, 68, 68, 68 }, }; static struct platform_device scif7_device = { .name = "sh-sci", .id = 7, .dev = { .platform_data = &scif7_platform_data, }, }; static struct plat_sci_port scif8_platform_data = { .mapbase = 0xff92b000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 69, 69, 69, 69 }, }; static struct platform_device scif8_device = { .name = "sh-sci", .id = 8, .dev = { .platform_data = &scif8_platform_data, }, }; static struct plat_sci_port scif9_platform_data = { .mapbase = 0xff92c000, .flags = UPF_BOOT_AUTOCONF, .scscr = SCSCR_RE | SCSCR_TE | SCSCR_REIE | SCSCR_TOIE, .scbrr_algo_id = SCBRR_ALGO_2, .type = PORT_SCIF, .irqs = { 70, 70, 70, 70 }, }; static struct platform_device scif9_device 
= { .name = "sh-sci", .id = 9, .dev = { .platform_data = &scif9_platform_data, }, }; static struct sh_timer_config tmu0_platform_data = { .channel_offset = 0x04, .timer_bit = 0, .clockevent_rating = 200, }; static struct resource tmu0_resources[] = { [0] = { .start = 0xffd80008, .end = 0xffd80013, .flags = IORESOURCE_MEM, }, [1] = { .start = 16, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu0_device = { .name = "sh_tmu", .id = 0, .dev = { .platform_data = &tmu0_platform_data, }, .resource = tmu0_resources, .num_resources = ARRAY_SIZE(tmu0_resources), }; static struct sh_timer_config tmu1_platform_data = { .channel_offset = 0x10, .timer_bit = 1, .clocksource_rating = 200, }; static struct resource tmu1_resources[] = { [0] = { .start = 0xffd80014, .end = 0xffd8001f, .flags = IORESOURCE_MEM, }, [1] = { .start = 17, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu1_device = { .name = "sh_tmu", .id = 1, .dev = { .platform_data = &tmu1_platform_data, }, .resource = tmu1_resources, .num_resources = ARRAY_SIZE(tmu1_resources), }; static struct sh_timer_config tmu2_platform_data = { .channel_offset = 0x1c, .timer_bit = 2, }; static struct resource tmu2_resources[] = { [0] = { .start = 0xffd80020, .end = 0xffd8002f, .flags = IORESOURCE_MEM, }, [1] = { .start = 18, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu2_device = { .name = "sh_tmu", .id = 2, .dev = { .platform_data = &tmu2_platform_data, }, .resource = tmu2_resources, .num_resources = ARRAY_SIZE(tmu2_resources), }; static struct sh_timer_config tmu3_platform_data = { .channel_offset = 0x04, .timer_bit = 0, }; static struct resource tmu3_resources[] = { [0] = { .start = 0xffd81008, .end = 0xffd81013, .flags = IORESOURCE_MEM, }, [1] = { .start = 19, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu3_device = { .name = "sh_tmu", .id = 3, .dev = { .platform_data = &tmu3_platform_data, }, .resource = tmu3_resources, .num_resources = 
ARRAY_SIZE(tmu3_resources), }; static struct sh_timer_config tmu4_platform_data = { .channel_offset = 0x10, .timer_bit = 1, }; static struct resource tmu4_resources[] = { [0] = { .start = 0xffd81014, .end = 0xffd8101f, .flags = IORESOURCE_MEM, }, [1] = { .start = 20, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu4_device = { .name = "sh_tmu", .id = 4, .dev = { .platform_data = &tmu4_platform_data, }, .resource = tmu4_resources, .num_resources = ARRAY_SIZE(tmu4_resources), }; static struct sh_timer_config tmu5_platform_data = { .channel_offset = 0x1c, .timer_bit = 2, }; static struct resource tmu5_resources[] = { [0] = { .start = 0xffd81020, .end = 0xffd8102f, .flags = IORESOURCE_MEM, }, [1] = { .start = 21, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu5_device = { .name = "sh_tmu", .id = 5, .dev = { .platform_data = &tmu5_platform_data, }, .resource = tmu5_resources, .num_resources = ARRAY_SIZE(tmu5_resources), }; static struct sh_timer_config tmu6_platform_data = { .channel_offset = 0x04, .timer_bit = 0, }; static struct resource tmu6_resources[] = { [0] = { .start = 0xffd82008, .end = 0xffd82013, .flags = IORESOURCE_MEM, }, [1] = { .start = 22, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu6_device = { .name = "sh_tmu", .id = 6, .dev = { .platform_data = &tmu6_platform_data, }, .resource = tmu6_resources, .num_resources = ARRAY_SIZE(tmu6_resources), }; static struct sh_timer_config tmu7_platform_data = { .channel_offset = 0x10, .timer_bit = 1, }; static struct resource tmu7_resources[] = { [0] = { .start = 0xffd82014, .end = 0xffd8201f, .flags = IORESOURCE_MEM, }, [1] = { .start = 23, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu7_device = { .name = "sh_tmu", .id = 7, .dev = { .platform_data = &tmu7_platform_data, }, .resource = tmu7_resources, .num_resources = ARRAY_SIZE(tmu7_resources), }; static struct sh_timer_config tmu8_platform_data = { .channel_offset = 0x1c, .timer_bit = 2, }; 
static struct resource tmu8_resources[] = { [0] = { .start = 0xffd82020, .end = 0xffd8202b, .flags = IORESOURCE_MEM, }, [1] = { .start = 24, .flags = IORESOURCE_IRQ, }, }; static struct platform_device tmu8_device = { .name = "sh_tmu", .id = 8, .dev = { .platform_data = &tmu8_platform_data, }, .resource = tmu8_resources, .num_resources = ARRAY_SIZE(tmu8_resources), }; static struct platform_device *sh7770_devices[] __initdata = { &scif0_device, &scif1_device, &scif2_device, &scif3_device, &scif4_device, &scif5_device, &scif6_device, &scif7_device, &scif8_device, &scif9_device, &tmu0_device, &tmu1_device, &tmu2_device, &tmu3_device, &tmu4_device, &tmu5_device, &tmu6_device, &tmu7_device, &tmu8_device, }; static int __init sh7770_devices_setup(void) { return platform_add_devices(sh7770_devices, ARRAY_SIZE(sh7770_devices)); } arch_initcall(sh7770_devices_setup); static struct platform_device *sh7770_early_devices[] __initdata = { &scif0_device, &scif1_device, &scif2_device, &scif3_device, &scif4_device, &scif5_device, &scif6_device, &scif7_device, &scif8_device, &scif9_device, &tmu0_device, &tmu1_device, &tmu2_device, &tmu3_device, &tmu4_device, &tmu5_device, &tmu6_device, &tmu7_device, &tmu8_device, }; void __init plat_early_device_setup(void) { early_platform_add_devices(sh7770_early_devices, ARRAY_SIZE(sh7770_early_devices)); } enum { UNUSED = 0, /* interrupt sources */ IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, IRL_HHLL, IRL_HHLH, IRL_HHHL, IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, GPIO, TMU0, TMU1, TMU2, TMU2_TICPI, TMU3, TMU4, TMU5, TMU5_TICPI, TMU6, TMU7, TMU8, HAC, IPI, SPDIF, HUDI, I2C, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2, I2S0, I2S1, I2S2, I2S3, SRC_RX, SRC_TX, SRC_SPDIF, DU, VIDEO_IN, REMOTE, YUV, USB, ATAPI, CAN, GPS, GFX2D, GFX3D_MBX, GFX3D_DMAC, EXBUS_ATA, SPI0, SPI1, SCIF089, SCIF1234, SCIF567, ADC, BBDMAC_0_3, BBDMAC_4_7, BBDMAC_8_10, BBDMAC_11_14, BBDMAC_15_18, BBDMAC_19_22, 
BBDMAC_23_26, BBDMAC_27, BBDMAC_28, BBDMAC_29, BBDMAC_30, BBDMAC_31, /* interrupt groups */ TMU, DMAC, I2S, SRC, GFX3D, SPI, SCIF, BBDMAC, }; static struct intc_vect vectors[] __initdata = { INTC_VECT(GPIO, 0x3e0), INTC_VECT(TMU0, 0x400), INTC_VECT(TMU1, 0x420), INTC_VECT(TMU2, 0x440), INTC_VECT(TMU2_TICPI, 0x460), INTC_VECT(TMU3, 0x480), INTC_VECT(TMU4, 0x4a0), INTC_VECT(TMU5, 0x4c0), INTC_VECT(TMU5_TICPI, 0x4e0), INTC_VECT(TMU6, 0x500), INTC_VECT(TMU7, 0x520), INTC_VECT(TMU8, 0x540), INTC_VECT(HAC, 0x580), INTC_VECT(IPI, 0x5c0), INTC_VECT(SPDIF, 0x5e0), INTC_VECT(HUDI, 0x600), INTC_VECT(I2C, 0x620), INTC_VECT(DMAC0_DMINT0, 0x640), INTC_VECT(DMAC0_DMINT1, 0x660), INTC_VECT(DMAC0_DMINT2, 0x680), INTC_VECT(I2S0, 0x6a0), INTC_VECT(I2S1, 0x6c0), INTC_VECT(I2S2, 0x6e0), INTC_VECT(I2S3, 0x700), INTC_VECT(SRC_RX, 0x720), INTC_VECT(SRC_TX, 0x740), INTC_VECT(SRC_SPDIF, 0x760), INTC_VECT(DU, 0x780), INTC_VECT(VIDEO_IN, 0x7a0), INTC_VECT(REMOTE, 0x7c0), INTC_VECT(YUV, 0x7e0), INTC_VECT(USB, 0x840), INTC_VECT(ATAPI, 0x860), INTC_VECT(CAN, 0x880), INTC_VECT(GPS, 0x8a0), INTC_VECT(GFX2D, 0x8c0), INTC_VECT(GFX3D_MBX, 0x900), INTC_VECT(GFX3D_DMAC, 0x920), INTC_VECT(EXBUS_ATA, 0x940), INTC_VECT(SPI0, 0x960), INTC_VECT(SPI1, 0x980), INTC_VECT(SCIF089, 0x9a0), INTC_VECT(SCIF1234, 0x9c0), INTC_VECT(SCIF1234, 0x9e0), INTC_VECT(SCIF1234, 0xa00), INTC_VECT(SCIF1234, 0xa20), INTC_VECT(SCIF567, 0xa40), INTC_VECT(SCIF567, 0xa60), INTC_VECT(SCIF567, 0xa80), INTC_VECT(SCIF089, 0xaa0), INTC_VECT(SCIF089, 0xac0), INTC_VECT(ADC, 0xb20), INTC_VECT(BBDMAC_0_3, 0xba0), INTC_VECT(BBDMAC_0_3, 0xbc0), INTC_VECT(BBDMAC_0_3, 0xbe0), INTC_VECT(BBDMAC_0_3, 0xc00), INTC_VECT(BBDMAC_4_7, 0xc20), INTC_VECT(BBDMAC_4_7, 0xc40), INTC_VECT(BBDMAC_4_7, 0xc60), INTC_VECT(BBDMAC_4_7, 0xc80), INTC_VECT(BBDMAC_8_10, 0xca0), INTC_VECT(BBDMAC_8_10, 0xcc0), INTC_VECT(BBDMAC_8_10, 0xce0), INTC_VECT(BBDMAC_11_14, 0xd00), INTC_VECT(BBDMAC_11_14, 0xd20), INTC_VECT(BBDMAC_11_14, 0xd40), INTC_VECT(BBDMAC_11_14, 0xd60), 
INTC_VECT(BBDMAC_15_18, 0xd80), INTC_VECT(BBDMAC_15_18, 0xda0), INTC_VECT(BBDMAC_15_18, 0xdc0), INTC_VECT(BBDMAC_15_18, 0xde0), INTC_VECT(BBDMAC_19_22, 0xe00), INTC_VECT(BBDMAC_19_22, 0xe20), INTC_VECT(BBDMAC_19_22, 0xe40), INTC_VECT(BBDMAC_19_22, 0xe60), INTC_VECT(BBDMAC_23_26, 0xe80), INTC_VECT(BBDMAC_23_26, 0xea0), INTC_VECT(BBDMAC_23_26, 0xec0), INTC_VECT(BBDMAC_23_26, 0xee0), INTC_VECT(BBDMAC_27, 0xf00), INTC_VECT(BBDMAC_28, 0xf20), INTC_VECT(BBDMAC_29, 0xf40), INTC_VECT(BBDMAC_30, 0xf60), INTC_VECT(BBDMAC_31, 0xf80), }; static struct intc_group groups[] __initdata = { INTC_GROUP(TMU, TMU0, TMU1, TMU2, TMU2_TICPI, TMU3, TMU4, TMU5, TMU5_TICPI, TMU6, TMU7, TMU8), INTC_GROUP(DMAC, DMAC0_DMINT0, DMAC0_DMINT1, DMAC0_DMINT2), INTC_GROUP(I2S, I2S0, I2S1, I2S2, I2S3), INTC_GROUP(SRC, SRC_RX, SRC_TX, SRC_SPDIF), INTC_GROUP(GFX3D, GFX3D_MBX, GFX3D_DMAC), INTC_GROUP(SPI, SPI0, SPI1), INTC_GROUP(SCIF, SCIF089, SCIF1234, SCIF567), INTC_GROUP(BBDMAC, BBDMAC_0_3, BBDMAC_4_7, BBDMAC_8_10, BBDMAC_11_14, BBDMAC_15_18, BBDMAC_19_22, BBDMAC_23_26, BBDMAC_27, BBDMAC_28, BBDMAC_29, BBDMAC_30, BBDMAC_31), }; static struct intc_mask_reg mask_registers[] __initdata = { { 0xffe00040, 0xffe00044, 32, /* INT2MSKR / INT2MSKCR */ { 0, BBDMAC, ADC, SCIF, SPI, EXBUS_ATA, GFX3D, GFX2D, GPS, CAN, ATAPI, USB, YUV, REMOTE, VIDEO_IN, DU, SRC, I2S, DMAC, I2C, HUDI, SPDIF, IPI, HAC, TMU, GPIO } }, }; static struct intc_prio_reg prio_registers[] __initdata = { { 0xffe00000, 0, 32, 8, /* INT2PRI0 */ { GPIO, TMU0, 0, HAC } }, { 0xffe00004, 0, 32, 8, /* INT2PRI1 */ { IPI, SPDIF, HUDI, I2C } }, { 0xffe00008, 0, 32, 8, /* INT2PRI2 */ { DMAC, I2S, SRC, DU } }, { 0xffe0000c, 0, 32, 8, /* INT2PRI3 */ { VIDEO_IN, REMOTE, YUV, USB } }, { 0xffe00010, 0, 32, 8, /* INT2PRI4 */ { ATAPI, CAN, GPS, GFX2D } }, { 0xffe00014, 0, 32, 8, /* INT2PRI5 */ { 0, GFX3D, EXBUS_ATA, SPI } }, { 0xffe00018, 0, 32, 8, /* INT2PRI6 */ { SCIF1234, SCIF567, SCIF089 } }, { 0xffe0001c, 0, 32, 8, /* INT2PRI7 */ { ADC, 0, 0, BBDMAC_0_3 } 
}, { 0xffe00020, 0, 32, 8, /* INT2PRI8 */ { BBDMAC_4_7, BBDMAC_8_10, BBDMAC_11_14, BBDMAC_15_18 } }, { 0xffe00024, 0, 32, 8, /* INT2PRI9 */ { BBDMAC_19_22, BBDMAC_23_26, BBDMAC_27, BBDMAC_28 } }, { 0xffe00028, 0, 32, 8, /* INT2PRI10 */ { BBDMAC_29, BBDMAC_30, BBDMAC_31 } }, { 0xffe0002c, 0, 32, 8, /* INT2PRI11 */ { TMU1, TMU2, TMU2_TICPI, TMU3 } }, { 0xffe00030, 0, 32, 8, /* INT2PRI12 */ { TMU4, TMU5, TMU5_TICPI, TMU6 } }, { 0xffe00034, 0, 32, 8, /* INT2PRI13 */ { TMU7, TMU8 } }, }; static DECLARE_INTC_DESC(intc_desc, "sh7770", vectors, groups, mask_registers, prio_registers, NULL); /* Support for external interrupt pins in IRQ mode */ static struct intc_vect irq_vectors[] __initdata = { INTC_VECT(IRQ0, 0x240), INTC_VECT(IRQ1, 0x280), INTC_VECT(IRQ2, 0x2c0), INTC_VECT(IRQ3, 0x300), INTC_VECT(IRQ4, 0x340), INTC_VECT(IRQ5, 0x380), }; static struct intc_mask_reg irq_mask_registers[] __initdata = { { 0xffd00044, 0xffd00064, 32, /* INTMSK0 / INTMSKCLR0 */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, } }, }; static struct intc_prio_reg irq_prio_registers[] __initdata = { { 0xffd00010, 0, 32, 4, /* INTPRI */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, } }, }; static struct intc_sense_reg irq_sense_registers[] __initdata = { { 0xffd0001c, 32, 2, /* ICR1 */ { IRQ0, IRQ1, IRQ2, IRQ3, IRQ4, IRQ5, } }, }; static DECLARE_INTC_DESC(intc_irq_desc, "sh7770-irq", irq_vectors, NULL, irq_mask_registers, irq_prio_registers, irq_sense_registers); /* External interrupt pins in IRL mode */ static struct intc_vect irl_vectors[] __initdata = { INTC_VECT(IRL_LLLL, 0x200), INTC_VECT(IRL_LLLH, 0x220), INTC_VECT(IRL_LLHL, 0x240), INTC_VECT(IRL_LLHH, 0x260), INTC_VECT(IRL_LHLL, 0x280), INTC_VECT(IRL_LHLH, 0x2a0), INTC_VECT(IRL_LHHL, 0x2c0), INTC_VECT(IRL_LHHH, 0x2e0), INTC_VECT(IRL_HLLL, 0x300), INTC_VECT(IRL_HLLH, 0x320), INTC_VECT(IRL_HLHL, 0x340), INTC_VECT(IRL_HLHH, 0x360), INTC_VECT(IRL_HHLL, 0x380), INTC_VECT(IRL_HHLH, 0x3a0), INTC_VECT(IRL_HHHL, 0x3c0), }; static struct intc_mask_reg 
irl3210_mask_registers[] __initdata = { { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */ { IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, IRL_HHLL, IRL_HHLH, IRL_HHHL, } }, }; static struct intc_mask_reg irl7654_mask_registers[] __initdata = { { 0xffd40080, 0xffd40084, 32, /* INTMSK2 / INTMSKCLR2 */ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, IRL_LLLL, IRL_LLLH, IRL_LLHL, IRL_LLHH, IRL_LHLL, IRL_LHLH, IRL_LHHL, IRL_LHHH, IRL_HLLL, IRL_HLLH, IRL_HLHL, IRL_HLHH, IRL_HHLL, IRL_HHLH, IRL_HHHL, } }, }; static DECLARE_INTC_DESC(intc_irl7654_desc, "sh7780-irl7654", irl_vectors, NULL, irl7654_mask_registers, NULL, NULL); static DECLARE_INTC_DESC(intc_irl3210_desc, "sh7780-irl3210", irl_vectors, NULL, irl3210_mask_registers, NULL, NULL); #define INTC_ICR0 0xffd00000 #define INTC_INTMSK0 0xffd00044 #define INTC_INTMSK1 0xffd00048 #define INTC_INTMSK2 0xffd40080 #define INTC_INTMSKCLR1 0xffd00068 #define INTC_INTMSKCLR2 0xffd40084 void __init plat_irq_setup(void) { /* disable IRQ7-0 */ __raw_writel(0xff000000, INTC_INTMSK0); /* disable IRL3-0 + IRL7-4 */ __raw_writel(0xc0000000, INTC_INTMSK1); __raw_writel(0xfffefffe, INTC_INTMSK2); /* select IRL mode for IRL3-0 + IRL7-4 */ __raw_writel(__raw_readl(INTC_ICR0) & ~0x00c00000, INTC_ICR0); /* disable holding function, ie enable "SH-4 Mode" */ __raw_writel(__raw_readl(INTC_ICR0) | 0x00200000, INTC_ICR0); register_intc_controller(&intc_desc); } void __init plat_irq_setup_pins(int mode) { switch (mode) { case IRQ_MODE_IRQ: /* select IRQ mode for IRL3-0 + IRL7-4 */ __raw_writel(__raw_readl(INTC_ICR0) | 0x00c00000, INTC_ICR0); register_intc_controller(&intc_irq_desc); break; case IRQ_MODE_IRL7654: /* enable IRL7-4 but don't provide any masking */ __raw_writel(0x40000000, INTC_INTMSKCLR1); __raw_writel(0x0000fffe, INTC_INTMSKCLR2); break; case IRQ_MODE_IRL3210: /* enable IRL0-3 but don't provide any masking */ __raw_writel(0x80000000, 
INTC_INTMSKCLR1); __raw_writel(0xfffe0000, INTC_INTMSKCLR2); break; case IRQ_MODE_IRL7654_MASK: /* enable IRL7-4 and mask using cpu intc controller */ __raw_writel(0x40000000, INTC_INTMSKCLR1); register_intc_controller(&intc_irl7654_desc); break; case IRQ_MODE_IRL3210_MASK: /* enable IRL0-3 and mask using cpu intc controller */ __raw_writel(0x80000000, INTC_INTMSKCLR1); register_intc_controller(&intc_irl3210_desc); break; default: BUG(); } }
gpl-2.0
Entropy512/kernel_galaxys2_ics
arch/blackfin/kernel/cplb-nompu/cplbinit.c
7505
5737
/* * Blackfin CPLB initialization * * Copyright 2007-2009 Analog Devices Inc. * * Licensed under the GPL-2 or later. */ #include <linux/module.h> #include <asm/blackfin.h> #include <asm/cacheflush.h> #include <asm/cplb.h> #include <asm/cplbinit.h> #include <asm/mem_map.h> struct cplb_entry icplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR; struct cplb_entry dcplb_tbl[NR_CPUS][MAX_CPLBS] PDT_ATTR; int first_switched_icplb PDT_ATTR; int first_switched_dcplb PDT_ATTR; struct cplb_boundary dcplb_bounds[9] PDT_ATTR; struct cplb_boundary icplb_bounds[9] PDT_ATTR; int icplb_nr_bounds PDT_ATTR; int dcplb_nr_bounds PDT_ATTR; void __init generate_cplb_tables_cpu(unsigned int cpu) { int i_d, i_i; unsigned long addr; struct cplb_entry *d_tbl = dcplb_tbl[cpu]; struct cplb_entry *i_tbl = icplb_tbl[cpu]; printk(KERN_INFO "NOMPU: setting up cplb tables\n"); i_d = i_i = 0; #ifdef CONFIG_DEBUG_HUNT_FOR_ZERO /* Set up the zero page. */ d_tbl[i_d].addr = 0; d_tbl[i_d++].data = SDRAM_OOPS | PAGE_SIZE_1KB; i_tbl[i_i].addr = 0; i_tbl[i_i++].data = SDRAM_OOPS | PAGE_SIZE_1KB; #endif /* Cover kernel memory with 4M pages. */ addr = 0; for (; addr < memory_start; addr += 4 * 1024 * 1024) { d_tbl[i_d].addr = addr; d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB; i_tbl[i_i].addr = addr; i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB; } #ifdef CONFIG_ROMKERNEL /* Cover kernel XIP flash area */ addr = CONFIG_ROM_BASE & ~(4 * 1024 * 1024 - 1); d_tbl[i_d].addr = addr; d_tbl[i_d++].data = SDRAM_DGENERIC | PAGE_SIZE_4MB; i_tbl[i_i].addr = addr; i_tbl[i_i++].data = SDRAM_IGENERIC | PAGE_SIZE_4MB; #endif /* Cover L1 memory. One 4M area for code and data each is enough. 
*/ if (cpu == 0) { if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) { d_tbl[i_d].addr = L1_DATA_A_START; d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB; } i_tbl[i_i].addr = L1_CODE_START; i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB; } #ifdef CONFIG_SMP else { if (L1_DATA_A_LENGTH || L1_DATA_B_LENGTH) { d_tbl[i_d].addr = COREB_L1_DATA_A_START; d_tbl[i_d++].data = L1_DMEMORY | PAGE_SIZE_4MB; } i_tbl[i_i].addr = COREB_L1_CODE_START; i_tbl[i_i++].data = L1_IMEMORY | PAGE_SIZE_4MB; } #endif first_switched_dcplb = i_d; first_switched_icplb = i_i; BUG_ON(first_switched_dcplb > MAX_CPLBS); BUG_ON(first_switched_icplb > MAX_CPLBS); while (i_d < MAX_CPLBS) d_tbl[i_d++].data = 0; while (i_i < MAX_CPLBS) i_tbl[i_i++].data = 0; } void __init generate_cplb_tables_all(void) { unsigned long uncached_end; int i_d, i_i; i_d = 0; /* Normal RAM, including MTD FS. */ #ifdef CONFIG_MTD_UCLINUX uncached_end = memory_mtd_start + mtd_size; #else uncached_end = memory_end; #endif /* * if DMA uncached is less than 1MB, mark the 1MB chunk as uncached * so that we don't have to use 4kB pages and cause CPLB thrashing */ if ((DMA_UNCACHED_REGION >= 1 * 1024 * 1024) || !DMA_UNCACHED_REGION || ((_ramend - uncached_end) >= 1 * 1024 * 1024)) dcplb_bounds[i_d].eaddr = uncached_end; else dcplb_bounds[i_d].eaddr = uncached_end & ~(1 * 1024 * 1024 - 1); dcplb_bounds[i_d++].data = SDRAM_DGENERIC; /* DMA uncached region. */ if (DMA_UNCACHED_REGION) { dcplb_bounds[i_d].eaddr = _ramend; dcplb_bounds[i_d++].data = SDRAM_DNON_CHBL; } if (_ramend != physical_mem_end) { /* Reserved memory. */ dcplb_bounds[i_d].eaddr = physical_mem_end; dcplb_bounds[i_d++].data = (reserved_mem_dcache_on ? SDRAM_DGENERIC : SDRAM_DNON_CHBL); } /* Addressing hole up to the async bank. */ dcplb_bounds[i_d].eaddr = ASYNC_BANK0_BASE; dcplb_bounds[i_d++].data = 0; /* ASYNC banks. */ dcplb_bounds[i_d].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE; dcplb_bounds[i_d++].data = SDRAM_EBIU; /* Addressing hole up to BootROM. 
*/ dcplb_bounds[i_d].eaddr = BOOT_ROM_START; dcplb_bounds[i_d++].data = 0; /* BootROM -- largest one should be less than 1 meg. */ dcplb_bounds[i_d].eaddr = BOOT_ROM_START + (1 * 1024 * 1024); dcplb_bounds[i_d++].data = SDRAM_DGENERIC; if (L2_LENGTH) { /* Addressing hole up to L2 SRAM. */ dcplb_bounds[i_d].eaddr = L2_START; dcplb_bounds[i_d++].data = 0; /* L2 SRAM. */ dcplb_bounds[i_d].eaddr = L2_START + L2_LENGTH; dcplb_bounds[i_d++].data = L2_DMEMORY; } dcplb_nr_bounds = i_d; BUG_ON(dcplb_nr_bounds > ARRAY_SIZE(dcplb_bounds)); i_i = 0; /* Normal RAM, including MTD FS. */ icplb_bounds[i_i].eaddr = uncached_end; icplb_bounds[i_i++].data = SDRAM_IGENERIC; if (_ramend != physical_mem_end) { /* DMA uncached region. */ if (DMA_UNCACHED_REGION) { /* Normally this hole is caught by the async below. */ icplb_bounds[i_i].eaddr = _ramend; icplb_bounds[i_i++].data = 0; } /* Reserved memory. */ icplb_bounds[i_i].eaddr = physical_mem_end; icplb_bounds[i_i++].data = (reserved_mem_icache_on ? SDRAM_IGENERIC : SDRAM_INON_CHBL); } /* Addressing hole up to the async bank. */ icplb_bounds[i_i].eaddr = ASYNC_BANK0_BASE; icplb_bounds[i_i++].data = 0; /* ASYNC banks. */ icplb_bounds[i_i].eaddr = ASYNC_BANK3_BASE + ASYNC_BANK3_SIZE; icplb_bounds[i_i++].data = SDRAM_EBIU; /* Addressing hole up to BootROM. */ icplb_bounds[i_i].eaddr = BOOT_ROM_START; icplb_bounds[i_i++].data = 0; /* BootROM -- largest one should be less than 1 meg. */ icplb_bounds[i_i].eaddr = BOOT_ROM_START + (1 * 1024 * 1024); icplb_bounds[i_i++].data = SDRAM_IGENERIC; if (L2_LENGTH) { /* Addressing hole up to L2 SRAM. */ icplb_bounds[i_i].eaddr = L2_START; icplb_bounds[i_i++].data = 0; /* L2 SRAM. */ icplb_bounds[i_i].eaddr = L2_START + L2_LENGTH; icplb_bounds[i_i++].data = L2_IMEMORY; } icplb_nr_bounds = i_i; BUG_ON(icplb_nr_bounds > ARRAY_SIZE(icplb_bounds)); }
gpl-2.0
devcreations07/spirit_cancro
drivers/w1/masters/omap_hdq.c
8017
18222
/* * drivers/w1/masters/omap_hdq.c * * Copyright (C) 2007 Texas Instruments, Inc. * * This file is licensed under the terms of the GNU General Public License * version 2. This program is licensed "as is" without any warranty of any * kind, whether express or implied. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/interrupt.h> #include <linux/slab.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/io.h> #include <linux/sched.h> #include <asm/irq.h> #include <mach/hardware.h> #include "../w1.h" #include "../w1_int.h" #define MOD_NAME "OMAP_HDQ:" #define OMAP_HDQ_REVISION 0x00 #define OMAP_HDQ_TX_DATA 0x04 #define OMAP_HDQ_RX_DATA 0x08 #define OMAP_HDQ_CTRL_STATUS 0x0c #define OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK (1<<6) #define OMAP_HDQ_CTRL_STATUS_CLOCKENABLE (1<<5) #define OMAP_HDQ_CTRL_STATUS_GO (1<<4) #define OMAP_HDQ_CTRL_STATUS_INITIALIZATION (1<<2) #define OMAP_HDQ_CTRL_STATUS_DIR (1<<1) #define OMAP_HDQ_CTRL_STATUS_MODE (1<<0) #define OMAP_HDQ_INT_STATUS 0x10 #define OMAP_HDQ_INT_STATUS_TXCOMPLETE (1<<2) #define OMAP_HDQ_INT_STATUS_RXCOMPLETE (1<<1) #define OMAP_HDQ_INT_STATUS_TIMEOUT (1<<0) #define OMAP_HDQ_SYSCONFIG 0x14 #define OMAP_HDQ_SYSCONFIG_SOFTRESET (1<<1) #define OMAP_HDQ_SYSCONFIG_AUTOIDLE (1<<0) #define OMAP_HDQ_SYSSTATUS 0x18 #define OMAP_HDQ_SYSSTATUS_RESETDONE (1<<0) #define OMAP_HDQ_FLAG_CLEAR 0 #define OMAP_HDQ_FLAG_SET 1 #define OMAP_HDQ_TIMEOUT (HZ/5) #define OMAP_HDQ_MAX_USER 4 static DECLARE_WAIT_QUEUE_HEAD(hdq_wait_queue); static int w1_id; struct hdq_data { struct device *dev; void __iomem *hdq_base; /* lock status update */ struct mutex hdq_mutex; int hdq_usecount; struct clk *hdq_ick; struct clk *hdq_fck; u8 hdq_irqstatus; /* device lock */ spinlock_t hdq_spinlock; /* * Used to control the call to omap_hdq_get and omap_hdq_put. * HDQ Protocol: Write the CMD|REG_address first, followed by * the data write or read. 
*/ int init_trans; }; static int __devinit omap_hdq_probe(struct platform_device *pdev); static int omap_hdq_remove(struct platform_device *pdev); static struct platform_driver omap_hdq_driver = { .probe = omap_hdq_probe, .remove = omap_hdq_remove, .driver = { .name = "omap_hdq", }, }; static u8 omap_w1_read_byte(void *_hdq); static void omap_w1_write_byte(void *_hdq, u8 byte); static u8 omap_w1_reset_bus(void *_hdq); static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev, u8 search_type, w1_slave_found_callback slave_found); static struct w1_bus_master omap_w1_master = { .read_byte = omap_w1_read_byte, .write_byte = omap_w1_write_byte, .reset_bus = omap_w1_reset_bus, .search = omap_w1_search_bus, }; /* HDQ register I/O routines */ static inline u8 hdq_reg_in(struct hdq_data *hdq_data, u32 offset) { return __raw_readb(hdq_data->hdq_base + offset); } static inline void hdq_reg_out(struct hdq_data *hdq_data, u32 offset, u8 val) { __raw_writeb(val, hdq_data->hdq_base + offset); } static inline u8 hdq_reg_merge(struct hdq_data *hdq_data, u32 offset, u8 val, u8 mask) { u8 new_val = (__raw_readb(hdq_data->hdq_base + offset) & ~mask) | (val & mask); __raw_writeb(new_val, hdq_data->hdq_base + offset); return new_val; } /* * Wait for one or more bits in flag change. * HDQ_FLAG_SET: wait until any bit in the flag is set. * HDQ_FLAG_CLEAR: wait until all bits in the flag are cleared. * return 0 on success and -ETIMEDOUT in the case of timeout. 
*/ static int hdq_wait_for_flag(struct hdq_data *hdq_data, u32 offset, u8 flag, u8 flag_set, u8 *status) { int ret = 0; unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT; if (flag_set == OMAP_HDQ_FLAG_CLEAR) { /* wait for the flag clear */ while (((*status = hdq_reg_in(hdq_data, offset)) & flag) && time_before(jiffies, timeout)) { schedule_timeout_uninterruptible(1); } if (*status & flag) ret = -ETIMEDOUT; } else if (flag_set == OMAP_HDQ_FLAG_SET) { /* wait for the flag set */ while (!((*status = hdq_reg_in(hdq_data, offset)) & flag) && time_before(jiffies, timeout)) { schedule_timeout_uninterruptible(1); } if (!(*status & flag)) ret = -ETIMEDOUT; } else return -EINVAL; return ret; } /* write out a byte and fill *status with HDQ_INT_STATUS */ static int hdq_write_byte(struct hdq_data *hdq_data, u8 val, u8 *status) { int ret; u8 tmp_status; unsigned long irqflags; *status = 0; spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags); /* clear interrupt flags via a dummy read */ hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); /* ISR loads it with new INT_STATUS */ hdq_data->hdq_irqstatus = 0; spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags); hdq_reg_out(hdq_data, OMAP_HDQ_TX_DATA, val); /* set the GO bit */ hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO); /* wait for the TXCOMPLETE bit */ ret = wait_event_timeout(hdq_wait_queue, hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT); if (ret == 0) { dev_dbg(hdq_data->dev, "TX wait elapsed\n"); goto out; } *status = hdq_data->hdq_irqstatus; /* check irqstatus */ if (!(*status & OMAP_HDQ_INT_STATUS_TXCOMPLETE)) { dev_dbg(hdq_data->dev, "timeout waiting for" "TXCOMPLETE/RXCOMPLETE, %x", *status); ret = -ETIMEDOUT; goto out; } /* wait for the GO bit return to zero */ ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR, &tmp_status); if (ret) { dev_dbg(hdq_data->dev, "timeout waiting GO bit" "return to zero, 
%x", tmp_status); } out: return ret; } /* HDQ Interrupt service routine */ static irqreturn_t hdq_isr(int irq, void *_hdq) { struct hdq_data *hdq_data = _hdq; unsigned long irqflags; spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags); hdq_data->hdq_irqstatus = hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags); dev_dbg(hdq_data->dev, "hdq_isr: %x", hdq_data->hdq_irqstatus); if (hdq_data->hdq_irqstatus & (OMAP_HDQ_INT_STATUS_TXCOMPLETE | OMAP_HDQ_INT_STATUS_RXCOMPLETE | OMAP_HDQ_INT_STATUS_TIMEOUT)) { /* wake up sleeping process */ wake_up(&hdq_wait_queue); } return IRQ_HANDLED; } /* HDQ Mode: always return success */ static u8 omap_w1_reset_bus(void *_hdq) { return 0; } /* W1 search callback function */ static void omap_w1_search_bus(void *_hdq, struct w1_master *master_dev, u8 search_type, w1_slave_found_callback slave_found) { u64 module_id, rn_le, cs, id; if (w1_id) module_id = w1_id; else module_id = 0x1; rn_le = cpu_to_le64(module_id); /* * HDQ might not obey truly the 1-wire spec. * So calculate CRC based on module parameter. */ cs = w1_calc_crc8((u8 *)&rn_le, 7); id = (cs << 56) | module_id; slave_found(master_dev, id); } static int _omap_hdq_reset(struct hdq_data *hdq_data) { int ret; u8 tmp_status; hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_SOFTRESET); /* * Select HDQ mode & enable clocks. * It is observed that INT flags can't be cleared via a read and GO/INIT * won't return to zero if interrupt is disabled. So we always enable * interrupt. 
*/ hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_CLOCKENABLE | OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK); /* wait for reset to complete */ ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_SYSSTATUS, OMAP_HDQ_SYSSTATUS_RESETDONE, OMAP_HDQ_FLAG_SET, &tmp_status); if (ret) dev_dbg(hdq_data->dev, "timeout waiting HDQ reset, %x", tmp_status); else { hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_CLOCKENABLE | OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK); hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_AUTOIDLE); } return ret; } /* Issue break pulse to the device */ static int omap_hdq_break(struct hdq_data *hdq_data) { int ret = 0; u8 tmp_status; unsigned long irqflags; ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) { dev_dbg(hdq_data->dev, "Could not acquire mutex\n"); ret = -EINTR; goto rtn; } spin_lock_irqsave(&hdq_data->hdq_spinlock, irqflags); /* clear interrupt flags via a dummy read */ hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); /* ISR loads it with new INT_STATUS */ hdq_data->hdq_irqstatus = 0; spin_unlock_irqrestore(&hdq_data->hdq_spinlock, irqflags); /* set the INIT and GO bit */ hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO); /* wait for the TIMEOUT bit */ ret = wait_event_timeout(hdq_wait_queue, hdq_data->hdq_irqstatus, OMAP_HDQ_TIMEOUT); if (ret == 0) { dev_dbg(hdq_data->dev, "break wait elapsed\n"); ret = -EINTR; goto out; } tmp_status = hdq_data->hdq_irqstatus; /* check irqstatus */ if (!(tmp_status & OMAP_HDQ_INT_STATUS_TIMEOUT)) { dev_dbg(hdq_data->dev, "timeout waiting for TIMEOUT, %x", tmp_status); ret = -ETIMEDOUT; goto out; } /* * wait for both INIT and GO bits rerurn to zero. * zero wait time expected for interrupt mode. 
*/ ret = hdq_wait_for_flag(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_INITIALIZATION | OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_FLAG_CLEAR, &tmp_status); if (ret) dev_dbg(hdq_data->dev, "timeout waiting INIT&GO bits" "return to zero, %x", tmp_status); out: mutex_unlock(&hdq_data->hdq_mutex); rtn: return ret; } static int hdq_read_byte(struct hdq_data *hdq_data, u8 *val) { int ret = 0; u8 status; unsigned long timeout = jiffies + OMAP_HDQ_TIMEOUT; ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) { ret = -EINTR; goto rtn; } if (!hdq_data->hdq_usecount) { ret = -EINVAL; goto out; } if (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) { hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO, OMAP_HDQ_CTRL_STATUS_DIR | OMAP_HDQ_CTRL_STATUS_GO); /* * The RX comes immediately after TX. It * triggers another interrupt before we * sleep. So we have to wait for RXCOMPLETE bit. */ while (!(hdq_data->hdq_irqstatus & OMAP_HDQ_INT_STATUS_RXCOMPLETE) && time_before(jiffies, timeout)) { schedule_timeout_uninterruptible(1); } hdq_reg_merge(hdq_data, OMAP_HDQ_CTRL_STATUS, 0, OMAP_HDQ_CTRL_STATUS_DIR); status = hdq_data->hdq_irqstatus; /* check irqstatus */ if (!(status & OMAP_HDQ_INT_STATUS_RXCOMPLETE)) { dev_dbg(hdq_data->dev, "timeout waiting for" "RXCOMPLETE, %x", status); ret = -ETIMEDOUT; goto out; } } /* the data is ready. Read it in! 
*/ *val = hdq_reg_in(hdq_data, OMAP_HDQ_RX_DATA); out: mutex_unlock(&hdq_data->hdq_mutex); rtn: return 0; } /* Enable clocks and set the controller to HDQ mode */ static int omap_hdq_get(struct hdq_data *hdq_data) { int ret = 0; ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) { ret = -EINTR; goto rtn; } if (OMAP_HDQ_MAX_USER == hdq_data->hdq_usecount) { dev_dbg(hdq_data->dev, "attempt to exceed the max use count"); ret = -EINVAL; goto out; } else { hdq_data->hdq_usecount++; try_module_get(THIS_MODULE); if (1 == hdq_data->hdq_usecount) { if (clk_enable(hdq_data->hdq_ick)) { dev_dbg(hdq_data->dev, "Can not enable ick\n"); ret = -ENODEV; goto clk_err; } if (clk_enable(hdq_data->hdq_fck)) { dev_dbg(hdq_data->dev, "Can not enable fck\n"); clk_disable(hdq_data->hdq_ick); ret = -ENODEV; goto clk_err; } /* make sure HDQ is out of reset */ if (!(hdq_reg_in(hdq_data, OMAP_HDQ_SYSSTATUS) & OMAP_HDQ_SYSSTATUS_RESETDONE)) { ret = _omap_hdq_reset(hdq_data); if (ret) /* back up the count */ hdq_data->hdq_usecount--; } else { /* select HDQ mode & enable clocks */ hdq_reg_out(hdq_data, OMAP_HDQ_CTRL_STATUS, OMAP_HDQ_CTRL_STATUS_CLOCKENABLE | OMAP_HDQ_CTRL_STATUS_INTERRUPTMASK); hdq_reg_out(hdq_data, OMAP_HDQ_SYSCONFIG, OMAP_HDQ_SYSCONFIG_AUTOIDLE); hdq_reg_in(hdq_data, OMAP_HDQ_INT_STATUS); } } } clk_err: clk_put(hdq_data->hdq_ick); clk_put(hdq_data->hdq_fck); out: mutex_unlock(&hdq_data->hdq_mutex); rtn: return ret; } /* Disable clocks to the module */ static int omap_hdq_put(struct hdq_data *hdq_data) { int ret = 0; ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) return -EINTR; if (0 == hdq_data->hdq_usecount) { dev_dbg(hdq_data->dev, "attempt to decrement use count" "when it is zero"); ret = -EINVAL; } else { hdq_data->hdq_usecount--; module_put(THIS_MODULE); if (0 == hdq_data->hdq_usecount) { clk_disable(hdq_data->hdq_ick); clk_disable(hdq_data->hdq_fck); } } mutex_unlock(&hdq_data->hdq_mutex); return ret; } /* Read a byte of data from the 
device */ static u8 omap_w1_read_byte(void *_hdq) { struct hdq_data *hdq_data = _hdq; u8 val = 0; int ret; ret = hdq_read_byte(hdq_data, &val); if (ret) { ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) { dev_dbg(hdq_data->dev, "Could not acquire mutex\n"); return -EINTR; } hdq_data->init_trans = 0; mutex_unlock(&hdq_data->hdq_mutex); omap_hdq_put(hdq_data); return -1; } /* Write followed by a read, release the module */ if (hdq_data->init_trans) { ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) { dev_dbg(hdq_data->dev, "Could not acquire mutex\n"); return -EINTR; } hdq_data->init_trans = 0; mutex_unlock(&hdq_data->hdq_mutex); omap_hdq_put(hdq_data); } return val; } /* Write a byte of data to the device */ static void omap_w1_write_byte(void *_hdq, u8 byte) { struct hdq_data *hdq_data = _hdq; int ret; u8 status; /* First write to initialize the transfer */ if (hdq_data->init_trans == 0) omap_hdq_get(hdq_data); ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) { dev_dbg(hdq_data->dev, "Could not acquire mutex\n"); return; } hdq_data->init_trans++; mutex_unlock(&hdq_data->hdq_mutex); ret = hdq_write_byte(hdq_data, byte, &status); if (ret == 0) { dev_dbg(hdq_data->dev, "TX failure:Ctrl status %x\n", status); return; } /* Second write, data transferred. 
Release the module */ if (hdq_data->init_trans > 1) { omap_hdq_put(hdq_data); ret = mutex_lock_interruptible(&hdq_data->hdq_mutex); if (ret < 0) { dev_dbg(hdq_data->dev, "Could not acquire mutex\n"); return; } hdq_data->init_trans = 0; mutex_unlock(&hdq_data->hdq_mutex); } return; } static int __devinit omap_hdq_probe(struct platform_device *pdev) { struct hdq_data *hdq_data; struct resource *res; int ret, irq; u8 rev; hdq_data = kmalloc(sizeof(*hdq_data), GFP_KERNEL); if (!hdq_data) { dev_dbg(&pdev->dev, "unable to allocate memory\n"); ret = -ENOMEM; goto err_kmalloc; } hdq_data->dev = &pdev->dev; platform_set_drvdata(pdev, hdq_data); res = platform_get_resource(pdev, IORESOURCE_MEM, 0); if (!res) { dev_dbg(&pdev->dev, "unable to get resource\n"); ret = -ENXIO; goto err_resource; } hdq_data->hdq_base = ioremap(res->start, SZ_4K); if (!hdq_data->hdq_base) { dev_dbg(&pdev->dev, "ioremap failed\n"); ret = -EINVAL; goto err_ioremap; } /* get interface & functional clock objects */ hdq_data->hdq_ick = clk_get(&pdev->dev, "ick"); if (IS_ERR(hdq_data->hdq_ick)) { dev_dbg(&pdev->dev, "Can't get HDQ ick clock object\n"); ret = PTR_ERR(hdq_data->hdq_ick); goto err_ick; } hdq_data->hdq_fck = clk_get(&pdev->dev, "fck"); if (IS_ERR(hdq_data->hdq_fck)) { dev_dbg(&pdev->dev, "Can't get HDQ fck clock object\n"); ret = PTR_ERR(hdq_data->hdq_fck); goto err_fck; } hdq_data->hdq_usecount = 0; mutex_init(&hdq_data->hdq_mutex); if (clk_enable(hdq_data->hdq_ick)) { dev_dbg(&pdev->dev, "Can not enable ick\n"); ret = -ENODEV; goto err_intfclk; } if (clk_enable(hdq_data->hdq_fck)) { dev_dbg(&pdev->dev, "Can not enable fck\n"); ret = -ENODEV; goto err_fnclk; } rev = hdq_reg_in(hdq_data, OMAP_HDQ_REVISION); dev_info(&pdev->dev, "OMAP HDQ Hardware Rev %c.%c. 
Driver in %s mode\n", (rev >> 4) + '0', (rev & 0x0f) + '0', "Interrupt"); spin_lock_init(&hdq_data->hdq_spinlock); irq = platform_get_irq(pdev, 0); if (irq < 0) { ret = -ENXIO; goto err_irq; } ret = request_irq(irq, hdq_isr, IRQF_DISABLED, "omap_hdq", hdq_data); if (ret < 0) { dev_dbg(&pdev->dev, "could not request irq\n"); goto err_irq; } omap_hdq_break(hdq_data); /* don't clock the HDQ until it is needed */ clk_disable(hdq_data->hdq_ick); clk_disable(hdq_data->hdq_fck); omap_w1_master.data = hdq_data; ret = w1_add_master_device(&omap_w1_master); if (ret) { dev_dbg(&pdev->dev, "Failure in registering w1 master\n"); goto err_w1; } return 0; err_w1: err_irq: clk_disable(hdq_data->hdq_fck); err_fnclk: clk_disable(hdq_data->hdq_ick); err_intfclk: clk_put(hdq_data->hdq_fck); err_fck: clk_put(hdq_data->hdq_ick); err_ick: iounmap(hdq_data->hdq_base); err_ioremap: err_resource: platform_set_drvdata(pdev, NULL); kfree(hdq_data); err_kmalloc: return ret; } static int omap_hdq_remove(struct platform_device *pdev) { struct hdq_data *hdq_data = platform_get_drvdata(pdev); mutex_lock(&hdq_data->hdq_mutex); if (hdq_data->hdq_usecount) { dev_dbg(&pdev->dev, "removed when use count is not zero\n"); mutex_unlock(&hdq_data->hdq_mutex); return -EBUSY; } mutex_unlock(&hdq_data->hdq_mutex); /* remove module dependency */ clk_put(hdq_data->hdq_ick); clk_put(hdq_data->hdq_fck); free_irq(INT_24XX_HDQ_IRQ, hdq_data); platform_set_drvdata(pdev, NULL); iounmap(hdq_data->hdq_base); kfree(hdq_data); return 0; } static int __init omap_hdq_init(void) { return platform_driver_register(&omap_hdq_driver); } module_init(omap_hdq_init); static void __exit omap_hdq_exit(void) { platform_driver_unregister(&omap_hdq_driver); } module_exit(omap_hdq_exit); module_param(w1_id, int, S_IRUSR); MODULE_PARM_DESC(w1_id, "1-wire id for the slave detection"); MODULE_AUTHOR("Texas Instruments"); MODULE_DESCRIPTION("HDQ driver Library"); MODULE_LICENSE("GPL");
gpl-2.0
ghsr/android_kernel_samsung_i9152
drivers/staging/rtl8192u/r819xU_cmdpkt.c
9041
22690
/****************************************************************************** (c) Copyright 2008, RealTEK Technologies Inc. All Rights Reserved. Module: r819xusb_cmdpkt.c (RTL8190 TX/RX command packet handler Source C File) Note: The module is responsible for handling TX and RX command packet. 1. TX : Send set and query configuration command packet. 2. RX : Receive tx feedback, beacon state, query configuration command packet. Function: Export: Abbrev: History: Data Who Remark 05/06/2008 amy Create initial version porting from windows driver. ******************************************************************************/ #include "r8192U.h" #include "r819xU_cmdpkt.h" /*---------------------------Define Local Constant---------------------------*/ /* Debug constant*/ #define CMPK_DEBOUNCE_CNT 1 /* 2007/10/24 MH Add for printing a range of data. */ #define CMPK_PRINT(Address)\ {\ unsigned char i;\ u32 temp[10];\ \ memcpy(temp, Address, 40);\ for (i = 0; i <40; i+=4)\ printk("\r\n %08x", temp[i]);\ }\ /*---------------------------Define functions---------------------------------*/ rt_status SendTxCommandPacket( struct net_device *dev, void* pData, u32 DataLen ) { rt_status rtStatus = RT_STATUS_SUCCESS; struct r8192_priv *priv = ieee80211_priv(dev); struct sk_buff *skb; cb_desc *tcb_desc; unsigned char *ptr_buf; //bool bLastInitPacket = false; //PlatformAcquireSpinLock(Adapter, RT_TX_SPINLOCK); //Get TCB and local buffer from common pool. 
(It is shared by CmdQ, MgntQ, and USB coalesce DataQ) skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + DataLen + 4); memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev)); tcb_desc = (cb_desc*)(skb->cb + MAX_DEV_ADDR_SIZE); tcb_desc->queue_index = TXCMD_QUEUE; tcb_desc->bCmdOrInit = DESC_PACKET_TYPE_NORMAL; tcb_desc->bLastIniPkt = 0; skb_reserve(skb, USB_HWDESC_HEADER_LEN); ptr_buf = skb_put(skb, DataLen); memcpy(ptr_buf,pData,DataLen); tcb_desc->txbuf_size= (u16)DataLen; if(!priv->ieee80211->check_nic_enough_desc(dev,tcb_desc->queue_index)|| (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index]))||\ (priv->ieee80211->queue_stop) ) { RT_TRACE(COMP_FIRMWARE,"===================NULL packet==================================> tx full!\n"); skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb); } else { priv->ieee80211->softmac_hard_start_xmit(skb,dev); } //PlatformReleaseSpinLock(Adapter, RT_TX_SPINLOCK); return rtStatus; } /*----------------------------------------------------------------------------- * Function: cmpk_message_handle_tx() * * Overview: Driver internal module can call the API to send message to * firmware side. For example, you can send a debug command packet. * Or you can send a request for FW to modify RLX4181 LBUS HW bank. * Otherwise, you can change MAC/PHT/RF register by firmware at * run time. We do not support message more than one segment now. * * Input: NONE * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 05/06/2008 amy porting from windows code. 
* *---------------------------------------------------------------------------*/ extern rt_status cmpk_message_handle_tx( struct net_device *dev, u8* codevirtualaddress, u32 packettype, u32 buffer_len) { bool rt_status = true; #ifdef RTL8192U return rt_status; #else struct r8192_priv *priv = ieee80211_priv(dev); u16 frag_threshold; u16 frag_length, frag_offset = 0; //u16 total_size; //int i; rt_firmware *pfirmware = priv->pFirmware; struct sk_buff *skb; unsigned char *seg_ptr; cb_desc *tcb_desc; u8 bLastIniPkt; firmware_init_param(dev); //Fragmentation might be required frag_threshold = pfirmware->cmdpacket_frag_thresold; do { if((buffer_len - frag_offset) > frag_threshold) { frag_length = frag_threshold ; bLastIniPkt = 0; } else { frag_length = buffer_len - frag_offset; bLastIniPkt = 1; } /* Allocate skb buffer to contain firmware info and tx descriptor info * add 4 to avoid packet appending overflow. * */ #ifdef RTL8192U skb = dev_alloc_skb(USB_HWDESC_HEADER_LEN + frag_length + 4); #else skb = dev_alloc_skb(frag_length + 4); #endif memcpy((unsigned char *)(skb->cb),&dev,sizeof(dev)); tcb_desc = (cb_desc*)(skb->cb + MAX_DEV_ADDR_SIZE); tcb_desc->queue_index = TXCMD_QUEUE; tcb_desc->bCmdOrInit = packettype; tcb_desc->bLastIniPkt = bLastIniPkt; #ifdef RTL8192U skb_reserve(skb, USB_HWDESC_HEADER_LEN); #endif seg_ptr = skb_put(skb, buffer_len); /* * Transform from little endian to big endian * and pending zero */ memcpy(seg_ptr,codevirtualaddress,buffer_len); tcb_desc->txbuf_size= (u16)buffer_len; if(!priv->ieee80211->check_nic_enough_desc(dev,tcb_desc->queue_index)|| (!skb_queue_empty(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index]))||\ (priv->ieee80211->queue_stop) ) { RT_TRACE(COMP_FIRMWARE,"=====================================================> tx full!\n"); skb_queue_tail(&priv->ieee80211->skb_waitQ[tcb_desc->queue_index], skb); } else { priv->ieee80211->softmac_hard_start_xmit(skb,dev); } codevirtualaddress += frag_length; frag_offset += frag_length; 
}while(frag_offset < buffer_len); return rt_status; #endif } /* CMPK_Message_Handle_Tx */ /*----------------------------------------------------------------------------- * Function: cmpk_counttxstatistic() * * Overview: * * Input: PADAPTER pAdapter - . * CMPK_TXFB_T *psTx_FB - . * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 05/12/2008 amy Create Version 0 porting from windows code. * *---------------------------------------------------------------------------*/ static void cmpk_count_txstatistic( struct net_device *dev, cmpk_txfb_t *pstx_fb) { struct r8192_priv *priv = ieee80211_priv(dev); #ifdef ENABLE_PS RT_RF_POWER_STATE rtState; pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState)); // When RF is off, we should not count the packet for hw/sw synchronize // reason, ie. there may be a duration while sw switch is changed and hw // switch is being changed. 2006.12.04, by shien chang. if (rtState == eRfOff) { return; } #endif #ifdef TODO if(pAdapter->bInHctTest) return; #endif /* We can not know the packet length and transmit type: broadcast or uni or multicast. So the relative statistics must be collected in tx feedback info. */ if (pstx_fb->tok) { priv->stats.txfeedbackok++; priv->stats.txoktotal++; priv->stats.txokbytestotal += pstx_fb->pkt_length; priv->stats.txokinperiod++; /* We can not make sure broadcast/multicast or unicast mode. */ if (pstx_fb->pkt_type == PACKET_MULTICAST) { priv->stats.txmulticast++; priv->stats.txbytesmulticast += pstx_fb->pkt_length; } else if (pstx_fb->pkt_type == PACKET_BROADCAST) { priv->stats.txbroadcast++; priv->stats.txbytesbroadcast += pstx_fb->pkt_length; } else { priv->stats.txunicast++; priv->stats.txbytesunicast += pstx_fb->pkt_length; } } else { priv->stats.txfeedbackfail++; priv->stats.txerrtotal++; priv->stats.txerrbytestotal += pstx_fb->pkt_length; /* We can not make sure broadcast/multicast or unicast mode. 
*/ if (pstx_fb->pkt_type == PACKET_MULTICAST) { priv->stats.txerrmulticast++; } else if (pstx_fb->pkt_type == PACKET_BROADCAST) { priv->stats.txerrbroadcast++; } else { priv->stats.txerrunicast++; } } priv->stats.txretrycount += pstx_fb->retry_cnt; priv->stats.txfeedbackretry += pstx_fb->retry_cnt; } /* cmpk_CountTxStatistic */ /*----------------------------------------------------------------------------- * Function: cmpk_handle_tx_feedback() * * Overview: The function is responsible for extract the message inside TX * feedbck message from firmware. It will contain dedicated info in * ws-06-0063-rtl8190-command-packet-specification. Please * refer to chapter "TX Feedback Element". We have to read 20 bytes * in the command packet. * * Input: struct net_device * dev * u8 * pmsg - Msg Ptr of the command packet. * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 05/08/2008 amy Create Version 0 porting from windows code. * *---------------------------------------------------------------------------*/ static void cmpk_handle_tx_feedback( struct net_device *dev, u8 * pmsg) { struct r8192_priv *priv = ieee80211_priv(dev); cmpk_txfb_t rx_tx_fb; /* */ priv->stats.txfeedback++; /* 0. Display received message. */ //cmpk_Display_Message(CMPK_RX_TX_FB_SIZE, pMsg); /* 1. Extract TX feedback info from RFD to temp structure buffer. */ /* It seems that FW use big endian(MIPS) and DRV use little endian in windows OS. So we have to read the content byte by byte or transfer endian type before copy the message copy. */ /* 2007/07/05 MH Use pointer to transfer structure memory. */ //memcpy((UINT8 *)&rx_tx_fb, pMsg, sizeof(CMPK_TXFB_T)); memcpy((u8*)&rx_tx_fb, pmsg, sizeof(cmpk_txfb_t)); /* 2. Use tx feedback info to count TX statistics. */ cmpk_count_txstatistic(dev, &rx_tx_fb); /* 2007/01/17 MH Comment previous method for TX statistic function. */ /* Collect info TX feedback packet to fill TCB. 
*/ /* We can not know the packet length and transmit type: broadcast or uni or multicast. */ //CountTxStatistics( pAdapter, &tcb ); } /* cmpk_Handle_Tx_Feedback */ void cmdpkt_beacontimerinterrupt_819xusb( struct net_device *dev ) { struct r8192_priv *priv = ieee80211_priv(dev); u16 tx_rate; { // // 070117, rcnjko: 87B have to S/W beacon for DTM encryption_cmn. // if(priv->ieee80211->current_network.mode == IEEE_A || priv->ieee80211->current_network.mode == IEEE_N_5G || (priv->ieee80211->current_network.mode == IEEE_N_24G && (!priv->ieee80211->pHTInfo->bCurSuppCCK))) { tx_rate = 60; DMESG("send beacon frame tx rate is 6Mbpm\n"); } else { tx_rate =10; DMESG("send beacon frame tx rate is 1Mbpm\n"); } rtl819xusb_beacon_tx(dev,tx_rate); // HW Beacon } } /*----------------------------------------------------------------------------- * Function: cmpk_handle_interrupt_status() * * Overview: The function is responsible for extract the message from * firmware. It will contain dedicated info in * ws-07-0063-v06-rtl819x-command-packet-specification-070315.doc. * Please refer to chapter "Interrupt Status Element". * * Input: struct net_device *dev, * u8* pmsg - Message Pointer of the command packet. * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 05/12/2008 amy Add this for rtl8192 porting from windows code. * *---------------------------------------------------------------------------*/ static void cmpk_handle_interrupt_status( struct net_device *dev, u8* pmsg) { cmpk_intr_sta_t rx_intr_status; /* */ struct r8192_priv *priv = ieee80211_priv(dev); DMESG("---> cmpk_Handle_Interrupt_Status()\n"); /* 0. Display received message. */ //cmpk_Display_Message(CMPK_RX_BEACON_STATE_SIZE, pMsg); /* 1. Extract TX feedback info from RFD to temp structure buffer. */ /* It seems that FW use big endian(MIPS) and DRV use little endian in windows OS. So we have to read the content byte by byte or transfer endian type before copy the message copy. 
*/ //rx_bcn_state.Element_ID = pMsg[0]; //rx_bcn_state.Length = pMsg[1]; rx_intr_status.length = pmsg[1]; if (rx_intr_status.length != (sizeof(cmpk_intr_sta_t) - 2)) { DMESG("cmpk_Handle_Interrupt_Status: wrong length!\n"); return; } // Statistics of beacon for ad-hoc mode. if( priv->ieee80211->iw_mode == IW_MODE_ADHOC) { //2 maybe need endian transform? rx_intr_status.interrupt_status = *((u32 *)(pmsg + 4)); //rx_intr_status.InterruptStatus = N2H4BYTE(*((UINT32 *)(pMsg + 4))); DMESG("interrupt status = 0x%x\n", rx_intr_status.interrupt_status); if (rx_intr_status.interrupt_status & ISR_TxBcnOk) { priv->ieee80211->bibsscoordinator = true; priv->stats.txbeaconokint++; } else if (rx_intr_status.interrupt_status & ISR_TxBcnErr) { priv->ieee80211->bibsscoordinator = false; priv->stats.txbeaconerr++; } if (rx_intr_status.interrupt_status & ISR_BcnTimerIntr) { cmdpkt_beacontimerinterrupt_819xusb(dev); } } // Other informations in interrupt status we need? DMESG("<---- cmpk_handle_interrupt_status()\n"); } /* cmpk_handle_interrupt_status */ /*----------------------------------------------------------------------------- * Function: cmpk_handle_query_config_rx() * * Overview: The function is responsible for extract the message from * firmware. It will contain dedicated info in * ws-06-0063-rtl8190-command-packet-specification. Please * refer to chapter "Beacon State Element". * * Input: u8 * pmsg - Message Pointer of the command packet. * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 05/12/2008 amy Create Version 0 porting from windows code. * *---------------------------------------------------------------------------*/ static void cmpk_handle_query_config_rx( struct net_device *dev, u8* pmsg) { cmpk_query_cfg_t rx_query_cfg; /* */ /* 0. Display received message. */ //cmpk_Display_Message(CMPK_RX_BEACON_STATE_SIZE, pMsg); /* 1. Extract TX feedback info from RFD to temp structure buffer. 
*/ /* It seems that FW use big endian(MIPS) and DRV use little endian in windows OS. So we have to read the content byte by byte or transfer endian type before copy the message copy. */ //rx_query_cfg.Element_ID = pMsg[0]; //rx_query_cfg.Length = pMsg[1]; rx_query_cfg.cfg_action = (pmsg[4] & 0x80000000)>>31; rx_query_cfg.cfg_type = (pmsg[4] & 0x60) >> 5; rx_query_cfg.cfg_size = (pmsg[4] & 0x18) >> 3; rx_query_cfg.cfg_page = (pmsg[6] & 0x0F) >> 0; rx_query_cfg.cfg_offset = pmsg[7]; rx_query_cfg.value = (pmsg[8] << 24) | (pmsg[9] << 16) | (pmsg[10] << 8) | (pmsg[11] << 0); rx_query_cfg.mask = (pmsg[12] << 24) | (pmsg[13] << 16) | (pmsg[14] << 8) | (pmsg[15] << 0); } /* cmpk_Handle_Query_Config_Rx */ /*----------------------------------------------------------------------------- * Function: cmpk_count_tx_status() * * Overview: Count aggregated tx status from firmwar of one type rx command * packet element id = RX_TX_STATUS. * * Input: NONE * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 05/12/2008 amy Create Version 0 porting from windows code. * *---------------------------------------------------------------------------*/ static void cmpk_count_tx_status( struct net_device *dev, cmpk_tx_status_t *pstx_status) { struct r8192_priv *priv = ieee80211_priv(dev); #ifdef ENABLE_PS RT_RF_POWER_STATE rtstate; pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState)); // When RF is off, we should not count the packet for hw/sw synchronize // reason, ie. there may be a duration while sw switch is changed and hw // switch is being changed. 2006.12.04, by shien chang. 
if (rtState == eRfOff) { return; } #endif priv->stats.txfeedbackok += pstx_status->txok; priv->stats.txoktotal += pstx_status->txok; priv->stats.txfeedbackfail += pstx_status->txfail; priv->stats.txerrtotal += pstx_status->txfail; priv->stats.txretrycount += pstx_status->txretry; priv->stats.txfeedbackretry += pstx_status->txretry; //pAdapter->TxStats.NumTxOkBytesTotal += psTx_FB->pkt_length; //pAdapter->TxStats.NumTxErrBytesTotal += psTx_FB->pkt_length; //pAdapter->MgntInfo.LinkDetectInfo.NumTxOkInPeriod++; priv->stats.txmulticast += pstx_status->txmcok; priv->stats.txbroadcast += pstx_status->txbcok; priv->stats.txunicast += pstx_status->txucok; priv->stats.txerrmulticast += pstx_status->txmcfail; priv->stats.txerrbroadcast += pstx_status->txbcfail; priv->stats.txerrunicast += pstx_status->txucfail; priv->stats.txbytesmulticast += pstx_status->txmclength; priv->stats.txbytesbroadcast += pstx_status->txbclength; priv->stats.txbytesunicast += pstx_status->txuclength; priv->stats.last_packet_rate = pstx_status->rate; } /* cmpk_CountTxStatus */ /*----------------------------------------------------------------------------- * Function: cmpk_handle_tx_status() * * Overview: Firmware add a new tx feedback status to reduce rx command * packet buffer operation load. * * Input: NONE * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 05/12/2008 amy Create Version 0 porting from windows code. * *---------------------------------------------------------------------------*/ static void cmpk_handle_tx_status( struct net_device *dev, u8* pmsg) { cmpk_tx_status_t rx_tx_sts; /* */ memcpy((void*)&rx_tx_sts, (void*)pmsg, sizeof(cmpk_tx_status_t)); /* 2. Use tx feedback info to count TX statistics. 
*/ cmpk_count_tx_status(dev, &rx_tx_sts); } /* cmpk_Handle_Tx_Status */ /*----------------------------------------------------------------------------- * Function: cmpk_handle_tx_rate_history() * * Overview: Firmware add a new tx rate history * * Input: NONE * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 05/12/2008 amy Create Version 0 porting from windows code. * *---------------------------------------------------------------------------*/ static void cmpk_handle_tx_rate_history( struct net_device *dev, u8* pmsg) { cmpk_tx_rahis_t *ptxrate; // RT_RF_POWER_STATE rtState; u8 i, j; u16 length = sizeof(cmpk_tx_rahis_t); u32 *ptemp; struct r8192_priv *priv = ieee80211_priv(dev); #ifdef ENABLE_PS pAdapter->HalFunc.GetHwRegHandler(pAdapter, HW_VAR_RF_STATE, (pu1Byte)(&rtState)); // When RF is off, we should not count the packet for hw/sw synchronize // reason, ie. there may be a duration while sw switch is changed and hw // switch is being changed. 2006.12.04, by shien chang. if (rtState == eRfOff) { return; } #endif ptemp = (u32 *)pmsg; // // Do endian transfer to word alignment(16 bits) for windows system. // You must do different endian transfer for linux and MAC OS // for (i = 0; i < (length/4); i++) { u16 temp1, temp2; temp1 = ptemp[i]&0x0000FFFF; temp2 = ptemp[i]>>16; ptemp[i] = (temp1<<16)|temp2; } ptxrate = (cmpk_tx_rahis_t *)pmsg; if (ptxrate == NULL ) { return; } for (i = 0; i < 16; i++) { // Collect CCK rate packet num if (i < 4) priv->stats.txrate.cck[i] += ptxrate->cck[i]; // Collect OFDM rate packet num if (i< 8) priv->stats.txrate.ofdm[i] += ptxrate->ofdm[i]; for (j = 0; j < 4; j++) priv->stats.txrate.ht_mcs[j][i] += ptxrate->ht_mcs[j][i]; } } /* cmpk_Handle_Tx_Rate_History */ /*----------------------------------------------------------------------------- * Function: cmpk_message_handle_rx() * * Overview: In the function, we will capture different RX command packet * info. 
Every RX command packet element has different message * length and meaning in content. We only support three type of RX * command packet now. Please refer to document * ws-06-0063-rtl8190-command-packet-specification. * * Input: NONE * * Output: NONE * * Return: NONE * * Revised History: * When Who Remark * 05/06/2008 amy Create Version 0 porting from windows code. * *---------------------------------------------------------------------------*/ extern u32 cmpk_message_handle_rx( struct net_device *dev, struct ieee80211_rx_stats *pstats) { // u32 debug_level = DBG_LOUD; struct r8192_priv *priv = ieee80211_priv(dev); int total_length; u8 cmd_length, exe_cnt = 0; u8 element_id; u8 *pcmd_buff; /* 0. Check inpt arguments. If is is a command queue message or pointer is null. */ if (/*(prfd->queue_id != CMPK_RX_QUEUE_ID) || */(pstats== NULL)) { /* Print error message. */ /*RT_TRACE(COMP_SEND, DebugLevel, ("\n\r[CMPK]-->Err queue id or pointer"));*/ return 0; /* This is not a command packet. */ } /* 1. Read received command packet message length from RFD. */ total_length = pstats->Length; /* 2. Read virtual address from RFD. */ pcmd_buff = pstats->virtual_address; /* 3. Read command pakcet element id and length. */ element_id = pcmd_buff[0]; /*RT_TRACE(COMP_SEND, DebugLevel, ("\n\r[CMPK]-->element ID=%d Len=%d", element_id, total_length));*/ /* 4. Check every received command packet conent according to different element type. Because FW may aggregate RX command packet to minimize transmit time between DRV and FW.*/ // Add a counter to prevent to locked in the loop too long while (total_length > 0 || exe_cnt++ >100) { /* 2007/01/17 MH We support aggregation of different cmd in the same packet. 
*/ element_id = pcmd_buff[0]; switch(element_id) { case RX_TX_FEEDBACK: cmpk_handle_tx_feedback (dev, pcmd_buff); cmd_length = CMPK_RX_TX_FB_SIZE; break; case RX_INTERRUPT_STATUS: cmpk_handle_interrupt_status(dev, pcmd_buff); cmd_length = sizeof(cmpk_intr_sta_t); break; case BOTH_QUERY_CONFIG: cmpk_handle_query_config_rx(dev, pcmd_buff); cmd_length = CMPK_BOTH_QUERY_CONFIG_SIZE; break; case RX_TX_STATUS: cmpk_handle_tx_status(dev, pcmd_buff); cmd_length = CMPK_RX_TX_STS_SIZE; break; case RX_TX_PER_PKT_FEEDBACK: // You must at lease add a switch case element here, // Otherwise, we will jump to default case. //DbgPrint("CCX Test\r\n"); cmd_length = CMPK_RX_TX_FB_SIZE; break; case RX_TX_RATE_HISTORY: //DbgPrint(" rx tx rate history\r\n"); cmpk_handle_tx_rate_history(dev, pcmd_buff); cmd_length = CMPK_TX_RAHIS_SIZE; break; default: RT_TRACE(COMP_ERR, "---->cmpk_message_handle_rx():unknow CMD Element\n"); return 1; /* This is a command packet. */ } // 2007/01/22 MH Display received rx command packet info. //cmpk_Display_Message(cmd_length, pcmd_buff); // 2007/01/22 MH Add to display tx statistic. //cmpk_DisplayTxStatistic(pAdapter); /* 2007/03/09 MH Collect sidderent cmd element pkt num. */ priv->stats.rxcmdpkt[element_id]++; total_length -= cmd_length; pcmd_buff += cmd_length; } /* while (total_length > 0) */ return 1; /* This is a command packet. */ } /* CMPK_Message_Handle_Rx */
gpl-2.0
mgbotoe/GT-I8552-kernel-source
arch/arm/mach-bcmring/csp/chipc/chipcHw_str.c
9553
2009
/*****************************************************************************
* Copyright 2008 Broadcom Corporation.  All rights reserved.
*
* Unless you and Broadcom execute a separate written software license
* agreement governing use of this software, this software is licensed to you
* under the terms of the GNU General Public License version 2, available at
* http://www.broadcom.com/licenses/GPLv2.php (the "GPL").
*
* Notwithstanding the above, under no circumstances may you combine this
* software in any way with any other Broadcom software provided under a
* license other than the GPL, without Broadcom's express prior written
* consent.
*****************************************************************************/

/****************************************************************************/
/**
*  @file    chipcHw_str.c
*
*  @brief   Contains strings which are useful to linux and csp
*
*  @note
*/
/****************************************************************************/

/* ---- Include Files ---------------------------------------------------- */

#include <mach/csp/chipcHw_inline.h>

/* ---- Private Constants and Types --------------------------------------- */

/* Human-readable names for the sixteen possible pin-mux selections; the
 * table index is the raw mux value read back from the hardware. */
static const char *gMuxStr[] = {
	"GPIO",			/* 0 */
	"KeyPad",		/* 1 */
	"I2C-Host",		/* 2 */
	"SPI",			/* 3 */
	"Uart",			/* 4 */
	"LED-Mtx-P",		/* 5 */
	"LED-Mtx-S",		/* 6 */
	"SDIO-0",		/* 7 */
	"SDIO-1",		/* 8 */
	"PCM",			/* 9 */
	"I2S",			/* 10 */
	"ETM",			/* 11 */
	"Debug",		/* 12 */
	"Misc",			/* 13 */
	"0xE",			/* 14 */
	"0xF",			/* 15 */
};

/****************************************************************************/
/**
*  @brief   Retrieves a string representation of the mux setting for a pin.
*
*  @return  Pointer to a character string ("" for an out-of-range pin).
*/
/****************************************************************************/
const char *chipcHw_getGpioPinFunctionStr(int pin)
{
	int valid = (pin >= 0) && (pin < chipcHw_GPIO_COUNT);

	return valid ? gMuxStr[chipcHw_getGpioPinFunction(pin)] : "";
}
gpl-2.0
eoghan2t9/Oppo-Find5-4.2-Kernel
drivers/isdn/i4l/isdn_audio.c
9553
20305
/* $Id: isdn_audio.c,v 1.1.2.2 2004/01/12 22:37:18 keil Exp $ * * Linux ISDN subsystem, audio conversion and compression (linklevel). * * Copyright 1994-1999 by Fritz Elfert (fritz@isdn4linux.de) * DTMF code (c) 1996 by Christian Mock (cm@kukuruz.ping.at) * Silence detection (c) 1998 by Armin Schindler (mac@gismo.telekom.de) * * This software may be used and distributed according to the terms * of the GNU General Public License, incorporated herein by reference. * */ #include <linux/isdn.h> #include <linux/slab.h> #include "isdn_audio.h" #include "isdn_common.h" char *isdn_audio_revision = "$Revision: 1.1.2.2 $"; /* * Misc. lookup-tables. */ /* ulaw -> signed 16-bit */ static short isdn_audio_ulaw_to_s16[] = { 0x8284, 0x8684, 0x8a84, 0x8e84, 0x9284, 0x9684, 0x9a84, 0x9e84, 0xa284, 0xa684, 0xaa84, 0xae84, 0xb284, 0xb684, 0xba84, 0xbe84, 0xc184, 0xc384, 0xc584, 0xc784, 0xc984, 0xcb84, 0xcd84, 0xcf84, 0xd184, 0xd384, 0xd584, 0xd784, 0xd984, 0xdb84, 0xdd84, 0xdf84, 0xe104, 0xe204, 0xe304, 0xe404, 0xe504, 0xe604, 0xe704, 0xe804, 0xe904, 0xea04, 0xeb04, 0xec04, 0xed04, 0xee04, 0xef04, 0xf004, 0xf0c4, 0xf144, 0xf1c4, 0xf244, 0xf2c4, 0xf344, 0xf3c4, 0xf444, 0xf4c4, 0xf544, 0xf5c4, 0xf644, 0xf6c4, 0xf744, 0xf7c4, 0xf844, 0xf8a4, 0xf8e4, 0xf924, 0xf964, 0xf9a4, 0xf9e4, 0xfa24, 0xfa64, 0xfaa4, 0xfae4, 0xfb24, 0xfb64, 0xfba4, 0xfbe4, 0xfc24, 0xfc64, 0xfc94, 0xfcb4, 0xfcd4, 0xfcf4, 0xfd14, 0xfd34, 0xfd54, 0xfd74, 0xfd94, 0xfdb4, 0xfdd4, 0xfdf4, 0xfe14, 0xfe34, 0xfe54, 0xfe74, 0xfe8c, 0xfe9c, 0xfeac, 0xfebc, 0xfecc, 0xfedc, 0xfeec, 0xfefc, 0xff0c, 0xff1c, 0xff2c, 0xff3c, 0xff4c, 0xff5c, 0xff6c, 0xff7c, 0xff88, 0xff90, 0xff98, 0xffa0, 0xffa8, 0xffb0, 0xffb8, 0xffc0, 0xffc8, 0xffd0, 0xffd8, 0xffe0, 0xffe8, 0xfff0, 0xfff8, 0x0000, 0x7d7c, 0x797c, 0x757c, 0x717c, 0x6d7c, 0x697c, 0x657c, 0x617c, 0x5d7c, 0x597c, 0x557c, 0x517c, 0x4d7c, 0x497c, 0x457c, 0x417c, 0x3e7c, 0x3c7c, 0x3a7c, 0x387c, 0x367c, 0x347c, 0x327c, 0x307c, 0x2e7c, 0x2c7c, 0x2a7c, 0x287c, 0x267c, 0x247c, 0x227c, 0x207c, 
0x1efc, 0x1dfc, 0x1cfc, 0x1bfc, 0x1afc, 0x19fc, 0x18fc, 0x17fc, 0x16fc, 0x15fc, 0x14fc, 0x13fc, 0x12fc, 0x11fc, 0x10fc, 0x0ffc, 0x0f3c, 0x0ebc, 0x0e3c, 0x0dbc, 0x0d3c, 0x0cbc, 0x0c3c, 0x0bbc, 0x0b3c, 0x0abc, 0x0a3c, 0x09bc, 0x093c, 0x08bc, 0x083c, 0x07bc, 0x075c, 0x071c, 0x06dc, 0x069c, 0x065c, 0x061c, 0x05dc, 0x059c, 0x055c, 0x051c, 0x04dc, 0x049c, 0x045c, 0x041c, 0x03dc, 0x039c, 0x036c, 0x034c, 0x032c, 0x030c, 0x02ec, 0x02cc, 0x02ac, 0x028c, 0x026c, 0x024c, 0x022c, 0x020c, 0x01ec, 0x01cc, 0x01ac, 0x018c, 0x0174, 0x0164, 0x0154, 0x0144, 0x0134, 0x0124, 0x0114, 0x0104, 0x00f4, 0x00e4, 0x00d4, 0x00c4, 0x00b4, 0x00a4, 0x0094, 0x0084, 0x0078, 0x0070, 0x0068, 0x0060, 0x0058, 0x0050, 0x0048, 0x0040, 0x0038, 0x0030, 0x0028, 0x0020, 0x0018, 0x0010, 0x0008, 0x0000 }; /* alaw -> signed 16-bit */ static short isdn_audio_alaw_to_s16[] = { 0x13fc, 0xec04, 0x0144, 0xfebc, 0x517c, 0xae84, 0x051c, 0xfae4, 0x0a3c, 0xf5c4, 0x0048, 0xffb8, 0x287c, 0xd784, 0x028c, 0xfd74, 0x1bfc, 0xe404, 0x01cc, 0xfe34, 0x717c, 0x8e84, 0x071c, 0xf8e4, 0x0e3c, 0xf1c4, 0x00c4, 0xff3c, 0x387c, 0xc784, 0x039c, 0xfc64, 0x0ffc, 0xf004, 0x0104, 0xfefc, 0x417c, 0xbe84, 0x041c, 0xfbe4, 0x083c, 0xf7c4, 0x0008, 0xfff8, 0x207c, 0xdf84, 0x020c, 0xfdf4, 0x17fc, 0xe804, 0x018c, 0xfe74, 0x617c, 0x9e84, 0x061c, 0xf9e4, 0x0c3c, 0xf3c4, 0x0084, 0xff7c, 0x307c, 0xcf84, 0x030c, 0xfcf4, 0x15fc, 0xea04, 0x0164, 0xfe9c, 0x597c, 0xa684, 0x059c, 0xfa64, 0x0b3c, 0xf4c4, 0x0068, 0xff98, 0x2c7c, 0xd384, 0x02cc, 0xfd34, 0x1dfc, 0xe204, 0x01ec, 0xfe14, 0x797c, 0x8684, 0x07bc, 0xf844, 0x0f3c, 0xf0c4, 0x00e4, 0xff1c, 0x3c7c, 0xc384, 0x03dc, 0xfc24, 0x11fc, 0xee04, 0x0124, 0xfedc, 0x497c, 0xb684, 0x049c, 0xfb64, 0x093c, 0xf6c4, 0x0028, 0xffd8, 0x247c, 0xdb84, 0x024c, 0xfdb4, 0x19fc, 0xe604, 0x01ac, 0xfe54, 0x697c, 0x9684, 0x069c, 0xf964, 0x0d3c, 0xf2c4, 0x00a4, 0xff5c, 0x347c, 0xcb84, 0x034c, 0xfcb4, 0x12fc, 0xed04, 0x0134, 0xfecc, 0x4d7c, 0xb284, 0x04dc, 0xfb24, 0x09bc, 0xf644, 0x0038, 0xffc8, 0x267c, 0xd984, 0x026c, 0xfd94, 0x1afc, 
0xe504, 0x01ac, 0xfe54, 0x6d7c, 0x9284, 0x06dc, 0xf924, 0x0dbc, 0xf244, 0x00b4, 0xff4c, 0x367c, 0xc984, 0x036c, 0xfc94, 0x0f3c, 0xf0c4, 0x00f4, 0xff0c, 0x3e7c, 0xc184, 0x03dc, 0xfc24, 0x07bc, 0xf844, 0x0008, 0xfff8, 0x1efc, 0xe104, 0x01ec, 0xfe14, 0x16fc, 0xe904, 0x0174, 0xfe8c, 0x5d7c, 0xa284, 0x05dc, 0xfa24, 0x0bbc, 0xf444, 0x0078, 0xff88, 0x2e7c, 0xd184, 0x02ec, 0xfd14, 0x14fc, 0xeb04, 0x0154, 0xfeac, 0x557c, 0xaa84, 0x055c, 0xfaa4, 0x0abc, 0xf544, 0x0058, 0xffa8, 0x2a7c, 0xd584, 0x02ac, 0xfd54, 0x1cfc, 0xe304, 0x01cc, 0xfe34, 0x757c, 0x8a84, 0x075c, 0xf8a4, 0x0ebc, 0xf144, 0x00d4, 0xff2c, 0x3a7c, 0xc584, 0x039c, 0xfc64, 0x10fc, 0xef04, 0x0114, 0xfeec, 0x457c, 0xba84, 0x045c, 0xfba4, 0x08bc, 0xf744, 0x0018, 0xffe8, 0x227c, 0xdd84, 0x022c, 0xfdd4, 0x18fc, 0xe704, 0x018c, 0xfe74, 0x657c, 0x9a84, 0x065c, 0xf9a4, 0x0cbc, 0xf344, 0x0094, 0xff6c, 0x327c, 0xcd84, 0x032c, 0xfcd4 }; /* alaw -> ulaw */ static char isdn_audio_alaw_to_ulaw[] = { 0xab, 0x2b, 0xe3, 0x63, 0x8b, 0x0b, 0xc9, 0x49, 0xba, 0x3a, 0xf6, 0x76, 0x9b, 0x1b, 0xd7, 0x57, 0xa3, 0x23, 0xdd, 0x5d, 0x83, 0x03, 0xc1, 0x41, 0xb2, 0x32, 0xeb, 0x6b, 0x93, 0x13, 0xcf, 0x4f, 0xaf, 0x2f, 0xe7, 0x67, 0x8f, 0x0f, 0xcd, 0x4d, 0xbe, 0x3e, 0xfe, 0x7e, 0x9f, 0x1f, 0xdb, 0x5b, 0xa7, 0x27, 0xdf, 0x5f, 0x87, 0x07, 0xc5, 0x45, 0xb6, 0x36, 0xef, 0x6f, 0x97, 0x17, 0xd3, 0x53, 0xa9, 0x29, 0xe1, 0x61, 0x89, 0x09, 0xc7, 0x47, 0xb8, 0x38, 0xf2, 0x72, 0x99, 0x19, 0xd5, 0x55, 0xa1, 0x21, 0xdc, 0x5c, 0x81, 0x01, 0xbf, 0x3f, 0xb0, 0x30, 0xe9, 0x69, 0x91, 0x11, 0xce, 0x4e, 0xad, 0x2d, 0xe5, 0x65, 0x8d, 0x0d, 0xcb, 0x4b, 0xbc, 0x3c, 0xfa, 0x7a, 0x9d, 0x1d, 0xd9, 0x59, 0xa5, 0x25, 0xde, 0x5e, 0x85, 0x05, 0xc3, 0x43, 0xb4, 0x34, 0xed, 0x6d, 0x95, 0x15, 0xd1, 0x51, 0xac, 0x2c, 0xe4, 0x64, 0x8c, 0x0c, 0xca, 0x4a, 0xbb, 0x3b, 0xf8, 0x78, 0x9c, 0x1c, 0xd8, 0x58, 0xa4, 0x24, 0xde, 0x5e, 0x84, 0x04, 0xc2, 0x42, 0xb3, 0x33, 0xec, 0x6c, 0x94, 0x14, 0xd0, 0x50, 0xb0, 0x30, 0xe8, 0x68, 0x90, 0x10, 0xce, 0x4e, 0xbf, 0x3f, 0xfe, 0x7e, 0xa0, 0x20, 
0xdc, 0x5c, 0xa8, 0x28, 0xe0, 0x60, 0x88, 0x08, 0xc6, 0x46, 0xb7, 0x37, 0xf0, 0x70, 0x98, 0x18, 0xd4, 0x54, 0xaa, 0x2a, 0xe2, 0x62, 0x8a, 0x0a, 0xc8, 0x48, 0xb9, 0x39, 0xf4, 0x74, 0x9a, 0x1a, 0xd6, 0x56, 0xa2, 0x22, 0xdd, 0x5d, 0x82, 0x02, 0xc0, 0x40, 0xb1, 0x31, 0xea, 0x6a, 0x92, 0x12, 0xcf, 0x4f, 0xae, 0x2e, 0xe6, 0x66, 0x8e, 0x0e, 0xcc, 0x4c, 0xbd, 0x3d, 0xfc, 0x7c, 0x9e, 0x1e, 0xda, 0x5a, 0xa6, 0x26, 0xdf, 0x5f, 0x86, 0x06, 0xc4, 0x44, 0xb5, 0x35, 0xee, 0x6e, 0x96, 0x16, 0xd2, 0x52 }; /* ulaw -> alaw */ static char isdn_audio_ulaw_to_alaw[] = { 0xab, 0x55, 0xd5, 0x15, 0x95, 0x75, 0xf5, 0x35, 0xb5, 0x45, 0xc5, 0x05, 0x85, 0x65, 0xe5, 0x25, 0xa5, 0x5d, 0xdd, 0x1d, 0x9d, 0x7d, 0xfd, 0x3d, 0xbd, 0x4d, 0xcd, 0x0d, 0x8d, 0x6d, 0xed, 0x2d, 0xad, 0x51, 0xd1, 0x11, 0x91, 0x71, 0xf1, 0x31, 0xb1, 0x41, 0xc1, 0x01, 0x81, 0x61, 0xe1, 0x21, 0x59, 0xd9, 0x19, 0x99, 0x79, 0xf9, 0x39, 0xb9, 0x49, 0xc9, 0x09, 0x89, 0x69, 0xe9, 0x29, 0xa9, 0xd7, 0x17, 0x97, 0x77, 0xf7, 0x37, 0xb7, 0x47, 0xc7, 0x07, 0x87, 0x67, 0xe7, 0x27, 0xa7, 0xdf, 0x9f, 0x7f, 0xff, 0x3f, 0xbf, 0x4f, 0xcf, 0x0f, 0x8f, 0x6f, 0xef, 0x2f, 0x53, 0x13, 0x73, 0x33, 0xb3, 0x43, 0xc3, 0x03, 0x83, 0x63, 0xe3, 0x23, 0xa3, 0x5b, 0xdb, 0x1b, 0x9b, 0x7b, 0xfb, 0x3b, 0xbb, 0xbb, 0x4b, 0x4b, 0xcb, 0xcb, 0x0b, 0x0b, 0x8b, 0x8b, 0x6b, 0x6b, 0xeb, 0xeb, 0x2b, 0x2b, 0xab, 0x54, 0xd4, 0x14, 0x94, 0x74, 0xf4, 0x34, 0xb4, 0x44, 0xc4, 0x04, 0x84, 0x64, 0xe4, 0x24, 0xa4, 0x5c, 0xdc, 0x1c, 0x9c, 0x7c, 0xfc, 0x3c, 0xbc, 0x4c, 0xcc, 0x0c, 0x8c, 0x6c, 0xec, 0x2c, 0xac, 0x50, 0xd0, 0x10, 0x90, 0x70, 0xf0, 0x30, 0xb0, 0x40, 0xc0, 0x00, 0x80, 0x60, 0xe0, 0x20, 0x58, 0xd8, 0x18, 0x98, 0x78, 0xf8, 0x38, 0xb8, 0x48, 0xc8, 0x08, 0x88, 0x68, 0xe8, 0x28, 0xa8, 0xd6, 0x16, 0x96, 0x76, 0xf6, 0x36, 0xb6, 0x46, 0xc6, 0x06, 0x86, 0x66, 0xe6, 0x26, 0xa6, 0xde, 0x9e, 0x7e, 0xfe, 0x3e, 0xbe, 0x4e, 0xce, 0x0e, 0x8e, 0x6e, 0xee, 0x2e, 0x52, 0x12, 0x72, 0x32, 0xb2, 0x42, 0xc2, 0x02, 0x82, 0x62, 0xe2, 0x22, 0xa2, 0x5a, 0xda, 0x1a, 0x9a, 0x7a, 0xfa, 0x3a, 
0xba, 0xba, 0x4a, 0x4a, 0xca, 0xca, 0x0a, 0x0a,
	0x8a, 0x8a, 0x6a, 0x6a, 0xea, 0xea, 0x2a, 0x2a
};

#define NCOEFF            8	/* number of frequencies to be analyzed */
#define DTMF_TRESH     4000	/* above this is dtmf */
#define SILENCE_TRESH   200	/* below this is silence */
#define AMP_BITS          9	/* bits per sample, reduced to avoid overflow */
#define LOGRP             0
#define HIGRP             1

/* For DTMF recognition:
 * 2 * cos(2 * PI * k / N) precalculated for all k
 */
static int cos2pik[NCOEFF] =
{
	55813, 53604, 51193, 48591, 38114, 33057, 25889, 18332
};

/* DTMF keypad layout: first index is the low-frequency group, second index
 * the high-frequency group (used by isdn_audio_eval_dtmf()). */
static char dtmf_matrix[4][4] =
{
	{'1', '2', '3', 'A'},
	{'4', '5', '6', 'B'},
	{'7', '8', '9', 'C'},
	{'*', '0', '#', 'D'}
};

/* Translate n bytes in buff in place through the 256-entry lookup table.
 * On i386 a hand-coded lodsb/xlatb/stosb loop is used; on all other
 * architectures a plain C loop performs the same translation. */
static inline void
isdn_audio_tlookup(const u_char *table, u_char *buff, unsigned long n)
{
#ifdef __i386__
	/* d0..d3 only pin the ebx/ecx/edi/esi registers used by the asm. */
	unsigned long d0, d1, d2, d3;
	__asm__ __volatile__(
		"cld\n"
		"1:\tlodsb\n\t"
		"xlatb\n\t"
		"stosb\n\t"
		"loop 1b\n\t"
		: "=&b"(d0), "=&c"(d1), "=&D"(d2), "=&S"(d3)
		: "0"((long) table), "1"(n), "2"((long) buff), "3"((long) buff)
		: "memory", "ax");
#else
	while (n--)
		*buff = table[*(unsigned char *)buff], buff++;
#endif
}

/* Convert a buffer of u-law samples to A-law in place. */
void
isdn_audio_ulaw2alaw(unsigned char *buff, unsigned long len)
{
	isdn_audio_tlookup(isdn_audio_ulaw_to_alaw, buff, len);
}

/* Convert a buffer of A-law samples to u-law in place. */
void
isdn_audio_alaw2ulaw(unsigned char *buff, unsigned long len)
{
	isdn_audio_tlookup(isdn_audio_alaw_to_ulaw, buff, len);
}

/*
 * linear <-> adpcm conversion stuff
 * Most parts from the mgetty-package.
* (C) by Gert Doering and Klaus Weidner * Used by permission of Gert Doering */ #define ZEROTRAP /* turn on the trap as per the MIL-STD */ #undef ZEROTRAP #define BIAS 0x84 /* define the add-in bias for 16 bit samples */ #define CLIP 32635 static unsigned char isdn_audio_linear2ulaw(int sample) { static int exp_lut[256] = { 0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7 }; int sign, exponent, mantissa; unsigned char ulawbyte; /* Get the sample into sign-magnitude. */ sign = (sample >> 8) & 0x80; /* set aside the sign */ if (sign != 0) sample = -sample; /* get magnitude */ if (sample > CLIP) sample = CLIP; /* clip the magnitude */ /* Convert from 16 bit linear to ulaw. 
*/ sample = sample + BIAS; exponent = exp_lut[(sample >> 7) & 0xFF]; mantissa = (sample >> (exponent + 3)) & 0x0F; ulawbyte = ~(sign | (exponent << 4) | mantissa); #ifdef ZEROTRAP /* optional CCITT trap */ if (ulawbyte == 0) ulawbyte = 0x02; #endif return (ulawbyte); } static int Mx[3][8] = { {0x3800, 0x5600, 0, 0, 0, 0, 0, 0}, {0x399a, 0x3a9f, 0x4d14, 0x6607, 0, 0, 0, 0}, {0x3556, 0x3556, 0x399A, 0x3A9F, 0x4200, 0x4D14, 0x6607, 0x6607}, }; static int bitmask[9] = { 0, 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff }; static int isdn_audio_get_bits(adpcm_state *s, unsigned char **in, int *len) { while (s->nleft < s->nbits) { int d = *((*in)++); (*len)--; s->word = (s->word << 8) | d; s->nleft += 8; } s->nleft -= s->nbits; return (s->word >> s->nleft) & bitmask[s->nbits]; } static void isdn_audio_put_bits(int data, int nbits, adpcm_state *s, unsigned char **out, int *len) { s->word = (s->word << nbits) | (data & bitmask[nbits]); s->nleft += nbits; while (s->nleft >= 8) { int d = (s->word >> (s->nleft - 8)); *(out[0]++) = d & 255; (*len)++; s->nleft -= 8; } } adpcm_state * isdn_audio_adpcm_init(adpcm_state *s, int nbits) { if (!s) s = kmalloc(sizeof(adpcm_state), GFP_ATOMIC); if (s) { s->a = 0; s->d = 5; s->word = 0; s->nleft = 0; s->nbits = nbits; } return s; } dtmf_state * isdn_audio_dtmf_init(dtmf_state *s) { if (!s) s = kmalloc(sizeof(dtmf_state), GFP_ATOMIC); if (s) { s->idx = 0; s->last = ' '; } return s; } /* * Decompression of adpcm data to a/u-law * */ int isdn_audio_adpcm2xlaw(adpcm_state *s, int fmt, unsigned char *in, unsigned char *out, int len) { int a = s->a; int d = s->d; int nbits = s->nbits; int olen = 0; while (len) { int e = isdn_audio_get_bits(s, &in, &len); int sign; if (nbits == 4 && e == 0) d = 4; sign = (e >> (nbits - 1)) ? 
-1 : 1;
		e &= bitmask[nbits - 1];
		/* Update the predictor: a += sign * (2e + 1) * d / 2; the
		   extra increment compensates the truncating shift when d
		   is odd. */
		a += sign * ((e << 1) + 1) * d >> 1;
		if (d & 1)
			a++;
		/* Emit the decoded sample as u-law, or map it on to A-law
		   when fmt != 0. */
		if (fmt)
			*out++ = isdn_audio_ulaw_to_alaw[
				isdn_audio_linear2ulaw(a << 2)];
		else
			*out++ = isdn_audio_linear2ulaw(a << 2);
		olen++;
		/* Adapt the step size from the Mx table, clamped below. */
		d = (d * Mx[nbits - 2][e] + 0x2000) >> 14;
		if (d < 5)
			d = 5;
	}
	s->a = a;
	s->d = d;
	return olen;
}

/* Compression of a/u-law data to adpcm; counterpart of
 * isdn_audio_adpcm2xlaw().  Returns the number of output bytes produced. */
int
isdn_audio_xlaw2adpcm(adpcm_state *s, int fmt, unsigned char *in,
		      unsigned char *out, int len)
{
	int a = s->a;
	int d = s->d;
	int nbits = s->nbits;
	int olen = 0;

	while (len--) {
		int e = 0, nmax = 1 << (nbits - 1);
		int sign, delta;

		/* Difference between the linearized input sample and the
		   predictor; fmt != 0 selects the A-law input table. */
		if (fmt)
			delta = (isdn_audio_alaw_to_s16[*in++] >> 2) - a;
		else
			delta = (isdn_audio_ulaw_to_s16[*in++] >> 2) - a;
		if (delta < 0) {
			e = nmax;	/* set the sign bit of the code */
			delta = -delta;
		}
		/* Quantize |delta| in units of the current step size d. */
		while (--nmax && delta > d) {
			delta -= d;
			e++;
		}
		if (nbits == 4 && ((e & 0x0f) == 0))
			e = 8;
		isdn_audio_put_bits(e, nbits, s, &out, &olen);
		/* Track the decoder: update predictor and step size exactly
		   as isdn_audio_adpcm2xlaw() will on the receiving side. */
		sign = (e >> (nbits - 1)) ? -1 : 1;
		e &= bitmask[nbits - 1];

		a += sign * ((e << 1) + 1) * d >> 1;
		if (d & 1)
			a++;
		d = (d * Mx[nbits - 2][e] + 0x2000) >> 14;
		if (d < 5)
			d = 5;
	}
	s->a = a;
	s->d = d;
	return olen;
}

/*
 * Goertzel algorithm.
 * See http://ptolemy.eecs.berkeley.edu/papers/96/dtmf_ict/
 * for more info.
 * Result is stored into an sk_buff and queued up for later
 * evaluation.
 */
static void
isdn_audio_goertzel(int *sample, modem_info *info)
{
	int sk, sk1, sk2;
	int k, n;
	struct sk_buff *skb;
	int *result;

	/* One int of squared magnitude per analyzed frequency. */
	skb = dev_alloc_skb(sizeof(int) * NCOEFF);
	if (!skb) {
		printk(KERN_WARNING
		       "isdn_audio: Could not alloc DTMF result for ttyI%d\n",
		       info->line);
		return;
	}
	result = (int *) skb_put(skb, sizeof(int) * NCOEFF);
	for (k = 0; k < NCOEFF; k++) {
		sk = sk1 = sk2 = 0;
		for (n = 0; n < DTMF_NPOINTS; n++) {
			sk = sample[n] + ((cos2pik[k] * sk1) >> 15) - sk2;
			sk2 = sk1;
			sk1 = sk;
		}
		/* Avoid overflows */
		sk >>= 1;
		sk2 >>= 1;
		/* compute |X(k)|**2 */
		/* report overflows. This should not happen.
*/ /* Comment this out if desired */ if (sk < -32768 || sk > 32767) printk(KERN_DEBUG "isdn_audio: dtmf goertzel overflow, sk=%d\n", sk); if (sk2 < -32768 || sk2 > 32767) printk(KERN_DEBUG "isdn_audio: dtmf goertzel overflow, sk2=%d\n", sk2); result[k] = ((sk * sk) >> AMP_BITS) - ((((cos2pik[k] * sk) >> 15) * sk2) >> AMP_BITS) + ((sk2 * sk2) >> AMP_BITS); } skb_queue_tail(&info->dtmf_queue, skb); isdn_timer_ctrl(ISDN_TIMER_MODEMREAD, 1); } void isdn_audio_eval_dtmf(modem_info *info) { struct sk_buff *skb; int *result; dtmf_state *s; int silence; int i; int di; int ch; int grp[2]; char what; char *p; int thresh; while ((skb = skb_dequeue(&info->dtmf_queue))) { result = (int *) skb->data; s = info->dtmf_state; grp[LOGRP] = grp[HIGRP] = -1; silence = 0; thresh = 0; for (i = 0; i < NCOEFF; i++) { if (result[i] > DTMF_TRESH) { if (result[i] > thresh) thresh = result[i]; } else if (result[i] < SILENCE_TRESH) silence++; } if (silence == NCOEFF) what = ' '; else { if (thresh > 0) { thresh = thresh >> 4; /* touchtones must match within 12 dB */ for (i = 0; i < NCOEFF; i++) { if (result[i] < thresh) continue; /* ignore */ /* good level found. This is allowed only one time per group */ if (i < NCOEFF / 2) { /* lowgroup*/ if (grp[LOGRP] >= 0) { // Bad. Another tone found. */ grp[LOGRP] = -1; break; } else grp[LOGRP] = i; } else { /* higroup */ if (grp[HIGRP] >= 0) { // Bad. Another tone found. */ grp[HIGRP] = -1; break; } else grp[HIGRP] = i - NCOEFF/2; } } if ((grp[LOGRP] >= 0) && (grp[HIGRP] >= 0)) { what = dtmf_matrix[grp[LOGRP]][grp[HIGRP]]; if (s->last != ' ' && s->last != '.') s->last = what; /* min. 
1 non-DTMF between DTMF */ } else what = '.'; } else what = '.'; } if ((what != s->last) && (what != ' ') && (what != '.')) { printk(KERN_DEBUG "dtmf: tt='%c'\n", what); p = skb->data; *p++ = 0x10; *p = what; skb_trim(skb, 2); ISDN_AUDIO_SKB_DLECOUNT(skb) = 0; ISDN_AUDIO_SKB_LOCK(skb) = 0; di = info->isdn_driver; ch = info->isdn_channel; __skb_queue_tail(&dev->drv[di]->rpqueue[ch], skb); dev->drv[di]->rcvcount[ch] += 2; /* Schedule dequeuing */ if ((dev->modempoll) && (info->rcvsched)) isdn_timer_ctrl(ISDN_TIMER_MODEMREAD, 1); wake_up_interruptible(&dev->drv[di]->rcv_waitq[ch]); } else kfree_skb(skb); s->last = what; } } /* * Decode DTMF tones, queue result in separate sk_buf for * later examination. * Parameters: * s = pointer to state-struct. * buf = input audio data * len = size of audio data. * fmt = audio data format (0 = ulaw, 1 = alaw) */ void isdn_audio_calc_dtmf(modem_info *info, unsigned char *buf, int len, int fmt) { dtmf_state *s = info->dtmf_state; int i; int c; while (len) { c = DTMF_NPOINTS - s->idx; if (c > len) c = len; if (c <= 0) break; for (i = 0; i < c; i++) { if (fmt) s->buf[s->idx++] = isdn_audio_alaw_to_s16[*buf++] >> (15 - AMP_BITS); else s->buf[s->idx++] = isdn_audio_ulaw_to_s16[*buf++] >> (15 - AMP_BITS); } if (s->idx == DTMF_NPOINTS) { isdn_audio_goertzel(s->buf, info); s->idx = 0; } len -= c; } } silence_state * isdn_audio_silence_init(silence_state *s) { if (!s) s = kmalloc(sizeof(silence_state), GFP_ATOMIC); if (s) { s->idx = 0; s->state = 0; } return s; } void isdn_audio_calc_silence(modem_info *info, unsigned char *buf, int len, int fmt) { silence_state *s = info->silence_state; int i; signed char c; if (!info->emu.vpar[1]) return; for (i = 0; i < len; i++) { if (fmt) c = isdn_audio_alaw_to_ulaw[*buf++]; else c = *buf++; if (c > 0) c -= 128; c = abs(c); if (c > (info->emu.vpar[1] * 4)) { s->idx = 0; s->state = 1; } else { if (s->idx < 210000) s->idx++; } } } void isdn_audio_put_dle_code(modem_info *info, u_char code) { struct 
sk_buff *skb; int di; int ch; char *p; skb = dev_alloc_skb(2); if (!skb) { printk(KERN_WARNING "isdn_audio: Could not alloc skb for ttyI%d\n", info->line); return; } p = (char *) skb_put(skb, 2); p[0] = 0x10; p[1] = code; ISDN_AUDIO_SKB_DLECOUNT(skb) = 0; ISDN_AUDIO_SKB_LOCK(skb) = 0; di = info->isdn_driver; ch = info->isdn_channel; __skb_queue_tail(&dev->drv[di]->rpqueue[ch], skb); dev->drv[di]->rcvcount[ch] += 2; /* Schedule dequeuing */ if ((dev->modempoll) && (info->rcvsched)) isdn_timer_ctrl(ISDN_TIMER_MODEMREAD, 1); wake_up_interruptible(&dev->drv[di]->rcv_waitq[ch]); } void isdn_audio_eval_silence(modem_info *info) { silence_state *s = info->silence_state; char what; what = ' '; if (s->idx > (info->emu.vpar[2] * 800)) { s->idx = 0; if (!s->state) { /* silence from beginning of rec */ what = 's'; } else { what = 'q'; } } if ((what == 's') || (what == 'q')) { printk(KERN_DEBUG "ttyI%d: %s\n", info->line, (what == 's') ? "silence" : "quiet"); isdn_audio_put_dle_code(info, what); } }
gpl-2.0
ceisendle/llcp-over-nci
sound/isa/gus/gus_main.c
9809
13283
/*
 *  Routines for Gravis UltraSound soundcards
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *
 *
 *   This program is free software; you can redistribute it and/or modify
 *   it under the terms of the GNU General Public License as published by
 *   the Free Software Foundation; either version 2 of the License, or
 *   (at your option) any later version.
 *
 *   This program is distributed in the hope that it will be useful,
 *   but WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *   GNU General Public License for more details.
 *
 *   You should have received a copy of the GNU General Public License
 *   along with this program; if not, write to the Free Software
 *   Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA
 *
 */

#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/ioport.h>
#include <linux/module.h>
#include <sound/core.h>
#include <sound/gus.h>
#include <sound/control.h>

#include <asm/dma.h>

MODULE_AUTHOR("Jaroslav Kysela <perex@perex.cz>");
MODULE_DESCRIPTION("Routines for Gravis UltraSound soundcards");
MODULE_LICENSE("GPL");

static int snd_gus_init_dma_irq(struct snd_gus_card * gus, int latches);

/* Grab a reference on the card's owning module; returns 1 on success,
 * 0 when the module is going away. */
int snd_gus_use_inc(struct snd_gus_card * gus)
{
	return try_module_get(gus->card->module) ? 1 : 0;
}

/* Release the module reference taken by snd_gus_use_inc(). */
void snd_gus_use_dec(struct snd_gus_card * gus)
{
	module_put(gus->card->module);
}

/* Joystick speed control: a single integer element in the range 0..31. */
static int snd_gus_joystick_info(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_info *uinfo)
{
	uinfo->type = SNDRV_CTL_ELEM_TYPE_INTEGER;
	uinfo->value.integer.min = 0;
	uinfo->value.integer.max = 31;
	uinfo->count = 1;
	return 0;
}

/* Report the current joystick DAC level (low five bits only). */
static int snd_gus_joystick_get(struct snd_kcontrol *kcontrol, struct snd_ctl_elem_value *ucontrol)
{
	struct snd_gus_card *gus = snd_kcontrol_chip(kcontrol);
	long level = gus->joystick_dac & 31;

	ucontrol->value.integer.value[0] = level;
	return 0;
}

static int snd_gus_joystick_put(struct snd_kcontrol *kcontrol, struct
snd_ctl_elem_value *ucontrol) { struct snd_gus_card *gus = snd_kcontrol_chip(kcontrol); unsigned long flags; int change; unsigned char nval; nval = ucontrol->value.integer.value[0] & 31; spin_lock_irqsave(&gus->reg_lock, flags); change = gus->joystick_dac != nval; gus->joystick_dac = nval; snd_gf1_write8(gus, SNDRV_GF1_GB_JOYSTICK_DAC_LEVEL, gus->joystick_dac); spin_unlock_irqrestore(&gus->reg_lock, flags); return change; } static struct snd_kcontrol_new snd_gus_joystick_control = { .iface = SNDRV_CTL_ELEM_IFACE_CARD, .name = "Joystick Speed", .info = snd_gus_joystick_info, .get = snd_gus_joystick_get, .put = snd_gus_joystick_put }; static void snd_gus_init_control(struct snd_gus_card *gus) { if (!gus->ace_flag) snd_ctl_add(gus->card, snd_ctl_new1(&snd_gus_joystick_control, gus)); } /* * */ static int snd_gus_free(struct snd_gus_card *gus) { if (gus->gf1.res_port2 == NULL) goto __hw_end; snd_gf1_stop(gus); snd_gus_init_dma_irq(gus, 0); __hw_end: release_and_free_resource(gus->gf1.res_port1); release_and_free_resource(gus->gf1.res_port2); if (gus->gf1.irq >= 0) free_irq(gus->gf1.irq, (void *) gus); if (gus->gf1.dma1 >= 0) { disable_dma(gus->gf1.dma1); free_dma(gus->gf1.dma1); } if (!gus->equal_dma && gus->gf1.dma2 >= 0) { disable_dma(gus->gf1.dma2); free_dma(gus->gf1.dma2); } kfree(gus); return 0; } static int snd_gus_dev_free(struct snd_device *device) { struct snd_gus_card *gus = device->device_data; return snd_gus_free(gus); } int snd_gus_create(struct snd_card *card, unsigned long port, int irq, int dma1, int dma2, int timer_dev, int voices, int pcm_channels, int effect, struct snd_gus_card **rgus) { struct snd_gus_card *gus; int err; static struct snd_device_ops ops = { .dev_free = snd_gus_dev_free, }; *rgus = NULL; gus = kzalloc(sizeof(*gus), GFP_KERNEL); if (gus == NULL) return -ENOMEM; spin_lock_init(&gus->reg_lock); spin_lock_init(&gus->voice_alloc); spin_lock_init(&gus->active_voice_lock); spin_lock_init(&gus->event_lock); spin_lock_init(&gus->dma_lock); 
spin_lock_init(&gus->pcm_volume_level_lock); spin_lock_init(&gus->uart_cmd_lock); mutex_init(&gus->dma_mutex); gus->gf1.irq = -1; gus->gf1.dma1 = -1; gus->gf1.dma2 = -1; gus->card = card; gus->gf1.port = port; /* fill register variables for speedup */ gus->gf1.reg_page = GUSP(gus, GF1PAGE); gus->gf1.reg_regsel = GUSP(gus, GF1REGSEL); gus->gf1.reg_data8 = GUSP(gus, GF1DATAHIGH); gus->gf1.reg_data16 = GUSP(gus, GF1DATALOW); gus->gf1.reg_irqstat = GUSP(gus, IRQSTAT); gus->gf1.reg_dram = GUSP(gus, DRAM); gus->gf1.reg_timerctrl = GUSP(gus, TIMERCNTRL); gus->gf1.reg_timerdata = GUSP(gus, TIMERDATA); /* allocate resources */ if ((gus->gf1.res_port1 = request_region(port, 16, "GUS GF1 (Adlib/SB)")) == NULL) { snd_printk(KERN_ERR "gus: can't grab SB port 0x%lx\n", port); snd_gus_free(gus); return -EBUSY; } if ((gus->gf1.res_port2 = request_region(port + 0x100, 12, "GUS GF1 (Synth)")) == NULL) { snd_printk(KERN_ERR "gus: can't grab synth port 0x%lx\n", port + 0x100); snd_gus_free(gus); return -EBUSY; } if (irq >= 0 && request_irq(irq, snd_gus_interrupt, 0, "GUS GF1", (void *) gus)) { snd_printk(KERN_ERR "gus: can't grab irq %d\n", irq); snd_gus_free(gus); return -EBUSY; } gus->gf1.irq = irq; if (request_dma(dma1, "GUS - 1")) { snd_printk(KERN_ERR "gus: can't grab DMA1 %d\n", dma1); snd_gus_free(gus); return -EBUSY; } gus->gf1.dma1 = dma1; if (dma2 >= 0 && dma1 != dma2) { if (request_dma(dma2, "GUS - 2")) { snd_printk(KERN_ERR "gus: can't grab DMA2 %d\n", dma2); snd_gus_free(gus); return -EBUSY; } gus->gf1.dma2 = dma2; } else { gus->gf1.dma2 = gus->gf1.dma1; gus->equal_dma = 1; } gus->timer_dev = timer_dev; if (voices < 14) voices = 14; if (voices > 32) voices = 32; if (pcm_channels < 0) pcm_channels = 0; if (pcm_channels > 8) pcm_channels = 8; pcm_channels++; pcm_channels &= ~1; gus->gf1.effect = effect ? 
1 : 0; gus->gf1.active_voices = voices; gus->gf1.pcm_channels = pcm_channels; gus->gf1.volume_ramp = 25; gus->gf1.smooth_pan = 1; if ((err = snd_device_new(card, SNDRV_DEV_LOWLEVEL, gus, &ops)) < 0) { snd_gus_free(gus); return err; } *rgus = gus; return 0; } /* * Memory detection routine for plain GF1 soundcards */ static int snd_gus_detect_memory(struct snd_gus_card * gus) { int l, idx, local; unsigned char d; snd_gf1_poke(gus, 0L, 0xaa); snd_gf1_poke(gus, 1L, 0x55); if (snd_gf1_peek(gus, 0L) != 0xaa || snd_gf1_peek(gus, 1L) != 0x55) { snd_printk(KERN_ERR "plain GF1 card at 0x%lx without onboard DRAM?\n", gus->gf1.port); return -ENOMEM; } for (idx = 1, d = 0xab; idx < 4; idx++, d++) { local = idx << 18; snd_gf1_poke(gus, local, d); snd_gf1_poke(gus, local + 1, d + 1); if (snd_gf1_peek(gus, local) != d || snd_gf1_peek(gus, local + 1) != d + 1 || snd_gf1_peek(gus, 0L) != 0xaa) break; } #if 1 gus->gf1.memory = idx << 18; #else gus->gf1.memory = 256 * 1024; #endif for (l = 0, local = gus->gf1.memory; l < 4; l++, local -= 256 * 1024) { gus->gf1.mem_alloc.banks_8[l].address = gus->gf1.mem_alloc.banks_8[l].size = 0; gus->gf1.mem_alloc.banks_16[l].address = l << 18; gus->gf1.mem_alloc.banks_16[l].size = local > 0 ? 
256 * 1024 : 0; } gus->gf1.mem_alloc.banks_8[0].size = gus->gf1.memory; return 0; /* some memory were detected */ } static int snd_gus_init_dma_irq(struct snd_gus_card * gus, int latches) { struct snd_card *card; unsigned long flags; int irq, dma1, dma2; static unsigned char irqs[16] = {0, 0, 1, 3, 0, 2, 0, 4, 0, 1, 0, 5, 6, 0, 0, 7}; static unsigned char dmas[8] = {6, 1, 0, 2, 0, 3, 4, 5}; if (snd_BUG_ON(!gus)) return -EINVAL; card = gus->card; if (snd_BUG_ON(!card)) return -EINVAL; gus->mix_cntrl_reg &= 0xf8; gus->mix_cntrl_reg |= 0x01; /* disable MIC, LINE IN, enable LINE OUT */ if (gus->codec_flag || gus->ess_flag) { gus->mix_cntrl_reg &= ~1; /* enable LINE IN */ gus->mix_cntrl_reg |= 4; /* enable MIC */ } dma1 = gus->gf1.dma1; dma1 = abs(dma1); dma1 = dmas[dma1 & 7]; dma2 = gus->gf1.dma2; dma2 = abs(dma2); dma2 = dmas[dma2 & 7]; dma1 |= gus->equal_dma ? 0x40 : (dma2 << 3); if ((dma1 & 7) == 0 || (dma2 & 7) == 0) { snd_printk(KERN_ERR "Error! DMA isn't defined.\n"); return -EINVAL; } irq = gus->gf1.irq; irq = abs(irq); irq = irqs[irq & 0x0f]; if (irq == 0) { snd_printk(KERN_ERR "Error! 
IRQ isn't defined.\n"); return -EINVAL; } irq |= 0x40; #if 0 card->mixer.mix_ctrl_reg |= 0x10; #endif spin_lock_irqsave(&gus->reg_lock, flags); outb(5, GUSP(gus, REGCNTRLS)); outb(gus->mix_cntrl_reg, GUSP(gus, MIXCNTRLREG)); outb(0x00, GUSP(gus, IRQDMACNTRLREG)); outb(0, GUSP(gus, REGCNTRLS)); spin_unlock_irqrestore(&gus->reg_lock, flags); udelay(100); spin_lock_irqsave(&gus->reg_lock, flags); outb(0x00 | gus->mix_cntrl_reg, GUSP(gus, MIXCNTRLREG)); outb(dma1, GUSP(gus, IRQDMACNTRLREG)); if (latches) { outb(0x40 | gus->mix_cntrl_reg, GUSP(gus, MIXCNTRLREG)); outb(irq, GUSP(gus, IRQDMACNTRLREG)); } spin_unlock_irqrestore(&gus->reg_lock, flags); udelay(100); spin_lock_irqsave(&gus->reg_lock, flags); outb(0x00 | gus->mix_cntrl_reg, GUSP(gus, MIXCNTRLREG)); outb(dma1, GUSP(gus, IRQDMACNTRLREG)); if (latches) { outb(0x40 | gus->mix_cntrl_reg, GUSP(gus, MIXCNTRLREG)); outb(irq, GUSP(gus, IRQDMACNTRLREG)); } spin_unlock_irqrestore(&gus->reg_lock, flags); snd_gf1_delay(gus); if (latches) gus->mix_cntrl_reg |= 0x08; /* enable latches */ else gus->mix_cntrl_reg &= ~0x08; /* disable latches */ spin_lock_irqsave(&gus->reg_lock, flags); outb(gus->mix_cntrl_reg, GUSP(gus, MIXCNTRLREG)); outb(0, GUSP(gus, GF1PAGE)); spin_unlock_irqrestore(&gus->reg_lock, flags); return 0; } static int snd_gus_check_version(struct snd_gus_card * gus) { unsigned long flags; unsigned char val, rev; struct snd_card *card; card = gus->card; spin_lock_irqsave(&gus->reg_lock, flags); outb(0x20, GUSP(gus, REGCNTRLS)); val = inb(GUSP(gus, REGCNTRLS)); rev = inb(GUSP(gus, BOARDVERSION)); spin_unlock_irqrestore(&gus->reg_lock, flags); snd_printdd("GF1 [0x%lx] init - val = 0x%x, rev = 0x%x\n", gus->gf1.port, val, rev); strcpy(card->driver, "GUS"); strcpy(card->longname, "Gravis UltraSound Classic (2.4)"); if ((val != 255 && (val & 0x06)) || (rev >= 5 && rev != 255)) { if (rev >= 5 && rev <= 9) { gus->ics_flag = 1; if (rev == 5) gus->ics_flipped = 1; card->longname[27] = '3'; card->longname[29] = rev == 5 ? 
'5' : '7'; } if (rev >= 10 && rev != 255) { if (rev >= 10 && rev <= 11) { strcpy(card->driver, "GUS MAX"); strcpy(card->longname, "Gravis UltraSound MAX"); gus->max_flag = 1; } else if (rev == 0x30) { strcpy(card->driver, "GUS ACE"); strcpy(card->longname, "Gravis UltraSound Ace"); gus->ace_flag = 1; } else if (rev == 0x50) { strcpy(card->driver, "GUS Extreme"); strcpy(card->longname, "Gravis UltraSound Extreme"); gus->ess_flag = 1; } else { snd_printk(KERN_ERR "unknown GF1 revision number at 0x%lx - 0x%x (0x%x)\n", gus->gf1.port, rev, val); snd_printk(KERN_ERR " please - report to <perex@perex.cz>\n"); } } } strcpy(card->shortname, card->longname); gus->uart_enable = 1; /* standard GUSes doesn't have midi uart trouble */ snd_gus_init_control(gus); return 0; } int snd_gus_initialize(struct snd_gus_card *gus) { int err; if (!gus->interwave) { if ((err = snd_gus_check_version(gus)) < 0) { snd_printk(KERN_ERR "version check failed\n"); return err; } if ((err = snd_gus_detect_memory(gus)) < 0) return err; } if ((err = snd_gus_init_dma_irq(gus, 1)) < 0) return err; snd_gf1_start(gus); gus->initialized = 1; return 0; } /* gus_io.c */ EXPORT_SYMBOL(snd_gf1_delay); EXPORT_SYMBOL(snd_gf1_write8); EXPORT_SYMBOL(snd_gf1_look8); EXPORT_SYMBOL(snd_gf1_write16); EXPORT_SYMBOL(snd_gf1_look16); EXPORT_SYMBOL(snd_gf1_i_write8); EXPORT_SYMBOL(snd_gf1_i_look8); EXPORT_SYMBOL(snd_gf1_i_look16); EXPORT_SYMBOL(snd_gf1_dram_addr); EXPORT_SYMBOL(snd_gf1_write_addr); EXPORT_SYMBOL(snd_gf1_poke); EXPORT_SYMBOL(snd_gf1_peek); /* gus_reset.c */ EXPORT_SYMBOL(snd_gf1_alloc_voice); EXPORT_SYMBOL(snd_gf1_free_voice); EXPORT_SYMBOL(snd_gf1_ctrl_stop); EXPORT_SYMBOL(snd_gf1_stop_voice); /* gus_mixer.c */ EXPORT_SYMBOL(snd_gf1_new_mixer); /* gus_pcm.c */ EXPORT_SYMBOL(snd_gf1_pcm_new); /* gus.c */ EXPORT_SYMBOL(snd_gus_use_inc); EXPORT_SYMBOL(snd_gus_use_dec); EXPORT_SYMBOL(snd_gus_create); EXPORT_SYMBOL(snd_gus_initialize); /* gus_irq.c */ EXPORT_SYMBOL(snd_gus_interrupt); /* gus_uart.c */ 
EXPORT_SYMBOL(snd_gf1_rawmidi_new); /* gus_dram.c */ EXPORT_SYMBOL(snd_gus_dram_write); EXPORT_SYMBOL(snd_gus_dram_read); /* gus_volume.c */ EXPORT_SYMBOL(snd_gf1_lvol_to_gvol_raw); EXPORT_SYMBOL(snd_gf1_translate_freq); /* gus_mem.c */ EXPORT_SYMBOL(snd_gf1_mem_alloc); EXPORT_SYMBOL(snd_gf1_mem_xfree); EXPORT_SYMBOL(snd_gf1_mem_free); EXPORT_SYMBOL(snd_gf1_mem_lock); /* * INIT part */ static int __init alsa_gus_init(void) { return 0; } static void __exit alsa_gus_exit(void) { } module_init(alsa_gus_init) module_exit(alsa_gus_exit)
gpl-2.0
kyleterry/linux
sound/isa/sb/emu8000_patch.c
13137
7566
/* * Patch routines for the emu8000 (AWE32/64) * * Copyright (C) 1999 Steve Ratcliffe * Copyright (C) 1999-2000 Takashi Iwai <tiwai@suse.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #include "emu8000_local.h" #include <asm/uaccess.h> #include <linux/moduleparam.h> static int emu8000_reset_addr; module_param(emu8000_reset_addr, int, 0444); MODULE_PARM_DESC(emu8000_reset_addr, "reset write address at each time (makes slowdown)"); /* * Open up channels. */ static int snd_emu8000_open_dma(struct snd_emu8000 *emu, int write) { int i; /* reserve all 30 voices for loading */ for (i = 0; i < EMU8000_DRAM_VOICES; i++) { snd_emux_lock_voice(emu->emu, i); snd_emu8000_dma_chan(emu, i, write); } /* assign voice 31 and 32 to ROM */ EMU8000_VTFT_WRITE(emu, 30, 0); EMU8000_PSST_WRITE(emu, 30, 0x1d8); EMU8000_CSL_WRITE(emu, 30, 0x1e0); EMU8000_CCCA_WRITE(emu, 30, 0x1d8); EMU8000_VTFT_WRITE(emu, 31, 0); EMU8000_PSST_WRITE(emu, 31, 0x1d8); EMU8000_CSL_WRITE(emu, 31, 0x1e0); EMU8000_CCCA_WRITE(emu, 31, 0x1d8); return 0; } /* * Close all dram channels. 
*/ static void snd_emu8000_close_dma(struct snd_emu8000 *emu) { int i; for (i = 0; i < EMU8000_DRAM_VOICES; i++) { snd_emu8000_dma_chan(emu, i, EMU8000_RAM_CLOSE); snd_emux_unlock_voice(emu->emu, i); } } /* */ #define BLANK_LOOP_START 4 #define BLANK_LOOP_END 8 #define BLANK_LOOP_SIZE 12 #define BLANK_HEAD_SIZE 48 /* * Read a word from userland, taking care of conversions from * 8bit samples etc. */ static unsigned short read_word(const void __user *buf, int offset, int mode) { unsigned short c; if (mode & SNDRV_SFNT_SAMPLE_8BITS) { unsigned char cc; get_user(cc, (unsigned char __user *)buf + offset); c = cc << 8; /* convert 8bit -> 16bit */ } else { #ifdef SNDRV_LITTLE_ENDIAN get_user(c, (unsigned short __user *)buf + offset); #else unsigned short cc; get_user(cc, (unsigned short __user *)buf + offset); c = swab16(cc); #endif } if (mode & SNDRV_SFNT_SAMPLE_UNSIGNED) c ^= 0x8000; /* unsigned -> signed */ return c; } /* */ static void snd_emu8000_write_wait(struct snd_emu8000 *emu) { while ((EMU8000_SMALW_READ(emu) & 0x80000000) != 0) { schedule_timeout_interruptible(1); if (signal_pending(current)) break; } } /* * write sample word data * * You should not have to keep resetting the address each time * as the chip is supposed to step on the next address automatically. * It mostly does, but during writes of some samples at random it * completely loses words (every one in 16 roughly but with no * obvious pattern). * * This is therefore much slower than need be, but is at least * working. */ static inline void write_word(struct snd_emu8000 *emu, int *offset, unsigned short data) { if (emu8000_reset_addr) { if (emu8000_reset_addr > 1) snd_emu8000_write_wait(emu); EMU8000_SMALW_WRITE(emu, *offset); } EMU8000_SMLD_WRITE(emu, data); *offset += 1; } /* * Write the sample to EMU800 memory. This routine is invoked out of * the generic soundfont routines as a callback. 
*/ int snd_emu8000_sample_new(struct snd_emux *rec, struct snd_sf_sample *sp, struct snd_util_memhdr *hdr, const void __user *data, long count) { int i; int rc; int offset; int truesize; int dram_offset, dram_start; struct snd_emu8000 *emu; emu = rec->hw; if (snd_BUG_ON(!sp)) return -EINVAL; if (sp->v.size == 0) return 0; /* be sure loop points start < end */ if (sp->v.loopstart > sp->v.loopend) { int tmp = sp->v.loopstart; sp->v.loopstart = sp->v.loopend; sp->v.loopend = tmp; } /* compute true data size to be loaded */ truesize = sp->v.size; if (sp->v.mode_flags & (SNDRV_SFNT_SAMPLE_BIDIR_LOOP|SNDRV_SFNT_SAMPLE_REVERSE_LOOP)) truesize += sp->v.loopend - sp->v.loopstart; if (sp->v.mode_flags & SNDRV_SFNT_SAMPLE_NO_BLANK) truesize += BLANK_LOOP_SIZE; sp->block = snd_util_mem_alloc(hdr, truesize * 2); if (sp->block == NULL) { /*snd_printd("EMU8000: out of memory\n");*/ /* not ENOMEM (for compatibility) */ return -ENOSPC; } if (sp->v.mode_flags & SNDRV_SFNT_SAMPLE_8BITS) { if (!access_ok(VERIFY_READ, data, sp->v.size)) return -EFAULT; } else { if (!access_ok(VERIFY_READ, data, sp->v.size * 2)) return -EFAULT; } /* recalculate address offset */ sp->v.end -= sp->v.start; sp->v.loopstart -= sp->v.start; sp->v.loopend -= sp->v.start; sp->v.start = 0; /* dram position (in word) -- mem_offset is byte */ dram_offset = EMU8000_DRAM_OFFSET + (sp->block->offset >> 1); dram_start = dram_offset; /* set the total size (store onto obsolete checksum value) */ sp->v.truesize = truesize * 2; /* in bytes */ snd_emux_terminate_all(emu->emu); if ((rc = snd_emu8000_open_dma(emu, EMU8000_RAM_WRITE)) != 0) return rc; /* Set the address to start writing at */ snd_emu8000_write_wait(emu); EMU8000_SMALW_WRITE(emu, dram_offset); /*snd_emu8000_init_fm(emu);*/ #if 0 /* first block - write 48 samples for silence */ if (! 
sp->block->offset) { for (i = 0; i < BLANK_HEAD_SIZE; i++) { write_word(emu, &dram_offset, 0); } } #endif offset = 0; for (i = 0; i < sp->v.size; i++) { unsigned short s; s = read_word(data, offset, sp->v.mode_flags); offset++; write_word(emu, &dram_offset, s); /* we may take too long time in this loop. * so give controls back to kernel if needed. */ cond_resched(); if (i == sp->v.loopend && (sp->v.mode_flags & (SNDRV_SFNT_SAMPLE_BIDIR_LOOP|SNDRV_SFNT_SAMPLE_REVERSE_LOOP))) { int looplen = sp->v.loopend - sp->v.loopstart; int k; /* copy reverse loop */ for (k = 1; k <= looplen; k++) { s = read_word(data, offset - k, sp->v.mode_flags); write_word(emu, &dram_offset, s); } if (sp->v.mode_flags & SNDRV_SFNT_SAMPLE_BIDIR_LOOP) { sp->v.loopend += looplen; } else { sp->v.loopstart += looplen; sp->v.loopend += looplen; } sp->v.end += looplen; } } /* if no blank loop is attached in the sample, add it */ if (sp->v.mode_flags & SNDRV_SFNT_SAMPLE_NO_BLANK) { for (i = 0; i < BLANK_LOOP_SIZE; i++) { write_word(emu, &dram_offset, 0); } if (sp->v.mode_flags & SNDRV_SFNT_SAMPLE_SINGLESHOT) { sp->v.loopstart = sp->v.end + BLANK_LOOP_START; sp->v.loopend = sp->v.end + BLANK_LOOP_END; } } /* add dram offset */ sp->v.start += dram_start; sp->v.end += dram_start; sp->v.loopstart += dram_start; sp->v.loopend += dram_start; snd_emu8000_close_dma(emu); snd_emu8000_init_fm(emu); return 0; } /* * free a sample block */ int snd_emu8000_sample_free(struct snd_emux *rec, struct snd_sf_sample *sp, struct snd_util_memhdr *hdr) { if (sp->block) { snd_util_mem_free(hdr, sp->block); sp->block = NULL; } return 0; } /* * sample_reset callback - terminate voices */ void snd_emu8000_sample_reset(struct snd_emux *rec) { snd_emux_terminate_all(rec); }
gpl-2.0
jderrick/linux-blkdev
drivers/media/platform/soc_camera/sh_mobile_ceu_camera.c
82
56333
/* * V4L2 Driver for SuperH Mobile CEU interface * * Copyright (C) 2008 Magnus Damm * * Based on V4L2 Driver for PXA camera host - "pxa_camera.c", * * Copyright (C) 2006, Sascha Hauer, Pengutronix * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. */ #include <linux/init.h> #include <linux/module.h> #include <linux/io.h> #include <linux/completion.h> #include <linux/delay.h> #include <linux/dma-mapping.h> #include <linux/err.h> #include <linux/errno.h> #include <linux/fs.h> #include <linux/interrupt.h> #include <linux/kernel.h> #include <linux/mm.h> #include <linux/moduleparam.h> #include <linux/of.h> #include <linux/time.h> #include <linux/slab.h> #include <linux/device.h> #include <linux/platform_device.h> #include <linux/videodev2.h> #include <linux/pm_runtime.h> #include <linux/sched.h> #include <media/v4l2-async.h> #include <media/v4l2-common.h> #include <media/v4l2-dev.h> #include <media/soc_camera.h> #include <media/drv-intf/sh_mobile_ceu.h> #include <media/drv-intf/sh_mobile_csi2.h> #include <media/videobuf2-dma-contig.h> #include <media/v4l2-mediabus.h> #include <media/drv-intf/soc_mediabus.h> #include "soc_scale_crop.h" /* register offsets for sh7722 / sh7723 */ #define CAPSR 0x00 /* Capture start register */ #define CAPCR 0x04 /* Capture control register */ #define CAMCR 0x08 /* Capture interface control register */ #define CMCYR 0x0c /* Capture interface cycle register */ #define CAMOR 0x10 /* Capture interface offset register */ #define CAPWR 0x14 /* Capture interface width register */ #define CAIFR 0x18 /* Capture interface input format register */ #define CSTCR 0x20 /* Camera strobe control register (<= sh7722) */ #define CSECR 0x24 /* Camera strobe emission count register (<= sh7722) */ 
#define CRCNTR 0x28 /* CEU register control register */ #define CRCMPR 0x2c /* CEU register forcible control register */ #define CFLCR 0x30 /* Capture filter control register */ #define CFSZR 0x34 /* Capture filter size clip register */ #define CDWDR 0x38 /* Capture destination width register */ #define CDAYR 0x3c /* Capture data address Y register */ #define CDACR 0x40 /* Capture data address C register */ #define CDBYR 0x44 /* Capture data bottom-field address Y register */ #define CDBCR 0x48 /* Capture data bottom-field address C register */ #define CBDSR 0x4c /* Capture bundle destination size register */ #define CFWCR 0x5c /* Firewall operation control register */ #define CLFCR 0x60 /* Capture low-pass filter control register */ #define CDOCR 0x64 /* Capture data output control register */ #define CDDCR 0x68 /* Capture data complexity level register */ #define CDDAR 0x6c /* Capture data complexity level address register */ #define CEIER 0x70 /* Capture event interrupt enable register */ #define CETCR 0x74 /* Capture event flag clear register */ #define CSTSR 0x7c /* Capture status register */ #define CSRTR 0x80 /* Capture software reset register */ #define CDSSR 0x84 /* Capture data size register */ #define CDAYR2 0x90 /* Capture data address Y register 2 */ #define CDACR2 0x94 /* Capture data address C register 2 */ #define CDBYR2 0x98 /* Capture data bottom-field address Y register 2 */ #define CDBCR2 0x9c /* Capture data bottom-field address C register 2 */ #undef DEBUG_GEOMETRY #ifdef DEBUG_GEOMETRY #define dev_geo dev_info #else #define dev_geo dev_dbg #endif /* per video frame buffer */ struct sh_mobile_ceu_buffer { struct vb2_v4l2_buffer vb; /* v4l buffer must be first */ struct list_head queue; }; struct sh_mobile_ceu_dev { struct soc_camera_host ici; /* Asynchronous CSI2 linking */ struct v4l2_async_subdev *csi2_asd; struct v4l2_subdev *csi2_sd; /* Synchronous probing compatibility */ struct platform_device *csi2_pdev; unsigned int irq; void __iomem 
*base; size_t video_limit; size_t buf_total; spinlock_t lock; /* Protects video buffer lists */ struct list_head capture; struct vb2_v4l2_buffer *active; struct vb2_alloc_ctx *alloc_ctx; struct sh_mobile_ceu_info *pdata; struct completion complete; u32 cflcr; /* static max sizes either from platform data or default */ int max_width; int max_height; enum v4l2_field field; int sequence; unsigned long flags; unsigned int image_mode:1; unsigned int is_16bit:1; unsigned int frozen:1; }; struct sh_mobile_ceu_cam { /* CEU offsets within the camera output, before the CEU scaler */ unsigned int ceu_left; unsigned int ceu_top; /* Client output, as seen by the CEU */ unsigned int width; unsigned int height; /* * User window from S_CROP / G_CROP, produced by client cropping and * scaling, CEU scaling and CEU cropping, mapped back onto the client * input window */ struct v4l2_rect subrect; /* Camera cropping rectangle */ struct v4l2_rect rect; const struct soc_mbus_pixelfmt *extra_fmt; u32 code; }; static struct sh_mobile_ceu_buffer *to_ceu_vb(struct vb2_v4l2_buffer *vbuf) { return container_of(vbuf, struct sh_mobile_ceu_buffer, vb); } static void ceu_write(struct sh_mobile_ceu_dev *priv, unsigned long reg_offs, u32 data) { iowrite32(data, priv->base + reg_offs); } static u32 ceu_read(struct sh_mobile_ceu_dev *priv, unsigned long reg_offs) { return ioread32(priv->base + reg_offs); } static int sh_mobile_ceu_soft_reset(struct sh_mobile_ceu_dev *pcdev) { int i, success = 0; ceu_write(pcdev, CAPSR, 1 << 16); /* reset */ /* wait CSTSR.CPTON bit */ for (i = 0; i < 1000; i++) { if (!(ceu_read(pcdev, CSTSR) & 1)) { success++; break; } udelay(1); } /* wait CAPSR.CPKIL bit */ for (i = 0; i < 1000; i++) { if (!(ceu_read(pcdev, CAPSR) & (1 << 16))) { success++; break; } udelay(1); } if (2 != success) { dev_warn(pcdev->ici.v4l2_dev.dev, "soft reset time out\n"); return -EIO; } return 0; } /* * Videobuf operations */ /* * .queue_setup() is called to check, whether the driver can accept the 
* requested number of buffers and to fill in plane sizes * for the current frame format if required */ static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq, unsigned int *count, unsigned int *num_planes, unsigned int sizes[], void *alloc_ctxs[]) { struct soc_camera_device *icd = soc_camera_from_vb2q(vq); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; alloc_ctxs[0] = pcdev->alloc_ctx; if (!vq->num_buffers) pcdev->sequence = 0; if (!*count) *count = 2; /* Called from VIDIOC_REQBUFS or in compatibility mode */ if (!*num_planes) sizes[0] = icd->sizeimage; else if (sizes[0] < icd->sizeimage) return -EINVAL; /* If *num_planes != 0, we have already verified *count. */ if (pcdev->video_limit) { size_t size = PAGE_ALIGN(sizes[0]) * *count; if (size + pcdev->buf_total > pcdev->video_limit) *count = (pcdev->video_limit - pcdev->buf_total) / PAGE_ALIGN(sizes[0]); } *num_planes = 1; dev_dbg(icd->parent, "count=%d, size=%u\n", *count, sizes[0]); return 0; } #define CEU_CETCR_MAGIC 0x0317f313 /* acknowledge magical interrupt sources */ #define CEU_CETCR_IGRW (1 << 4) /* prohibited register access interrupt bit */ #define CEU_CEIER_CPEIE (1 << 0) /* one-frame capture end interrupt */ #define CEU_CEIER_VBP (1 << 20) /* vbp error */ #define CEU_CAPCR_CTNCP (1 << 16) /* continuous capture mode (if set) */ #define CEU_CEIER_MASK (CEU_CEIER_CPEIE | CEU_CEIER_VBP) /* * return value doesn't reflex the success/failure to queue the new buffer, * but rather the status of the previous buffer. */ static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev) { struct soc_camera_device *icd = pcdev->ici.icd; dma_addr_t phys_addr_top, phys_addr_bottom; unsigned long top1, top2; unsigned long bottom1, bottom2; u32 status; bool planar; int ret = 0; /* * The hardware is _very_ picky about this sequence. Especially * the CEU_CETCR_MAGIC value. 
It seems like we need to acknowledge * several not-so-well documented interrupt sources in CETCR. */ ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) & ~CEU_CEIER_MASK); status = ceu_read(pcdev, CETCR); ceu_write(pcdev, CETCR, ~status & CEU_CETCR_MAGIC); if (!pcdev->frozen) ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) | CEU_CEIER_MASK); ceu_write(pcdev, CAPCR, ceu_read(pcdev, CAPCR) & ~CEU_CAPCR_CTNCP); ceu_write(pcdev, CETCR, CEU_CETCR_MAGIC ^ CEU_CETCR_IGRW); /* * When a VBP interrupt occurs, a capture end interrupt does not occur * and the image of that frame is not captured correctly. So, soft reset * is needed here. */ if (status & CEU_CEIER_VBP) { sh_mobile_ceu_soft_reset(pcdev); ret = -EIO; } if (pcdev->frozen) { complete(&pcdev->complete); return ret; } if (!pcdev->active) return ret; if (V4L2_FIELD_INTERLACED_BT == pcdev->field) { top1 = CDBYR; top2 = CDBCR; bottom1 = CDAYR; bottom2 = CDACR; } else { top1 = CDAYR; top2 = CDACR; bottom1 = CDBYR; bottom2 = CDBCR; } phys_addr_top = vb2_dma_contig_plane_dma_addr(&pcdev->active->vb2_buf, 0); switch (icd->current_fmt->host_fmt->fourcc) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: planar = true; break; default: planar = false; } ceu_write(pcdev, top1, phys_addr_top); if (V4L2_FIELD_NONE != pcdev->field) { phys_addr_bottom = phys_addr_top + icd->bytesperline; ceu_write(pcdev, bottom1, phys_addr_bottom); } if (planar) { phys_addr_top += icd->bytesperline * icd->user_height; ceu_write(pcdev, top2, phys_addr_top); if (V4L2_FIELD_NONE != pcdev->field) { phys_addr_bottom = phys_addr_top + icd->bytesperline; ceu_write(pcdev, bottom2, phys_addr_bottom); } } ceu_write(pcdev, CAPSR, 0x1); /* start capture */ return ret; } static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf); /* Added list head initialization on alloc */ WARN(!list_empty(&buf->queue), 
"Buffer %p on queue!\n", vb); return 0; } static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf); unsigned long size; size = icd->sizeimage; if (vb2_plane_size(vb, 0) < size) { dev_err(icd->parent, "Buffer #%d too small (%lu < %lu)\n", vb->index, vb2_plane_size(vb, 0), size); goto error; } vb2_set_plane_payload(vb, 0, size); dev_dbg(icd->parent, "%s (vb=0x%p) 0x%p %lu\n", __func__, vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0)); #ifdef DEBUG /* * This can be useful if you want to see if we actually fill * the buffer with something */ if (vb2_plane_vaddr(vb, 0)) memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0)); #endif spin_lock_irq(&pcdev->lock); list_add_tail(&buf->queue, &pcdev->capture); if (!pcdev->active) { /* * Because there were no active buffer at this moment, * we are not interested in the return value of * sh_mobile_ceu_capture here. 
*/ pcdev->active = vbuf; sh_mobile_ceu_capture(pcdev); } spin_unlock_irq(&pcdev->lock); return; error: vb2_buffer_done(vb, VB2_BUF_STATE_ERROR); } static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vbuf); struct sh_mobile_ceu_dev *pcdev = ici->priv; spin_lock_irq(&pcdev->lock); if (pcdev->active == vbuf) { /* disable capture (release DMA buffer), reset */ ceu_write(pcdev, CAPSR, 1 << 16); pcdev->active = NULL; } /* * Doesn't hurt also if the list is empty, but it hurts, if queuing the * buffer failed, and .buf_init() hasn't been called */ if (buf->queue.next) list_del_init(&buf->queue); pcdev->buf_total -= PAGE_ALIGN(vb2_plane_size(vb, 0)); dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__, pcdev->buf_total); spin_unlock_irq(&pcdev->lock); } static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb) { struct vb2_v4l2_buffer *vbuf = to_vb2_v4l2_buffer(vb); struct soc_camera_device *icd = soc_camera_from_vb2q(vb->vb2_queue); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; pcdev->buf_total += PAGE_ALIGN(vb2_plane_size(vb, 0)); dev_dbg(icd->parent, "%s() %zu bytes buffers\n", __func__, pcdev->buf_total); /* This is for locking debugging only */ INIT_LIST_HEAD(&to_ceu_vb(vbuf)->queue); return 0; } static void sh_mobile_ceu_stop_streaming(struct vb2_queue *q) { struct soc_camera_device *icd = soc_camera_from_vb2q(q); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct list_head *buf_head, *tmp; spin_lock_irq(&pcdev->lock); pcdev->active = NULL; list_for_each_safe(buf_head, tmp, &pcdev->capture) list_del_init(buf_head); spin_unlock_irq(&pcdev->lock); sh_mobile_ceu_soft_reset(pcdev); } static 
struct vb2_ops sh_mobile_ceu_videobuf_ops = {
    .queue_setup    = sh_mobile_ceu_videobuf_setup,
    .buf_prepare    = sh_mobile_ceu_videobuf_prepare,
    .buf_queue      = sh_mobile_ceu_videobuf_queue,
    .buf_cleanup    = sh_mobile_ceu_videobuf_release,
    .buf_init       = sh_mobile_ceu_videobuf_init,
    .wait_prepare   = vb2_ops_wait_prepare,
    .wait_finish    = vb2_ops_wait_finish,
    .stop_streaming = sh_mobile_ceu_stop_streaming,
};

/*
 * Frame-done interrupt: complete the active buffer, promote the next queued
 * buffer (if any) and restart capture.
 */
static irqreturn_t sh_mobile_ceu_irq(int irq, void *data)
{
    struct sh_mobile_ceu_dev *pcdev = data;
    struct vb2_v4l2_buffer *vbuf;
    int ret;

    spin_lock(&pcdev->lock);

    vbuf = pcdev->active;
    if (!vbuf)
        /* Stale interrupt from a released buffer */
        goto out;

    list_del_init(&to_ceu_vb(vbuf)->queue);

    if (!list_empty(&pcdev->capture))
        pcdev->active = &list_entry(pcdev->capture.next,
                        struct sh_mobile_ceu_buffer, queue)->vb;
    else
        pcdev->active = NULL;

    ret = sh_mobile_ceu_capture(pcdev);
    vbuf->vb2_buf.timestamp = ktime_get_ns();
    if (!ret) {
        vbuf->field = pcdev->field;
        vbuf->sequence = pcdev->sequence++;
    }
    vb2_buffer_done(&vbuf->vb2_buf,
            ret < 0 ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);

out:
    spin_unlock(&pcdev->lock);

    return IRQ_HANDLED;
}

/*
 * Locate the (optional) sh-mobile-csi2 subdevice, caching the result in
 * pcdev->csi2_sd on first success.
 */
static struct v4l2_subdev *find_csi2(struct sh_mobile_ceu_dev *pcdev)
{
    struct v4l2_subdev *sd;

    if (pcdev->csi2_sd)
        return pcdev->csi2_sd;

    if (pcdev->csi2_asd) {
        char name[] = "sh-mobile-csi2";
        v4l2_device_for_each_subdev(sd, &pcdev->ici.v4l2_dev)
            if (!strncmp(name, sd->name, sizeof(name) - 1)) {
                pcdev->csi2_sd = sd;
                return sd;
            }
    }

    return NULL;
}

/* Return the CSI-2 subdev only if it is bound to this client's group. */
static struct v4l2_subdev *csi2_subdev(struct sh_mobile_ceu_dev *pcdev,
                                       struct soc_camera_device *icd)
{
    struct v4l2_subdev *sd = pcdev->csi2_sd;

    return sd && sd->grp_id == soc_camera_grp_id(icd) ? sd : NULL;
}

/* Attach a client: power up the CSI-2 link when one serves this camera. */
static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
{
    struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
    struct sh_mobile_ceu_dev *pcdev = ici->priv;
    struct v4l2_subdev *csi2_sd = find_csi2(pcdev);
    int ret;

    if (csi2_sd) {
        csi2_sd->grp_id = soc_camera_grp_id(icd);
        v4l2_set_subdev_hostdata(csi2_sd, icd);
    }

    ret = v4l2_subdev_call(csi2_sd, core, s_power, 1);
    if (ret < 0 && ret != -ENOIOCTLCMD && ret != -ENODEV)
        return ret;

    /*
     * -ENODEV is special: either csi2_sd == NULL or the CSI-2 driver
     * has not found this soc-camera device among its clients
     */
    if (csi2_sd && ret == -ENODEV)
        csi2_sd->grp_id = 0;

    dev_info(icd->parent,
         "SuperH Mobile CEU%s driver attached to camera %d\n",
         csi2_sd && csi2_sd->grp_id ? "/CSI-2" : "", icd->devnum);

    return 0;
}

/* Detach a client: power the CSI-2 link down (best effort). */
static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
{
    struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
    struct sh_mobile_ceu_dev *pcdev = ici->priv;
    struct v4l2_subdev *csi2_sd = find_csi2(pcdev);

    dev_info(icd->parent,
         "SuperH Mobile CEU driver detached from camera %d\n",
         icd->devnum);

    v4l2_subdev_call(csi2_sd, core, s_power, 0);
}

/* Called with .host_lock held */
static int sh_mobile_ceu_clock_start(struct soc_camera_host *ici)
{
    struct sh_mobile_ceu_dev *pcdev = ici->priv;

    /* runtime PM keeps the CEU clocked while a client is attached */
    pm_runtime_get_sync(ici->v4l2_dev.dev);

    pcdev->buf_total = 0;

    sh_mobile_ceu_soft_reset(pcdev);

    return 0;
}

/* Called with .host_lock held */
static void sh_mobile_ceu_clock_stop(struct soc_camera_host *ici)
{
    struct sh_mobile_ceu_dev *pcdev = ici->priv;

    /* disable capture, disable interrupts */
    ceu_write(pcdev, CEIER, 0);
    sh_mobile_ceu_soft_reset(pcdev);

    /* make sure active buffer is canceled */
    spin_lock_irq(&pcdev->lock);
    if (pcdev->active) {
        list_del_init(&to_ceu_vb(pcdev->active)->queue);
        vb2_buffer_done(&pcdev->active->vb2_buf, VB2_BUF_STATE_ERROR);
        pcdev->active = NULL;
    }
    spin_unlock_irq(&pcdev->lock);

    pm_runtime_put(ici->v4l2_dev.dev);
}

/*
 * See chapter 29.4.12 "Capture Filter
Control Register (CFLCR)"
 * in SH7722 Hardware Manual
 */
/*
 * Compute the output size the CEU scaling filter produces for a given source
 * size and 4096-based scale factor (scale == 0 means "no scaling").
 */
static unsigned int size_dst(unsigned int src, unsigned int scale)
{
    unsigned int mant_pre = scale >> 12;

    if (!src || !scale)
        return src;

    return ((mant_pre + 2 * (src - 1)) / (2 * mant_pre) - 1) *
        mant_pre * 4096 / scale + 1;
}

/*
 * Find a 4096-based scale factor mapping src to *dst; *dst is updated to the
 * size the hardware will actually produce. Returns 0 for 1:1.
 */
static u16 calc_scale(unsigned int src, unsigned int *dst)
{
    u16 scale;

    if (src == *dst)
        return 0;

    scale = (src * 4096 / *dst) & ~7;

    while (scale > 4096 && size_dst(src, scale) < *dst)
        scale -= 8;

    *dst = size_dst(src, scale);

    return scale;
}

/* rect is guaranteed to not exceed the scaled camera rectangle */
/*
 * Program the CEU capture window registers (CAMOR/CAPWR/CFSZR/CDWDR) from the
 * cached client geometry and the current user window.
 */
static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
{
    struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
    struct sh_mobile_ceu_cam *cam = icd->host_priv;
    struct sh_mobile_ceu_dev *pcdev = ici->priv;
    unsigned int height, width, cdwdr_width, in_width, in_height;
    unsigned int left_offset, top_offset;
    u32 camor;

    dev_geo(icd->parent, "Crop %ux%u@%u:%u\n",
        icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top);

    left_offset = cam->ceu_left;
    top_offset = cam->ceu_top;

    WARN_ON(icd->user_width & 3 || icd->user_height & 3);

    width = icd->user_width;

    if (pcdev->image_mode) {
        in_width = cam->width;
        if (!pcdev->is_16bit) {
            /* 8-bit bus: two bus samples per pixel */
            in_width *= 2;
            left_offset *= 2;
        }
    } else {
        unsigned int w_factor;

        switch (icd->current_fmt->host_fmt->packing) {
        case SOC_MBUS_PACKING_2X8_PADHI:
            w_factor = 2;
            break;
        default:
            w_factor = 1;
        }

        in_width = cam->width * w_factor;
        left_offset *= w_factor;
    }

    cdwdr_width = icd->bytesperline;

    height = icd->user_height;
    in_height = cam->height;
    if (V4L2_FIELD_NONE != pcdev->field) {
        /* interlaced: each field is half the frame height */
        height = (height / 2) & ~3;
        in_height /= 2;
        top_offset /= 2;
        cdwdr_width *= 2;
    }

    /* CSI2 special configuration */
    if (csi2_subdev(pcdev, icd)) {
        in_width = ((in_width - 2) * 2);
        left_offset *= 2;
    }

    /* Set CAMOR, CAPWR, CFSZR, take care of CDWDR */
    camor = left_offset | (top_offset << 16);

    dev_geo(icd->parent,
        "CAMOR 0x%x, CAPWR 0x%x, CFSZR 0x%x, CDWDR 0x%x\n", camor,
        (in_height << 16) | in_width, (height << 16) | width,
        cdwdr_width);

    ceu_write(pcdev, CAMOR, camor);
    ceu_write(pcdev, CAPWR, (in_height << 16) | in_width);
    /* CFSZR clipping is applied _after_ the scaling filter (CFLCR) */
    ceu_write(pcdev, CFSZR, (height << 16) | width);
    ceu_write(pcdev, CDWDR, cdwdr_width);
}

/* Stop capture and return the previous CAPSR value for a later restore. */
static u32 capture_save_reset(struct sh_mobile_ceu_dev *pcdev)
{
    u32 capsr = ceu_read(pcdev, CAPSR);
    ceu_write(pcdev, CAPSR, 1 << 16); /* reset, stop capture */
    return capsr;
}

/* Wait for the current frame and the reset bit to clear, then restore CAPSR. */
static void capture_restore(struct sh_mobile_ceu_dev *pcdev, u32 capsr)
{
    unsigned long timeout = jiffies + 10 * HZ;

    /*
     * Wait until the end of the current frame. It can take a long time,
     * but if it has been aborted by a CAPSR reset, it should exit sooner.
     */
    while ((ceu_read(pcdev, CSTSR) & 1) && time_before(jiffies, timeout))
        msleep(1);

    if (time_after(jiffies, timeout)) {
        dev_err(pcdev->ici.v4l2_dev.dev,
            "Timeout waiting for frame end! Interface problem?\n");
        return;
    }

    /* Wait until reset clears, this shall not hang... */
    while (ceu_read(pcdev, CAPSR) & (1 << 16))
        udelay(10);

    /* Anything to restore? */
    if (capsr & ~(1 << 16))
        ceu_write(pcdev, CAPSR, capsr);
}

/* Find the bus subdevice driver, e.g., CSI2 */
static struct v4l2_subdev *find_bus_subdev(struct sh_mobile_ceu_dev *pcdev,
                                           struct soc_camera_device *icd)
{
    return csi2_subdev(pcdev, icd) ?
        : soc_camera_to_subdev(icd);
}

#define CEU_BUS_FLAGS (V4L2_MBUS_MASTER |   \
        V4L2_MBUS_PCLK_SAMPLE_RISING |  \
        V4L2_MBUS_HSYNC_ACTIVE_HIGH |   \
        V4L2_MBUS_HSYNC_ACTIVE_LOW |    \
        V4L2_MBUS_VSYNC_ACTIVE_HIGH |   \
        V4L2_MBUS_VSYNC_ACTIVE_LOW |    \
        V4L2_MBUS_DATA_ACTIVE_HIGH)

/* Capture is not running, no interrupts, no locking needed */
/*
 * Negotiate media-bus signalling with the client and program the CEU data
 * path (CAMCR/CAPCR/CAIFR/CFLCR/CDOCR) for the currently selected format.
 */
static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd)
{
    struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
    struct sh_mobile_ceu_dev *pcdev = ici->priv;
    struct v4l2_subdev *sd = find_bus_subdev(pcdev, icd);
    struct sh_mobile_ceu_cam *cam = icd->host_priv;
    struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
    unsigned long value, common_flags = CEU_BUS_FLAGS;
    u32 capsr = capture_save_reset(pcdev);
    unsigned int yuv_lineskip;
    int ret;

    /*
     * If the client doesn't implement g_mbus_config, we just use our
     * platform data
     */
    ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
    if (!ret) {
        common_flags = soc_mbus_config_compatible(&cfg, common_flags);
        if (!common_flags)
            return -EINVAL;
    } else if (ret != -ENOIOCTLCMD) {
        return ret;
    }

    /* Make choices, based on platform preferences */
    if ((common_flags & V4L2_MBUS_HSYNC_ACTIVE_HIGH) &&
        (common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW)) {
        if (pcdev->flags & SH_CEU_FLAG_HSYNC_LOW)
            common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_HIGH;
        else
            common_flags &= ~V4L2_MBUS_HSYNC_ACTIVE_LOW;
    }

    if ((common_flags & V4L2_MBUS_VSYNC_ACTIVE_HIGH) &&
        (common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW)) {
        if (pcdev->flags & SH_CEU_FLAG_VSYNC_LOW)
            common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_HIGH;
        else
            common_flags &= ~V4L2_MBUS_VSYNC_ACTIVE_LOW;
    }

    cfg.flags = common_flags;
    ret = v4l2_subdev_call(sd, video, s_mbus_config, &cfg);
    if (ret < 0 && ret != -ENOIOCTLCMD)
        return ret;

    if (icd->current_fmt->host_fmt->bits_per_sample > 8)
        pcdev->is_16bit = 1;
    else
        pcdev->is_16bit = 0;

    ceu_write(pcdev, CRCNTR, 0);
    ceu_write(pcdev, CRCMPR, 0);

    value = 0x00000010; /* data fetch by default */
    yuv_lineskip = 0x10;

    switch (icd->current_fmt->host_fmt->fourcc) {
    case V4L2_PIX_FMT_NV12:
    case V4L2_PIX_FMT_NV21:
        /* convert 4:2:2 -> 4:2:0 */
        yuv_lineskip = 0; /* skip for NV12/21, no skip for NV16/61 */
        /* fall-through */
    case V4L2_PIX_FMT_NV16:
    case V4L2_PIX_FMT_NV61:
        switch (cam->code) {
        case MEDIA_BUS_FMT_UYVY8_2X8:
            value = 0x00000000; /* Cb0, Y0, Cr0, Y1 */
            break;
        case MEDIA_BUS_FMT_VYUY8_2X8:
            value = 0x00000100; /* Cr0, Y0, Cb0, Y1 */
            break;
        case MEDIA_BUS_FMT_YUYV8_2X8:
            value = 0x00000200; /* Y0, Cb0, Y1, Cr0 */
            break;
        case MEDIA_BUS_FMT_YVYU8_2X8:
            value = 0x00000300; /* Y0, Cr0, Y1, Cb0 */
            break;
        default:
            BUG();
        }
    }

    if (icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV21 ||
        icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV61)
        value ^= 0x00000100; /* swap U, V to change from NV1x->NVx1 */

    value |= common_flags & V4L2_MBUS_VSYNC_ACTIVE_LOW ? 1 << 1 : 0;
    value |= common_flags & V4L2_MBUS_HSYNC_ACTIVE_LOW ? 1 << 0 : 0;

    if (csi2_subdev(pcdev, icd)) /* CSI2 mode */
        value |= 3 << 12;
    else if (pcdev->is_16bit)
        value |= 1 << 12;
    else if (pcdev->flags & SH_CEU_FLAG_LOWER_8BIT)
        value |= 2 << 12;

    ceu_write(pcdev, CAMCR, value);

    ceu_write(pcdev, CAPCR, 0x00300000);

    switch (pcdev->field) {
    case V4L2_FIELD_INTERLACED_TB:
        value = 0x101;
        break;
    case V4L2_FIELD_INTERLACED_BT:
        value = 0x102;
        break;
    default:
        value = 0;
        break;
    }
    ceu_write(pcdev, CAIFR, value);

    sh_mobile_ceu_set_rect(icd);
    mdelay(1);

    dev_geo(icd->parent, "CFLCR 0x%x\n", pcdev->cflcr);
    ceu_write(pcdev, CFLCR, pcdev->cflcr);

    /*
     * A few words about byte order (observed in Big Endian mode)
     *
     * In data fetch mode bytes are received in chunks of 8 bytes.
     * D0, D1, D2, D3, D4, D5, D6, D7 (D0 received first)
     *
     * The data is however by default written to memory in reverse order:
     * D7, D6, D5, D4, D3, D2, D1, D0 (D7 written to lowest byte)
     *
     * The lowest three bits of CDOCR allows us to do swapping,
     * using 7 we swap the data bytes to match the incoming order:
     * D0, D1, D2, D3, D4, D5, D6, D7
     */
    value = 0x00000007 | yuv_lineskip;

    ceu_write(pcdev, CDOCR, value);
    ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */

    capture_restore(pcdev, capsr);

    /* not in bundle mode: skip CBDSR, CDAYR2, CDACR2, CDBYR2, CDBCR2 */
    return 0;
}

/*
 * Check whether the client's bus configuration and width are compatible with
 * the CEU parallel interface.
 */
static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd,
                                       unsigned char buswidth)
{
    struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
    struct sh_mobile_ceu_dev *pcdev = ici->priv;
    struct v4l2_subdev *sd = find_bus_subdev(pcdev, icd);
    unsigned long common_flags = CEU_BUS_FLAGS;
    struct v4l2_mbus_config cfg = {.type = V4L2_MBUS_PARALLEL,};
    int ret;

    ret = v4l2_subdev_call(sd, video, g_mbus_config, &cfg);
    if (!ret)
        common_flags = soc_mbus_config_compatible(&cfg,
                                                  common_flags);
    else if (ret != -ENOIOCTLCMD)
        return ret;

    if (!common_flags || buswidth > 16)
        return -EINVAL;

    return 0;
}

/* Host-synthesized planar YUV output formats (from 4:2:2 bus input). */
static const struct soc_mbus_pixelfmt sh_mobile_ceu_formats[] = {
    {
        .fourcc             = V4L2_PIX_FMT_NV12,
        .name               = "NV12",
        .bits_per_sample    = 8,
        .packing            = SOC_MBUS_PACKING_1_5X8,
        .order              = SOC_MBUS_ORDER_LE,
        .layout             = SOC_MBUS_LAYOUT_PLANAR_2Y_C,
    }, {
        .fourcc             = V4L2_PIX_FMT_NV21,
        .name               = "NV21",
        .bits_per_sample    = 8,
        .packing            = SOC_MBUS_PACKING_1_5X8,
        .order              = SOC_MBUS_ORDER_LE,
        .layout             = SOC_MBUS_LAYOUT_PLANAR_2Y_C,
    }, {
        .fourcc             = V4L2_PIX_FMT_NV16,
        .name               = "NV16",
        .bits_per_sample    = 8,
        .packing            = SOC_MBUS_PACKING_2X8_PADHI,
        .order              = SOC_MBUS_ORDER_LE,
        .layout             = SOC_MBUS_LAYOUT_PLANAR_Y_C,
    }, {
        .fourcc             = V4L2_PIX_FMT_NV61,
        .name               = "NV61",
        .bits_per_sample    = 8,
        .packing            = SOC_MBUS_PACKING_2X8_PADHI,
        .order              = SOC_MBUS_ORDER_LE,
        .layout             = SOC_MBUS_LAYOUT_PLANAR_Y_C,
    },
};

/* This will be corrected as
we get more formats */ static bool sh_mobile_ceu_packing_supported(const struct soc_mbus_pixelfmt *fmt) { return fmt->packing == SOC_MBUS_PACKING_NONE || (fmt->bits_per_sample == 8 && fmt->packing == SOC_MBUS_PACKING_1_5X8) || (fmt->bits_per_sample == 8 && fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) || (fmt->bits_per_sample > 8 && fmt->packing == SOC_MBUS_PACKING_EXTEND16); } static struct soc_camera_device *ctrl_to_icd(struct v4l2_ctrl *ctrl) { return container_of(ctrl->handler, struct soc_camera_device, ctrl_handler); } static int sh_mobile_ceu_s_ctrl(struct v4l2_ctrl *ctrl) { struct soc_camera_device *icd = ctrl_to_icd(ctrl); struct soc_camera_host *ici = to_soc_camera_host(icd->parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; switch (ctrl->id) { case V4L2_CID_SHARPNESS: switch (icd->current_fmt->host_fmt->fourcc) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: ceu_write(pcdev, CLFCR, !ctrl->val); return 0; } break; } return -EINVAL; } static const struct v4l2_ctrl_ops sh_mobile_ceu_ctrl_ops = { .s_ctrl = sh_mobile_ceu_s_ctrl, }; static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int idx, struct soc_camera_format_xlate *xlate) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct device *dev = icd->parent; struct soc_camera_host *ici = to_soc_camera_host(dev); struct sh_mobile_ceu_dev *pcdev = ici->priv; int ret, k, n; int formats = 0; struct sh_mobile_ceu_cam *cam; struct v4l2_subdev_mbus_code_enum code = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, .index = idx, }; const struct soc_mbus_pixelfmt *fmt; ret = v4l2_subdev_call(sd, pad, enum_mbus_code, NULL, &code); if (ret < 0) /* No more formats */ return 0; fmt = soc_mbus_get_fmtdesc(code.code); if (!fmt) { dev_warn(dev, "unsupported format code #%u: %d\n", idx, code.code); return 0; } if (!csi2_subdev(pcdev, icd)) { /* Are there any restrictions in the CSI-2 case? 
*/ ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample); if (ret < 0) return 0; } if (!icd->host_priv) { struct v4l2_subdev_format fmt = { .which = V4L2_SUBDEV_FORMAT_ACTIVE, }; struct v4l2_mbus_framefmt *mf = &fmt.format; struct v4l2_rect rect; int shift = 0; /* Add our control */ v4l2_ctrl_new_std(&icd->ctrl_handler, &sh_mobile_ceu_ctrl_ops, V4L2_CID_SHARPNESS, 0, 1, 1, 1); if (icd->ctrl_handler.error) return icd->ctrl_handler.error; /* FIXME: subwindow is lost between close / open */ /* Cache current client geometry */ ret = soc_camera_client_g_rect(sd, &rect); if (ret < 0) return ret; /* First time */ ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt); if (ret < 0) return ret; /* * All currently existing CEU implementations support 2560x1920 * or larger frames. If the sensor is proposing too big a frame, * don't bother with possibly supportred by the CEU larger * sizes, just try VGA multiples. If needed, this can be * adjusted in the future. */ while ((mf->width > pcdev->max_width || mf->height > pcdev->max_height) && shift < 4) { /* Try 2560x1920, 1280x960, 640x480, 320x240 */ mf->width = 2560 >> shift; mf->height = 1920 >> shift; ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd), pad, set_fmt, NULL, &fmt); if (ret < 0) return ret; shift++; } if (shift == 4) { dev_err(dev, "Failed to configure the client below %ux%x\n", mf->width, mf->height); return -EIO; } dev_geo(dev, "camera fmt %ux%u\n", mf->width, mf->height); cam = kzalloc(sizeof(*cam), GFP_KERNEL); if (!cam) return -ENOMEM; /* We are called with current camera crop, initialise subrect with it */ cam->rect = rect; cam->subrect = rect; cam->width = mf->width; cam->height = mf->height; icd->host_priv = cam; } else { cam = icd->host_priv; } /* Beginning of a pass */ if (!idx) cam->extra_fmt = NULL; switch (code.code) { case MEDIA_BUS_FMT_UYVY8_2X8: case MEDIA_BUS_FMT_VYUY8_2X8: case MEDIA_BUS_FMT_YUYV8_2X8: case MEDIA_BUS_FMT_YVYU8_2X8: if (cam->extra_fmt) break; /* * Our case 
is simple so far: for any of the above four camera * formats we add all our four synthesized NV* formats, so, * just marking the device with a single flag suffices. If * the format generation rules are more complex, you would have * to actually hang your already added / counted formats onto * the host_priv pointer and check whether the format you're * going to add now is already there. */ cam->extra_fmt = sh_mobile_ceu_formats; n = ARRAY_SIZE(sh_mobile_ceu_formats); formats += n; for (k = 0; xlate && k < n; k++) { xlate->host_fmt = &sh_mobile_ceu_formats[k]; xlate->code = code.code; xlate++; dev_dbg(dev, "Providing format %s using code %d\n", sh_mobile_ceu_formats[k].name, code.code); } break; default: if (!sh_mobile_ceu_packing_supported(fmt)) return 0; } /* Generic pass-through */ formats++; if (xlate) { xlate->host_fmt = fmt; xlate->code = code.code; xlate++; dev_dbg(dev, "Providing format %s in pass-through mode\n", fmt->name); } return formats; } static void sh_mobile_ceu_put_formats(struct soc_camera_device *icd) { kfree(icd->host_priv); icd->host_priv = NULL; } #define scale_down(size, scale) soc_camera_shift_scale(size, 12, scale) #define calc_generic_scale(in, out) soc_camera_calc_scale(in, 12, out) /* * CEU can scale and crop, but we don't want to waste bandwidth and kill the * framerate by always requesting the maximum image from the client. See * Documentation/video4linux/sh_mobile_ceu_camera.txt for a description of * scaling and cropping algorithms and for the meaning of referenced here steps. 
*/
/*
 * S_CROP: iteratively negotiate the client crop, then use CEU scaling and
 * cropping to hit the requested window without changing the user format.
 */
static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd,
                                  const struct v4l2_crop *a)
{
    struct v4l2_crop a_writable = *a;
    const struct v4l2_rect *rect = &a_writable.c;
    struct device *dev = icd->parent;
    struct soc_camera_host *ici = to_soc_camera_host(dev);
    struct sh_mobile_ceu_dev *pcdev = ici->priv;
    struct v4l2_crop cam_crop;
    struct sh_mobile_ceu_cam *cam = icd->host_priv;
    struct v4l2_rect *cam_rect = &cam_crop.c;
    struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
    struct v4l2_subdev_format fmt = {
        .which = V4L2_SUBDEV_FORMAT_ACTIVE,
    };
    struct v4l2_mbus_framefmt *mf = &fmt.format;
    unsigned int scale_cam_h, scale_cam_v, scale_ceu_h, scale_ceu_v,
        out_width, out_height;
    int interm_width, interm_height;
    u32 capsr, cflcr;
    int ret;

    dev_geo(dev, "S_CROP(%ux%u@%u:%u)\n", rect->width, rect->height,
        rect->left, rect->top);

    /* During camera cropping its output window can change too, stop CEU */
    capsr = capture_save_reset(pcdev);
    dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr);

    /*
     * 1. - 2. Apply iterative camera S_CROP for new input window, read back
     * actual camera rectangle.
     */
    ret = soc_camera_client_s_crop(sd, &a_writable, &cam_crop,
                                   &cam->rect, &cam->subrect);
    if (ret < 0)
        return ret;

    dev_geo(dev, "1-2: camera cropped to %ux%u@%u:%u\n",
        cam_rect->width, cam_rect->height,
        cam_rect->left, cam_rect->top);

    /* On success cam_crop contains current camera crop */

    /* 3. Retrieve camera output window */
    ret = v4l2_subdev_call(sd, pad, get_fmt, NULL, &fmt);
    if (ret < 0)
        return ret;

    if (mf->width > pcdev->max_width || mf->height > pcdev->max_height)
        return -EINVAL;

    /* 4. Calculate camera scales */
    scale_cam_h = calc_generic_scale(cam_rect->width, mf->width);
    scale_cam_v = calc_generic_scale(cam_rect->height, mf->height);

    /* Calculate intermediate window */
    interm_width = scale_down(rect->width, scale_cam_h);
    interm_height = scale_down(rect->height, scale_cam_v);

    /*
     * If the intermediate window is smaller than the user window, ask the
     * client for a larger output so the CEU never has to scale up.
     */
    if (interm_width < icd->user_width) {
        u32 new_scale_h;

        new_scale_h = calc_generic_scale(rect->width, icd->user_width);

        mf->width = scale_down(cam_rect->width, new_scale_h);
    }

    if (interm_height < icd->user_height) {
        u32 new_scale_v;

        new_scale_v = calc_generic_scale(rect->height, icd->user_height);

        mf->height = scale_down(cam_rect->height, new_scale_v);
    }

    if (interm_width < icd->user_width || interm_height < icd->user_height) {
        ret = v4l2_device_call_until_err(sd->v4l2_dev,
                    soc_camera_grp_id(icd), pad,
                    set_fmt, NULL, &fmt);
        if (ret < 0)
            return ret;

        dev_geo(dev, "New camera output %ux%u\n", mf->width, mf->height);
        scale_cam_h = calc_generic_scale(cam_rect->width, mf->width);
        scale_cam_v = calc_generic_scale(cam_rect->height, mf->height);
        interm_width = scale_down(rect->width, scale_cam_h);
        interm_height = scale_down(rect->height, scale_cam_v);
    }

    /* Cache camera output window */
    cam->width = mf->width;
    cam->height = mf->height;

    if (pcdev->image_mode) {
        out_width = min(interm_width, icd->user_width);
        out_height = min(interm_height, icd->user_height);
    } else {
        out_width = interm_width;
        out_height = interm_height;
    }

    /*
     * 5. Calculate CEU scales from camera scales from results of (5) and
     * the user window
     */
    scale_ceu_h = calc_scale(interm_width, &out_width);
    scale_ceu_v = calc_scale(interm_height, &out_height);

    dev_geo(dev, "5: CEU scales %u:%u\n", scale_ceu_h, scale_ceu_v);

    /* Apply CEU scales. */
    cflcr = scale_ceu_h | (scale_ceu_v << 16);
    if (cflcr != pcdev->cflcr) {
        pcdev->cflcr = cflcr;
        ceu_write(pcdev, CFLCR, cflcr);
    }

    icd->user_width  = out_width & ~3;
    icd->user_height = out_height & ~3;
    /* Offsets are applied at the CEU scaling filter input */
    cam->ceu_left = scale_down(rect->left - cam_rect->left, scale_cam_h) & ~1;
    cam->ceu_top = scale_down(rect->top - cam_rect->top, scale_cam_v) & ~1;

    /* 6. Use CEU cropping to crop to the new window. */
    sh_mobile_ceu_set_rect(icd);

    cam->subrect = *rect;

    dev_geo(dev, "6: CEU cropped to %ux%u@%u:%u\n",
        icd->user_width, icd->user_height,
        cam->ceu_left, cam->ceu_top);

    /* Restore capture. The CE bit can be cleared by the hardware */
    if (pcdev->active)
        capsr |= 1;
    capture_restore(pcdev, capsr);

    /* Even if only camera cropping succeeded */
    return ret;
}

/* G_CROP: report the cached sub-rectangle. */
static int sh_mobile_ceu_get_crop(struct soc_camera_device *icd,
                                  struct v4l2_crop *a)
{
    struct sh_mobile_ceu_cam *cam = icd->host_priv;

    a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    a->c = cam->subrect;

    return 0;
}

/* Similar to set_crop multistage iterative algorithm */
static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd,
                                 struct v4l2_format *f)
{
    struct device *dev = icd->parent;
    struct soc_camera_host *ici = to_soc_camera_host(dev);
    struct sh_mobile_ceu_dev *pcdev = ici->priv;
    struct sh_mobile_ceu_cam *cam = icd->host_priv;
    struct v4l2_pix_format *pix = &f->fmt.pix;
    struct v4l2_mbus_framefmt mf;
    __u32 pixfmt = pix->pixelformat;
    const struct soc_camera_format_xlate *xlate;
    unsigned int ceu_sub_width = pcdev->max_width,
        ceu_sub_height = pcdev->max_height;
    u16 scale_v, scale_h;
    int ret;
    bool image_mode;
    enum v4l2_field field;

    switch (pix->field) {
    default:
        pix->field = V4L2_FIELD_NONE;
        /* fall-through */
    case V4L2_FIELD_INTERLACED_TB:
    case V4L2_FIELD_INTERLACED_BT:
    case V4L2_FIELD_NONE:
        field = pix->field;
        break;
    case V4L2_FIELD_INTERLACED:
        field = V4L2_FIELD_INTERLACED_TB;
        break;
    }

    xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
    if (!xlate) {
        dev_warn(dev, "Format %x not found\n", pixfmt);
        return -EINVAL;
    }

    /* 1.-4. Calculate desired client output geometry */
    soc_camera_calc_client_output(icd, &cam->rect, &cam->subrect, pix, &mf, 12);
    mf.field = pix->field;
    mf.colorspace = pix->colorspace;
    mf.code  = xlate->code;

    /* image_mode: CEU converts the 4:2:2 bus data to planar NV formats */
    switch (pixfmt) {
    case V4L2_PIX_FMT_NV12:
    case V4L2_PIX_FMT_NV21:
    case V4L2_PIX_FMT_NV16:
    case V4L2_PIX_FMT_NV61:
        image_mode = true;
        break;
    default:
        image_mode = false;
    }

    dev_geo(dev, "S_FMT(pix=0x%x, fld 0x%x, code 0x%x, %ux%u)\n", pixfmt, mf.field, mf.code,
        pix->width, pix->height);

    dev_geo(dev, "4: request camera output %ux%u\n", mf.width, mf.height);

    /* 5. - 9. */
    ret = soc_camera_client_scale(icd, &cam->rect, &cam->subrect,
                &mf, &ceu_sub_width, &ceu_sub_height,
                image_mode && V4L2_FIELD_NONE == field, 12);

    dev_geo(dev, "5-9: client scale return %d\n", ret);

    /* Done with the camera. Now see if we can improve the result */

    dev_geo(dev, "fmt %ux%u, requested %ux%u\n",
        mf.width, mf.height, pix->width, pix->height);
    if (ret < 0)
        return ret;

    if (mf.code != xlate->code)
        return -EINVAL;

    /* 9. Prepare CEU crop */
    cam->width = mf.width;
    cam->height = mf.height;

    /* 10. Use CEU scaling to scale to the requested user window. */

    /* We cannot scale up */
    if (pix->width > ceu_sub_width)
        ceu_sub_width = pix->width;

    if (pix->height > ceu_sub_height)
        ceu_sub_height = pix->height;

    pix->colorspace = mf.colorspace;

    if (image_mode) {
        /* Scale pix->{width x height} down to width x height */
        scale_h = calc_scale(ceu_sub_width, &pix->width);
        scale_v = calc_scale(ceu_sub_height, &pix->height);
    } else {
        pix->width = ceu_sub_width;
        pix->height = ceu_sub_height;
        scale_h = 0;
        scale_v = 0;
    }

    pcdev->cflcr = scale_h | (scale_v << 16);

    /*
     * We have calculated CFLCR, the actual configuration will be performed
     * in sh_mobile_ceu_set_bus_param()
     */

    dev_geo(dev, "10: W: %u : 0x%x = %u, H: %u : 0x%x = %u\n",
        ceu_sub_width, scale_h, pix->width,
        ceu_sub_height, scale_v, pix->height);

    cam->code = xlate->code;
    icd->current_fmt = xlate;

    pcdev->field = field;
    pcdev->image_mode = image_mode;

    /* CFSZR requirement */
    pix->width &= ~3;
    pix->height &= ~3;

    return 0;
}

#define CEU_CHDW_MAX 8188U  /* Maximum line stride */

static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd,
                                 struct v4l2_format *f)
{
    struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
    struct sh_mobile_ceu_dev *pcdev = ici->priv;
    const struct soc_camera_format_xlate *xlate;
    struct v4l2_pix_format *pix = &f->fmt.pix;
    struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
    struct v4l2_subdev_pad_config pad_cfg;
    struct v4l2_subdev_format format = {
        .which = V4L2_SUBDEV_FORMAT_TRY,
    };
    struct v4l2_mbus_framefmt *mf = &format.format;
    __u32 pixfmt = pix->pixelformat;
    int width, height;
    int ret;

    dev_geo(icd->parent, "TRY_FMT(pix=0x%x, %ux%u)\n",
         pixfmt, pix->width, pix->height);

    xlate = soc_camera_xlate_by_fourcc(icd, pixfmt);
    if (!xlate) {
        /* unknown fourcc: fall back to the currently active format */
        xlate = icd->current_fmt;
        dev_dbg(icd->parent, "Format %x not found, keeping %x\n",
            pixfmt, xlate->host_fmt->fourcc);
        pixfmt = xlate->host_fmt->fourcc;
        pix->pixelformat = pixfmt;
        pix->colorspace = icd->colorspace;
    }

    /* FIXME: calculate using depth and bus width */

    /* CFSZR requires height and width to be 4-pixel
       aligned */
    v4l_bound_align_image(&pix->width, 2, pcdev->max_width, 2,
                          &pix->height, 4, pcdev->max_height, 2, 0);

    width = pix->width;
    height = pix->height;

    /* limit to sensor capabilities */
    mf->width = pix->width;
    mf->height = pix->height;
    mf->field = pix->field;
    mf->code = xlate->code;
    mf->colorspace = pix->colorspace;

    ret = v4l2_device_call_until_err(sd->v4l2_dev, soc_camera_grp_id(icd),
                                     pad, set_fmt, &pad_cfg, &format);
    if (ret < 0)
        return ret;

    pix->width = mf->width;
    pix->height = mf->height;
    pix->field = mf->field;
    pix->colorspace = mf->colorspace;

    switch (pixfmt) {
    case V4L2_PIX_FMT_NV12:
    case V4L2_PIX_FMT_NV21:
    case V4L2_PIX_FMT_NV16:
    case V4L2_PIX_FMT_NV61:
        /* FIXME: check against rect_max after converting soc-camera */
        /* We can scale precisely, need a bigger image from camera */
        if (pix->width < width || pix->height < height) {
            /*
             * We presume, the sensor behaves sanely, i.e., if
             * requested a bigger rectangle, it will not return a
             * smaller one.
             */
            mf->width = pcdev->max_width;
            mf->height = pcdev->max_height;
            ret = v4l2_device_call_until_err(sd->v4l2_dev,
                    soc_camera_grp_id(icd), pad,
                    set_fmt, &pad_cfg, &format);
            if (ret < 0) {
                /* Shouldn't actually happen... */
                dev_err(icd->parent,
                    "FIXME: client try_fmt() = %d\n", ret);
                return ret;
            }
        }
        /* We will scale exactly */
        if (mf->width > width)
            pix->width = width;
        if (mf->height > height)
            pix->height = height;

        pix->bytesperline = max(pix->bytesperline, pix->width);
        pix->bytesperline = min(pix->bytesperline, CEU_CHDW_MAX);
        pix->bytesperline &= ~3;
        break;

    default:
        /* Configurable stride isn't supported in pass-through mode. */
        pix->bytesperline = 0;
    }

    pix->width  &= ~3;
    pix->height &= ~3;
    pix->sizeimage = 0;

    dev_geo(icd->parent, "%s(): return %d, fmt 0x%x, %ux%u\n", __func__,
        ret, pix->pixelformat, pix->width, pix->height);

    return ret;
}

/*
 * Crop while streaming: freeze the queue, stop the client, apply the crop and
 * possibly a corrective S_FMT, then thaw the queue and restart streaming.
 */
static int sh_mobile_ceu_set_livecrop(struct soc_camera_device *icd,
                                      const struct v4l2_crop *a)
{
    struct v4l2_subdev *sd = soc_camera_to_subdev(icd);
    struct soc_camera_host *ici = to_soc_camera_host(icd->parent);
    struct sh_mobile_ceu_dev *pcdev = ici->priv;
    u32 out_width = icd->user_width, out_height = icd->user_height;
    int ret;

    /* Freeze queue */
    pcdev->frozen = 1;
    /* Wait for frame */
    /*
     * NOTE(review): the interruptible-wait result is overwritten by the next
     * call, so an interrupted wait is silently ignored — confirm intentional.
     */
    ret = wait_for_completion_interruptible(&pcdev->complete);
    /* Stop the client */
    ret = v4l2_subdev_call(sd, video, s_stream, 0);
    if (ret < 0)
        dev_warn(icd->parent,
             "Client failed to stop the stream: %d\n", ret);
    else
        /* Do the crop, if it fails, there's nothing more we can do */
        sh_mobile_ceu_set_crop(icd, a);

    dev_geo(icd->parent, "Output after crop: %ux%u\n",
        icd->user_width, icd->user_height);

    if (icd->user_width != out_width || icd->user_height != out_height) {
        struct v4l2_format f = {
            .type = V4L2_BUF_TYPE_VIDEO_CAPTURE,
            .fmt.pix = {
                .width          = out_width,
                .height         = out_height,
                .pixelformat    = icd->current_fmt->host_fmt->fourcc,
                .field          = pcdev->field,
                .colorspace     = icd->colorspace,
            },
        };
        ret = sh_mobile_ceu_set_fmt(icd, &f);
        if (!ret && (out_width != f.fmt.pix.width ||
                     out_height != f.fmt.pix.height))
            ret = -EINVAL;
        if (!ret) {
            icd->user_width  = out_width & ~3;
            icd->user_height = out_height & ~3;
            ret = sh_mobile_ceu_set_bus_param(icd);
        }
    }

    /* Thaw the queue */
    pcdev->frozen = 0;
    spin_lock_irq(&pcdev->lock);
    sh_mobile_ceu_capture(pcdev);
    spin_unlock_irq(&pcdev->lock);

    /* Start the client */
    ret = v4l2_subdev_call(sd, video, s_stream, 1);
    return ret;
}

/* Delegate polling to the vb2 queue. */
static unsigned int sh_mobile_ceu_poll(struct file *file, poll_table *pt)
{
    struct soc_camera_device *icd = file->private_data;

    return vb2_poll(&icd->vb2_vidq, file, pt);
}

/* Fill in the static driver/card capability strings. */
static int sh_mobile_ceu_querycap(struct soc_camera_host *ici,
                                  struct v4l2_capability *cap)
{
    strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card));
    strlcpy(cap->driver, "sh_mobile_ceu", sizeof(cap->driver));
    strlcpy(cap->bus_info, "platform:sh_mobile_ceu", sizeof(cap->bus_info));
    cap->device_caps = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING;
    cap->capabilities = cap->device_caps | V4L2_CAP_DEVICE_CAPS;

    return 0;
}

/* Configure the vb2 queue for contiguous-DMA capture. */
static int sh_mobile_ceu_init_videobuf(struct vb2_queue *q,
                                       struct soc_camera_device *icd)
{
    struct soc_camera_host *ici = to_soc_camera_host(icd->parent);

    q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
    q->io_modes = VB2_MMAP | VB2_USERPTR;
    q->drv_priv = icd;
    q->ops = &sh_mobile_ceu_videobuf_ops;
    q->mem_ops = &vb2_dma_contig_memops;
    q->buf_struct_size = sizeof(struct sh_mobile_ceu_buffer);
    q->timestamp_flags = V4L2_BUF_FLAG_TIMESTAMP_MONOTONIC;
    q->lock = &ici->host_lock;

    return vb2_queue_init(q);
}

static struct soc_camera_host_ops sh_mobile_ceu_host_ops = {
    .owner          = THIS_MODULE,
    .add            = sh_mobile_ceu_add_device,
    .remove         = sh_mobile_ceu_remove_device,
    .clock_start    = sh_mobile_ceu_clock_start,
    .clock_stop     = sh_mobile_ceu_clock_stop,
    .get_formats    = sh_mobile_ceu_get_formats,
    .put_formats    = sh_mobile_ceu_put_formats,
    .get_crop       = sh_mobile_ceu_get_crop,
    .set_crop       = sh_mobile_ceu_set_crop,
    .set_livecrop   = sh_mobile_ceu_set_livecrop,
    .set_fmt        = sh_mobile_ceu_set_fmt,
    .try_fmt        = sh_mobile_ceu_try_fmt,
    .poll           = sh_mobile_ceu_poll,
    .querycap       = sh_mobile_ceu_querycap,
    .set_bus_param  = sh_mobile_ceu_set_bus_param,
    .init_videobuf2 = sh_mobile_ceu_init_videobuf,
};

/* Context for waiting on a bus notifier (legacy CSI2 probing). */
struct bus_wait {
    struct notifier_block   notifier;
    struct completion       completion;
    struct device           *dev;
};

static int bus_notify(struct notifier_block *nb,
                      unsigned long action, void *data)
{
    struct device *dev = data;
    struct bus_wait *wait = container_of(nb, struct bus_wait, notifier);

    if (wait->dev != dev)
        return NOTIFY_DONE;

    switch (action) {
    case BUS_NOTIFY_UNBOUND_DRIVER:
        /* Protect from module unloading */
        wait_for_completion(&wait->completion);
return NOTIFY_OK; } return NOTIFY_DONE; } static int sh_mobile_ceu_probe(struct platform_device *pdev) { struct sh_mobile_ceu_dev *pcdev; struct resource *res; void __iomem *base; unsigned int irq; int err, i; struct bus_wait wait = { .completion = COMPLETION_INITIALIZER_ONSTACK(wait.completion), .notifier.notifier_call = bus_notify, }; struct sh_mobile_ceu_companion *csi2; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!res || (int)irq <= 0) { dev_err(&pdev->dev, "Not enough CEU platform resources.\n"); return -ENODEV; } pcdev = devm_kzalloc(&pdev->dev, sizeof(*pcdev), GFP_KERNEL); if (!pcdev) { dev_err(&pdev->dev, "Could not allocate pcdev\n"); return -ENOMEM; } INIT_LIST_HEAD(&pcdev->capture); spin_lock_init(&pcdev->lock); init_completion(&pcdev->complete); pcdev->pdata = pdev->dev.platform_data; if (!pcdev->pdata && !pdev->dev.of_node) { dev_err(&pdev->dev, "CEU platform data not set.\n"); return -EINVAL; } /* TODO: implement per-device bus flags */ if (pcdev->pdata) { pcdev->max_width = pcdev->pdata->max_width; pcdev->max_height = pcdev->pdata->max_height; pcdev->flags = pcdev->pdata->flags; } pcdev->field = V4L2_FIELD_NONE; if (!pcdev->max_width) { unsigned int v; err = of_property_read_u32(pdev->dev.of_node, "renesas,max-width", &v); if (!err) pcdev->max_width = v; if (!pcdev->max_width) pcdev->max_width = 2560; } if (!pcdev->max_height) { unsigned int v; err = of_property_read_u32(pdev->dev.of_node, "renesas,max-height", &v); if (!err) pcdev->max_height = v; if (!pcdev->max_height) pcdev->max_height = 1920; } base = devm_ioremap_resource(&pdev->dev, res); if (IS_ERR(base)) return PTR_ERR(base); pcdev->irq = irq; pcdev->base = base; pcdev->video_limit = 0; /* only enabled if second resource exists */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (res) { err = dma_declare_coherent_memory(&pdev->dev, res->start, res->start, resource_size(res), DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE); if (!err) { 
dev_err(&pdev->dev, "Unable to declare CEU memory.\n"); return -ENXIO; } pcdev->video_limit = resource_size(res); } /* request irq */ err = devm_request_irq(&pdev->dev, pcdev->irq, sh_mobile_ceu_irq, 0, dev_name(&pdev->dev), pcdev); if (err) { dev_err(&pdev->dev, "Unable to register CEU interrupt.\n"); goto exit_release_mem; } pm_suspend_ignore_children(&pdev->dev, true); pm_runtime_enable(&pdev->dev); pm_runtime_resume(&pdev->dev); pcdev->ici.priv = pcdev; pcdev->ici.v4l2_dev.dev = &pdev->dev; pcdev->ici.nr = pdev->id; pcdev->ici.drv_name = dev_name(&pdev->dev); pcdev->ici.ops = &sh_mobile_ceu_host_ops; pcdev->ici.capabilities = SOCAM_HOST_CAP_STRIDE; pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev); if (IS_ERR(pcdev->alloc_ctx)) { err = PTR_ERR(pcdev->alloc_ctx); goto exit_free_clk; } if (pcdev->pdata && pcdev->pdata->asd_sizes) { struct v4l2_async_subdev **asd; char name[] = "sh-mobile-csi2"; int j; /* * CSI2 interfacing: several groups can use CSI2, pick up the * first one */ asd = pcdev->pdata->asd; for (j = 0; pcdev->pdata->asd_sizes[j]; j++) { for (i = 0; i < pcdev->pdata->asd_sizes[j]; i++, asd++) { dev_dbg(&pdev->dev, "%s(): subdev #%d, type %u\n", __func__, i, (*asd)->match_type); if ((*asd)->match_type == V4L2_ASYNC_MATCH_DEVNAME && !strncmp(name, (*asd)->match.device_name.name, sizeof(name) - 1)) { pcdev->csi2_asd = *asd; break; } } if (pcdev->csi2_asd) break; } pcdev->ici.asd = pcdev->pdata->asd; pcdev->ici.asd_sizes = pcdev->pdata->asd_sizes; } /* Legacy CSI2 interfacing */ csi2 = pcdev->pdata ? pcdev->pdata->csi2 : NULL; if (csi2) { /* * TODO: remove this once all users are converted to * asynchronous CSI2 probing. 
If it has to be kept, csi2 * platform device resources have to be added, using * platform_device_add_resources() */ struct platform_device *csi2_pdev = platform_device_alloc("sh-mobile-csi2", csi2->id); struct sh_csi2_pdata *csi2_pdata = csi2->platform_data; if (!csi2_pdev) { err = -ENOMEM; goto exit_free_ctx; } pcdev->csi2_pdev = csi2_pdev; err = platform_device_add_data(csi2_pdev, csi2_pdata, sizeof(*csi2_pdata)); if (err < 0) goto exit_pdev_put; csi2_pdev->resource = csi2->resource; csi2_pdev->num_resources = csi2->num_resources; err = platform_device_add(csi2_pdev); if (err < 0) goto exit_pdev_put; wait.dev = &csi2_pdev->dev; err = bus_register_notifier(&platform_bus_type, &wait.notifier); if (err < 0) goto exit_pdev_unregister; /* * From this point the driver module will not unload, until * we complete the completion. */ if (!csi2_pdev->dev.driver) { complete(&wait.completion); /* Either too late, or probing failed */ bus_unregister_notifier(&platform_bus_type, &wait.notifier); err = -ENXIO; goto exit_pdev_unregister; } /* * The module is still loaded, in the worst case it is hanging * in device release on our completion. So, _now_ dereferencing * the "owner" is safe! 
*/ err = try_module_get(csi2_pdev->dev.driver->owner); /* Let notifier complete, if it has been locked */ complete(&wait.completion); bus_unregister_notifier(&platform_bus_type, &wait.notifier); if (!err) { err = -ENODEV; goto exit_pdev_unregister; } pcdev->csi2_sd = platform_get_drvdata(csi2_pdev); } err = soc_camera_host_register(&pcdev->ici); if (err) goto exit_csi2_unregister; if (csi2) { err = v4l2_device_register_subdev(&pcdev->ici.v4l2_dev, pcdev->csi2_sd); dev_dbg(&pdev->dev, "%s(): ret(register_subdev) = %d\n", __func__, err); if (err < 0) goto exit_host_unregister; /* v4l2_device_register_subdev() took a reference too */ module_put(pcdev->csi2_sd->owner); } return 0; exit_host_unregister: soc_camera_host_unregister(&pcdev->ici); exit_csi2_unregister: if (csi2) { module_put(pcdev->csi2_pdev->dev.driver->owner); exit_pdev_unregister: platform_device_del(pcdev->csi2_pdev); exit_pdev_put: pcdev->csi2_pdev->resource = NULL; platform_device_put(pcdev->csi2_pdev); } exit_free_ctx: vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx); exit_free_clk: pm_runtime_disable(&pdev->dev); exit_release_mem: if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) dma_release_declared_memory(&pdev->dev); return err; } static int sh_mobile_ceu_remove(struct platform_device *pdev) { struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev); struct sh_mobile_ceu_dev *pcdev = container_of(soc_host, struct sh_mobile_ceu_dev, ici); struct platform_device *csi2_pdev = pcdev->csi2_pdev; soc_camera_host_unregister(soc_host); pm_runtime_disable(&pdev->dev); if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) dma_release_declared_memory(&pdev->dev); vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx); if (csi2_pdev && csi2_pdev->dev.driver) { struct module *csi2_drv = csi2_pdev->dev.driver->owner; platform_device_del(csi2_pdev); csi2_pdev->resource = NULL; platform_device_put(csi2_pdev); module_put(csi2_drv); } return 0; } static int sh_mobile_ceu_runtime_nop(struct device *dev) { /* Runtime PM 
callback shared between ->runtime_suspend() * and ->runtime_resume(). Simply returns success. * * This driver re-initializes all registers after * pm_runtime_get_sync() anyway so there is no need * to save and restore registers here. */ return 0; } static const struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = { .runtime_suspend = sh_mobile_ceu_runtime_nop, .runtime_resume = sh_mobile_ceu_runtime_nop, }; static const struct of_device_id sh_mobile_ceu_of_match[] = { { .compatible = "renesas,sh-mobile-ceu" }, { } }; MODULE_DEVICE_TABLE(of, sh_mobile_ceu_of_match); static struct platform_driver sh_mobile_ceu_driver = { .driver = { .name = "sh_mobile_ceu", .pm = &sh_mobile_ceu_dev_pm_ops, .of_match_table = sh_mobile_ceu_of_match, }, .probe = sh_mobile_ceu_probe, .remove = sh_mobile_ceu_remove, }; static int __init sh_mobile_ceu_init(void) { /* Whatever return code */ request_module("sh_mobile_csi2"); return platform_driver_register(&sh_mobile_ceu_driver); } static void __exit sh_mobile_ceu_exit(void) { platform_driver_unregister(&sh_mobile_ceu_driver); } module_init(sh_mobile_ceu_init); module_exit(sh_mobile_ceu_exit); MODULE_DESCRIPTION("SuperH Mobile CEU driver"); MODULE_AUTHOR("Magnus Damm"); MODULE_LICENSE("GPL"); MODULE_VERSION("0.1.0"); MODULE_ALIAS("platform:sh_mobile_ceu");
gpl-2.0
jstotero/Cucciolone-Rewrite
net/wireless/chan.c
82
3087
/*
 * This file contains helper code to handle channel
 * settings and keeping track of what is possible at
 * any point in time.
 *
 * Copyright 2009 Johannes Berg <johannes@sipsolutions.net>
 */

#include <net/cfg80211.h>
#include "core.h"

struct ieee80211_channel *
rdev_freq_to_chan(struct cfg80211_registered_device *rdev,
		  int freq, enum nl80211_channel_type channel_type)
{
	struct ieee80211_channel *chan;
	struct ieee80211_sta_ht_cap *ht_cap;

	chan = ieee80211_get_channel(&rdev->wiphy, freq);

	/* Primary channel not allowed */
	if (!chan || chan->flags & IEEE80211_CHAN_DISABLED)
		return NULL;

	/* reject HT40 when the secondary-channel direction is forbidden */
	if (channel_type == NL80211_CHAN_HT40MINUS &&
	    chan->flags & IEEE80211_CHAN_NO_HT40MINUS)
		return NULL;
	else if (channel_type == NL80211_CHAN_HT40PLUS &&
		 chan->flags & IEEE80211_CHAN_NO_HT40PLUS)
		return NULL;

	ht_cap = &rdev->wiphy.bands[chan->band]->ht_cap;

	if (channel_type != NL80211_CHAN_NO_HT) {
		if (!ht_cap->ht_supported)
			return NULL;

		if (!(ht_cap->cap & IEEE80211_HT_CAP_SUP_WIDTH_20_40) ||
		    ht_cap->cap & IEEE80211_HT_CAP_40MHZ_INTOLERANT)
			return NULL;
	}

	return chan;
}

/*
 * can_beacon_sec_chan - check whether the HT40 secondary channel
 * can be used for beaconing (IBSS/AP/mesh).
 *
 * BUGFIX: the original switch had no "break" statements, so both the
 * HT40+ and HT40- cases fell through into "default: return false",
 * which made this function reject every HT40 configuration.
 */
static bool can_beacon_sec_chan(struct wiphy *wiphy,
				struct ieee80211_channel *chan,
				enum nl80211_channel_type channel_type)
{
	struct ieee80211_channel *sec_chan;
	int diff;

	switch (channel_type) {
	case NL80211_CHAN_HT40PLUS:
		diff = 20;
		break;
	case NL80211_CHAN_HT40MINUS:
		diff = -20;
		break;
	default:
		return false;
	}

	sec_chan = ieee80211_get_channel(wiphy, chan->center_freq + diff);
	if (!sec_chan)
		return false;

	/* we'll need a DFS capability later */
	if (sec_chan->flags & (IEEE80211_CHAN_DISABLED |
			       IEEE80211_CHAN_PASSIVE_SCAN |
			       IEEE80211_CHAN_NO_IBSS |
			       IEEE80211_CHAN_RADAR))
		return false;

	return true;
}

/*
 * cfg80211_set_freq - switch the device (or a specific interface) to
 * the given frequency/channel-type, validating HT40 beaconing rules
 * for interface types that initiate communication.
 *
 * Returns 0 on success or a negative errno (-ENETDOWN if the netdev
 * is down, -EOPNOTSUPP if the driver has no set_channel op, -EINVAL
 * for an unusable channel).
 */
int cfg80211_set_freq(struct cfg80211_registered_device *rdev,
		      struct wireless_dev *wdev, int freq,
		      enum nl80211_channel_type channel_type)
{
	struct ieee80211_channel *chan;
	int result;

	/* monitor interfaces take the "whole device" path */
	if (wdev && wdev->iftype == NL80211_IFTYPE_MONITOR)
		wdev = NULL;

	if (wdev) {
		ASSERT_WDEV_LOCK(wdev);

		if (!netif_running(wdev->netdev))
			return -ENETDOWN;
	}

	if (!rdev->ops->set_channel)
		return -EOPNOTSUPP;

	chan = rdev_freq_to_chan(rdev, freq, channel_type);
	if (!chan)
		return -EINVAL;

	/* Both channels should be able to initiate communication */
	if (wdev && (wdev->iftype == NL80211_IFTYPE_ADHOC ||
		     wdev->iftype == NL80211_IFTYPE_AP ||
		     wdev->iftype == NL80211_IFTYPE_AP_VLAN ||
		     wdev->iftype == NL80211_IFTYPE_MESH_POINT)) {
		switch (channel_type) {
		case NL80211_CHAN_HT40PLUS:
		case NL80211_CHAN_HT40MINUS:
			if (!can_beacon_sec_chan(&rdev->wiphy, chan,
						 channel_type)) {
				printk(KERN_DEBUG
				       "cfg80211: Secondary channel not "
				       "allowed to initiate communication\n");
				return -EINVAL;
			}
			break;
		default:
			break;
		}
	}

	result = rdev->ops->set_channel(&rdev->wiphy,
					wdev ? wdev->netdev : NULL,
					chan, channel_type);
	if (result)
		return result;

	if (wdev)
		wdev->channel = chan;

	return 0;
}
gpl-2.0
mrimp/SM-N910T_Kernel
drivers/video/msm/mdss/mdss_mdp_pipe.c
82
40772
/* Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#define pr_fmt(fmt) "%s: " fmt, __func__

#include <linux/bitmap.h>
#include <linux/errno.h>
#include <linux/iopoll.h>
#include <linux/mutex.h>

#include "mdss_mdp.h"

/* SMP (shared memory pool) geometry is taken from the probed mdss resources */
#define SMP_MB_SIZE (mdss_res->smp_mb_size)
#define SMP_MB_CNT (mdss_res->smp_mb_cnt)
#define SMP_MB_ENTRY_SIZE 16
#define MAX_BPP 4

#define PIPE_HALT_TIMEOUT_US 0x4000

/* following offsets are relative to ctrl register bit offset */
#define CLK_FORCE_ON_OFFSET 0x0
#define CLK_FORCE_OFF_OFFSET 0x1
/* following offsets are relative to status register bit offset */
#define CLK_STATUS_OFFSET 0x0

/* serializes pipe (SSPP) allocation vs. SMP block bookkeeping */
static DEFINE_MUTEX(mdss_mdp_sspp_lock);
static DEFINE_MUTEX(mdss_mdp_smp_lock);

static void mdss_mdp_pipe_free(struct kref *kref);
static int mdss_mdp_smp_mmb_set(int client_id, unsigned long *smp);
static void mdss_mdp_smp_mmb_free(unsigned long *smp, bool write);
static struct mdss_mdp_pipe *mdss_mdp_pipe_search_by_client_id(
	struct mdss_data_type *mdata, int client_id);

/* Write a pipe (SSPP) register; reg is an offset from the pipe base */
static inline void mdss_mdp_pipe_write(struct mdss_mdp_pipe *pipe,
				       u32 reg, u32 val)
{
	writel_relaxed(val, pipe->base + reg);
}

/* Read a pipe (SSPP) register; reg is an offset from the pipe base */
static inline u32 mdss_mdp_pipe_read(struct mdss_mdp_pipe *pipe, u32 reg)
{
	return readl_relaxed(pipe->base + reg);
}

/*
 * Whether this MDP revision tolerates leaving previously-allocated but
 * currently unused SMP blocks assigned to a pipe (only rev 1.3.x does).
 */
static inline bool is_unused_smp_allowed(void)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	switch (MDSS_GET_MAJOR_MINOR(mdata->mdp_rev)) {
	case MDSS_GET_MAJOR_MINOR(MDSS_MDP_HW_REV_103):
		return true;
	default:
		return false;
	}
}

/*
 * Reserve up to n SMP memory blocks for one plane of a pipe.
 * Fixed (statically assigned) blocks count toward the request first;
 * the remainder is taken from the global mmb_alloc_map.
 * Returns the number of blocks the plane will have (fixed + reserved),
 * or 0 if the configuration change is not allowed on this revision.
 */
static u32 mdss_mdp_smp_mmb_reserve(struct mdss_mdp_pipe_smp_map *smp_map,
	size_t n)
{
	u32 i, mmb;
	u32 fixed_cnt = bitmap_weight(smp_map->fixed, SMP_MB_CNT);
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (n <= fixed_cnt)
		return fixed_cnt;
	else
		n -= fixed_cnt;

	i = bitmap_weight(smp_map->allocated, SMP_MB_CNT);

	/*
	 * SMP programming is not double buffered. Fail the request,
	 * that calls for change in smp configuration (addition/removal
	 * of smp blocks), so that fallback solution happens.
	 */
	if (i != 0 && (((n < i) && !is_unused_smp_allowed()) || (n > i))) {
		pr_debug("Can't change mmb config, num_blks: %d alloc: %d\n",
			n, i);
		return 0;
	}

	/*
	 * Clear previous SMP reservations and reserve according to the
	 * latest configuration
	 */
	mdss_mdp_smp_mmb_free(smp_map->reserved, false);

	/* Reserve mmb blocks*/
	for (; i < n; i++) {
		if (bitmap_full(mdata->mmb_alloc_map, SMP_MB_CNT))
			break;

		mmb = find_first_zero_bit(mdata->mmb_alloc_map, SMP_MB_CNT);
		set_bit(mmb, smp_map->reserved);
		set_bit(mmb, mdata->mmb_alloc_map);
	}

	return i + fixed_cnt;
}

/*
 * Program the hardware SMP allocation registers so that every block set
 * in "smp" belongs to client_id. Each 32-bit SMP_ALLOC register packs
 * three 8-bit client ids (hence the /3 and %3 addressing); write and
 * read allocators are programmed identically. Returns the block count.
 */
static int mdss_mdp_smp_mmb_set(int client_id, unsigned long *smp)
{
	u32 mmb, off, data, s;
	int cnt = 0;
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	for_each_set_bit(mmb, smp, SMP_MB_CNT) {
		off = (mmb / 3) * 4;
		s = (mmb % 3) * 8;
		data = readl_relaxed(mdata->mdp_base +
			MDSS_MDP_REG_SMP_ALLOC_W0 + off);
		data &= ~(0xFF << s);
		data |= client_id << s;
		writel_relaxed(data, mdata->mdp_base +
			MDSS_MDP_REG_SMP_ALLOC_W0 + off);
		writel_relaxed(data, mdata->mdp_base +
			MDSS_MDP_REG_SMP_ALLOC_R0 + off);
		cnt++;
	}
	return cnt;
}

/* Merge the "extra" bitmap into "smp" and clear "extra" */
static void mdss_mdp_smp_mmb_amend(unsigned long *smp, unsigned long *extra)
{
	bitmap_or(smp, smp, extra, SMP_MB_CNT);
	bitmap_zero(extra, SMP_MB_CNT);
}

/*
 * Return the blocks in "smp" to the global pool; if "write" is set,
 * also clear their hardware client assignment (client id 0).
 */
static void mdss_mdp_smp_mmb_free(unsigned long *smp, bool write)
{
	struct mdss_data_type *mdata = mdss_mdp_get_mdata();

	if (!bitmap_empty(smp, SMP_MB_CNT)) {
		if (write)
			mdss_mdp_smp_mmb_set(0, smp);
		bitmap_andnot(mdata->mmb_alloc_map, mdata->mmb_alloc_map,
			smp, SMP_MB_CNT);
		bitmap_zero(smp, SMP_MB_CNT);
	}
}
/** * @mdss_mdp_smp_get_size - get allocated smp size for a pipe * @pipe: pointer to a pipe * * Function counts number of blocks that are currently allocated for a * pipe, then smp buffer size is number of blocks multiplied by block * size. */ u32 mdss_mdp_smp_get_size(struct mdss_mdp_pipe *pipe) { int i, mb_cnt = 0; for (i = 0; i < MAX_PLANES; i++) { mb_cnt += bitmap_weight(pipe->smp_map[i].allocated, SMP_MB_CNT); mb_cnt += bitmap_weight(pipe->smp_map[i].fixed, SMP_MB_CNT); } return mb_cnt * SMP_MB_SIZE; } static void mdss_mdp_smp_set_wm_levels(struct mdss_mdp_pipe *pipe, int mb_cnt) { u32 useable_space, val, wm[3]; useable_space = mb_cnt * SMP_MB_SIZE; /* * when source format is macrotile then useable space within total * allocated SMP space is limited to src_w * bpp * nlines. Unlike * linear format, any extra space left over is not filled. */ if (pipe->src_fmt->tile) { useable_space = pipe->src.w * pipe->src_fmt->bpp; } else if (pipe->flags & MDP_FLIP_LR) { /* * when doing hflip, one line is reserved to be consumed down * the pipeline. This line will always be marked as full even * if it doesn't have any data. In order to generate proper * priority levels ignore this region while setting up * watermark levels */ u8 bpp = pipe->src_fmt->is_yuv ? 
1 : pipe->src_fmt->bpp; useable_space -= (pipe->src.w * bpp); } if (pipe->src_fmt->tile) { val = useable_space / SMP_MB_ENTRY_SIZE; wm[0] = (val * 5) / 8; wm[1] = (val * 6) / 8; wm[2] = (val * 7) / 8; } else { /* 1/4 of SMP pool that is being fetched */ val = (useable_space / SMP_MB_ENTRY_SIZE) >> 2; wm[0] = val; wm[1] = wm[0] + val; wm[2] = wm[1] + val; } pr_debug("pnum=%d useable_space=%u watermarks %u,%u,%u\n", pipe->num, useable_space, wm[0], wm[1], wm[2]); mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_REQPRIO_FIFO_WM_0, wm[0]); mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_REQPRIO_FIFO_WM_1, wm[1]); mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_REQPRIO_FIFO_WM_2, wm[2]); } static void mdss_mdp_smp_free(struct mdss_mdp_pipe *pipe) { int i; mutex_lock(&mdss_mdp_smp_lock); for (i = 0; i < MAX_PLANES; i++) { mdss_mdp_smp_mmb_free(pipe->smp_map[i].reserved, false); mdss_mdp_smp_mmb_free(pipe->smp_map[i].allocated, true); } mutex_unlock(&mdss_mdp_smp_lock); } void mdss_mdp_smp_unreserve(struct mdss_mdp_pipe *pipe) { int i; mutex_lock(&mdss_mdp_smp_lock); for (i = 0; i < MAX_PLANES; i++) mdss_mdp_smp_mmb_free(pipe->smp_map[i].reserved, false); mutex_unlock(&mdss_mdp_smp_lock); } int mdss_mdp_smp_reserve(struct mdss_mdp_pipe *pipe) { struct mdss_data_type *mdata = mdss_mdp_get_mdata(); u32 num_blks = 0, reserved = 0; struct mdss_mdp_plane_sizes ps; int i; int rc = 0, rot_mode = 0, wb_mixer = 0; u32 nlines, format, seg_w; u16 width; width = pipe->src.w >> pipe->horz_deci; if (pipe->bwc_mode) { rc = mdss_mdp_get_rau_strides(pipe->src.w, pipe->src.h, pipe->src_fmt, &ps); if (rc) return rc; /* * Override fetch strides with SMP buffer size for both the * planes. 
BWC line buffer needs to be divided into 16 * segments and every segment is aligned to format * specific RAU size */ seg_w = DIV_ROUND_UP(pipe->src.w, 16); if (pipe->src_fmt->fetch_planes == MDSS_MDP_PLANE_INTERLEAVED) { ps.ystride[0] = ALIGN(seg_w, 32) * 16 * ps.rau_h[0] * pipe->src_fmt->bpp; ps.ystride[1] = 0; } else { u32 bwc_width = ALIGN(seg_w, 64) * 16; ps.ystride[0] = bwc_width * ps.rau_h[0]; ps.ystride[1] = bwc_width * ps.rau_h[1]; /* * Since chroma for H1V2 is not subsampled it needs * to be accounted for with bpp factor */ if (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_H1V2) ps.ystride[1] *= 2; } pr_debug("BWC SMP strides ystride0=%x ystride1=%x\n", ps.ystride[0], ps.ystride[1]); } else { format = pipe->src_fmt->format; /* * when decimation block is present, all chroma planes * are fetched on a single SMP plane for chroma pixels */ if (mdata->has_decimation) { switch (pipe->src_fmt->chroma_sample) { case MDSS_MDP_CHROMA_H2V1: format = MDP_Y_CRCB_H2V1; break; case MDSS_MDP_CHROMA_420: format = MDP_Y_CBCR_H2V2; break; default: break; } } rc = mdss_mdp_get_plane_sizes(format, width, pipe->src.h, &ps, 0); if (rc) return rc; if (pipe->mixer_left && pipe->mixer_left->rotator_mode) { rot_mode = 1; } else if (pipe->mixer_left && (ps.num_planes == 1)) { ps.ystride[0] = MAX_BPP * max(pipe->mixer_left->width, width); } else if (mdata->has_decimation) { /* * To avoid quailty loss, MDP does one less decimation * on chroma components if they are subsampled. * Account for this to have enough SMPs for latency */ switch (pipe->src_fmt->chroma_sample) { case MDSS_MDP_CHROMA_H2V1: case MDSS_MDP_CHROMA_420: ps.ystride[1] <<= 1; break; } } } if (pipe->src_fmt->tile) nlines = 8; else nlines = pipe->bwc_mode ? 
1 : 2; if (pipe->mixer_left->type == MDSS_MDP_MIXER_TYPE_WRITEBACK) wb_mixer = 1; mutex_lock(&mdss_mdp_smp_lock); if (!is_unused_smp_allowed()) { for (i = (MAX_PLANES - 1); i >= ps.num_planes; i--) { if (bitmap_weight(pipe->smp_map[i].allocated, SMP_MB_CNT)) { pr_debug("unsed mmb for pipe%d plane%d not allowed\n", pipe->num, i); mutex_unlock(&mdss_mdp_smp_lock); return -EAGAIN; } } } for (i = 0; i < ps.num_planes; i++) { if (rot_mode || wb_mixer) { num_blks = 1; } else { num_blks = DIV_ROUND_UP(ps.ystride[i] * nlines, SMP_MB_SIZE); if (mdata->mdp_rev == MDSS_MDP_HW_REV_100) num_blks = roundup_pow_of_two(num_blks); if (mdata->smp_mb_per_pipe && (num_blks > mdata->smp_mb_per_pipe) && !(pipe->flags & MDP_FLIP_LR)) num_blks = mdata->smp_mb_per_pipe; } pr_debug("reserving %d mmb for pnum=%d plane=%d\n", num_blks, pipe->num, i); reserved = mdss_mdp_smp_mmb_reserve(&pipe->smp_map[i], num_blks); if (reserved < num_blks) break; } if (reserved < num_blks) { pr_debug("insufficient MMB blocks. pnum:%d\n", pipe->num); for (; i >= 0; i--) mdss_mdp_smp_mmb_free(pipe->smp_map[i].reserved, false); rc = -ENOBUFS; } mutex_unlock(&mdss_mdp_smp_lock); return rc; } /* * mdss_mdp_smp_alloc() -- set smp mmb and and wm levels for a staged pipe * @pipe: pointer to a pipe * * Function amends reserved smp mmbs to allocated bitmap and ties respective * mmbs to their pipe fetch_ids. Based on the number of total allocated mmbs * for a staged pipe, it also sets the watermark levels (wm). * * This function will be called on every commit where pipe params might not * have changed. In such cases, we need to ensure that wm levels are not * wiped out. Also in some rare situations hw might have reset and wiped out * smp mmb programming but new smp reservation is not done. In such cases we * need to ensure that for a staged pipes, mmbs are set properly based on * allocated bitmap. 
*/ static int mdss_mdp_smp_alloc(struct mdss_mdp_pipe *pipe) { int i; int cnt = 0; mutex_lock(&mdss_mdp_smp_lock); for (i = 0; i < MAX_PLANES; i++) { cnt += bitmap_weight(pipe->smp_map[i].fixed, SMP_MB_CNT); if (bitmap_empty(pipe->smp_map[i].reserved, SMP_MB_CNT)) { cnt += mdss_mdp_smp_mmb_set(pipe->ftch_id + i, pipe->smp_map[i].allocated); continue; } mdss_mdp_smp_mmb_amend(pipe->smp_map[i].allocated, pipe->smp_map[i].reserved); cnt += mdss_mdp_smp_mmb_set(pipe->ftch_id + i, pipe->smp_map[i].allocated); } mdss_mdp_smp_set_wm_levels(pipe, cnt); mutex_unlock(&mdss_mdp_smp_lock); return 0; } void mdss_mdp_smp_release(struct mdss_mdp_pipe *pipe) { mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false); mdss_mdp_smp_free(pipe); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false); } int mdss_mdp_smp_setup(struct mdss_data_type *mdata, u32 cnt, u32 size) { if (!mdata) return -EINVAL; mdata->smp_mb_cnt = cnt; mdata->smp_mb_size = size; return 0; } /** * mdss_mdp_smp_handoff() - Handoff SMP MMBs in use by staged pipes * @mdata: pointer to the global mdss data structure. * * Iterate through the list of all SMP MMBs and check to see if any * of them are assigned to a pipe being marked as being handed-off. * If so, update the corresponding software allocation map to reflect * this. * * This function would typically be called during MDP probe for the case * when certain pipes might be programmed in the bootloader to display * the splash screen. */ int mdss_mdp_smp_handoff(struct mdss_data_type *mdata) { int rc = 0; int i, client_id, prev_id = 0; u32 off, s, data; struct mdss_mdp_pipe *pipe = NULL; /* * figure out what SMP MMBs are allocated for each of the pipes * that need to be handed off. 
*/ for (i = 0; i < SMP_MB_CNT; i++) { off = (i / 3) * 4; s = (i % 3) * 8; data = readl_relaxed(mdata->mdp_base + MDSS_MDP_REG_SMP_ALLOC_W0 + off); client_id = (data >> s) & 0xFF; if (test_bit(i, mdata->mmb_alloc_map)) { /* * Certain pipes may have a dedicated set of * SMP MMBs statically allocated to them. In * such cases, we do not need to do anything * here. */ pr_debug("smp mmb %d already assigned to pipe %d (client_id %d)" , i, pipe->num, client_id); continue; } if (client_id) { if (client_id != prev_id) { pipe = mdss_mdp_pipe_search_by_client_id(mdata, client_id); prev_id = client_id; } if (!pipe) { pr_warn("Invalid client id %d for SMP MMB %d\n", client_id, i); continue; } if (!pipe->is_handed_off) { pr_warn("SMP MMB %d assigned to a pipe not marked for handoff (client id %d)" , i, client_id); continue; } /* * Assume that the source format only has * one plane */ pr_debug("Assigning smp mmb %d to pipe %d (client_id %d)\n" , i, pipe->num, client_id); set_bit(i, pipe->smp_map[0].allocated); set_bit(i, mdata->mmb_alloc_map); } } return rc; } void mdss_mdp_pipe_unmap(struct mdss_mdp_pipe *pipe) { if (kref_put_mutex(&pipe->kref, mdss_mdp_pipe_free, &mdss_mdp_sspp_lock)) { WARN(1, "Unexpected free pipe during unmap"); mutex_unlock(&mdss_mdp_sspp_lock); } } int mdss_mdp_pipe_map(struct mdss_mdp_pipe *pipe) { if (!kref_get_unless_zero(&pipe->kref)) return -EINVAL; return 0; } static struct mdss_mdp_pipe *mdss_mdp_pipe_init(struct mdss_mdp_mixer *mixer, u32 type, u32 off, struct mdss_mdp_pipe *left_blend_pipe) { struct mdss_mdp_pipe *pipe = NULL; struct mdss_data_type *mdata; struct mdss_mdp_pipe *pipe_pool = NULL; u32 npipes; bool pipe_share = false; u32 i, reg_val, force_off_mask; if (!mixer || !mixer->ctl || !mixer->ctl->mdata) return NULL; mdata = mixer->ctl->mdata; switch (type) { case MDSS_MDP_PIPE_TYPE_VIG: pipe_pool = mdata->vig_pipes; npipes = mdata->nvig_pipes; break; case MDSS_MDP_PIPE_TYPE_RGB: pipe_pool = mdata->rgb_pipes; npipes = mdata->nrgb_pipes; 
break; case MDSS_MDP_PIPE_TYPE_DMA: pipe_pool = mdata->dma_pipes; npipes = mdata->ndma_pipes; if ((mdata->wfd_mode == MDSS_MDP_WFD_SHARED) && (mixer->type == MDSS_MDP_MIXER_TYPE_WRITEBACK)) pipe_share = true; break; default: npipes = 0; pr_err("invalid pipe type %d\n", type); break; } for (i = off; i < npipes; i++) { pipe = pipe_pool + i; if (atomic_read(&pipe->kref.refcount) == 0) { pipe->mixer_left = mixer; break; } pipe = NULL; } if (left_blend_pipe && pipe && pipe->priority <= left_blend_pipe->priority) { pr_debug("priority limitation. l_pipe_prio:%d r_pipe_prio:%d\n", left_blend_pipe->priority, pipe->priority); return NULL; } if (pipe && mdss_mdp_pipe_fetch_halt(pipe)) { pr_err("%d failed because pipe is in bad state\n", pipe->num); return NULL; } if (pipe && mdss_mdp_pipe_is_sw_reset_available(mdata)) { force_off_mask = BIT(pipe->clk_ctrl.bit_off + CLK_FORCE_OFF_OFFSET); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false); mutex_lock(&mdata->reg_lock); reg_val = readl_relaxed(mdata->mdp_base + pipe->clk_ctrl.reg_off); if (reg_val & force_off_mask) { reg_val &= ~force_off_mask; writel_relaxed(reg_val, mdata->mdp_base + pipe->clk_ctrl.reg_off); } mutex_unlock(&mdata->reg_lock); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false); } if (pipe) { pr_debug("type=%x pnum=%d\n", pipe->type, pipe->num); mutex_init(&pipe->pp_res.hist.hist_mutex); spin_lock_init(&pipe->pp_res.hist.hist_lock); kref_init(&pipe->kref); } else if (pipe_share) { /* * when there is no dedicated wfd blk, DMA pipe can be * shared as long as its attached to a writeback mixer */ pipe = mdata->dma_pipes + mixer->num; if (pipe->mixer_left->type != MDSS_MDP_MIXER_TYPE_WRITEBACK) return NULL; kref_get(&pipe->kref); pr_debug("pipe sharing for pipe=%d\n", pipe->num); } else { pr_err("no %d type pipes available\n", type); } return pipe; } struct mdss_mdp_pipe *mdss_mdp_pipe_alloc_dma(struct mdss_mdp_mixer *mixer) { struct mdss_mdp_pipe *pipe = NULL; struct mdss_data_type *mdata; mutex_lock(&mdss_mdp_sspp_lock); 
mdata = mixer->ctl->mdata; pipe = mdss_mdp_pipe_init(mixer, MDSS_MDP_PIPE_TYPE_DMA, mixer->num, NULL); if (!pipe) { pr_err("DMA pipes not available for mixer=%d\n", mixer->num); } else if (pipe != &mdata->dma_pipes[mixer->num]) { pr_err("Requested DMA pnum=%d not available\n", mdata->dma_pipes[mixer->num].num); kref_put(&pipe->kref, mdss_mdp_pipe_free); pipe = NULL; } else { pipe->mixer_left = mixer; } mutex_unlock(&mdss_mdp_sspp_lock); return pipe; } struct mdss_mdp_pipe *mdss_mdp_pipe_alloc(struct mdss_mdp_mixer *mixer, u32 type, struct mdss_mdp_pipe *left_blend_pipe) { struct mdss_mdp_pipe *pipe; mutex_lock(&mdss_mdp_sspp_lock); pipe = mdss_mdp_pipe_init(mixer, type, 0, left_blend_pipe); mutex_unlock(&mdss_mdp_sspp_lock); return pipe; } struct mdss_mdp_pipe *mdss_mdp_pipe_get(struct mdss_data_type *mdata, u32 ndx) { struct mdss_mdp_pipe *pipe = NULL; if (!ndx) return ERR_PTR(-EINVAL); mutex_lock(&mdss_mdp_sspp_lock); pipe = mdss_mdp_pipe_search(mdata, ndx); if (!pipe) { pipe = ERR_PTR(-EINVAL); goto error; } if (mdss_mdp_pipe_map(pipe)) pipe = ERR_PTR(-EACCES); error: mutex_unlock(&mdss_mdp_sspp_lock); return pipe; } static struct mdss_mdp_pipe *mdss_mdp_pipe_search_by_client_id( struct mdss_data_type *mdata, int client_id) { u32 i; for (i = 0; i < mdata->nrgb_pipes; i++) { if (mdata->rgb_pipes[i].ftch_id == client_id) return &mdata->rgb_pipes[i]; } for (i = 0; i < mdata->nvig_pipes; i++) { if (mdata->vig_pipes[i].ftch_id == client_id) return &mdata->vig_pipes[i]; } for (i = 0; i < mdata->ndma_pipes; i++) { if (mdata->dma_pipes[i].ftch_id == client_id) return &mdata->dma_pipes[i]; } return NULL; } struct mdss_mdp_pipe *mdss_mdp_pipe_search(struct mdss_data_type *mdata, u32 ndx) { u32 i; for (i = 0; i < mdata->nvig_pipes; i++) { if (mdata->vig_pipes[i].ndx == ndx) return &mdata->vig_pipes[i]; } for (i = 0; i < mdata->nrgb_pipes; i++) { if (mdata->rgb_pipes[i].ndx == ndx) return &mdata->rgb_pipes[i]; } for (i = 0; i < mdata->ndma_pipes; i++) { if 
(mdata->dma_pipes[i].ndx == ndx) return &mdata->dma_pipes[i]; } return NULL; } static void mdss_mdp_pipe_free(struct kref *kref) { struct mdss_mdp_pipe *pipe; pipe = container_of(kref, struct mdss_mdp_pipe, kref); pr_debug("ndx=%x pnum=%d\n", pipe->ndx, pipe->num); if (pipe->play_cnt) { mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false); mdss_mdp_pipe_fetch_halt(pipe); mdss_mdp_pipe_sspp_term(pipe); mdss_mdp_smp_free(pipe); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false); } else { mdss_mdp_smp_unreserve(pipe); } pipe->flags = 0; pipe->is_right_blend = false; pipe->src_split_req = false; pipe->bwc_mode = 0; pipe->mfd = NULL; pipe->mixer_left = pipe->mixer_right = NULL; memset(&pipe->scale, 0, sizeof(struct mdp_scale_data)); } static bool mdss_mdp_check_pipe_in_use(struct mdss_mdp_pipe *pipe) { int i; u32 mixercfg, stage_off_mask = BIT(0) | BIT(1) | BIT(2); bool in_use = false; struct mdss_data_type *mdata = mdss_mdp_get_mdata(); struct mdss_mdp_ctl *ctl; struct mdss_mdp_mixer *mixer; if (pipe->num == MDSS_MDP_SSPP_VIG3 || pipe->num == MDSS_MDP_SSPP_RGB3) stage_off_mask = stage_off_mask << ((3 * pipe->num) + 2); else stage_off_mask = stage_off_mask << (3 * pipe->num); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false); for (i = 0; i < mdata->nctl; i++) { ctl = mdata->ctl_off + i; if (!ctl || !ctl->ref_cnt) continue; mixer = ctl->mixer_left; if (mixer && mixer->rotator_mode) continue; mixercfg = mdss_mdp_get_mixercfg(mixer); if ((mixercfg & stage_off_mask) && ctl->play_cnt) { pr_err("BUG. pipe%d is active. 
mcfg:0x%x mask:0x%x\n", pipe->num, mixercfg, stage_off_mask); BUG(); } } mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false); return in_use; } static int mdss_mdp_is_pipe_idle(struct mdss_mdp_pipe *pipe, bool ignore_force_on) { u32 reg_val; u32 vbif_idle_mask, forced_on_mask, clk_status_idle_mask; bool is_idle = false, is_forced_on; struct mdss_data_type *mdata = mdss_mdp_get_mdata(); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false); forced_on_mask = BIT(pipe->clk_ctrl.bit_off + CLK_FORCE_ON_OFFSET); reg_val = readl_relaxed(mdata->mdp_base + pipe->clk_ctrl.reg_off); is_forced_on = (reg_val & forced_on_mask) ? true : false; pr_debug("pipe#:%d clk_ctrl: 0x%x forced_on_mask: 0x%x\n", pipe->num, reg_val, forced_on_mask); /* if forced on then no need to check status */ if (!is_forced_on) { clk_status_idle_mask = BIT(pipe->clk_status.bit_off + CLK_STATUS_OFFSET); reg_val = readl_relaxed(mdata->mdp_base + pipe->clk_status.reg_off); if (reg_val & clk_status_idle_mask) is_idle = false; pr_debug("pipe#:%d clk_status:0x%x clk_status_idle_mask:0x%x\n", pipe->num, reg_val, clk_status_idle_mask); } if (!ignore_force_on && (is_forced_on || !is_idle)) goto exit; vbif_idle_mask = BIT(pipe->xin_id + 16); reg_val = readl_relaxed(mdata->vbif_base + MMSS_VBIF_XIN_HALT_CTRL1); if (reg_val & vbif_idle_mask) is_idle = true; pr_debug("pipe#:%d XIN_HALT_CTRL1: 0x%x\n", pipe->num, reg_val); exit: mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false); return is_idle; } /** * mdss_mdp_pipe_fetch_halt() - Halt VBIF client corresponding to specified pipe * @pipe: pointer to the pipe data structure which needs to be halted. * * Check if VBIF client corresponding to specified pipe is idle or not. If not * send a halt request for the client in question and wait for it be idle. * * This function would typically be called after pipe is unstaged or before it * is initialized. On success it should be assumed that pipe is in idle state * and would not fetch any more data. 
This function cannot be called from * interrupt context. */ int mdss_mdp_pipe_fetch_halt(struct mdss_mdp_pipe *pipe) { bool is_idle, in_use = false; int rc = 0; u32 reg_val, idle_mask, status; struct mdss_data_type *mdata = mdss_mdp_get_mdata(); bool sw_reset_avail = mdss_mdp_pipe_is_sw_reset_available(mdata); u32 sw_reset_off = pipe->sw_reset.reg_off; u32 clk_ctrl_off = pipe->clk_ctrl.reg_off; is_idle = mdss_mdp_is_pipe_idle(pipe, true); if (!is_idle) in_use = mdss_mdp_check_pipe_in_use(pipe); if (!is_idle && !in_use) { pr_err("%pS: pipe%d is not idle. xin_id=%d\n", __builtin_return_address(0), pipe->num, pipe->xin_id); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false); mutex_lock(&mdata->reg_lock); idle_mask = BIT(pipe->xin_id + 16); reg_val = readl_relaxed(mdata->vbif_base + MMSS_VBIF_XIN_HALT_CTRL0); writel_relaxed(reg_val | BIT(pipe->xin_id), mdata->vbif_base + MMSS_VBIF_XIN_HALT_CTRL0); if (sw_reset_avail) { reg_val = readl_relaxed(mdata->mdp_base + sw_reset_off); writel_relaxed(reg_val | BIT(pipe->sw_reset.bit_off), mdata->mdp_base + sw_reset_off); wmb(); } mutex_unlock(&mdata->reg_lock); rc = readl_poll_timeout(mdata->vbif_base + MMSS_VBIF_XIN_HALT_CTRL1, status, (status & idle_mask), 1000, PIPE_HALT_TIMEOUT_US); if (rc == -ETIMEDOUT) pr_err("VBIF client %d not halting. 
TIMEDOUT.\n", pipe->xin_id); else pr_debug("VBIF client %d is halted\n", pipe->xin_id); mutex_lock(&mdata->reg_lock); reg_val = readl_relaxed(mdata->vbif_base + MMSS_VBIF_XIN_HALT_CTRL0); writel_relaxed(reg_val & ~BIT(pipe->xin_id), mdata->vbif_base + MMSS_VBIF_XIN_HALT_CTRL0); if (sw_reset_avail) { reg_val = readl_relaxed(mdata->mdp_base + sw_reset_off); writel_relaxed(reg_val & ~BIT(pipe->sw_reset.bit_off), mdata->mdp_base + sw_reset_off); wmb(); reg_val = readl_relaxed(mdata->mdp_base + clk_ctrl_off); reg_val |= BIT(pipe->clk_ctrl.bit_off + CLK_FORCE_OFF_OFFSET); writel_relaxed(reg_val, mdata->mdp_base + clk_ctrl_off); } mutex_unlock(&mdata->reg_lock); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false); } return rc; } int mdss_mdp_pipe_destroy(struct mdss_mdp_pipe *pipe) { if (!kref_put_mutex(&pipe->kref, mdss_mdp_pipe_free, &mdss_mdp_sspp_lock)) { pr_err("unable to free pipe %d while still in use\n", pipe->num); return -EBUSY; } mutex_unlock(&mdss_mdp_sspp_lock); return 0; } /** * mdss_mdp_pipe_handoff() - Handoff staged pipes during bootup * @pipe: pointer to the pipe to be handed-off * * Populate the software structures for the pipe based on the current * configuration of the hardware pipe by the reading the appropriate MDP * registers. * * This function would typically be called during MDP probe for the case * when certain pipes might be programmed in the bootloader to display * the splash screen. */ int mdss_mdp_pipe_handoff(struct mdss_mdp_pipe *pipe) { int rc = 0; u32 src_fmt, reg = 0, bpp = 0; /* * todo: for now, only reading pipe src and dest size details * from the registers. This is needed for appropriately * calculating perf metrics for the handed off pipes. * We may need to parse some more details at a later date. 
*/ reg = mdss_mdp_pipe_read(pipe, MDSS_MDP_REG_SSPP_SRC_SIZE); pipe->src.h = reg >> 16; pipe->src.w = reg & 0xFFFF; reg = mdss_mdp_pipe_read(pipe, MDSS_MDP_REG_SSPP_OUT_SIZE); pipe->dst.h = reg >> 16; pipe->dst.w = reg & 0xFFFF; /* Assume that the source format is RGB */ reg = mdss_mdp_pipe_read(pipe, MDSS_MDP_REG_SSPP_SRC_FORMAT); bpp = ((reg >> 9) & 0x3) + 1; switch (bpp) { case 4: src_fmt = MDP_RGBA_8888; break; case 3: src_fmt = MDP_RGB_888; break; case 2: src_fmt = MDP_RGB_565; break; default: pr_err("Invalid bpp=%d found\n", bpp); rc = -EINVAL; goto error; } pipe->src_fmt = mdss_mdp_get_format_params(src_fmt); pr_debug("Pipe settings: src.h=%d src.w=%d dst.h=%d dst.w=%d bpp=%d\n" , pipe->src.h, pipe->src.w, pipe->dst.h, pipe->dst.w, pipe->src_fmt->bpp); pipe->is_handed_off = true; pipe->play_cnt = 1; kref_init(&pipe->kref); error: return rc; } static int mdss_mdp_image_setup(struct mdss_mdp_pipe *pipe, struct mdss_mdp_data *data) { u32 img_size, src_size, src_xy, dst_size, dst_xy, ystride0, ystride1; u32 width, height; u32 decimation, reg_data; u32 tmp_src_xy, tmp_src_size; int ret = 0; struct mdss_data_type *mdata = mdss_mdp_get_mdata(); struct mdss_rect sci, dst, src; pr_debug("ctl: %d pnum=%d wh=%dx%d src={%d,%d,%d,%d} dst={%d,%d,%d,%d}\n", pipe->mixer_left->ctl->num, pipe->num, pipe->img_width, pipe->img_height, pipe->src.x, pipe->src.y, pipe->src.w, pipe->src.h, pipe->dst.x, pipe->dst.y, pipe->dst.w, pipe->dst.h); width = pipe->img_width; height = pipe->img_height; mdss_mdp_get_plane_sizes(pipe->src_fmt->format, width, height, &pipe->src_planes, pipe->bwc_mode); if (data != NULL) { ret = mdss_mdp_data_check(data, &pipe->src_planes); if (ret) return ret; } if ((pipe->flags & MDP_DEINTERLACE) && !(pipe->flags & MDP_SOURCE_ROTATED_90)) { int i; for (i = 0; i < pipe->src_planes.num_planes; i++) pipe->src_planes.ystride[i] *= 2; width *= 2; height /= 2; } decimation = ((1 << pipe->horz_deci) - 1) << 8; decimation |= ((1 << pipe->vert_deci) - 1); if 
(decimation) pr_debug("Image decimation h=%d v=%d\n", pipe->horz_deci, pipe->vert_deci); sci = pipe->mixer_left->ctl->roi; dst = pipe->dst; src = pipe->src; if ((pipe->mixer_left->type != MDSS_MDP_MIXER_TYPE_WRITEBACK) && !pipe->mixer_left->ctl->is_video_mode && !pipe->src_split_req) { mdss_mdp_crop_rect(&src, &dst, &sci); if (pipe->flags & MDP_FLIP_LR) { src.x = pipe->src.x + (pipe->src.x + pipe->src.w) - (src.x + src.w); } if (pipe->flags & MDP_FLIP_UD) { src.y = pipe->src.y + (pipe->src.y + pipe->src.h) - (src.y + src.h); } } src_size = (src.h << 16) | src.w; src_xy = (src.y << 16) | src.x; dst_size = (dst.h << 16) | dst.w; dst_xy = (dst.y << 16) | dst.x; ystride0 = (pipe->src_planes.ystride[0]) | (pipe->src_planes.ystride[1] << 16); ystride1 = (pipe->src_planes.ystride[2]) | (pipe->src_planes.ystride[3] << 16); /* * Software overfetch is used when scalar pixel extension is * not enabled */ if (pipe->overfetch_disable && !pipe->scale.enable_pxl_ext) { if (pipe->overfetch_disable & OVERFETCH_DISABLE_BOTTOM) { height = pipe->src.h; if (!(pipe->overfetch_disable & OVERFETCH_DISABLE_TOP)) height += pipe->src.y; } if (pipe->overfetch_disable & OVERFETCH_DISABLE_RIGHT) { width = pipe->src.w; if (!(pipe->overfetch_disable & OVERFETCH_DISABLE_LEFT)) width += pipe->src.x; } if (pipe->overfetch_disable & OVERFETCH_DISABLE_LEFT) src_xy &= ~0xFFFF; if (pipe->overfetch_disable & OVERFETCH_DISABLE_TOP) src_xy &= ~(0xFFFF << 16); pr_debug("overfetch w=%d/%d h=%d/%d src_xy=0x%08x\n", width, pipe->img_width, height, pipe->img_height, src_xy); } img_size = (height << 16) | width; if (IS_MDSS_MAJOR_MINOR_SAME(mdata->mdp_rev, MDSS_MDP_HW_REV_103) && pipe->bwc_mode) { /* check source dimensions change */ tmp_src_size = mdss_mdp_pipe_read(pipe, MDSS_MDP_REG_SSPP_SRC_SIZE); tmp_src_xy = mdss_mdp_pipe_read(pipe, MDSS_MDP_REG_SSPP_SRC_XY); if (src_xy != tmp_src_xy || tmp_src_size != src_size) { reg_data = readl_relaxed(mdata->mdp_base + AHB_CLK_OFFSET); reg_data |= BIT(28); 
writel_relaxed(reg_data, mdata->mdp_base + AHB_CLK_OFFSET); } } mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_IMG_SIZE, img_size); mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_SIZE, src_size); mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_XY, src_xy); mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_OUT_SIZE, dst_size); mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_OUT_XY, dst_xy); mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_YSTRIDE0, ystride0); mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_YSTRIDE1, ystride1); mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_DECIMATION_CONFIG, decimation); return 0; } static int mdss_mdp_format_setup(struct mdss_mdp_pipe *pipe) { struct mdss_mdp_format_params *fmt; u32 chroma_samp, unpack, src_format; u32 secure = 0; u32 opmode; struct mdss_data_type *mdata = mdss_mdp_get_mdata(); fmt = pipe->src_fmt; if (pipe->flags & MDP_SECURE_OVERLAY_SESSION) secure = 0xF; opmode = pipe->bwc_mode; if (pipe->flags & MDP_FLIP_LR) opmode |= MDSS_MDP_OP_FLIP_LR; if (pipe->flags & MDP_FLIP_UD) opmode |= MDSS_MDP_OP_FLIP_UD; pr_debug("pnum=%d format=%d opmode=%x\n", pipe->num, fmt->format, opmode); chroma_samp = fmt->chroma_sample; if (pipe->flags & MDP_SOURCE_ROTATED_90) { if (chroma_samp == MDSS_MDP_CHROMA_H2V1) chroma_samp = MDSS_MDP_CHROMA_H1V2; else if (chroma_samp == MDSS_MDP_CHROMA_H1V2) chroma_samp = MDSS_MDP_CHROMA_H2V1; } src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) | (fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) | (fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0); if (fmt->tile) src_format |= BIT(30); if (pipe->flags & MDP_ROT_90) src_format |= BIT(11); /* ROT90 */ if (fmt->alpha_enable && fmt->fetch_planes != MDSS_MDP_PLANE_INTERLEAVED) src_format |= BIT(8); /* SRCC3_EN */ unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) | (fmt->element[1] << 8) | (fmt->element[0] << 0); src_format |= ((fmt->unpack_count - 1) << 12) | (fmt->unpack_tight << 17) | (fmt->unpack_align_msb << 18) | ((fmt->bpp - 1) << 9); 
mdss_mdp_pipe_sspp_setup(pipe, &opmode); if (fmt->tile && mdata->highest_bank_bit) { mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_FETCH_CONFIG, MDSS_MDP_FETCH_CONFIG_RESET_VALUE | mdata->highest_bank_bit << 18); } if (pipe->scale.enable_pxl_ext) opmode |= (1 << 31); mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_FORMAT, src_format); mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN, unpack); mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_OP_MODE, opmode); mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_ADDR_SW_STATUS, secure); return 0; } int mdss_mdp_pipe_addr_setup(struct mdss_data_type *mdata, struct mdss_mdp_pipe *head, u32 *offsets, u32 *ftch_id, u32 *xin_id, u32 type, u32 num_base, u32 len, u8 priority_base) { u32 i; if (!head || !mdata) { pr_err("unable to setup pipe type=%d: invalid input\n", type); return -EINVAL; } for (i = 0; i < len; i++) { head[i].type = type; head[i].ftch_id = ftch_id[i]; head[i].xin_id = xin_id[i]; head[i].num = i + num_base; head[i].ndx = BIT(i + num_base); head[i].priority = i + priority_base; head[i].base = mdata->mdss_base + offsets[i]; pr_info("type:%d ftchid:%d xinid:%d num:%d ndx:0x%x prio:%d\n", head[i].type, head[i].ftch_id, head[i].xin_id, head[i].num, head[i].ndx, head[i].priority); } return 0; } static int mdss_mdp_src_addr_setup(struct mdss_mdp_pipe *pipe, struct mdss_mdp_data *src_data) { struct mdss_data_type *mdata = mdss_mdp_get_mdata(); struct mdss_mdp_data data = *src_data; int ret = 0; pr_debug("pnum=%d\n", pipe->num); data.bwc_enabled = pipe->bwc_mode; ret = mdss_mdp_data_check(&data, &pipe->src_planes); if (ret) return ret; if (pipe->overfetch_disable && !pipe->scale.enable_pxl_ext) { u32 x = 0, y = 0; if (pipe->overfetch_disable & OVERFETCH_DISABLE_LEFT) x = pipe->src.x; if (pipe->overfetch_disable & OVERFETCH_DISABLE_TOP) y = pipe->src.y; mdss_mdp_data_calc_offset(&data, x, y, &pipe->src_planes, pipe->src_fmt); } /* planar format expects YCbCr, swap chroma planes if YCrCb */ if (mdata->mdp_rev < 
MDSS_MDP_HW_REV_102 && (pipe->src_fmt->fetch_planes == MDSS_MDP_PLANE_PLANAR) && (pipe->src_fmt->element[0] == C1_B_Cb)) swap(data.p[1].addr, data.p[2].addr); mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC0_ADDR, data.p[0].addr); mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC1_ADDR, data.p[1].addr); mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC2_ADDR, data.p[2].addr); mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC3_ADDR, data.p[3].addr); /* Flush Sel register only exists in mpq */ if ((mdata->mdp_rev == MDSS_MDP_HW_REV_200) && (pipe->flags & MDP_VPU_PIPE)) mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_VIG_FLUSH_SEL, 0); return 0; } static int mdss_mdp_pipe_solidfill_setup(struct mdss_mdp_pipe *pipe) { int ret; u32 secure, format, unpack; pr_debug("solid fill setup on pnum=%d\n", pipe->num); ret = mdss_mdp_image_setup(pipe, NULL); if (ret) { pr_err("image setup error for pnum=%d\n", pipe->num); return ret; } format = MDSS_MDP_FMT_SOLID_FILL; secure = (pipe->flags & MDP_SECURE_OVERLAY_SESSION ? 
0xF : 0x0); /* support ARGB color format only */ unpack = (C3_ALPHA << 24) | (C2_R_Cr << 16) | (C1_B_Cb << 8) | (C0_G_Y << 0); mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_FORMAT, format); mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_CONSTANT_COLOR, pipe->bg_color); mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_UNPACK_PATTERN, unpack); mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_SRC_ADDR_SW_STATUS, secure); return 0; } int mdss_mdp_pipe_queue_data(struct mdss_mdp_pipe *pipe, struct mdss_mdp_data *src_data) { int ret = 0; struct mdss_mdp_ctl *ctl; u32 params_changed; u32 opmode = 0; struct mdss_data_type *mdata = mdss_mdp_get_mdata(); if (!pipe) { pr_err("pipe not setup properly for queue\n"); return -ENODEV; } if (!pipe->mixer_left || !pipe->mixer_left->ctl) { if (src_data) pr_err("pipe%d mixer not setup properly\n", pipe->num); return -ENODEV; } if (pipe->src_split_req && !mdata->has_src_split) { pr_err("src split can't be requested on mdp:0x%x\n", mdata->mdp_rev); return -EINVAL; } pr_debug("pnum=%x mixer=%d play_cnt=%u\n", pipe->num, pipe->mixer_left->num, pipe->play_cnt); mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON, false); ctl = pipe->mixer_left->ctl; /* * Reprogram the pipe when there is no dedicated wfd blk and * virtual mixer is allocated for the DMA pipe during concurrent * line and block mode operations */ params_changed = (pipe->params_changed) || ((pipe->type == MDSS_MDP_PIPE_TYPE_DMA) && (pipe->mixer_left->type == MDSS_MDP_MIXER_TYPE_WRITEBACK) && (ctl->mdata->mixer_switched)) || ctl->roi_changed; if ((!(pipe->flags & MDP_VPU_PIPE) && (src_data == NULL)) || (pipe->flags & MDP_SOLID_FILL)) { pipe->params_changed = 0; mdss_mdp_pipe_solidfill_setup(pipe); goto update_nobuf; } if (params_changed) { pipe->params_changed = 0; ret = mdss_mdp_pipe_pp_setup(pipe, &opmode); if (ret) { pr_err("pipe pp setup error for pnum=%d\n", pipe->num); goto done; } ret = mdss_mdp_image_setup(pipe, src_data); if (ret) { pr_err("image setup error for pnum=%d\n", pipe->num); 
goto done; } ret = mdss_mdp_format_setup(pipe); if (ret) { pr_err("format %d setup error pnum=%d\n", pipe->src_fmt->format, pipe->num); goto done; } if (pipe->type == MDSS_MDP_PIPE_TYPE_VIG) mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_VIG_OP_MODE, opmode); } if (src_data == NULL) { pr_debug("src_data=%p pipe num=%dx\n", src_data, pipe->num); goto update_nobuf; } mdss_mdp_smp_alloc(pipe); ret = mdss_mdp_src_addr_setup(pipe, src_data); if (ret) { pr_err("addr setup error for pnum=%d\n", pipe->num); goto done; } update_nobuf: if (pipe->src_split_req) { pr_debug("src_split_enabled. pnum:%d\n", pipe->num); mdss_mdp_mixer_pipe_update(pipe, ctl->mixer_left, params_changed); mdss_mdp_mixer_pipe_update(pipe, ctl->mixer_right, params_changed); pipe->mixer_right = ctl->mixer_right; } else { mdss_mdp_mixer_pipe_update(pipe, pipe->mixer_left, params_changed); } pipe->play_cnt++; done: mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF, false); return ret; } int mdss_mdp_pipe_is_staged(struct mdss_mdp_pipe *pipe) { return (pipe == pipe->mixer_left->stage_pipe[pipe->mixer_stage]); } static inline void __mdss_mdp_pipe_program_pixel_extn_helper( struct mdss_mdp_pipe *pipe, u32 plane, u32 off) { u32 src_h = pipe->src.h >> pipe->vert_deci; u32 mask = 0xFF; /* * CB CR plane required pxls need to be accounted * for chroma decimation. 
*/ if (plane == 1) src_h >>= pipe->chroma_sample_v; writel_relaxed(((pipe->scale.right_ftch[plane] & mask) << 24)| ((pipe->scale.right_rpt[plane] & mask) << 16)| ((pipe->scale.left_ftch[plane] & mask) << 8)| (pipe->scale.left_rpt[plane] & mask), pipe->base + MDSS_MDP_REG_SSPP_SW_PIX_EXT_C0_LR + off); writel_relaxed(((pipe->scale.btm_ftch[plane] & mask) << 24)| ((pipe->scale.btm_rpt[plane] & mask) << 16)| ((pipe->scale.top_ftch[plane] & mask) << 8)| (pipe->scale.top_rpt[plane] & mask), pipe->base + MDSS_MDP_REG_SSPP_SW_PIX_EXT_C0_TB + off); mask = 0xFFFF; writel_relaxed((((src_h + pipe->scale.num_ext_pxls_top[plane] + pipe->scale.num_ext_pxls_btm[plane]) & mask) << 16) | ((pipe->scale.roi_w[plane] + pipe->scale.num_ext_pxls_left[plane] + pipe->scale.num_ext_pxls_right[plane]) & mask), pipe->base + MDSS_MDP_REG_SSPP_SW_PIX_EXT_C0_REQ_PIXELS + off); } /** * mdss_mdp_pipe_program_pixel_extn - Program the source pipe's * sw pixel extension * @pipe: Source pipe struct containing pixel extn values * * Function programs the pixel extn values calculated during * scale setup. */ int mdss_mdp_pipe_program_pixel_extn(struct mdss_mdp_pipe *pipe) { /* Y plane pixel extn */ __mdss_mdp_pipe_program_pixel_extn_helper(pipe, 0, 0); /* CB CR plane pixel extn */ __mdss_mdp_pipe_program_pixel_extn_helper(pipe, 1, 16); /* Alpha plane pixel extn */ __mdss_mdp_pipe_program_pixel_extn_helper(pipe, 3, 32); return 0; }
gpl-2.0
giantdisaster/btrfs
net/ipv4/syncookies.c
82
10669
/* * Syncookies implementation for the Linux kernel * * Copyright (C) 1997 Andi Kleen * Based on ideas by D.J.Bernstein and Eric Schenk. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. */ #include <linux/tcp.h> #include <linux/slab.h> #include <linux/random.h> #include <linux/cryptohash.h> #include <linux/kernel.h> #include <linux/export.h> #include <net/tcp.h> #include <net/route.h> /* Timestamps: lowest bits store TCP options */ #define TSBITS 6 #define TSMASK (((__u32)1 << TSBITS) - 1) extern int sysctl_tcp_syncookies; __u32 syncookie_secret[2][16-4+SHA_DIGEST_WORDS]; EXPORT_SYMBOL(syncookie_secret); static __init int init_syncookies(void) { get_random_bytes(syncookie_secret, sizeof(syncookie_secret)); return 0; } __initcall(init_syncookies); #define COOKIEBITS 24 /* Upper bits store count */ #define COOKIEMASK (((__u32)1 << COOKIEBITS) - 1) static DEFINE_PER_CPU(__u32 [16 + 5 + SHA_WORKSPACE_WORDS], ipv4_cookie_scratch); static u32 cookie_hash(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport, u32 count, int c) { __u32 *tmp = __get_cpu_var(ipv4_cookie_scratch); memcpy(tmp + 4, syncookie_secret[c], sizeof(syncookie_secret[c])); tmp[0] = (__force u32)saddr; tmp[1] = (__force u32)daddr; tmp[2] = ((__force u32)sport << 16) + (__force u32)dport; tmp[3] = count; sha_transform(tmp + 16, (__u8 *)tmp, tmp + 16 + 5); return tmp[17]; } /* * when syncookies are in effect and tcp timestamps are enabled we encode * tcp options in the lower bits of the timestamp value that will be * sent in the syn-ack. * Since subsequent timestamps use the normal tcp_time_stamp value, we * must make sure that the resulting initial timestamp is <= tcp_time_stamp. 
*/ __u32 cookie_init_timestamp(struct request_sock *req) { struct inet_request_sock *ireq; u32 ts, ts_now = tcp_time_stamp; u32 options = 0; ireq = inet_rsk(req); options = ireq->wscale_ok ? ireq->snd_wscale : 0xf; options |= ireq->sack_ok << 4; options |= ireq->ecn_ok << 5; ts = ts_now & ~TSMASK; ts |= options; if (ts > ts_now) { ts >>= TSBITS; ts--; ts <<= TSBITS; ts |= options; } return ts; } static __u32 secure_tcp_syn_cookie(__be32 saddr, __be32 daddr, __be16 sport, __be16 dport, __u32 sseq, __u32 count, __u32 data) { /* * Compute the secure sequence number. * The output should be: * HASH(sec1,saddr,sport,daddr,dport,sec1) + sseq + (count * 2^24) * + (HASH(sec2,saddr,sport,daddr,dport,count,sec2) % 2^24). * Where sseq is their sequence number and count increases every * minute by 1. * As an extra hack, we add a small "data" value that encodes the * MSS into the second hash value. */ return (cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq + (count << COOKIEBITS) + ((cookie_hash(saddr, daddr, sport, dport, count, 1) + data) & COOKIEMASK)); } /* * This retrieves the small "data" value from the syncookie. * If the syncookie is bad, the data returned will be out of * range. This must be checked by the caller. * * The count value used to generate the cookie must be within * "maxdiff" if the current (passed-in) "count". The return value * is (__u32)-1 if this test fails. 
*/ static __u32 check_tcp_syn_cookie(__u32 cookie, __be32 saddr, __be32 daddr, __be16 sport, __be16 dport, __u32 sseq, __u32 count, __u32 maxdiff) { __u32 diff; /* Strip away the layers from the cookie */ cookie -= cookie_hash(saddr, daddr, sport, dport, 0, 0) + sseq; /* Cookie is now reduced to (count * 2^24) ^ (hash % 2^24) */ diff = (count - (cookie >> COOKIEBITS)) & ((__u32) - 1 >> COOKIEBITS); if (diff >= maxdiff) return (__u32)-1; return (cookie - cookie_hash(saddr, daddr, sport, dport, count - diff, 1)) & COOKIEMASK; /* Leaving the data behind */ } /* * MSS Values are taken from the 2009 paper * 'Measuring TCP Maximum Segment Size' by S. Alcock and R. Nelson: * - values 1440 to 1460 accounted for 80% of observed mss values * - values outside the 536-1460 range are rare (<0.2%). * * Table must be sorted. */ static __u16 const msstab[] = { 64, 512, 536, 1024, 1440, 1460, 4312, 8960, }; /* * Generate a syncookie. mssp points to the mss, which is returned * rounded down to the value encoded in the cookie. */ __u32 cookie_v4_init_sequence(struct sock *sk, struct sk_buff *skb, __u16 *mssp) { const struct iphdr *iph = ip_hdr(skb); const struct tcphdr *th = tcp_hdr(skb); int mssind; const __u16 mss = *mssp; tcp_synq_overflow(sk); for (mssind = ARRAY_SIZE(msstab) - 1; mssind ; mssind--) if (mss >= msstab[mssind]) break; *mssp = msstab[mssind]; NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESSENT); return secure_tcp_syn_cookie(iph->saddr, iph->daddr, th->source, th->dest, ntohl(th->seq), jiffies / (HZ * 60), mssind); } /* * This (misnamed) value is the age of syncookie which is permitted. * Its ideal value should be dependent on TCP_TIMEOUT_INIT and * sysctl_tcp_retries1. It's a rather complicated formula (exponential * backoff) to compute at runtime so it's currently hardcoded here. */ #define COUNTER_TRIES 4 /* * Check if a ack sequence number is a valid syncookie. * Return the decoded mss if it is, or 0 if not. 
*/ static inline int cookie_check(struct sk_buff *skb, __u32 cookie) { const struct iphdr *iph = ip_hdr(skb); const struct tcphdr *th = tcp_hdr(skb); __u32 seq = ntohl(th->seq) - 1; __u32 mssind = check_tcp_syn_cookie(cookie, iph->saddr, iph->daddr, th->source, th->dest, seq, jiffies / (HZ * 60), COUNTER_TRIES); return mssind < ARRAY_SIZE(msstab) ? msstab[mssind] : 0; } static inline struct sock *get_cookie_sock(struct sock *sk, struct sk_buff *skb, struct request_sock *req, struct dst_entry *dst) { struct inet_connection_sock *icsk = inet_csk(sk); struct sock *child; child = icsk->icsk_af_ops->syn_recv_sock(sk, skb, req, dst); if (child) inet_csk_reqsk_queue_add(sk, req, child); else reqsk_free(req); return child; } /* * when syncookies are in effect and tcp timestamps are enabled we stored * additional tcp options in the timestamp. * This extracts these options from the timestamp echo. * * The lowest 4 bits store snd_wscale. * next 2 bits indicate SACK and ECN support. * * return false if we decode an option that should not be. */ bool cookie_check_timestamp(struct tcp_options_received *tcp_opt, bool *ecn_ok) { /* echoed timestamp, lowest bits contain options */ u32 options = tcp_opt->rcv_tsecr & TSMASK; if (!tcp_opt->saw_tstamp) { tcp_clear_options(tcp_opt); return true; } if (!sysctl_tcp_timestamps) return false; tcp_opt->sack_ok = (options & (1 << 4)) ? 
TCP_SACK_SEEN : 0; *ecn_ok = (options >> 5) & 1; if (*ecn_ok && !sysctl_tcp_ecn) return false; if (tcp_opt->sack_ok && !sysctl_tcp_sack) return false; if ((options & 0xf) == 0xf) return true; /* no window scaling */ tcp_opt->wscale_ok = 1; tcp_opt->snd_wscale = options & 0xf; return sysctl_tcp_window_scaling != 0; } EXPORT_SYMBOL(cookie_check_timestamp); struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, struct ip_options *opt) { struct tcp_options_received tcp_opt; const u8 *hash_location; struct inet_request_sock *ireq; struct tcp_request_sock *treq; struct tcp_sock *tp = tcp_sk(sk); const struct tcphdr *th = tcp_hdr(skb); __u32 cookie = ntohl(th->ack_seq) - 1; struct sock *ret = sk; struct request_sock *req; int mss; struct rtable *rt; __u8 rcv_wscale; bool ecn_ok = false; struct flowi4 fl4; if (!sysctl_tcp_syncookies || !th->ack || th->rst) goto out; if (tcp_synq_no_recent_overflow(sk) || (mss = cookie_check(skb, cookie)) == 0) { NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESFAILED); goto out; } NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_SYNCOOKIESRECV); /* check for timestamp cookie support */ memset(&tcp_opt, 0, sizeof(tcp_opt)); tcp_parse_options(skb, &tcp_opt, &hash_location, 0, NULL); if (!cookie_check_timestamp(&tcp_opt, &ecn_ok)) goto out; ret = NULL; req = inet_reqsk_alloc(&tcp_request_sock_ops); /* for safety */ if (!req) goto out; ireq = inet_rsk(req); treq = tcp_rsk(req); treq->rcv_isn = ntohl(th->seq) - 1; treq->snt_isn = cookie; req->mss = mss; ireq->loc_port = th->dest; ireq->rmt_port = th->source; ireq->loc_addr = ip_hdr(skb)->daddr; ireq->rmt_addr = ip_hdr(skb)->saddr; ireq->ecn_ok = ecn_ok; ireq->snd_wscale = tcp_opt.snd_wscale; ireq->sack_ok = tcp_opt.sack_ok; ireq->wscale_ok = tcp_opt.wscale_ok; ireq->tstamp_ok = tcp_opt.saw_tstamp; req->ts_recent = tcp_opt.saw_tstamp ? tcp_opt.rcv_tsval : 0; treq->snt_synack = tcp_opt.saw_tstamp ? 
tcp_opt.rcv_tsecr : 0; /* We throwed the options of the initial SYN away, so we hope * the ACK carries the same options again (see RFC1122 4.2.3.8) */ if (opt && opt->optlen) { int opt_size = sizeof(struct ip_options_rcu) + opt->optlen; ireq->opt = kmalloc(opt_size, GFP_ATOMIC); if (ireq->opt != NULL && ip_options_echo(&ireq->opt->opt, skb)) { kfree(ireq->opt); ireq->opt = NULL; } } if (security_inet_conn_request(sk, skb, req)) { reqsk_free(req); goto out; } req->expires = 0UL; req->retrans = 0; /* * We need to lookup the route here to get at the correct * window size. We should better make sure that the window size * hasn't changed since we received the original syn, but I see * no easy way to do this. */ flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP, inet_sk_flowi_flags(sk), (opt && opt->srr) ? opt->faddr : ireq->rmt_addr, ireq->loc_addr, th->source, th->dest); security_req_classify_flow(req, flowi4_to_flowi(&fl4)); rt = ip_route_output_key(sock_net(sk), &fl4); if (IS_ERR(rt)) { reqsk_free(req); goto out; } /* Try to redo what tcp_v4_send_synack did. */ req->window_clamp = tp->window_clamp ? :dst_metric(&rt->dst, RTAX_WINDOW); tcp_select_initial_window(tcp_full_space(sk), req->mss, &req->rcv_wnd, &req->window_clamp, ireq->wscale_ok, &rcv_wscale, dst_metric(&rt->dst, RTAX_INITRWND)); ireq->rcv_wscale = rcv_wscale; ret = get_cookie_sock(sk, skb, req, &rt->dst); /* ip_queue_xmit() depends on our flow being setup * Normal sockets get it right from inet_csk_route_child_sock() */ if (ret) inet_sk(ret)->cork.fl.u.ip4 = fl4; out: return ret; }
gpl-2.0
sancao2/Telegram
TMessagesProj/jni/libjpeg/jcparam.c
1106
21275
/* * jcparam.c * * Copyright (C) 1991-1998, Thomas G. Lane. * This file is part of the Independent JPEG Group's software. * For conditions of distribution and use, see the accompanying README file. * * This file contains optional default-setting code for the JPEG compressor. * Applications do not have to use this file, but those that don't use it * must know a lot more about the innards of the JPEG code. */ #define JPEG_INTERNALS #include "jinclude.h" #include "jpeglib.h" /* * Quantization table setup routines */ GLOBAL(void) jpeg_add_quant_table (j_compress_ptr cinfo, int which_tbl, const unsigned int *basic_table, int scale_factor, boolean force_baseline) /* Define a quantization table equal to the basic_table times * a scale factor (given as a percentage). * If force_baseline is TRUE, the computed quantization table entries * are limited to 1..255 for JPEG baseline compatibility. */ { JQUANT_TBL ** qtblptr; int i; long temp; /* Safety check to ensure start_compress not called yet. */ if (cinfo->global_state != CSTATE_START) ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state); if (which_tbl < 0 || which_tbl >= NUM_QUANT_TBLS) ERREXIT1(cinfo, JERR_DQT_INDEX, which_tbl); qtblptr = & cinfo->quant_tbl_ptrs[which_tbl]; if (*qtblptr == NULL) *qtblptr = jpeg_alloc_quant_table((j_common_ptr) cinfo); for (i = 0; i < DCTSIZE2; i++) { temp = ((long) basic_table[i] * scale_factor + 50L) / 100L; /* limit the values to the valid range */ if (temp <= 0L) temp = 1L; if (temp > 32767L) temp = 32767L; /* max quantizer needed for 12 bits */ if (force_baseline && temp > 255L) temp = 255L; /* limit to baseline range if requested */ (*qtblptr)->quantval[i] = (UINT16) temp; } /* Initialize sent_table FALSE so table will be written to JPEG file. 
 */
  /* Mark table as not-yet-emitted so it will be written to the JPEG file. */
  (*qtblptr)->sent_table = FALSE;
}


GLOBAL(void)
jpeg_set_linear_quality (j_compress_ptr cinfo, int scale_factor,
			 boolean force_baseline)
/* Set or change the 'quality' (quantization) setting, using default tables
 * and a straight percentage-scaling quality scale.  In most cases it's better
 * to use jpeg_set_quality (below); this entry point is provided for
 * applications that insist on a linear percentage scaling.
 */
{
  /* These are the sample quantization tables given in JPEG spec section K.1.
   * The spec says that the values given produce "good" quality, and
   * when divided by 2, "very good" quality.
   */
  static const unsigned int std_luminance_quant_tbl[DCTSIZE2] = {
    16,  11,  10,  16,  24,  40,  51,  61,
    12,  12,  14,  19,  26,  58,  60,  55,
    14,  13,  16,  24,  40,  57,  69,  56,
    14,  17,  22,  29,  51,  87,  80,  62,
    18,  22,  37,  56,  68, 109, 103,  77,
    24,  35,  55,  64,  81, 104, 113,  92,
    49,  64,  78,  87, 103, 121, 120, 101,
    72,  92,  95,  98, 112, 100, 103,  99
  };
  static const unsigned int std_chrominance_quant_tbl[DCTSIZE2] = {
    17,  18,  24,  47,  99,  99,  99,  99,
    18,  21,  26,  66,  99,  99,  99,  99,
    24,  26,  56,  99,  99,  99,  99,  99,
    47,  66,  99,  99,  99,  99,  99,  99,
    99,  99,  99,  99,  99,  99,  99,  99,
    99,  99,  99,  99,  99,  99,  99,  99,
    99,  99,  99,  99,  99,  99,  99,  99,
    99,  99,  99,  99,  99,  99,  99,  99
  };

  /* Set up two quantization tables using the specified scaling:
   * slot 0 for luminance, slot 1 for chrominance.
   */
  jpeg_add_quant_table(cinfo, 0, std_luminance_quant_tbl,
		       scale_factor, force_baseline);
  jpeg_add_quant_table(cinfo, 1, std_chrominance_quant_tbl,
		       scale_factor, force_baseline);
}


GLOBAL(int)
jpeg_quality_scaling (int quality)
/* Convert a user-specified quality rating to a percentage scaling factor
 * for an underlying quantization table, using our recommended scaling curve.
 * The input 'quality' factor should be 0 (terrible) to 100 (very good).
 */
{
  /* Safety limit on quality factor.  Convert 0 to 1 to avoid zero divide. */
  if (quality <= 0) quality = 1;
  if (quality > 100) quality = 100;

  /* The basic table is used as-is (scaling 100) for a quality of 50.
   * Qualities 50..100 are converted to scaling percentage 200 - 2*Q;
   * note that at Q=100 the scaling is 0, which will cause jpeg_add_quant_table
   * to make all the table entries 1 (hence, minimum quantization loss).
   * Qualities 1..50 are converted to scaling percentage 5000/Q.
   */
  if (quality < 50)
    quality = 5000 / quality;
  else
    quality = 200 - quality*2;

  return quality;
}


GLOBAL(void)
jpeg_set_quality (j_compress_ptr cinfo, int quality, boolean force_baseline)
/* Set or change the 'quality' (quantization) setting, using default tables.
 * This is the standard quality-adjusting entry point for typical user
 * interfaces; only those who want detailed control over quantization tables
 * would use the preceding three routines directly.
 */
{
  /* Convert user 0-100 rating to percentage scaling */
  quality = jpeg_quality_scaling(quality);

  /* Set up standard quality tables */
  jpeg_set_linear_quality(cinfo, quality, force_baseline);
}


/*
 * Huffman table setup routines
 */

LOCAL(void)
add_huff_table (j_compress_ptr cinfo,
		JHUFF_TBL **htblptr, const UINT8 *bits, const UINT8 *val)
/* Define a Huffman table.  'bits' is the 17-entry code-length count array
 * (index 0 unused), 'val' is the symbol list in code order.
 */
{
  int nsymbols, len;

  if (*htblptr == NULL)
    *htblptr = jpeg_alloc_huff_table((j_common_ptr) cinfo);

  /* Copy the number-of-symbols-of-each-code-length counts */
  MEMCOPY((*htblptr)->bits, bits, SIZEOF((*htblptr)->bits));

  /* Validate the counts.  We do this here mainly so we can copy the right
   * number of symbols from the val[] array, without risking marching off
   * the end of memory.  jchuff.c will do a more thorough test later.
   */
  nsymbols = 0;
  for (len = 1; len <= 16; len++)
    nsymbols += bits[len];
  if (nsymbols < 1 || nsymbols > 256)
    ERREXIT(cinfo, JERR_BAD_HUFF_TABLE);

  MEMCOPY((*htblptr)->huffval, val, nsymbols * SIZEOF(UINT8));

  /* Initialize sent_table FALSE so table will be written to JPEG file. */
  (*htblptr)->sent_table = FALSE;
}


LOCAL(void)
std_huff_tables (j_compress_ptr cinfo)
/* Set up the standard Huffman tables (cf. JPEG standard section K.3) */
/* IMPORTANT: these are only valid for 8-bit data precision! */
{
  static const UINT8 bits_dc_luminance[17] =
    { /* 0-base */ 0, 0, 1, 5, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0 };
  static const UINT8 val_dc_luminance[] =
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };

  static const UINT8 bits_dc_chrominance[17] =
    { /* 0-base */ 0, 0, 3, 1, 1, 1, 1, 1, 1, 1, 1, 1, 0, 0, 0, 0, 0 };
  static const UINT8 val_dc_chrominance[] =
    { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11 };

  static const UINT8 bits_ac_luminance[17] =
    { /* 0-base */ 0, 0, 2, 1, 3, 3, 2, 4, 3, 5, 5, 4, 4, 0, 0, 1, 0x7d };
  static const UINT8 val_ac_luminance[] =
    { 0x01, 0x02, 0x03, 0x00, 0x04, 0x11, 0x05, 0x12,
      0x21, 0x31, 0x41, 0x06, 0x13, 0x51, 0x61, 0x07,
      0x22, 0x71, 0x14, 0x32, 0x81, 0x91, 0xa1, 0x08,
      0x23, 0x42, 0xb1, 0xc1, 0x15, 0x52, 0xd1, 0xf0,
      0x24, 0x33, 0x62, 0x72, 0x82, 0x09, 0x0a, 0x16,
      0x17, 0x18, 0x19, 0x1a, 0x25, 0x26, 0x27, 0x28,
      0x29, 0x2a, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39,
      0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48, 0x49,
      0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58, 0x59,
      0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68, 0x69,
      0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78, 0x79,
      0x7a, 0x83, 0x84, 0x85, 0x86, 0x87, 0x88, 0x89,
      0x8a, 0x92, 0x93, 0x94, 0x95, 0x96, 0x97, 0x98,
      0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5, 0xa6, 0xa7,
      0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4, 0xb5, 0xb6,
      0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3, 0xc4, 0xc5,
      0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2, 0xd3, 0xd4,
      0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda, 0xe1, 0xe2,
      0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9, 0xea,
      0xf1, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
      0xf9, 0xfa };

  static const UINT8 bits_ac_chrominance[17] =
    { /* 0-base */ 0, 0, 2, 1, 2, 4, 4, 3, 4, 7, 5, 4, 4, 0, 1, 2, 0x77 };
  static const UINT8 val_ac_chrominance[] =
    { 0x00, 0x01, 0x02, 0x03, 0x11, 0x04, 0x05, 0x21,
      0x31, 0x06, 0x12, 0x41, 0x51, 0x07, 0x61, 0x71,
      0x13, 0x22, 0x32, 0x81, 0x08, 0x14, 0x42, 0x91,
      0xa1, 0xb1, 0xc1, 0x09, 0x23, 0x33, 0x52, 0xf0,
      0x15, 0x62, 0x72, 0xd1, 0x0a, 0x16, 0x24, 0x34,
      0xe1, 0x25, 0xf1, 0x17, 0x18, 0x19, 0x1a, 0x26,
      0x27, 0x28, 0x29, 0x2a, 0x35, 0x36, 0x37, 0x38,
      0x39, 0x3a, 0x43, 0x44, 0x45, 0x46, 0x47, 0x48,
      0x49, 0x4a, 0x53, 0x54, 0x55, 0x56, 0x57, 0x58,
      0x59, 0x5a, 0x63, 0x64, 0x65, 0x66, 0x67, 0x68,
      0x69, 0x6a, 0x73, 0x74, 0x75, 0x76, 0x77, 0x78,
      0x79, 0x7a, 0x82, 0x83, 0x84, 0x85, 0x86, 0x87,
      0x88, 0x89, 0x8a, 0x92, 0x93, 0x94, 0x95, 0x96,
      0x97, 0x98, 0x99, 0x9a, 0xa2, 0xa3, 0xa4, 0xa5,
      0xa6, 0xa7, 0xa8, 0xa9, 0xaa, 0xb2, 0xb3, 0xb4,
      0xb5, 0xb6, 0xb7, 0xb8, 0xb9, 0xba, 0xc2, 0xc3,
      0xc4, 0xc5, 0xc6, 0xc7, 0xc8, 0xc9, 0xca, 0xd2,
      0xd3, 0xd4, 0xd5, 0xd6, 0xd7, 0xd8, 0xd9, 0xda,
      0xe2, 0xe3, 0xe4, 0xe5, 0xe6, 0xe7, 0xe8, 0xe9,
      0xea, 0xf2, 0xf3, 0xf4, 0xf5, 0xf6, 0xf7, 0xf8,
      0xf9, 0xfa };

  /* Tables 0 are for luminance, tables 1 for chrominance (see
   * jpeg_set_colorspace, which assigns dc/ac_tbl_no accordingly).
   */
  add_huff_table(cinfo, &cinfo->dc_huff_tbl_ptrs[0],
		 bits_dc_luminance, val_dc_luminance);
  add_huff_table(cinfo, &cinfo->ac_huff_tbl_ptrs[0],
		 bits_ac_luminance, val_ac_luminance);
  add_huff_table(cinfo, &cinfo->dc_huff_tbl_ptrs[1],
		 bits_dc_chrominance, val_dc_chrominance);
  add_huff_table(cinfo, &cinfo->ac_huff_tbl_ptrs[1],
		 bits_ac_chrominance, val_ac_chrominance);
}


/*
 * Default parameter setup for compression.
 *
 * Applications that don't choose to use this routine must do their
 * own setup of all these parameters.  Alternately, you can call this
 * to establish defaults and then alter parameters selectively.  This
 * is the recommended approach since, if we add any new parameters,
 * your code will still work (they'll be set to reasonable defaults).
 */

GLOBAL(void)
jpeg_set_defaults (j_compress_ptr cinfo)
{
  int i;

  /* Safety check to ensure start_compress not called yet. */
  if (cinfo->global_state != CSTATE_START)
    ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state);

  /* Allocate comp_info array large enough for maximum component count.
   * Array is made permanent in case application wants to compress
   * multiple images at same param settings.
   */
  if (cinfo->comp_info == NULL)
    cinfo->comp_info = (jpeg_component_info *)
      (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_PERMANENT,
				  MAX_COMPONENTS * SIZEOF(jpeg_component_info));

  /* Initialize everything not dependent on the color space */

  cinfo->data_precision = BITS_IN_JSAMPLE;
  /* Set up two quantization tables using default quality of 75 */
  jpeg_set_quality(cinfo, 75, TRUE);
  /* Set up two Huffman tables */
  std_huff_tables(cinfo);

  /* Initialize default arithmetic coding conditioning */
  for (i = 0; i < NUM_ARITH_TBLS; i++) {
    cinfo->arith_dc_L[i] = 0;
    cinfo->arith_dc_U[i] = 1;
    cinfo->arith_ac_K[i] = 5;
  }

  /* Default is no multiple-scan output */
  cinfo->scan_info = NULL;
  cinfo->num_scans = 0;

  /* Expect normal source image, not raw downsampled data */
  cinfo->raw_data_in = FALSE;

  /* Use Huffman coding, not arithmetic coding, by default */
  cinfo->arith_code = FALSE;

  /* By default, don't do extra passes to optimize entropy coding */
  cinfo->optimize_coding = FALSE;
  /* The standard Huffman tables are only valid for 8-bit data precision.
   * If the precision is higher, force optimization on so that usable
   * tables will be computed.  This test can be removed if default tables
   * are supplied that are valid for the desired precision.
   */
  if (cinfo->data_precision > 8)
    cinfo->optimize_coding = TRUE;

  /* By default, use the simpler non-cosited sampling alignment */
  cinfo->CCIR601_sampling = FALSE;

  /* No input smoothing */
  cinfo->smoothing_factor = 0;

  /* DCT algorithm preference */
  cinfo->dct_method = JDCT_DEFAULT;

  /* No restart markers */
  cinfo->restart_interval = 0;
  cinfo->restart_in_rows = 0;

  /* Fill in default JFIF marker parameters.  Note that whether the marker
   * will actually be written is determined by jpeg_set_colorspace.
   *
   * By default, the library emits JFIF version code 1.01.
   * An application that wants to emit JFIF 1.02 extension markers should set
   * JFIF_minor_version to 2.  We could probably get away with just defaulting
   * to 1.02, but there may still be some decoders in use that will complain
   * about that; saying 1.01 should minimize compatibility problems.
   */
  cinfo->JFIF_major_version = 1; /* Default JFIF version = 1.01 */
  cinfo->JFIF_minor_version = 1;
  cinfo->density_unit = 0;	/* Pixel size is unknown by default */
  cinfo->X_density = 1;		/* Pixel aspect ratio is square by default */
  cinfo->Y_density = 1;

  /* Choose JPEG colorspace based on input space, set defaults accordingly */
  jpeg_default_colorspace(cinfo);
}


/*
 * Select an appropriate JPEG colorspace for in_color_space.
 */

GLOBAL(void)
jpeg_default_colorspace (j_compress_ptr cinfo)
{
  switch (cinfo->in_color_space) {
  case JCS_GRAYSCALE:
    jpeg_set_colorspace(cinfo, JCS_GRAYSCALE);
    break;
  case JCS_RGB:
    /* RGB input is normally converted to YCbCr for compression */
    jpeg_set_colorspace(cinfo, JCS_YCbCr);
    break;
  case JCS_YCbCr:
    jpeg_set_colorspace(cinfo, JCS_YCbCr);
    break;
  case JCS_CMYK:
    jpeg_set_colorspace(cinfo, JCS_CMYK); /* By default, no translation */
    break;
  case JCS_YCCK:
    jpeg_set_colorspace(cinfo, JCS_YCCK);
    break;
  case JCS_UNKNOWN:
    jpeg_set_colorspace(cinfo, JCS_UNKNOWN);
    break;
  default:
    ERREXIT(cinfo, JERR_BAD_IN_COLORSPACE);
  }
}


/*
 * Set the JPEG colorspace, and choose colorspace-dependent default values.
 */

GLOBAL(void)
jpeg_set_colorspace (j_compress_ptr cinfo, J_COLOR_SPACE colorspace)
{
  jpeg_component_info * compptr;
  int ci;

  /* Helper: fill one component's id, sampling factors and table slots. */
#define SET_COMP(index,id,hsamp,vsamp,quant,dctbl,actbl)  \
  (compptr = &cinfo->comp_info[index], \
   compptr->component_id = (id), \
   compptr->h_samp_factor = (hsamp), \
   compptr->v_samp_factor = (vsamp), \
   compptr->quant_tbl_no = (quant), \
   compptr->dc_tbl_no = (dctbl), \
   compptr->ac_tbl_no = (actbl) )

  /* Safety check to ensure start_compress not called yet. */
  if (cinfo->global_state != CSTATE_START)
    ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state);

  /* For all colorspaces, we use Q and Huff tables 0 for luminance components,
   * tables 1 for chrominance components.
   */

  cinfo->jpeg_color_space = colorspace;

  cinfo->write_JFIF_header = FALSE; /* No marker for non-JFIF colorspaces */
  cinfo->write_Adobe_marker = FALSE; /* write no Adobe marker by default */

  switch (colorspace) {
  case JCS_GRAYSCALE:
    cinfo->write_JFIF_header = TRUE; /* Write a JFIF marker */
    cinfo->num_components = 1;
    /* JFIF specifies component ID 1 */
    SET_COMP(0, 1, 1,1, 0, 0,0);
    break;
  case JCS_RGB:
    cinfo->write_Adobe_marker = TRUE; /* write Adobe marker to flag RGB */
    cinfo->num_components = 3;
    SET_COMP(0, 0x52 /* 'R' */, 1,1, 0, 0,0);
    SET_COMP(1, 0x47 /* 'G' */, 1,1, 0, 0,0);
    SET_COMP(2, 0x42 /* 'B' */, 1,1, 0, 0,0);
    break;
  case JCS_YCbCr:
    cinfo->write_JFIF_header = TRUE; /* Write a JFIF marker */
    cinfo->num_components = 3;
    /* JFIF specifies component IDs 1,2,3 */
    /* We default to 2x2 subsamples of chrominance */
    SET_COMP(0, 1, 2,2, 0, 0,0);
    SET_COMP(1, 2, 1,1, 1, 1,1);
    SET_COMP(2, 3, 1,1, 1, 1,1);
    break;
  case JCS_CMYK:
    cinfo->write_Adobe_marker = TRUE; /* write Adobe marker to flag CMYK */
    cinfo->num_components = 4;
    SET_COMP(0, 0x43 /* 'C' */, 1,1, 0, 0,0);
    SET_COMP(1, 0x4D /* 'M' */, 1,1, 0, 0,0);
    SET_COMP(2, 0x59 /* 'Y' */, 1,1, 0, 0,0);
    SET_COMP(3, 0x4B /* 'K' */, 1,1, 0, 0,0);
    break;
  case JCS_YCCK:
    cinfo->write_Adobe_marker = TRUE; /* write Adobe marker to flag YCCK */
    cinfo->num_components = 4;
    SET_COMP(0, 1, 2,2, 0, 0,0);
    SET_COMP(1, 2, 1,1, 1, 1,1);
    SET_COMP(2, 3, 1,1, 1, 1,1);
    SET_COMP(3, 4, 2,2, 0, 0,0);
    break;
  case JCS_UNKNOWN:
    cinfo->num_components = cinfo->input_components;
    if (cinfo->num_components < 1 || cinfo->num_components > MAX_COMPONENTS)
      ERREXIT2(cinfo, JERR_COMPONENT_COUNT, cinfo->num_components,
	       MAX_COMPONENTS);
    for (ci = 0; ci < cinfo->num_components; ci++) {
      SET_COMP(ci, ci, 1,1, 0, 0,0);
    }
    break;
  default:
    ERREXIT(cinfo, JERR_BAD_J_COLORSPACE);
  }
}


#ifdef C_PROGRESSIVE_SUPPORTED

LOCAL(jpeg_scan_info *)
fill_a_scan (jpeg_scan_info * scanptr, int ci,
	     int Ss, int Se, int Ah, int Al)
/* Support routine: generate one scan for specified component.
 * Returns the advanced scan pointer.
 */
{
  scanptr->comps_in_scan = 1;
  scanptr->component_index[0] = ci;
  scanptr->Ss = Ss;
  scanptr->Se = Se;
  scanptr->Ah = Ah;
  scanptr->Al = Al;
  scanptr++;
  return scanptr;
}

LOCAL(jpeg_scan_info *)
fill_scans (jpeg_scan_info * scanptr, int ncomps,
	    int Ss, int Se, int Ah, int Al)
/* Support routine: generate one scan for each component */
{
  int ci;

  for (ci = 0; ci < ncomps; ci++) {
    scanptr->comps_in_scan = 1;
    scanptr->component_index[0] = ci;
    scanptr->Ss = Ss;
    scanptr->Se = Se;
    scanptr->Ah = Ah;
    scanptr->Al = Al;
    scanptr++;
  }
  return scanptr;
}

LOCAL(jpeg_scan_info *)
fill_dc_scans (jpeg_scan_info * scanptr, int ncomps, int Ah, int Al)
/* Support routine: generate interleaved DC scan if possible, else N scans */
{
  int ci;

  if (ncomps <= MAX_COMPS_IN_SCAN) {
    /* Single interleaved DC scan */
    scanptr->comps_in_scan = ncomps;
    for (ci = 0; ci < ncomps; ci++)
      scanptr->component_index[ci] = ci;
    scanptr->Ss = scanptr->Se = 0;
    scanptr->Ah = Ah;
    scanptr->Al = Al;
    scanptr++;
  } else {
    /* Noninterleaved DC scan for each component */
    scanptr = fill_scans(scanptr, ncomps, 0, 0, Ah, Al);
  }
  return scanptr;
}


/*
 * Create a recommended progressive-JPEG script.
 * cinfo->num_components and cinfo->jpeg_color_space must be correct.
 */

GLOBAL(void)
jpeg_simple_progression (j_compress_ptr cinfo)
{
  int ncomps = cinfo->num_components;
  int nscans;
  jpeg_scan_info * scanptr;

  /* Safety check to ensure start_compress not called yet. */
  if (cinfo->global_state != CSTATE_START)
    ERREXIT1(cinfo, JERR_BAD_STATE, cinfo->global_state);

  /* Figure space needed for script.  Calculation must match code below! */
  if (ncomps == 3 && cinfo->jpeg_color_space == JCS_YCbCr) {
    /* Custom script for YCbCr color images. */
    nscans = 10;
  } else {
    /* All-purpose script for other color spaces. */
    if (ncomps > MAX_COMPS_IN_SCAN)
      nscans = 6 * ncomps;	/* 2 DC + 4 AC scans per component */
    else
      nscans = 2 + 4 * ncomps;	/* 2 DC scans; 4 AC scans per component */
  }

  /* Allocate space for script.
   * We need to put it in the permanent pool in case the application performs
   * multiple compressions without changing the settings.  To avoid a memory
   * leak if jpeg_simple_progression is called repeatedly for the same JPEG
   * object, we try to re-use previously allocated space, and we allocate
   * enough space to handle YCbCr even if initially asked for grayscale.
   */
  if (cinfo->script_space == NULL || cinfo->script_space_size < nscans) {
    cinfo->script_space_size = MAX(nscans, 10);
    cinfo->script_space = (jpeg_scan_info *)
      (*cinfo->mem->alloc_small) ((j_common_ptr) cinfo, JPOOL_PERMANENT,
			cinfo->script_space_size * SIZEOF(jpeg_scan_info));
  }
  scanptr = cinfo->script_space;
  cinfo->scan_info = scanptr;
  cinfo->num_scans = nscans;

  if (ncomps == 3 && cinfo->jpeg_color_space == JCS_YCbCr) {
    /* Custom script for YCbCr color images. */
    /* Initial DC scan */
    scanptr = fill_dc_scans(scanptr, ncomps, 0, 1);
    /* Initial AC scan: get some luma data out in a hurry */
    scanptr = fill_a_scan(scanptr, 0, 1, 5, 0, 2);
    /* Chroma data is too small to be worth expending many scans on */
    scanptr = fill_a_scan(scanptr, 2, 1, 63, 0, 1);
    scanptr = fill_a_scan(scanptr, 1, 1, 63, 0, 1);
    /* Complete spectral selection for luma AC */
    scanptr = fill_a_scan(scanptr, 0, 6, 63, 0, 2);
    /* Refine next bit of luma AC */
    scanptr = fill_a_scan(scanptr, 0, 1, 63, 2, 1);
    /* Finish DC successive approximation */
    scanptr = fill_dc_scans(scanptr, ncomps, 1, 0);
    /* Finish AC successive approximation */
    scanptr = fill_a_scan(scanptr, 2, 1, 63, 1, 0);
    scanptr = fill_a_scan(scanptr, 1, 1, 63, 1, 0);
    /* Luma bottom bit comes last since it's usually largest scan */
    scanptr = fill_a_scan(scanptr, 0, 1, 63, 1, 0);
  } else {
    /* All-purpose script for other color spaces. */
    /* Successive approximation first pass */
    scanptr = fill_dc_scans(scanptr, ncomps, 0, 1);
    scanptr = fill_scans(scanptr, ncomps, 1, 5, 0, 2);
    scanptr = fill_scans(scanptr, ncomps, 6, 63, 0, 2);
    /* Successive approximation second pass */
    scanptr = fill_scans(scanptr, ncomps, 1, 63, 2, 1);
    /* Successive approximation final pass */
    scanptr = fill_dc_scans(scanptr, ncomps, 1, 0);
    scanptr = fill_scans(scanptr, ncomps, 1, 63, 1, 0);
  }
}

#endif /* C_PROGRESSIVE_SUPPORTED */
gpl-2.0
lujiefeng/bocom_camera
drivers/serial/sn_console.c
1362
30165
/* * C-Brick Serial Port (and console) driver for SGI Altix machines. * * This driver is NOT suitable for talking to the l1-controller for * anything other than 'console activities' --- please use the l1 * driver for that. * * * Copyright (c) 2004-2006 Silicon Graphics, Inc. All Rights Reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of version 2 of the GNU General Public License * as published by the Free Software Foundation. * * This program is distributed in the hope that it would be useful, but * WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. * * Further, this software is distributed without any warranty that it is * free of the rightful claim of any third person regarding infringement * or the like. Any license provided herein, whether implied or * otherwise, applies only to this software file. Patent licenses, if * any, provided herein do not apply to combinations of this program with * other software, or any other product whatsoever. * * You should have received a copy of the GNU General Public * License along with this program; if not, write the Free Software * Foundation, Inc., 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. 
* * Contact information: Silicon Graphics, Inc., 1500 Crittenden Lane, * Mountain View, CA 94043, or: * * http://www.sgi.com * * For further information regarding this notice, see: * * http://oss.sgi.com/projects/GenInfo/NoticeExplan */ #include <linux/interrupt.h> #include <linux/tty.h> #include <linux/serial.h> #include <linux/console.h> #include <linux/module.h> #include <linux/sysrq.h> #include <linux/circ_buf.h> #include <linux/serial_reg.h> #include <linux/delay.h> /* for mdelay */ #include <linux/miscdevice.h> #include <linux/serial_core.h> #include <asm/io.h> #include <asm/sn/simulator.h> #include <asm/sn/sn_sal.h> /* number of characters we can transmit to the SAL console at a time */ #define SN_SAL_MAX_CHARS 120 /* 64K, when we're asynch, it must be at least printk's LOG_BUF_LEN to * avoid losing chars, (always has to be a power of 2) */ #define SN_SAL_BUFFER_SIZE (64 * (1 << 10)) #define SN_SAL_UART_FIFO_DEPTH 16 #define SN_SAL_UART_FIFO_SPEED_CPS (9600/10) /* sn_transmit_chars() calling args */ #define TRANSMIT_BUFFERED 0 #define TRANSMIT_RAW 1 /* To use dynamic numbers only and not use the assigned major and minor, * define the following.. 
 */
/* #define USE_DYNAMIC_MINOR 1 *//* use dynamic minor number */
#define USE_DYNAMIC_MINOR 0	/* Don't rely on misc_register dynamic minor */

/* Device name we're using */
#define DEVICE_NAME "ttySG"
#define DEVICE_NAME_DYNAMIC "ttySG0"	/* need full name for misc_register */
/* The major/minor we are using, ignored for USE_DYNAMIC_MINOR */
#define DEVICE_MAJOR 204
#define DEVICE_MINOR 40

#ifdef CONFIG_MAGIC_SYSRQ
static char sysrq_serial_str[] = "\eSYS";	/* magic escape sequence */
static char *sysrq_serial_ptr = sysrq_serial_str; /* next char expected */
static unsigned long sysrq_requested;	/* jiffies timestamp, 0 = inactive */
#endif				/* CONFIG_MAGIC_SYSRQ */

/*
 * Port definition - this kinda drives it all
 */
struct sn_cons_port {
	struct timer_list sc_timer;	/* poll timer (pre-interrupt mode) */
	struct uart_port sc_port;	/* serial-core port */
	/* SAL access vtable; swapped between poll_ops and intr_ops */
	struct sn_sal_ops {
		int (*sal_puts_raw) (const char *s, int len);
		int (*sal_puts) (const char *s, int len);
		int (*sal_getc) (void);
		int (*sal_input_pending) (void);
		void (*sal_wakeup_transmit) (struct sn_cons_port *, int);
	} *sc_ops;
	unsigned long sc_interrupt_timeout;	/* poll interval in jiffies */
	int sc_is_asynch;	/* nonzero once switched off raw early mode */
};

static struct sn_cons_port sal_console_port;
static int sn_process_input;	/* gate: don't pass input up until module init */

/* Only used if USE_DYNAMIC_MINOR is set to 1 */
static struct miscdevice misc;	/* used with misc_register for dynamic */

extern void early_sn_setup(void);

#undef DEBUG
#ifdef DEBUG
static int sn_debug_printf(const char *fmt, ...);
#define DPRINTF(x...) sn_debug_printf(x)
#else
#define DPRINTF(x...) do { } while (0)
#endif

/* Prototypes */
static int snt_hw_puts_raw(const char *, int);
static int snt_hw_puts_buffered(const char *, int);
static int snt_poll_getc(void);
static int snt_poll_input_pending(void);
static int snt_intr_getc(void);
static int snt_intr_input_pending(void);
static void sn_transmit_chars(struct sn_cons_port *, int);

/* A table for polling: */
static struct sn_sal_ops poll_ops = {
	.sal_puts_raw = snt_hw_puts_raw,
	.sal_puts = snt_hw_puts_raw,
	.sal_getc = snt_poll_getc,
	.sal_input_pending = snt_poll_input_pending
};

/* A table for interrupts enabled */
static struct sn_sal_ops intr_ops = {
	.sal_puts_raw = snt_hw_puts_raw,
	.sal_puts = snt_hw_puts_buffered,
	.sal_getc = snt_intr_getc,
	.sal_input_pending = snt_intr_input_pending,
	.sal_wakeup_transmit = sn_transmit_chars
};

/* the console does output in two distinctly different ways:
 * synchronous (raw) and asynchronous (buffered).  initially, early_printk
 * does synchronous output.  any data written goes directly to the SAL
 * to be output (incidentally, it is internally buffered by the SAL)
 * after interrupts and timers are initialized and available for use,
 * the console init code switches to asynchronous output.  this is
 * also the earliest opportunity to begin polling for console input.
 * after console initialization, console output and tty (serial port)
 * output is buffered and sent to the SAL asynchronously (either by
 * timer callback or by UART interrupt) */

/* routines for running the console in polling mode */

/**
 * snt_poll_getc - Get a character from the console in polling mode
 *
 */
static int snt_poll_getc(void)
{
	int ch;

	ia64_sn_console_getc(&ch);
	return ch;
}

/**
 * snt_poll_input_pending - Check if any input is waiting - polling mode.
 *
 */
static int snt_poll_input_pending(void)
{
	int status, input;

	status = ia64_sn_console_check(&input);
	return !status && input;
}

/* routines for an interrupt driven console (normal) */

/**
 * snt_intr_getc - Get a character from the console, interrupt mode
 *
 */
static int snt_intr_getc(void)
{
	return ia64_sn_console_readc();
}

/**
 * snt_intr_input_pending - Check if input is pending, interrupt mode
 *
 */
static int snt_intr_input_pending(void)
{
	return ia64_sn_console_intr_status() & SAL_CONSOLE_INTR_RECV;
}

/* these functions are polled and interrupt */

/**
 * snt_hw_puts_raw - Send raw string to the console, polled or interrupt mode
 * @s: String
 * @len: Length
 *
 */
static int snt_hw_puts_raw(const char *s, int len)
{
	/* this will call the PROM and not return until this is done */
	return ia64_sn_console_putb(s, len);
}

/**
 * snt_hw_puts_buffered - Send string to console, polled or interrupt mode
 * @s: String
 * @len: Length
 *
 */
static int snt_hw_puts_buffered(const char *s, int len)
{
	/* queue data to the PROM */
	return ia64_sn_console_xmit_chars((char *)s, len);
}

/* uart interface structs
 * These functions are associated with the uart_port that the serial core
 * infrastructure calls.
 *
 * Note: Due to how the console works, many routines are no-ops.
 */

/**
 * snp_type - What type of console are we?
 * @port: Port to operate with (we ignore since we only have one port)
 *
 */
static const char *snp_type(struct uart_port *port)
{
	return ("SGI SN L1");
}

/**
 * snp_tx_empty - Is the transmitter empty?  We pretend we're always empty
 * @port: Port to operate on (we ignore since we only have one port)
 *
 */
static unsigned int snp_tx_empty(struct uart_port *port)
{
	return 1;
}

/**
 * snp_stop_tx - stop the transmitter - no-op for us
 * @port: Port to operate on - we ignore - no-op function
 *
 */
static void snp_stop_tx(struct uart_port *port)
{
}

/**
 * snp_release_port - Free i/o and resources for port - no-op for us
 * @port: Port to operate on - we ignore - no-op function
 *
 */
static void snp_release_port(struct uart_port *port)
{
}

/**
 * snp_enable_ms - Force modem status interrupts on - no-op for us
 * @port: Port to operate on - we ignore - no-op function
 *
 */
static void snp_enable_ms(struct uart_port *port)
{
}

/**
 * snp_shutdown - shut down the port - free irq and disable - no-op for us
 * @port: Port to shut down - we ignore
 *
 */
static void snp_shutdown(struct uart_port *port)
{
}

/**
 * snp_set_mctrl - set control lines (dtr, rts, etc) - no-op for our console
 * @port: Port to operate on - we ignore
 * @mctrl: Lines to set/unset - we ignore
 *
 */
static void snp_set_mctrl(struct uart_port *port, unsigned int mctrl)
{
}

/**
 * snp_get_mctrl - get control line info, we just return a static value
 * @port: port to operate on - we only have one port so we ignore this
 *
 */
static unsigned int snp_get_mctrl(struct uart_port *port)
{
	return TIOCM_CAR | TIOCM_RNG | TIOCM_DSR | TIOCM_CTS;
}

/**
 * snp_stop_rx - Stop the receiver - we ignore this
 * @port: Port to operate on - we ignore
 *
 */
static void snp_stop_rx(struct uart_port *port)
{
}

/**
 * snp_start_tx - Start transmitter
 * @port: Port to operate on
 *
 */
static void snp_start_tx(struct uart_port *port)
{
	if (sal_console_port.sc_ops->sal_wakeup_transmit)
		sal_console_port.sc_ops->sal_wakeup_transmit(&sal_console_port,
							     TRANSMIT_BUFFERED);
}

/**
 * snp_break_ctl - handle breaks - ignored by us
 * @port: Port to operate on
 * @break_state: Break state
 *
 */
static void snp_break_ctl(struct uart_port *port, int break_state)
{
}
/**
 * snp_startup - Start up the serial port - always return 0 (We're always on)
 * @port: Port to operate on
 *
 */
static int snp_startup(struct uart_port *port)
{
	return 0;
}

/**
 * snp_set_termios - set termios stuff - we ignore these
 * @port: port to operate on
 * @termios: New settings
 * @old: Old settings
 *
 */
static void
snp_set_termios(struct uart_port *port, struct ktermios *termios,
		struct ktermios *old)
{
}

/**
 * snp_request_port - allocate resources for port - ignored by us
 * @port: port to operate on
 *
 */
static int snp_request_port(struct uart_port *port)
{
	return 0;
}

/**
 * snp_config_port - allocate resources, set up - we ignore, we're always on
 * @port: Port to operate on
 * @flags: flags used for port setup
 *
 */
static void snp_config_port(struct uart_port *port, int flags)
{
}

/* Associate the uart functions above - given to serial core */

static struct uart_ops sn_console_ops = {
	.tx_empty = snp_tx_empty,
	.set_mctrl = snp_set_mctrl,
	.get_mctrl = snp_get_mctrl,
	.stop_tx = snp_stop_tx,
	.start_tx = snp_start_tx,
	.stop_rx = snp_stop_rx,
	.enable_ms = snp_enable_ms,
	.break_ctl = snp_break_ctl,
	.startup = snp_startup,
	.shutdown = snp_shutdown,
	.set_termios = snp_set_termios,
	.pm = NULL,
	.type = snp_type,
	.release_port = snp_release_port,
	.request_port = snp_request_port,
	.config_port = snp_config_port,
	.verify_port = NULL,
};

/* End of uart struct functions and defines */

#ifdef DEBUG

/**
 * sn_debug_printf - close to hardware debugging printf
 * @fmt: printf format
 *
 * This is as "close to the metal" as we can get, used when the driver
 * itself may be broken.
 *
 */
static int sn_debug_printf(const char *fmt, ...)
{
	static char printk_buf[1024];
	int printed_len;
	va_list args;

	va_start(args, fmt);
	printed_len = vsnprintf(printk_buf, sizeof(printk_buf), fmt, args);

	if (!sal_console_port.sc_ops) {
		/* early call before console setup: fall back to polled SAL I/O */
		sal_console_port.sc_ops = &poll_ops;
		early_sn_setup();
	}
	sal_console_port.sc_ops->sal_puts_raw(printk_buf, printed_len);

	va_end(args);
	return printed_len;
}
#endif				/* DEBUG */

/*
 * Interrupt handling routines.
 */

/**
 * sn_receive_chars - Grab characters, pass them to tty layer
 * @port: Port to operate on
 * @flags: irq flags
 *
 * Note: If we're not registered with the serial core infrastructure yet,
 * we don't try to send characters to it...
 *
 * Called with port->sc_port.lock held (dropped temporarily around
 * handle_sysrq).
 */
static void
sn_receive_chars(struct sn_cons_port *port, unsigned long flags)
{
	int ch;
	struct tty_struct *tty;

	if (!port) {
		printk(KERN_ERR "sn_receive_chars - port NULL so can't receieve\n");
		return;
	}

	if (!port->sc_ops) {
		printk(KERN_ERR "sn_receive_chars - port->sc_ops NULL so can't receieve\n");
		return;
	}

	if (port->sc_port.state) {
		/* The serial_core stuffs are initialized, use them */
		tty = port->sc_port.state->port.tty;
	}
	else {
		/* Not registered yet - can't pass to tty layer. */
		tty = NULL;
	}

	while (port->sc_ops->sal_input_pending()) {
		ch = port->sc_ops->sal_getc();
		if (ch < 0) {
			printk(KERN_ERR "sn_console: An error occured while "
			       "obtaining data from the console (0x%0x)\n", ch);
			break;
		}
#ifdef CONFIG_MAGIC_SYSRQ
		if (sysrq_requested) {
			unsigned long sysrq_timeout = sysrq_requested + HZ*5;

			sysrq_requested = 0;
			if (ch && time_before(jiffies, sysrq_timeout)) {
				/* drop the port lock while sysrq runs */
				spin_unlock_irqrestore(&port->sc_port.lock, flags);
				handle_sysrq(ch, NULL);
				spin_lock_irqsave(&port->sc_port.lock, flags);
				/* ignore actual sysrq command char */
				continue;
			}
		}
		if (ch == *sysrq_serial_ptr) {
			if (!(*++sysrq_serial_ptr)) {
				/* whole "\eSYS" sequence seen: arm sysrq */
				sysrq_requested = jiffies;
				sysrq_serial_ptr = sysrq_serial_str;
			}
			/*
			 * ignore the whole sysrq string except for the
			 * leading escape
			 */
			if (ch != '\e')
				continue;
		}
		else
			sysrq_serial_ptr = sysrq_serial_str;
#endif /* CONFIG_MAGIC_SYSRQ */

		/* record the character to pass up to the tty layer */
		if (tty) {
			if (tty_insert_flip_char(tty, ch, TTY_NORMAL) == 0)
				break;
		}
		port->sc_port.icount.rx++;
	}

	if (tty)
		tty_flip_buffer_push(tty);
}

/**
 * sn_transmit_chars - grab characters from serial core, send off
 * @port: Port to operate on
 * @raw: Transmit raw or buffered
 *
 * Note: If we're early, before we're registered with serial core, the
 * writes are going through sn_sal_console_write because that's how
 * register_console has been set up.  We currently could have asynch
 * polls calling this function due to sn_sal_switch_to_asynch but we can
 * ignore them until we register with the serial core stuffs.
 *
 */
static void sn_transmit_chars(struct sn_cons_port *port, int raw)
{
	int xmit_count, tail, head, loops, ii;
	int result;
	char *start;
	struct circ_buf *xmit;

	if (!port)
		return;

	BUG_ON(!port->sc_is_asynch);

	if (port->sc_port.state) {
		/* We're initialized, using serial core infrastructure */
		xmit = &port->sc_port.state->xmit;
	} else {
		/* Probably sn_sal_switch_to_asynch has been run but serial core isn't
		 * initialized yet.  Just return.  Writes are going through
		 * sn_sal_console_write (due to register_console) at this time.
		 */
		return;
	}

	if (uart_circ_empty(xmit) || uart_tx_stopped(&port->sc_port)) {
		/* Nothing to do. */
		ia64_sn_console_intr_disable(SAL_CONSOLE_INTR_XMIT);
		return;
	}

	head = xmit->head;
	tail = xmit->tail;
	start = &xmit->buf[tail];

	/* twice around gets the tail to the end of the buffer and
	 * then to the head, if needed */
	loops = (head < tail) ? 2 : 1;

	for (ii = 0; ii < loops; ii++) {
		xmit_count = (head < tail) ?
		    (UART_XMIT_SIZE - tail) : (head - tail);

		if (xmit_count > 0) {
			if (raw == TRANSMIT_RAW)
				result =
				    port->sc_ops->sal_puts_raw(start,
							       xmit_count);
			else
				result =
				    port->sc_ops->sal_puts(start, xmit_count);
#ifdef DEBUG
			if (!result)
				DPRINTF("`");
#endif
			if (result > 0) {
				/* SAL may accept fewer chars than offered */
				xmit_count -= result;
				port->sc_port.icount.tx += result;
				tail += result;
				tail &= UART_XMIT_SIZE - 1;
				xmit->tail = tail;
				start = &xmit->buf[tail];
			}
		}
	}

	if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
		uart_write_wakeup(&port->sc_port);

	if (uart_circ_empty(xmit))
		snp_stop_tx(&port->sc_port);	/* no-op for us */
}

/**
 * sn_sal_interrupt - Handle console interrupts
 * @irq: irq #, useful for debug statements
 * @dev_id: our pointer to our port (sn_cons_port which contains the uart port)
 *
 */
static irqreturn_t sn_sal_interrupt(int irq, void *dev_id)
{
	struct sn_cons_port *port = (struct sn_cons_port *)dev_id;
	unsigned long flags;
	int status = ia64_sn_console_intr_status();

	if (!port)
		return IRQ_NONE;

	spin_lock_irqsave(&port->sc_port.lock, flags);
	if (status & SAL_CONSOLE_INTR_RECV) {
		sn_receive_chars(port, flags);
	}
	if (status & SAL_CONSOLE_INTR_XMIT) {
		sn_transmit_chars(port, TRANSMIT_BUFFERED);
	}
	spin_unlock_irqrestore(&port->sc_port.lock, flags);
	return IRQ_HANDLED;
}

/**
 * sn_sal_timer_poll - this function handles polled console mode
 * @data: A pointer to our sn_cons_port (which contains the uart port)
 *
 * data is the pointer that init_timer will store for us.
 This function is
 * associated with init_timer to see if there is any console traffic.
 * Obviously not used in interrupt mode
 *
 */
static void sn_sal_timer_poll(unsigned long data)
{
	struct sn_cons_port *port = (struct sn_cons_port *)data;
	unsigned long flags;

	if (!port)
		return;

	if (!port->sc_port.irq) {
		/* still polling: service rx/tx and rearm ourselves */
		spin_lock_irqsave(&port->sc_port.lock, flags);
		if (sn_process_input)
			sn_receive_chars(port, flags);
		sn_transmit_chars(port, TRANSMIT_RAW);
		spin_unlock_irqrestore(&port->sc_port.lock, flags);
		mod_timer(&port->sc_timer,
			  jiffies + port->sc_interrupt_timeout);
	}
}

/*
 * Boot-time initialization code
 */

/**
 * sn_sal_switch_to_asynch - Switch to async mode (as opposed to synch)
 * @port: Our sn_cons_port (which contains the uart port)
 *
 * So this is used by sn_sal_serial_console_init (early on, before we're
 * registered with serial core).  It's also used by sn_sal_module_init
 * right after we've registered with serial core.  The later only happens
 * if we didn't already come through here via sn_sal_serial_console_init.
 *
 */
static void __init sn_sal_switch_to_asynch(struct sn_cons_port *port)
{
	unsigned long flags;

	if (!port)
		return;

	DPRINTF("sn_console: about to switch to asynchronous console\n");

	/* without early_printk, we may be invoked late enough to race
	 * with other cpus doing console IO at this point, however
	 * console interrupts will never be enabled */
	spin_lock_irqsave(&port->sc_port.lock, flags);

	/* early_printk invocation may have done this for us */
	if (!port->sc_ops)
		port->sc_ops = &poll_ops;

	/* we can't turn on the console interrupt (as request_irq
	 * calls kmalloc, which isn't set up yet), so we rely on a
	 * timer to poll for input and push data from the console
	 * buffer.
	 */
	init_timer(&port->sc_timer);
	port->sc_timer.function = sn_sal_timer_poll;
	port->sc_timer.data = (unsigned long)port;

	if (IS_RUNNING_ON_SIMULATOR())
		port->sc_interrupt_timeout = 6;
	else {
		/* 960cps / 16 char FIFO = 60HZ
		 * HZ / (SN_SAL_FIFO_SPEED_CPS / SN_SAL_FIFO_DEPTH) */
		port->sc_interrupt_timeout =
		    HZ * SN_SAL_UART_FIFO_DEPTH / SN_SAL_UART_FIFO_SPEED_CPS;
	}
	mod_timer(&port->sc_timer, jiffies + port->sc_interrupt_timeout);

	port->sc_is_asynch = 1;
	spin_unlock_irqrestore(&port->sc_port.lock, flags);
}

/**
 * sn_sal_switch_to_interrupts - Switch to interrupt driven mode
 * @port: Our sn_cons_port (which contains the uart port)
 *
 * In sn_sal_module_init, after we're registered with serial core and
 * the port is added, this function is called to switch us to interrupt
 * mode.  We were previously in asynch/polling mode (using init_timer).
 *
 * We attempt to switch to interrupt mode here by calling
 * request_irq.  If that works out, we enable receive interrupts.
 */
static void __init sn_sal_switch_to_interrupts(struct sn_cons_port *port)
{
	unsigned long flags;

	if (port) {
		DPRINTF("sn_console: switching to interrupt driven console\n");

		if (request_irq(SGI_UART_VECTOR, sn_sal_interrupt,
				IRQF_DISABLED | IRQF_SHARED,
				"SAL console driver", port) >= 0) {
			spin_lock_irqsave(&port->sc_port.lock, flags);
			port->sc_port.irq = SGI_UART_VECTOR;
			port->sc_ops = &intr_ops;

			/* turn on receive interrupts */
			ia64_sn_console_intr_enable(SAL_CONSOLE_INTR_RECV);
			spin_unlock_irqrestore(&port->sc_port.lock, flags);
		}
		else {
			printk(KERN_INFO
			       "sn_console: console proceeding in polled mode\n");
		}
	}
}

/*
 * Kernel console definitions
 */

static void sn_sal_console_write(struct console *, const char *, unsigned);
static int sn_sal_console_setup(struct console *, char *);
static struct uart_driver sal_console_uart;
extern struct tty_driver *uart_console_device(struct console *, int *);

static struct console sal_console = {
	.name = DEVICE_NAME,
	.write = sn_sal_console_write,
	.device = uart_console_device,
	.setup = sn_sal_console_setup,
	.index = -1,		/* unspecified */
	.data = &sal_console_uart,
};

#define SAL_CONSOLE	&sal_console

static struct uart_driver sal_console_uart = {
	.owner = THIS_MODULE,
	.driver_name = "sn_console",
	.dev_name = DEVICE_NAME,
	.major = 0,		/* major/minor set at registration time per USE_DYNAMIC_MINOR */
	.minor = 0,
	.nr = 1,		/* one port */
	.cons = SAL_CONSOLE,
};

/**
 * sn_sal_module_init - When the kernel loads us, get us rolling w/ serial core
 *
 * Before this is called, we've been printing kernel messages in a special
 * early mode not making use of the serial core infrastructure.  When our
 * driver is loaded for real, we register the driver and port with serial
 * core and try to enable interrupt driven mode.
 *
 */
static int __init sn_sal_module_init(void)
{
	int retval;

	if (!ia64_platform_is("sn2"))
		return 0;

	printk(KERN_INFO "sn_console: Console driver init\n");

	if (USE_DYNAMIC_MINOR == 1) {
		misc.minor = MISC_DYNAMIC_MINOR;
		misc.name = DEVICE_NAME_DYNAMIC;
		retval = misc_register(&misc);
		if (retval != 0) {
			printk(KERN_WARNING "Failed to register console "
			       "device using misc_register.\n");
			return -ENODEV;
		}
		sal_console_uart.major = MISC_MAJOR;
		sal_console_uart.minor = misc.minor;
	} else {
		sal_console_uart.major = DEVICE_MAJOR;
		sal_console_uart.minor = DEVICE_MINOR;
	}

	/* We register the driver and the port before switching to interrupts
	 * or async above so the proper uart structures are populated */

	if (uart_register_driver(&sal_console_uart) < 0) {
		printk
		    ("ERROR sn_sal_module_init failed uart_register_driver, line %d\n",
		     __LINE__);
		return -ENODEV;
	}

	spin_lock_init(&sal_console_port.sc_port.lock);

	/* Setup the port struct with the minimum needed */
	sal_console_port.sc_port.membase = (char *)1;	/* just needs to be non-zero */
	sal_console_port.sc_port.type = PORT_16550A;
	sal_console_port.sc_port.fifosize = SN_SAL_MAX_CHARS;
	sal_console_port.sc_port.ops = &sn_console_ops;
	sal_console_port.sc_port.line = 0;

	if (uart_add_one_port(&sal_console_uart, &sal_console_port.sc_port) < 0) {
		/* error - not sure what I'd do - so I'll do nothing */
		printk(KERN_ERR "%s: unable to add port\n", __func__);
	}

	/* when this driver is compiled in, the console initialization
	 * will have already switched us into asynchronous operation
	 * before we get here through the module initcalls */
	if (!sal_console_port.sc_is_asynch) {
		sn_sal_switch_to_asynch(&sal_console_port);
	}

	/* at this point (module_init) we can try to turn on interrupts */
	if (!IS_RUNNING_ON_SIMULATOR()) {
		sn_sal_switch_to_interrupts(&sal_console_port);
	}
	sn_process_input = 1;
	return 0;
}

/**
 * sn_sal_module_exit - When we're unloaded, remove the driver/port
 *
 * NOTE(review): misc_deregister() is called unconditionally here, but
 * misc_register() only runs when USE_DYNAMIC_MINOR == 1 — looks like an
 * unbalanced deregister in the static-minor configuration; confirm
 * against misc_deregister's behavior for never-registered devices.
 */
static void __exit sn_sal_module_exit(void)
{
	del_timer_sync(&sal_console_port.sc_timer);
	uart_remove_one_port(&sal_console_uart, &sal_console_port.sc_port);
	uart_unregister_driver(&sal_console_uart);
	misc_deregister(&misc);
}

module_init(sn_sal_module_init);
module_exit(sn_sal_module_exit);

/**
 * puts_raw_fixed - sn_sal_console_write helper for adding \r's as required
 * @puts_raw : puts function to do the writing
 * @s: input string
 * @count: length
 *
 * We need a \r ahead of every \n for direct writes through
 * ia64_sn_console_putb (what sal_puts_raw below actually does).
 *
 */
static void puts_raw_fixed(int (*puts_raw) (const char *s, int len),
			   const char *s, int count)
{
	const char *s1;

	/* Output '\r' before each '\n' */
	while ((s1 = memchr(s, '\n', count)) != NULL) {
		puts_raw(s, s1 - s);
		puts_raw("\r\n", 2);
		count -= s1 + 1 - s;
		s = s1 + 1;
	}
	puts_raw(s, count);
}

/**
 * sn_sal_console_write - Print statements before serial core available
 * @console: Console to operate on - we ignore since we have just one
 * @s: String to send
 * @count: length
 *
 * This is referenced in the console struct.  It is used for early
 * console printing before we register with serial core and for things
 * such as kdb.  The console_lock must be held when we get here.
* * This function has some code for trying to print output even if the lock * is held. We try to cover the case where a lock holder could have died. * We don't use this special case code if we're not registered with serial * core yet. After we're registered with serial core, the only time this * function would be used is for high level kernel output like magic sys req, * kdb, and printk's. */ static void sn_sal_console_write(struct console *co, const char *s, unsigned count) { unsigned long flags = 0; struct sn_cons_port *port = &sal_console_port; static int stole_lock = 0; BUG_ON(!port->sc_is_asynch); /* We can't look at the xmit buffer if we're not registered with serial core * yet. So only do the fancy recovery after registering */ if (!port->sc_port.state) { /* Not yet registered with serial core - simple case */ puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count); return; } /* somebody really wants this output, might be an * oops, kdb, panic, etc. make sure they get it. */ if (spin_is_locked(&port->sc_port.lock)) { int lhead = port->sc_port.state->xmit.head; int ltail = port->sc_port.state->xmit.tail; int counter, got_lock = 0; /* * We attempt to determine if someone has died with the * lock. We wait ~20 secs after the head and tail ptrs * stop moving and assume the lock holder is not functional * and plow ahead. If the lock is freed within the time out * period we re-get the lock and go ahead normally. We also * remember if we have plowed ahead so that we don't have * to wait out the time out period again - the asumption * is that we will time out again. 
*/ for (counter = 0; counter < 150; mdelay(125), counter++) { if (!spin_is_locked(&port->sc_port.lock) || stole_lock) { if (!stole_lock) { spin_lock_irqsave(&port->sc_port.lock, flags); got_lock = 1; } break; } else { /* still locked */ if ((lhead != port->sc_port.state->xmit.head) || (ltail != port->sc_port.state->xmit.tail)) { lhead = port->sc_port.state->xmit.head; ltail = port->sc_port.state->xmit.tail; counter = 0; } } } /* flush anything in the serial core xmit buffer, raw */ sn_transmit_chars(port, 1); if (got_lock) { spin_unlock_irqrestore(&port->sc_port.lock, flags); stole_lock = 0; } else { /* fell thru */ stole_lock = 1; } puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count); } else { stole_lock = 0; spin_lock_irqsave(&port->sc_port.lock, flags); sn_transmit_chars(port, 1); spin_unlock_irqrestore(&port->sc_port.lock, flags); puts_raw_fixed(port->sc_ops->sal_puts_raw, s, count); } } /** * sn_sal_console_setup - Set up console for early printing * @co: Console to work with * @options: Options to set * * Altix console doesn't do anything with baud rates, etc, anyway. * * This isn't required since not providing the setup function in the * console struct is ok. However, other patches like KDB plop something * here so providing it is easier. * */ static int sn_sal_console_setup(struct console *co, char *options) { return 0; } /** * sn_sal_console_write_early - simple early output routine * @co - console struct * @s - string to print * @count - count * * Simple function to provide early output, before even * sn_sal_serial_console_init is called. Referenced in the * console struct registerd in sn_serial_console_early_setup. 
* */ static void __init sn_sal_console_write_early(struct console *co, const char *s, unsigned count) { puts_raw_fixed(sal_console_port.sc_ops->sal_puts_raw, s, count); } /* Used for very early console printing - again, before * sn_sal_serial_console_init is run */ static struct console sal_console_early __initdata = { .name = "sn_sal", .write = sn_sal_console_write_early, .flags = CON_PRINTBUFFER, .index = -1, }; /** * sn_serial_console_early_setup - Sets up early console output support * * Register a console early on... This is for output before even * sn_sal_serial_cosnole_init is called. This function is called from * setup.c. This allows us to do really early polled writes. When * sn_sal_serial_console_init is called, this console is unregistered * and a new one registered. */ int __init sn_serial_console_early_setup(void) { if (!ia64_platform_is("sn2")) return -1; sal_console_port.sc_ops = &poll_ops; spin_lock_init(&sal_console_port.sc_port.lock); early_sn_setup(); /* Find SAL entry points */ register_console(&sal_console_early); return 0; } /** * sn_sal_serial_console_init - Early console output - set up for register * * This function is called when regular console init happens. Because we * support even earlier console output with sn_serial_console_early_setup * (called from setup.c directly), this function unregisters the really * early console. * * Note: Even if setup.c doesn't register sal_console_early, unregistering * it here doesn't hurt anything. * */ static int __init sn_sal_serial_console_init(void) { if (ia64_platform_is("sn2")) { sn_sal_switch_to_asynch(&sal_console_port); DPRINTF("sn_sal_serial_console_init : register console\n"); register_console(&sal_console); unregister_console(&sal_console_early); } return 0; } console_initcall(sn_sal_serial_console_init);
gpl-2.0
SlimRoms/kernel_sony_msm8x60
drivers/gpu/drm/nouveau/nouveau_channel.c
1362
13925
/* * Copyright 2005-2006 Stephane Marchesin * All Rights Reserved. * * Permission is hereby granted, free of charge, to any person obtaining a * copy of this software and associated documentation files (the "Software"), * to deal in the Software without restriction, including without limitation * the rights to use, copy, modify, merge, publish, distribute, sublicense, * and/or sell copies of the Software, and to permit persons to whom the * Software is furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice (including the next * paragraph) shall be included in all copies or substantial portions of the * Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER * DEALINGS IN THE SOFTWARE. 
*/ #include "drmP.h" #include "drm.h" #include "nouveau_drv.h" #include "nouveau_drm.h" #include "nouveau_dma.h" static int nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_bo *pb = chan->pushbuf_bo; struct nouveau_gpuobj *pushbuf = NULL; int ret = 0; if (dev_priv->card_type >= NV_50) { if (dev_priv->card_type < NV_C0) { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, (1ULL << 40), NV_MEM_ACCESS_RO, NV_MEM_TARGET_VM, &pushbuf); } chan->pushbuf_base = pb->bo.offset; } else if (pb->bo.mem.mem_type == TTM_PL_TT) { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, dev_priv->gart_info.aper_size, NV_MEM_ACCESS_RO, NV_MEM_TARGET_GART, &pushbuf); chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; } else if (dev_priv->card_type != NV_04) { ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0, dev_priv->fb_available_size, NV_MEM_ACCESS_RO, NV_MEM_TARGET_VRAM, &pushbuf); chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; } else { /* NV04 cmdbuf hack, from original ddx.. not sure of it's * exact reason for existing :) PCI access to cmdbuf in * VRAM. 
*/ ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, pci_resource_start(dev->pdev, 1), dev_priv->fb_available_size, NV_MEM_ACCESS_RO, NV_MEM_TARGET_PCI, &pushbuf); chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT; } nouveau_gpuobj_ref(pushbuf, &chan->pushbuf); nouveau_gpuobj_ref(NULL, &pushbuf); return ret; } static struct nouveau_bo * nouveau_channel_user_pushbuf_alloc(struct drm_device *dev) { struct nouveau_bo *pushbuf = NULL; int location, ret; if (nouveau_vram_pushbuf) location = TTM_PL_FLAG_VRAM; else location = TTM_PL_FLAG_TT; ret = nouveau_bo_new(dev, NULL, 65536, 0, location, 0, 0x0000, &pushbuf); if (ret) { NV_ERROR(dev, "error allocating DMA push buffer: %d\n", ret); return NULL; } ret = nouveau_bo_pin(pushbuf, location); if (ret) { NV_ERROR(dev, "error pinning DMA push buffer: %d\n", ret); nouveau_bo_ref(NULL, &pushbuf); return NULL; } ret = nouveau_bo_map(pushbuf); if (ret) { nouveau_bo_unpin(pushbuf); nouveau_bo_ref(NULL, &pushbuf); return NULL; } return pushbuf; } /* allocates and initializes a fifo for user space consumption */ int nouveau_channel_alloc(struct drm_device *dev, struct nouveau_channel **chan_ret, struct drm_file *file_priv, uint32_t vram_handle, uint32_t gart_handle) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; struct nouveau_channel *chan; unsigned long flags; int ret; /* allocate and lock channel structure */ chan = kzalloc(sizeof(*chan), GFP_KERNEL); if (!chan) return -ENOMEM; chan->dev = dev; chan->file_priv = file_priv; chan->vram_handle = vram_handle; chan->gart_handle = gart_handle; kref_init(&chan->ref); atomic_set(&chan->users, 1); mutex_init(&chan->mutex); mutex_lock(&chan->mutex); /* allocate hw channel id */ spin_lock_irqsave(&dev_priv->channels.lock, flags); for (chan->id = 0; chan->id < pfifo->channels; chan->id++) { if (!dev_priv->channels.ptr[chan->id]) { nouveau_channel_ref(chan, &dev_priv->channels.ptr[chan->id]); break; } } 
spin_unlock_irqrestore(&dev_priv->channels.lock, flags); if (chan->id == pfifo->channels) { mutex_unlock(&chan->mutex); kfree(chan); return -ENODEV; } NV_DEBUG(dev, "initialising channel %d\n", chan->id); INIT_LIST_HEAD(&chan->nvsw.vbl_wait); INIT_LIST_HEAD(&chan->nvsw.flip); INIT_LIST_HEAD(&chan->fence.pending); spin_lock_init(&chan->fence.lock); /* Allocate DMA push buffer */ chan->pushbuf_bo = nouveau_channel_user_pushbuf_alloc(dev); if (!chan->pushbuf_bo) { ret = -ENOMEM; NV_ERROR(dev, "pushbuf %d\n", ret); nouveau_channel_put(&chan); return ret; } nouveau_dma_pre_init(chan); chan->user_put = 0x40; chan->user_get = 0x44; /* Allocate space for per-channel fixed notifier memory */ ret = nouveau_notifier_init_channel(chan); if (ret) { NV_ERROR(dev, "ntfy %d\n", ret); nouveau_channel_put(&chan); return ret; } /* Setup channel's default objects */ ret = nouveau_gpuobj_channel_init(chan, vram_handle, gart_handle); if (ret) { NV_ERROR(dev, "gpuobj %d\n", ret); nouveau_channel_put(&chan); return ret; } /* Create a dma object for the push buffer */ ret = nouveau_channel_pushbuf_ctxdma_init(chan); if (ret) { NV_ERROR(dev, "pbctxdma %d\n", ret); nouveau_channel_put(&chan); return ret; } /* disable the fifo caches */ pfifo->reassign(dev, false); /* Construct initial RAMFC for new channel */ ret = pfifo->create_context(chan); if (ret) { nouveau_channel_put(&chan); return ret; } pfifo->reassign(dev, true); ret = nouveau_dma_init(chan); if (!ret) ret = nouveau_fence_channel_init(chan); if (ret) { nouveau_channel_put(&chan); return ret; } nouveau_debugfs_channel_init(chan); NV_DEBUG(dev, "channel %d initialised\n", chan->id); *chan_ret = chan; return 0; } struct nouveau_channel * nouveau_channel_get_unlocked(struct nouveau_channel *ref) { struct nouveau_channel *chan = NULL; if (likely(ref && atomic_inc_not_zero(&ref->users))) nouveau_channel_ref(ref, &chan); return chan; } struct nouveau_channel * nouveau_channel_get(struct drm_device *dev, struct drm_file *file_priv, int id) 
{ struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_channel *chan; unsigned long flags; if (unlikely(id < 0 || id >= NOUVEAU_MAX_CHANNEL_NR)) return ERR_PTR(-EINVAL); spin_lock_irqsave(&dev_priv->channels.lock, flags); chan = nouveau_channel_get_unlocked(dev_priv->channels.ptr[id]); spin_unlock_irqrestore(&dev_priv->channels.lock, flags); if (unlikely(!chan)) return ERR_PTR(-EINVAL); if (unlikely(file_priv && chan->file_priv != file_priv)) { nouveau_channel_put_unlocked(&chan); return ERR_PTR(-EINVAL); } mutex_lock(&chan->mutex); return chan; } void nouveau_channel_put_unlocked(struct nouveau_channel **pchan) { struct nouveau_channel *chan = *pchan; struct drm_device *dev = chan->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo; unsigned long flags; int i; /* decrement the refcount, and we're done if there's still refs */ if (likely(!atomic_dec_and_test(&chan->users))) { nouveau_channel_ref(NULL, pchan); return; } /* no one wants the channel anymore */ NV_DEBUG(dev, "freeing channel %d\n", chan->id); nouveau_debugfs_channel_fini(chan); /* give it chance to idle */ nouveau_channel_idle(chan); /* ensure all outstanding fences are signaled. they should be if the * above attempts at idling were OK, but if we failed this'll tell TTM * we're done with the buffers. 
*/ nouveau_fence_channel_fini(chan); /* boot it off the hardware */ pfifo->reassign(dev, false); /* destroy the engine specific contexts */ pfifo->destroy_context(chan); for (i = 0; i < NVOBJ_ENGINE_NR; i++) { if (chan->engctx[i]) dev_priv->eng[i]->context_del(chan, i); } pfifo->reassign(dev, true); /* aside from its resources, the channel should now be dead, * remove it from the channel list */ spin_lock_irqsave(&dev_priv->channels.lock, flags); nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]); spin_unlock_irqrestore(&dev_priv->channels.lock, flags); /* destroy any resources the channel owned */ nouveau_gpuobj_ref(NULL, &chan->pushbuf); if (chan->pushbuf_bo) { nouveau_bo_unmap(chan->pushbuf_bo); nouveau_bo_unpin(chan->pushbuf_bo); nouveau_bo_ref(NULL, &chan->pushbuf_bo); } nouveau_gpuobj_channel_takedown(chan); nouveau_notifier_takedown_channel(chan); nouveau_channel_ref(NULL, pchan); } void nouveau_channel_put(struct nouveau_channel **pchan) { mutex_unlock(&(*pchan)->mutex); nouveau_channel_put_unlocked(pchan); } static void nouveau_channel_del(struct kref *ref) { struct nouveau_channel *chan = container_of(ref, struct nouveau_channel, ref); kfree(chan); } void nouveau_channel_ref(struct nouveau_channel *chan, struct nouveau_channel **pchan) { if (chan) kref_get(&chan->ref); if (*pchan) kref_put(&(*pchan)->ref, nouveau_channel_del); *pchan = chan; } void nouveau_channel_idle(struct nouveau_channel *chan) { struct drm_device *dev = chan->dev; struct nouveau_fence *fence = NULL; int ret; nouveau_fence_update(chan); if (chan->fence.sequence != chan->fence.sequence_ack) { ret = nouveau_fence_new(chan, &fence, true); if (!ret) { ret = nouveau_fence_wait(fence, false, false); nouveau_fence_unref(&fence); } if (ret) NV_ERROR(dev, "Failed to idle channel %d.\n", chan->id); } } /* cleans up all the fifos from file_priv */ void nouveau_channel_cleanup(struct drm_device *dev, struct drm_file *file_priv) { struct drm_nouveau_private *dev_priv = dev->dev_private; 
struct nouveau_engine *engine = &dev_priv->engine; struct nouveau_channel *chan; int i; NV_DEBUG(dev, "clearing FIFO enables from file_priv\n"); for (i = 0; i < engine->fifo.channels; i++) { chan = nouveau_channel_get(dev, file_priv, i); if (IS_ERR(chan)) continue; atomic_dec(&chan->users); nouveau_channel_put(&chan); } } /*********************************** * ioctls wrapping the functions ***********************************/ static int nouveau_ioctl_fifo_alloc(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct drm_nouveau_channel_alloc *init = data; struct nouveau_channel *chan; int ret; if (!dev_priv->eng[NVOBJ_ENGINE_GR]) return -ENODEV; if (init->fb_ctxdma_handle == ~0 || init->tt_ctxdma_handle == ~0) return -EINVAL; ret = nouveau_channel_alloc(dev, &chan, file_priv, init->fb_ctxdma_handle, init->tt_ctxdma_handle); if (ret) return ret; init->channel = chan->id; if (chan->dma.ib_max) init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM | NOUVEAU_GEM_DOMAIN_GART; else if (chan->pushbuf_bo->bo.mem.mem_type == TTM_PL_VRAM) init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_VRAM; else init->pushbuf_domains = NOUVEAU_GEM_DOMAIN_GART; if (dev_priv->card_type < NV_C0) { init->subchan[0].handle = NvM2MF; if (dev_priv->card_type < NV_50) init->subchan[0].grclass = 0x0039; else init->subchan[0].grclass = 0x5039; init->subchan[1].handle = NvSw; init->subchan[1].grclass = NV_SW; init->nr_subchan = 2; } else { init->subchan[0].handle = 0x9039; init->subchan[0].grclass = 0x9039; init->nr_subchan = 1; } /* Named memory object area */ ret = drm_gem_handle_create(file_priv, chan->notifier_bo->gem, &init->notifier_handle); if (ret == 0) atomic_inc(&chan->users); /* userspace reference */ nouveau_channel_put(&chan); return ret; } static int nouveau_ioctl_fifo_free(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct drm_nouveau_channel_free *req = data; struct nouveau_channel *chan; chan = 
	    nouveau_channel_get(dev, file_priv, req->channel);
	if (IS_ERR(chan))
		return PTR_ERR(chan);

	/* Drop the userspace reference taken at channel-alloc time; the
	 * channel is actually destroyed once the last user goes away. */
	atomic_dec(&chan->users);
	nouveau_channel_put(&chan);
	return 0;
}

/***********************************
 * ioctls wrapping the functions
 ***********************************/

/* DRM ioctl dispatch table; all entries run unlocked and require
 * an authenticated client (some additionally require master/root). */
struct drm_ioctl_desc nouveau_ioctls[] = {
	DRM_IOCTL_DEF_DRV(NOUVEAU_GETPARAM, nouveau_ioctl_getparam, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_SETPARAM, nouveau_ioctl_setparam, DRM_UNLOCKED|DRM_AUTH|DRM_MASTER|DRM_ROOT_ONLY),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_ALLOC, nouveau_ioctl_fifo_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_CHANNEL_FREE, nouveau_ioctl_fifo_free, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GROBJ_ALLOC, nouveau_ioctl_grobj_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_NOTIFIEROBJ_ALLOC, nouveau_ioctl_notifier_alloc, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GPUOBJ_FREE, nouveau_ioctl_gpuobj_free, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_NEW, nouveau_gem_ioctl_new, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_PUSHBUF, nouveau_gem_ioctl_pushbuf, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_PREP, nouveau_gem_ioctl_cpu_prep, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_CPU_FINI, nouveau_gem_ioctl_cpu_fini, DRM_UNLOCKED|DRM_AUTH),
	DRM_IOCTL_DEF_DRV(NOUVEAU_GEM_INFO, nouveau_gem_ioctl_info, DRM_UNLOCKED|DRM_AUTH),
};

/* number of entries in the table above */
int nouveau_max_ioctl = DRM_ARRAY_SIZE(nouveau_ioctls);
gpl-2.0
yuzaipiaofei/android_kernel_cyanogen_msm8916
drivers/nfc/pn544/mei.c
2130
2461
/* * HCI based Driver for NXP pn544 NFC Chip * * Copyright (C) 2013 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms and conditions of the GNU General Public License, * version 2, as published by the Free Software Foundation. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the * Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. */ #include <linux/module.h> #include <linux/mod_devicetable.h> #include <linux/nfc.h> #include <net/nfc/hci.h> #include <net/nfc/llc.h> #include "../mei_phy.h" #include "pn544.h" #define PN544_DRIVER_NAME "pn544" static int pn544_mei_probe(struct mei_cl_device *device, const struct mei_cl_device_id *id) { struct nfc_mei_phy *phy; int r; pr_info("Probing NFC pn544\n"); phy = nfc_mei_phy_alloc(device); if (!phy) { pr_err("Cannot allocate memory for pn544 mei phy.\n"); return -ENOMEM; } r = pn544_hci_probe(phy, &mei_phy_ops, LLC_NOP_NAME, MEI_NFC_HEADER_SIZE, 0, MEI_NFC_MAX_HCI_PAYLOAD, &phy->hdev); if (r < 0) { nfc_mei_phy_free(phy); return r; } return 0; } static int pn544_mei_remove(struct mei_cl_device *device) { struct nfc_mei_phy *phy = mei_cl_get_drvdata(device); pr_info("Removing pn544\n"); pn544_hci_remove(phy->hdev); nfc_mei_phy_free(phy); return 0; } static struct mei_cl_device_id pn544_mei_tbl[] = { { PN544_DRIVER_NAME }, /* required last entry */ { } }; MODULE_DEVICE_TABLE(mei, pn544_mei_tbl); static struct mei_cl_driver pn544_driver = { .id_table = pn544_mei_tbl, .name = PN544_DRIVER_NAME, .probe = pn544_mei_probe, .remove = pn544_mei_remove, }; static int pn544_mei_init(void) { int r; 
pr_debug(DRIVER_DESC ": %s\n", __func__); r = mei_cl_driver_register(&pn544_driver); if (r) { pr_err(PN544_DRIVER_NAME ": driver registration failed\n"); return r; } return 0; } static void pn544_mei_exit(void) { mei_cl_driver_unregister(&pn544_driver); } module_init(pn544_mei_init); module_exit(pn544_mei_exit); MODULE_LICENSE("GPL"); MODULE_DESCRIPTION(DRIVER_DESC);
gpl-2.0
djvoleur/test_test
drivers/ssb/driver_gpio.c
2386
5300
/* * Sonics Silicon Backplane * GPIO driver * * Copyright 2011, Broadcom Corporation * Copyright 2012, Hauke Mehrtens <hauke@hauke-m.de> * * Licensed under the GNU/GPL. See COPYING for details. */ #include <linux/gpio.h> #include <linux/export.h> #include <linux/ssb/ssb.h> #include "ssb_private.h" static struct ssb_bus *ssb_gpio_get_bus(struct gpio_chip *chip) { return container_of(chip, struct ssb_bus, gpio); } static int ssb_gpio_chipco_get_value(struct gpio_chip *chip, unsigned gpio) { struct ssb_bus *bus = ssb_gpio_get_bus(chip); return !!ssb_chipco_gpio_in(&bus->chipco, 1 << gpio); } static void ssb_gpio_chipco_set_value(struct gpio_chip *chip, unsigned gpio, int value) { struct ssb_bus *bus = ssb_gpio_get_bus(chip); ssb_chipco_gpio_out(&bus->chipco, 1 << gpio, value ? 1 << gpio : 0); } static int ssb_gpio_chipco_direction_input(struct gpio_chip *chip, unsigned gpio) { struct ssb_bus *bus = ssb_gpio_get_bus(chip); ssb_chipco_gpio_outen(&bus->chipco, 1 << gpio, 0); return 0; } static int ssb_gpio_chipco_direction_output(struct gpio_chip *chip, unsigned gpio, int value) { struct ssb_bus *bus = ssb_gpio_get_bus(chip); ssb_chipco_gpio_outen(&bus->chipco, 1 << gpio, 1 << gpio); ssb_chipco_gpio_out(&bus->chipco, 1 << gpio, value ? 
1 << gpio : 0); return 0; } static int ssb_gpio_chipco_request(struct gpio_chip *chip, unsigned gpio) { struct ssb_bus *bus = ssb_gpio_get_bus(chip); ssb_chipco_gpio_control(&bus->chipco, 1 << gpio, 0); /* clear pulldown */ ssb_chipco_gpio_pulldown(&bus->chipco, 1 << gpio, 0); /* Set pullup */ ssb_chipco_gpio_pullup(&bus->chipco, 1 << gpio, 1 << gpio); return 0; } static void ssb_gpio_chipco_free(struct gpio_chip *chip, unsigned gpio) { struct ssb_bus *bus = ssb_gpio_get_bus(chip); /* clear pullup */ ssb_chipco_gpio_pullup(&bus->chipco, 1 << gpio, 0); } static int ssb_gpio_chipco_to_irq(struct gpio_chip *chip, unsigned gpio) { struct ssb_bus *bus = ssb_gpio_get_bus(chip); if (bus->bustype == SSB_BUSTYPE_SSB) return ssb_mips_irq(bus->chipco.dev) + 2; else return -EINVAL; } static int ssb_gpio_chipco_init(struct ssb_bus *bus) { struct gpio_chip *chip = &bus->gpio; chip->label = "ssb_chipco_gpio"; chip->owner = THIS_MODULE; chip->request = ssb_gpio_chipco_request; chip->free = ssb_gpio_chipco_free; chip->get = ssb_gpio_chipco_get_value; chip->set = ssb_gpio_chipco_set_value; chip->direction_input = ssb_gpio_chipco_direction_input; chip->direction_output = ssb_gpio_chipco_direction_output; chip->to_irq = ssb_gpio_chipco_to_irq; chip->ngpio = 16; /* There is just one SoC in one device and its GPIO addresses should be * deterministic to address them more easily. The other buses could get * a random base number. */ if (bus->bustype == SSB_BUSTYPE_SSB) chip->base = 0; else chip->base = -1; return gpiochip_add(chip); } #ifdef CONFIG_SSB_DRIVER_EXTIF static int ssb_gpio_extif_get_value(struct gpio_chip *chip, unsigned gpio) { struct ssb_bus *bus = ssb_gpio_get_bus(chip); return !!ssb_extif_gpio_in(&bus->extif, 1 << gpio); } static void ssb_gpio_extif_set_value(struct gpio_chip *chip, unsigned gpio, int value) { struct ssb_bus *bus = ssb_gpio_get_bus(chip); ssb_extif_gpio_out(&bus->extif, 1 << gpio, value ? 
1 << gpio : 0); } static int ssb_gpio_extif_direction_input(struct gpio_chip *chip, unsigned gpio) { struct ssb_bus *bus = ssb_gpio_get_bus(chip); ssb_extif_gpio_outen(&bus->extif, 1 << gpio, 0); return 0; } static int ssb_gpio_extif_direction_output(struct gpio_chip *chip, unsigned gpio, int value) { struct ssb_bus *bus = ssb_gpio_get_bus(chip); ssb_extif_gpio_outen(&bus->extif, 1 << gpio, 1 << gpio); ssb_extif_gpio_out(&bus->extif, 1 << gpio, value ? 1 << gpio : 0); return 0; } static int ssb_gpio_extif_to_irq(struct gpio_chip *chip, unsigned gpio) { struct ssb_bus *bus = ssb_gpio_get_bus(chip); if (bus->bustype == SSB_BUSTYPE_SSB) return ssb_mips_irq(bus->extif.dev) + 2; else return -EINVAL; } static int ssb_gpio_extif_init(struct ssb_bus *bus) { struct gpio_chip *chip = &bus->gpio; chip->label = "ssb_extif_gpio"; chip->owner = THIS_MODULE; chip->get = ssb_gpio_extif_get_value; chip->set = ssb_gpio_extif_set_value; chip->direction_input = ssb_gpio_extif_direction_input; chip->direction_output = ssb_gpio_extif_direction_output; chip->to_irq = ssb_gpio_extif_to_irq; chip->ngpio = 5; /* There is just one SoC in one device and its GPIO addresses should be * deterministic to address them more easily. The other buses could get * a random base number. */ if (bus->bustype == SSB_BUSTYPE_SSB) chip->base = 0; else chip->base = -1; return gpiochip_add(chip); } #else static int ssb_gpio_extif_init(struct ssb_bus *bus) { return -ENOTSUPP; } #endif int ssb_gpio_init(struct ssb_bus *bus) { if (ssb_chipco_available(&bus->chipco)) return ssb_gpio_chipco_init(bus); else if (ssb_extif_available(&bus->extif)) return ssb_gpio_extif_init(bus); else SSB_WARN_ON(1); return -1; } int ssb_gpio_unregister(struct ssb_bus *bus) { if (ssb_chipco_available(&bus->chipco) || ssb_extif_available(&bus->extif)) { return gpiochip_remove(&bus->gpio); } else { SSB_WARN_ON(1); } return -1; }
gpl-2.0
free-z4u/android_kernel_htc_msm7x30
drivers/media/video/sh_mobile_ceu_camera.c
2386
59370
/*
 * V4L2 Driver for SuperH Mobile CEU interface
 *
 * Copyright (C) 2008 Magnus Damm
 *
 * Based on V4L2 Driver for PXA camera host - "pxa_camera.c",
 *
 * Copyright (C) 2006, Sascha Hauer, Pengutronix
 * Copyright (C) 2008, Guennadi Liakhovetski <kernel@pengutronix.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/init.h>
#include <linux/module.h>
#include <linux/io.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/time.h>
#include <linux/version.h>
#include <linux/slab.h>
#include <linux/device.h>
#include <linux/platform_device.h>
#include <linux/videodev2.h>
#include <linux/pm_runtime.h>
#include <linux/sched.h>

#include <media/v4l2-common.h>
#include <media/v4l2-dev.h>
#include <media/soc_camera.h>
#include <media/sh_mobile_ceu.h>
#include <media/videobuf2-dma-contig.h>
#include <media/v4l2-mediabus.h>
#include <media/soc_mediabus.h>

/* register offsets for sh7722 / sh7723 */

#define CAPSR  0x00 /* Capture start register */
#define CAPCR  0x04 /* Capture control register */
#define CAMCR  0x08 /* Capture interface control register */
#define CMCYR  0x0c /* Capture interface cycle  register */
#define CAMOR  0x10 /* Capture interface offset register */
#define CAPWR  0x14 /* Capture interface width register */
#define CAIFR  0x18 /* Capture interface input format register */
#define CSTCR  0x20 /* Camera strobe control register (<= sh7722) */
#define CSECR  0x24 /* Camera strobe emission count register (<= sh7722) */
#define CRCNTR 0x28 /* CEU register control register */
#define CRCMPR 0x2c /* CEU register forcible control register */
#define CFLCR  0x30 /* Capture filter control register */
#define CFSZR  0x34 /* Capture filter size clip register */
#define CDWDR  0x38 /* Capture destination width register */
#define CDAYR  0x3c /* Capture data address Y register */
#define CDACR  0x40 /* Capture data address C register */
#define CDBYR  0x44 /* Capture data bottom-field address Y register */
#define CDBCR  0x48 /* Capture data bottom-field address C register */
#define CBDSR  0x4c /* Capture bundle destination size register */
#define CFWCR  0x5c /* Firewall operation control register */
#define CLFCR  0x60 /* Capture low-pass filter control register */
#define CDOCR  0x64 /* Capture data output control register */
#define CDDCR  0x68 /* Capture data complexity level register */
#define CDDAR  0x6c /* Capture data complexity level address register */
#define CEIER  0x70 /* Capture event interrupt enable register */
#define CETCR  0x74 /* Capture event flag clear register */
#define CSTSR  0x7c /* Capture status register */
#define CSRTR  0x80 /* Capture software reset register */
#define CDSSR  0x84 /* Capture data size register */
#define CDAYR2 0x90 /* Capture data address Y register 2 */
#define CDACR2 0x94 /* Capture data address C register 2 */
#define CDBYR2 0x98 /* Capture data bottom-field address Y register 2 */
#define CDBCR2 0x9c /* Capture data bottom-field address C register 2 */

#undef DEBUG_GEOMETRY
#ifdef DEBUG_GEOMETRY
#define dev_geo	dev_info
#else
#define dev_geo	dev_dbg
#endif

/* per video frame buffer */
struct sh_mobile_ceu_buffer {
	struct vb2_buffer vb; /* v4l buffer must be first */
	struct list_head queue;
	enum v4l2_mbus_pixelcode code;
};

/* per CEU instance state */
struct sh_mobile_ceu_dev {
	struct soc_camera_host ici;
	struct soc_camera_device *icd;

	unsigned int irq;
	void __iomem *base;
	unsigned long video_limit;

	spinlock_t lock;		/* Protects video buffer lists */
	struct list_head capture;
	struct vb2_buffer *active;
	struct vb2_alloc_ctx *alloc_ctx;

	struct sh_mobile_ceu_info *pdata;
	struct completion complete;

	u32 cflcr;

	enum v4l2_field field;
	int sequence;

	unsigned int image_mode:1;
	unsigned int is_16bit:1;
	unsigned int frozen:1;
};

/* per attached client (sensor) geometry state */
struct sh_mobile_ceu_cam {
	/* CEU offsets within scaled by the CEU camera output */
	unsigned int ceu_left;
	unsigned int ceu_top;
	/* Client output, as seen by the CEU */
	unsigned int width;
	unsigned int height;
	/*
	 * User window from S_CROP / G_CROP, produced by client cropping and
	 * scaling, CEU scaling and CEU cropping, mapped back onto the client
	 * input window
	 */
	struct v4l2_rect subrect;
	/* Camera cropping rectangle */
	struct v4l2_rect rect;
	const struct soc_mbus_pixelfmt *extra_fmt;
	enum v4l2_mbus_pixelcode code;
};

/* Map a vb2 buffer back to its enclosing CEU buffer (vb must be first member) */
static struct sh_mobile_ceu_buffer *to_ceu_vb(struct vb2_buffer *vb)
{
	return container_of(vb, struct sh_mobile_ceu_buffer, vb);
}

/*
 * Build the SOCAM_* capability mask the CEU can support, honouring the
 * platform-selected bus width. Returns 0 if no data width was configured.
 */
static unsigned long make_bus_param(struct sh_mobile_ceu_dev *pcdev)
{
	unsigned long flags;

	flags = SOCAM_MASTER |
		SOCAM_PCLK_SAMPLE_RISING |
		SOCAM_HSYNC_ACTIVE_HIGH |
		SOCAM_HSYNC_ACTIVE_LOW |
		SOCAM_VSYNC_ACTIVE_HIGH |
		SOCAM_VSYNC_ACTIVE_LOW |
		SOCAM_DATA_ACTIVE_HIGH;

	if (pcdev->pdata->flags & SH_CEU_FLAG_USE_8BIT_BUS)
		flags |= SOCAM_DATAWIDTH_8;

	if (pcdev->pdata->flags & SH_CEU_FLAG_USE_16BIT_BUS)
		flags |= SOCAM_DATAWIDTH_16;

	if (flags & SOCAM_DATAWIDTH_MASK)
		return flags;

	return 0;
}

/* Write a 32-bit CEU register */
static void ceu_write(struct sh_mobile_ceu_dev *priv,
		      unsigned long reg_offs, u32 data)
{
	iowrite32(data, priv->base + reg_offs);
}

/* Read a 32-bit CEU register */
static u32 ceu_read(struct sh_mobile_ceu_dev *priv, unsigned long reg_offs)
{
	return ioread32(priv->base + reg_offs);
}

/*
 * Soft-reset the CEU via CAPSR bit 16 and busy-wait (up to 1ms each) for
 * both CSTSR.CPTON and CAPSR.CPKIL to clear. Returns -EIO on timeout.
 */
static int sh_mobile_ceu_soft_reset(struct sh_mobile_ceu_dev *pcdev)
{
	int i, success = 0;
	struct soc_camera_device *icd = pcdev->icd;

	ceu_write(pcdev, CAPSR, 1 << 16); /* reset */

	/* wait CSTSR.CPTON bit */
	for (i = 0; i < 1000; i++) {
		if (!(ceu_read(pcdev, CSTSR) & 1)) {
			success++;
			break;
		}
		udelay(1);
	}

	/* wait CAPSR.CPKIL bit */
	for (i = 0; i < 1000; i++) {
		if (!(ceu_read(pcdev, CAPSR) & (1 << 16))) {
			success++;
			break;
		}
		udelay(1);
	}

	if (2 != success) {
		dev_warn(&icd->dev, "soft reset time out\n");
		return -EIO;
	}

	return 0;
}

/*
 *  Videobuf operations
 */

/*
 * vb2 .queue_setup: compute per-buffer size from the current user geometry
 * and clamp the buffer count to the optional platform video_limit.
 */
static int sh_mobile_ceu_videobuf_setup(struct vb2_queue *vq,
			unsigned int *count, unsigned int *num_planes,
			unsigned long sizes[], void *alloc_ctxs[])
{
	struct soc_camera_device *icd = container_of(vq, struct soc_camera_device, vb2_vidq);
	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
						icd->current_fmt->host_fmt);

	if (bytes_per_line < 0)
		return bytes_per_line;

	*num_planes = 1;

	pcdev->sequence = 0;
	sizes[0] = bytes_per_line * icd->user_height;
	alloc_ctxs[0] = pcdev->alloc_ctx;

	if (!*count)
		*count = 2;

	if (pcdev->video_limit) {
		if (PAGE_ALIGN(sizes[0]) * *count > pcdev->video_limit)
			*count = pcdev->video_limit / PAGE_ALIGN(sizes[0]);
	}

	dev_dbg(icd->dev.parent, "count=%d, size=%lu\n", *count, sizes[0]);

	return 0;
}

#define CEU_CETCR_MAGIC 0x0317f313 /* acknowledge magical interrupt sources */
#define CEU_CETCR_IGRW (1 << 4) /* prohibited register access interrupt bit */
#define CEU_CEIER_CPEIE (1 << 0) /* one-frame capture end interrupt */
#define CEU_CEIER_VBP   (1 << 20) /* vbp error */
#define CEU_CAPCR_CTNCP (1 << 16) /* continuous capture mode (if set) */
#define CEU_CEIER_MASK (CEU_CEIER_CPEIE | CEU_CEIER_VBP)

/*
 * return value doesn't reflect the success/failure to queue the new buffer,
 * but rather the status of the previous buffer.
 */
static int sh_mobile_ceu_capture(struct sh_mobile_ceu_dev *pcdev)
{
	struct soc_camera_device *icd = pcdev->icd;
	dma_addr_t phys_addr_top, phys_addr_bottom;
	unsigned long top1, top2;
	unsigned long bottom1, bottom2;
	u32 status;
	int ret = 0;

	/*
	 * The hardware is _very_ picky about this sequence. Especially
	 * the CEU_CETCR_MAGIC value. It seems like we need to acknowledge
	 * several not-so-well documented interrupt sources in CETCR.
	 */
	ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) & ~CEU_CEIER_MASK);
	status = ceu_read(pcdev, CETCR);
	ceu_write(pcdev, CETCR, ~status & CEU_CETCR_MAGIC);
	if (!pcdev->frozen)
		ceu_write(pcdev, CEIER, ceu_read(pcdev, CEIER) | CEU_CEIER_MASK);
	ceu_write(pcdev, CAPCR, ceu_read(pcdev, CAPCR) & ~CEU_CAPCR_CTNCP);
	ceu_write(pcdev, CETCR, CEU_CETCR_MAGIC ^ CEU_CETCR_IGRW);

	/*
	 * When a VBP interrupt occurs, a capture end interrupt does not occur
	 * and the image of that frame is not captured correctly. So, soft reset
	 * is needed here.
	 */
	if (status & CEU_CEIER_VBP) {
		sh_mobile_ceu_soft_reset(pcdev);
		ret = -EIO;
	}

	if (pcdev->frozen) {
		complete(&pcdev->complete);
		return ret;
	}

	if (!pcdev->active)
		return ret;

	/* For BT interlacing the top/bottom field registers swap roles */
	if (V4L2_FIELD_INTERLACED_BT == pcdev->field) {
		top1	= CDBYR;
		top2	= CDBCR;
		bottom1	= CDAYR;
		bottom2	= CDACR;
	} else {
		top1	= CDAYR;
		top2	= CDACR;
		bottom1	= CDBYR;
		bottom2	= CDBCR;
	}

	phys_addr_top = vb2_dma_contig_plane_paddr(pcdev->active, 0);

	ceu_write(pcdev, top1, phys_addr_top);
	if (V4L2_FIELD_NONE != pcdev->field) {
		phys_addr_bottom = phys_addr_top + icd->user_width;
		ceu_write(pcdev, bottom1, phys_addr_bottom);
	}

	switch (icd->current_fmt->host_fmt->fourcc) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		/* Planar formats: program the chroma plane base as well */
		phys_addr_top += icd->user_width * icd->user_height;
		ceu_write(pcdev, top2, phys_addr_top);
		if (V4L2_FIELD_NONE != pcdev->field) {
			phys_addr_bottom = phys_addr_top + icd->user_width;
			ceu_write(pcdev, bottom2, phys_addr_bottom);
		}
	}

	ceu_write(pcdev, CAPSR, 0x1); /* start capture */

	return ret;
}

/* vb2 .buf_prepare: validate plane size against the current format */
static int sh_mobile_ceu_videobuf_prepare(struct vb2_buffer *vb)
{
	struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
	struct sh_mobile_ceu_buffer *buf;
	int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
						icd->current_fmt->host_fmt);
	unsigned long size;

	if (bytes_per_line < 0)
		return bytes_per_line;

	buf = to_ceu_vb(vb);

	dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
		vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));

	/* Added list head initialization on alloc */
	WARN(!list_empty(&buf->queue), "Buffer %p on queue!\n", vb);

#ifdef DEBUG
	/*
	 * This can be useful if you want to see if we actually fill
	 * the buffer with something
	 */
	if (vb2_plane_vaddr(vb, 0))
		memset(vb2_plane_vaddr(vb, 0), 0xaa, vb2_get_plane_payload(vb, 0));
#endif

	BUG_ON(NULL == icd->current_fmt);

	size = icd->user_height * bytes_per_line;

	if (vb2_plane_size(vb, 0) < size) {
		dev_err(icd->dev.parent, "Buffer too small (%lu < %lu)\n",
			vb2_plane_size(vb, 0), size);
		return -ENOBUFS;
	}

	vb2_set_plane_payload(vb, 0, size);

	return 0;
}

/* vb2 .buf_queue: append the buffer; kick off capture if the CEU is idle */
static void sh_mobile_ceu_videobuf_queue(struct vb2_buffer *vb)
{
	struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);

	dev_dbg(icd->dev.parent, "%s (vb=0x%p) 0x%p %lu\n", __func__,
		vb, vb2_plane_vaddr(vb, 0), vb2_get_plane_payload(vb, 0));

	spin_lock_irq(&pcdev->lock);
	list_add_tail(&buf->queue, &pcdev->capture);

	if (!pcdev->active) {
		/*
		 * Because there were no active buffer at this moment,
		 * we are not interested in the return value of
		 * sh_mobile_ceu_capture here.
		 */
		pcdev->active = vb;
		sh_mobile_ceu_capture(pcdev);
	}
	spin_unlock_irq(&pcdev->lock);
}

/* vb2 .buf_cleanup: stop DMA if this buffer is active, drop it from lists */
static void sh_mobile_ceu_videobuf_release(struct vb2_buffer *vb)
{
	struct soc_camera_device *icd = container_of(vb->vb2_queue, struct soc_camera_device, vb2_vidq);
	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
	struct sh_mobile_ceu_buffer *buf = to_ceu_vb(vb);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;

	spin_lock_irq(&pcdev->lock);

	if (pcdev->active == vb) {
		/* disable capture (release DMA buffer), reset */
		ceu_write(pcdev, CAPSR, 1 << 16);
		pcdev->active = NULL;
	}

	/* Doesn't hurt also if the list is empty */
	list_del_init(&buf->queue);

	spin_unlock_irq(&pcdev->lock);
}

/* vb2 .buf_init */
static int sh_mobile_ceu_videobuf_init(struct vb2_buffer *vb)
{
	/* This is for locking debugging only */
	INIT_LIST_HEAD(&to_ceu_vb(vb)->queue);
	return 0;
}

/* vb2 .stop_streaming: drain the capture list and soft-reset the CEU */
static int sh_mobile_ceu_stop_streaming(struct vb2_queue *q)
{
	struct soc_camera_device *icd = container_of(q, struct soc_camera_device, vb2_vidq);
	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	struct list_head *buf_head, *tmp;

	spin_lock_irq(&pcdev->lock);

	pcdev->active = NULL;

	list_for_each_safe(buf_head, tmp, &pcdev->capture)
		list_del_init(buf_head);

	spin_unlock_irq(&pcdev->lock);

	return sh_mobile_ceu_soft_reset(pcdev);
}

static struct vb2_ops sh_mobile_ceu_videobuf_ops = {
	.queue_setup	= sh_mobile_ceu_videobuf_setup,
	.buf_prepare	= sh_mobile_ceu_videobuf_prepare,
	.buf_queue	= sh_mobile_ceu_videobuf_queue,
	.buf_cleanup	= sh_mobile_ceu_videobuf_release,
	.buf_init	= sh_mobile_ceu_videobuf_init,
	.wait_prepare	= soc_camera_unlock,
	.wait_finish	= soc_camera_lock,
	.stop_streaming	= sh_mobile_ceu_stop_streaming,
};

/*
 * Capture-end interrupt: complete the active buffer, advance to the next
 * queued buffer (if any) and restart capture.
 */
static irqreturn_t sh_mobile_ceu_irq(int irq, void *data)
{
	struct sh_mobile_ceu_dev *pcdev = data;
	struct vb2_buffer *vb;
	int ret;

	spin_lock(&pcdev->lock);

	vb = pcdev->active;
	if (!vb)
		/* Stale interrupt from a released buffer */
		goto out;

	list_del_init(&to_ceu_vb(vb)->queue);

	if (!list_empty(&pcdev->capture))
		pcdev->active = &list_entry(pcdev->capture.next,
					    struct sh_mobile_ceu_buffer, queue)->vb;
	else
		pcdev->active = NULL;

	ret = sh_mobile_ceu_capture(pcdev);
	do_gettimeofday(&vb->v4l2_buf.timestamp);
	if (!ret) {
		vb->v4l2_buf.field = pcdev->field;
		vb->v4l2_buf.sequence = pcdev->sequence++;
	}
	vb2_buffer_done(vb, ret < 0 ? VB2_BUF_STATE_ERROR : VB2_BUF_STATE_DONE);

out:
	spin_unlock(&pcdev->lock);

	return IRQ_HANDLED;
}

/* Called with .video_lock held */
static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
{
	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	int ret;

	if (pcdev->icd)
		return -EBUSY;

	dev_info(icd->dev.parent,
		 "SuperH Mobile CEU driver attached to camera %d\n",
		 icd->devnum);

	pm_runtime_get_sync(ici->v4l2_dev.dev);

	ret = sh_mobile_ceu_soft_reset(pcdev);
	if (!ret)
		pcdev->icd = icd;

	return ret;
}

/* Called with .video_lock held */
static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
{
	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;

	BUG_ON(icd != pcdev->icd);

	/* disable capture, disable interrupts */
	ceu_write(pcdev, CEIER, 0);
	sh_mobile_ceu_soft_reset(pcdev);

	/* make sure active buffer is canceled */
	spin_lock_irq(&pcdev->lock);
	if (pcdev->active) {
		list_del_init(&to_ceu_vb(pcdev->active)->queue);
		vb2_buffer_done(pcdev->active, VB2_BUF_STATE_ERROR);
		pcdev->active = NULL;
	}
	spin_unlock_irq(&pcdev->lock);

	pm_runtime_put_sync(ici->v4l2_dev.dev);

	dev_info(icd->dev.parent,
		 "SuperH Mobile CEU driver detached from camera %d\n",
		 icd->devnum);

	pcdev->icd = NULL;
}

/*
 * See chapter 29.4.12 "Capture Filter Control Register (CFLCR)"
 * in SH7722 Hardware Manual
 */
static unsigned int size_dst(unsigned int src, unsigned int scale)
{
	unsigned int mant_pre = scale >> 12;
	if (!src || !scale)
		return src;
	return ((mant_pre + 2 * (src - 1)) / (2 * mant_pre) - 1) *
		mant_pre * 4096 / scale + 1;
}

/*
 * Find a CFLCR scale factor mapping src onto (at least) *dst; *dst is
 * updated to the exact achievable output size.
 */
static u16 calc_scale(unsigned int src, unsigned int *dst)
{
	u16 scale;

	if (src == *dst)
		return 0;

	scale = (src * 4096 / *dst) & ~7;

	while (scale > 4096 && size_dst(src, scale) < *dst)
		scale -= 8;

	*dst = size_dst(src, scale);

	return scale;
}

/* rect is guaranteed to not exceed the scaled camera rectangle */
static void sh_mobile_ceu_set_rect(struct soc_camera_device *icd)
{
	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
	struct sh_mobile_ceu_cam *cam = icd->host_priv;
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	unsigned int height, width, cdwdr_width, in_width, in_height;
	unsigned int left_offset, top_offset;
	u32 camor;

	dev_geo(icd->dev.parent, "Crop %ux%u@%u:%u\n",
		icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top);

	left_offset	= cam->ceu_left;
	top_offset	= cam->ceu_top;

	/* CEU cropping (CFSZR) is applied _after_ the scaling filter (CFLCR) */
	if (pcdev->image_mode) {
		in_width = cam->width;
		if (!pcdev->is_16bit) {
			in_width *= 2;
			left_offset *= 2;
		}
		width = icd->user_width;
		cdwdr_width = icd->user_width;
	} else {
		int bytes_per_line = soc_mbus_bytes_per_line(icd->user_width,
						icd->current_fmt->host_fmt);
		unsigned int w_factor;

		width = icd->user_width;

		switch (icd->current_fmt->host_fmt->packing) {
		case SOC_MBUS_PACKING_2X8_PADHI:
			w_factor = 2;
			break;
		default:
			w_factor = 1;
		}

		in_width = cam->width * w_factor;
		left_offset = left_offset * w_factor;

		if (bytes_per_line < 0)
			cdwdr_width = icd->user_width;
		else
			cdwdr_width = bytes_per_line;
	}

	height = icd->user_height;
	in_height = cam->height;
	if (V4L2_FIELD_NONE != pcdev->field) {
		/* Interlaced: program per-field geometry, double line stride */
		height /= 2;
		in_height /= 2;
		top_offset /= 2;
		cdwdr_width *= 2;
	}

	/* CSI2 special configuration */
	if (pcdev->pdata->csi2_dev) {
		in_width = ((in_width - 2) * 2);
		left_offset *= 2;
	}

	/* Set CAMOR, CAPWR, CFSZR, take care of CDWDR */
	camor = left_offset | (top_offset << 16);

	dev_geo(icd->dev.parent,
		"CAMOR 0x%x, CAPWR 0x%x, CFSZR 0x%x, CDWDR 0x%x\n", camor,
		(in_height << 16) | in_width, (height << 16) | width,
		cdwdr_width);

	ceu_write(pcdev, CAMOR, camor);
	ceu_write(pcdev, CAPWR, (in_height << 16) | in_width);
	ceu_write(pcdev, CFSZR, (height << 16) | width);
	ceu_write(pcdev, CDWDR, cdwdr_width);
}

/* Stop capture with a reset and return the previous CAPSR for restoring */
static u32 capture_save_reset(struct sh_mobile_ceu_dev *pcdev)
{
	u32 capsr = ceu_read(pcdev, CAPSR);
	ceu_write(pcdev, CAPSR, 1 << 16); /* reset, stop capture */
	return capsr;
}

/* Wait for the CEU to go idle, then restore a previously saved CAPSR */
static void capture_restore(struct sh_mobile_ceu_dev *pcdev, u32 capsr)
{
	unsigned long timeout = jiffies + 10 * HZ;

	/*
	 * Wait until the end of the current frame. It can take a long time,
	 * but if it has been aborted by a CAPSR reset, it should exit sooner.
	 */
	while ((ceu_read(pcdev, CSTSR) & 1) && time_before(jiffies, timeout))
		msleep(1);

	if (time_after(jiffies, timeout)) {
		dev_err(pcdev->ici.v4l2_dev.dev,
			"Timeout waiting for frame end! Interface problem?\n");
		return;
	}

	/* Wait until reset clears, this shall not hang... */
	while (ceu_read(pcdev, CAPSR) & (1 << 16))
		udelay(10);

	/* Anything to restore? */
	if (capsr & ~(1 << 16))
		ceu_write(pcdev, CAPSR, capsr);
}

/* Capture is not running, no interrupts, no locking needed */
static int sh_mobile_ceu_set_bus_param(struct soc_camera_device *icd,
				       __u32 pixfmt)
{
	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	int ret;
	unsigned long camera_flags, common_flags, value;
	int yuv_lineskip;
	struct sh_mobile_ceu_cam *cam = icd->host_priv;
	u32 capsr = capture_save_reset(pcdev);

	/*
	 * NOTE(review): the early -EINVAL/error returns below do not call
	 * capture_restore(), so capture stays stopped — verify intended.
	 */
	camera_flags = icd->ops->query_bus_param(icd);
	common_flags = soc_camera_bus_param_compatible(camera_flags,
						       make_bus_param(pcdev));
	if (!common_flags)
		return -EINVAL;

	/* Make choices, based on platform preferences */
	if ((common_flags & SOCAM_HSYNC_ACTIVE_HIGH) &&
	    (common_flags & SOCAM_HSYNC_ACTIVE_LOW)) {
		if (pcdev->pdata->flags & SH_CEU_FLAG_HSYNC_LOW)
			common_flags &= ~SOCAM_HSYNC_ACTIVE_HIGH;
		else
			common_flags &= ~SOCAM_HSYNC_ACTIVE_LOW;
	}

	if ((common_flags & SOCAM_VSYNC_ACTIVE_HIGH) &&
	    (common_flags & SOCAM_VSYNC_ACTIVE_LOW)) {
		if (pcdev->pdata->flags & SH_CEU_FLAG_VSYNC_LOW)
			common_flags &= ~SOCAM_VSYNC_ACTIVE_HIGH;
		else
			common_flags &= ~SOCAM_VSYNC_ACTIVE_LOW;
	}

	ret = icd->ops->set_bus_param(icd, common_flags);
	if (ret < 0)
		return ret;

	switch (common_flags & SOCAM_DATAWIDTH_MASK) {
	case SOCAM_DATAWIDTH_8:
		pcdev->is_16bit = 0;
		break;
	case SOCAM_DATAWIDTH_16:
		pcdev->is_16bit = 1;
		break;
	default:
		return -EINVAL;
	}

	ceu_write(pcdev, CRCNTR, 0);
	ceu_write(pcdev, CRCMPR, 0);

	value = 0x00000010; /* data fetch by default */
	yuv_lineskip = 0;

	switch (icd->current_fmt->host_fmt->fourcc) {
	case V4L2_PIX_FMT_NV12:
	case V4L2_PIX_FMT_NV21:
		yuv_lineskip = 1; /* skip for NV12/21, no skip for NV16/61 */
		/* fall-through */
	case V4L2_PIX_FMT_NV16:
	case V4L2_PIX_FMT_NV61:
		switch (cam->code) {
		case V4L2_MBUS_FMT_UYVY8_2X8:
			value = 0x00000000; /* Cb0, Y0, Cr0, Y1 */
			break;
		case V4L2_MBUS_FMT_VYUY8_2X8:
			value = 0x00000100; /* Cr0, Y0, Cb0, Y1 */
			break;
		case V4L2_MBUS_FMT_YUYV8_2X8:
			value = 0x00000200; /* Y0, Cb0, Y1, Cr0 */
			break;
		case V4L2_MBUS_FMT_YVYU8_2X8:
			value = 0x00000300; /* Y0, Cr0, Y1, Cb0 */
			break;
		default:
			BUG();
		}
	}

	if (icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV21 ||
	    icd->current_fmt->host_fmt->fourcc == V4L2_PIX_FMT_NV61)
		value ^= 0x00000100; /* swap U, V to change from NV1x->NVx1 */

	value |= common_flags & SOCAM_VSYNC_ACTIVE_LOW ? 1 << 1 : 0;
	value |= common_flags & SOCAM_HSYNC_ACTIVE_LOW ? 1 << 0 : 0;
	value |= pcdev->is_16bit ? 1 << 12 : 0;

	/* CSI2 mode */
	if (pcdev->pdata->csi2_dev)
		value |= 3 << 12;

	ceu_write(pcdev, CAMCR, value);

	ceu_write(pcdev, CAPCR, 0x00300000);

	switch (pcdev->field) {
	case V4L2_FIELD_INTERLACED_TB:
		value = 0x101;
		break;
	case V4L2_FIELD_INTERLACED_BT:
		value = 0x102;
		break;
	default:
		value = 0;
		break;
	}
	ceu_write(pcdev, CAIFR, value);

	sh_mobile_ceu_set_rect(icd);
	mdelay(1);

	dev_geo(icd->dev.parent, "CFLCR 0x%x\n", pcdev->cflcr);
	ceu_write(pcdev, CFLCR, pcdev->cflcr);

	/*
	 * A few words about byte order (observed in Big Endian mode)
	 *
	 * In data fetch mode bytes are received in chunks of 8 bytes.
	 * D0, D1, D2, D3, D4, D5, D6, D7 (D0 received first)
	 *
	 * The data is however by default written to memory in reverse order:
	 * D7, D6, D5, D4, D3, D2, D1, D0 (D7 written to lowest byte)
	 *
	 * The lowest three bits of CDOCR allows us to do swapping,
	 * using 7 we swap the data bytes to match the incoming order:
	 * D0, D1, D2, D3, D4, D5, D6, D7
	 */
	value = 0x00000017;
	if (yuv_lineskip)
		value &= ~0x00000010; /* convert 4:2:2 -> 4:2:0 */

	ceu_write(pcdev, CDOCR, value);
	ceu_write(pcdev, CFWCR, 0); /* keep "datafetch firewall" disabled */

	dev_dbg(icd->dev.parent, "S_FMT successful for %c%c%c%c %ux%u\n",
		pixfmt & 0xff, (pixfmt >> 8) & 0xff,
		(pixfmt >> 16) & 0xff, (pixfmt >> 24) & 0xff,
		icd->user_width, icd->user_height);

	capture_restore(pcdev, capsr);

	/* not in bundle mode: skip CBDSR, CDAYR2, CDACR2, CDBYR2, CDBCR2 */
	return 0;
}

/* Check whether a client bus width is compatible with the CEU bus */
static int sh_mobile_ceu_try_bus_param(struct soc_camera_device *icd,
				       unsigned char buswidth)
{
	struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent);
	struct sh_mobile_ceu_dev *pcdev = ici->priv;
	unsigned long camera_flags, common_flags;

	camera_flags = icd->ops->query_bus_param(icd);
	common_flags = soc_camera_bus_param_compatible(camera_flags,
						       make_bus_param(pcdev));
	if (!common_flags || buswidth > 16 ||
	    (buswidth > 8 && !(common_flags & SOCAM_DATAWIDTH_16)))
		return -EINVAL;

	return 0;
}

/* NV12/21/16/61 formats the CEU can synthesize from YUV422 client output */
static const struct soc_mbus_pixelfmt sh_mobile_ceu_formats[] = {
	{
		.fourcc			= V4L2_PIX_FMT_NV12,
		.name			= "NV12",
		.bits_per_sample	= 12,
		.packing		= SOC_MBUS_PACKING_NONE,
		.order			= SOC_MBUS_ORDER_LE,
	}, {
		.fourcc			= V4L2_PIX_FMT_NV21,
		.name			= "NV21",
		.bits_per_sample	= 12,
		.packing		= SOC_MBUS_PACKING_NONE,
		.order			= SOC_MBUS_ORDER_LE,
	}, {
		.fourcc			= V4L2_PIX_FMT_NV16,
		.name			= "NV16",
		.bits_per_sample	= 16,
		.packing		= SOC_MBUS_PACKING_NONE,
		.order			= SOC_MBUS_ORDER_LE,
	}, {
		.fourcc			= V4L2_PIX_FMT_NV61,
		.name			= "NV61",
		.bits_per_sample	= 16,
		.packing		= SOC_MBUS_PACKING_NONE,
		.order			= SOC_MBUS_ORDER_LE,
	},
};

/* This will be corrected as we get more formats */
static bool sh_mobile_ceu_packing_supported(const struct soc_mbus_pixelfmt *fmt) { return fmt->packing == SOC_MBUS_PACKING_NONE || (fmt->bits_per_sample == 8 && fmt->packing == SOC_MBUS_PACKING_2X8_PADHI) || (fmt->bits_per_sample > 8 && fmt->packing == SOC_MBUS_PACKING_EXTEND16); } static int client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect); static int sh_mobile_ceu_get_formats(struct soc_camera_device *icd, unsigned int idx, struct soc_camera_format_xlate *xlate) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct device *dev = icd->dev.parent; struct soc_camera_host *ici = to_soc_camera_host(dev); struct sh_mobile_ceu_dev *pcdev = ici->priv; int ret, k, n; int formats = 0; struct sh_mobile_ceu_cam *cam; enum v4l2_mbus_pixelcode code; const struct soc_mbus_pixelfmt *fmt; ret = v4l2_subdev_call(sd, video, enum_mbus_fmt, idx, &code); if (ret < 0) /* No more formats */ return 0; fmt = soc_mbus_get_fmtdesc(code); if (!fmt) { dev_warn(dev, "unsupported format code #%u: %d\n", idx, code); return 0; } if (!pcdev->pdata->csi2_dev) { ret = sh_mobile_ceu_try_bus_param(icd, fmt->bits_per_sample); if (ret < 0) return 0; } if (!icd->host_priv) { struct v4l2_mbus_framefmt mf; struct v4l2_rect rect; int shift = 0; /* FIXME: subwindow is lost between close / open */ /* Cache current client geometry */ ret = client_g_rect(sd, &rect); if (ret < 0) return ret; /* First time */ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf); if (ret < 0) return ret; while ((mf.width > 2560 || mf.height > 1920) && shift < 4) { /* Try 2560x1920, 1280x960, 640x480, 320x240 */ mf.width = 2560 >> shift; mf.height = 1920 >> shift; ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, s_mbus_fmt, &mf); if (ret < 0) return ret; shift++; } if (shift == 4) { dev_err(dev, "Failed to configure the client below %ux%x\n", mf.width, mf.height); return -EIO; } dev_geo(dev, "camera fmt %ux%u\n", mf.width, mf.height); cam = kzalloc(sizeof(*cam), GFP_KERNEL); if (!cam) return 
-ENOMEM; /* We are called with current camera crop, initialise subrect with it */ cam->rect = rect; cam->subrect = rect; cam->width = mf.width; cam->height = mf.height; cam->width = mf.width; cam->height = mf.height; icd->host_priv = cam; } else { cam = icd->host_priv; } /* Beginning of a pass */ if (!idx) cam->extra_fmt = NULL; switch (code) { case V4L2_MBUS_FMT_UYVY8_2X8: case V4L2_MBUS_FMT_VYUY8_2X8: case V4L2_MBUS_FMT_YUYV8_2X8: case V4L2_MBUS_FMT_YVYU8_2X8: if (cam->extra_fmt) break; /* * Our case is simple so far: for any of the above four camera * formats we add all our four synthesized NV* formats, so, * just marking the device with a single flag suffices. If * the format generation rules are more complex, you would have * to actually hang your already added / counted formats onto * the host_priv pointer and check whether the format you're * going to add now is already there. */ cam->extra_fmt = sh_mobile_ceu_formats; n = ARRAY_SIZE(sh_mobile_ceu_formats); formats += n; for (k = 0; xlate && k < n; k++) { xlate->host_fmt = &sh_mobile_ceu_formats[k]; xlate->code = code; xlate++; dev_dbg(dev, "Providing format %s using code %d\n", sh_mobile_ceu_formats[k].name, code); } break; default: if (!sh_mobile_ceu_packing_supported(fmt)) return 0; } /* Generic pass-through */ formats++; if (xlate) { xlate->host_fmt = fmt; xlate->code = code; xlate++; dev_dbg(dev, "Providing format %s in pass-through mode\n", fmt->name); } return formats; } static void sh_mobile_ceu_put_formats(struct soc_camera_device *icd) { kfree(icd->host_priv); icd->host_priv = NULL; } /* Check if any dimension of r1 is smaller than respective one of r2 */ static bool is_smaller(struct v4l2_rect *r1, struct v4l2_rect *r2) { return r1->width < r2->width || r1->height < r2->height; } /* Check if r1 fails to cover r2 */ static bool is_inside(struct v4l2_rect *r1, struct v4l2_rect *r2) { return r1->left > r2->left || r1->top > r2->top || r1->left + r1->width < r2->left + r2->width || r1->top + 
r1->height < r2->top + r2->height; } static unsigned int scale_down(unsigned int size, unsigned int scale) { return (size * 4096 + scale / 2) / scale; } static unsigned int calc_generic_scale(unsigned int input, unsigned int output) { return (input * 4096 + output / 2) / output; } /* Get and store current client crop */ static int client_g_rect(struct v4l2_subdev *sd, struct v4l2_rect *rect) { struct v4l2_crop crop; struct v4l2_cropcap cap; int ret; crop.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; ret = v4l2_subdev_call(sd, video, g_crop, &crop); if (!ret) { *rect = crop.c; return ret; } /* Camera driver doesn't support .g_crop(), assume default rectangle */ cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; ret = v4l2_subdev_call(sd, video, cropcap, &cap); if (!ret) *rect = cap.defrect; return ret; } /* Client crop has changed, update our sub-rectangle to remain within the area */ static void update_subrect(struct sh_mobile_ceu_cam *cam) { struct v4l2_rect *rect = &cam->rect, *subrect = &cam->subrect; if (rect->width < subrect->width) subrect->width = rect->width; if (rect->height < subrect->height) subrect->height = rect->height; if (rect->left > subrect->left) subrect->left = rect->left; else if (rect->left + rect->width > subrect->left + subrect->width) subrect->left = rect->left + rect->width - subrect->width; if (rect->top > subrect->top) subrect->top = rect->top; else if (rect->top + rect->height > subrect->top + subrect->height) subrect->top = rect->top + rect->height - subrect->height; } /* * The common for both scaling and cropping iterative approach is: * 1. try if the client can produce exactly what requested by the user * 2. if (1) failed, try to double the client image until we get one big enough * 3. 
if (2) failed, try to request the maximum image */ static int client_s_crop(struct soc_camera_device *icd, struct v4l2_crop *crop, struct v4l2_crop *cam_crop) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct v4l2_rect *rect = &crop->c, *cam_rect = &cam_crop->c; struct device *dev = sd->v4l2_dev->dev; struct sh_mobile_ceu_cam *cam = icd->host_priv; struct v4l2_cropcap cap; int ret; unsigned int width, height; v4l2_subdev_call(sd, video, s_crop, crop); ret = client_g_rect(sd, cam_rect); if (ret < 0) return ret; /* * Now cam_crop contains the current camera input rectangle, and it must * be within camera cropcap bounds */ if (!memcmp(rect, cam_rect, sizeof(*rect))) { /* Even if camera S_CROP failed, but camera rectangle matches */ dev_dbg(dev, "Camera S_CROP successful for %dx%d@%d:%d\n", rect->width, rect->height, rect->left, rect->top); cam->rect = *cam_rect; return 0; } /* Try to fix cropping, that camera hasn't managed to set */ dev_geo(dev, "Fix camera S_CROP for %dx%d@%d:%d to %dx%d@%d:%d\n", cam_rect->width, cam_rect->height, cam_rect->left, cam_rect->top, rect->width, rect->height, rect->left, rect->top); /* We need sensor maximum rectangle */ ret = v4l2_subdev_call(sd, video, cropcap, &cap); if (ret < 0) return ret; /* Put user requested rectangle within sensor bounds */ soc_camera_limit_side(&rect->left, &rect->width, cap.bounds.left, 2, cap.bounds.width); soc_camera_limit_side(&rect->top, &rect->height, cap.bounds.top, 4, cap.bounds.height); /* * Popular special case - some cameras can only handle fixed sizes like * QVGA, VGA,... Take care to avoid infinite loop. 
*/ width = max(cam_rect->width, 2); height = max(cam_rect->height, 2); /* * Loop as long as sensor is not covering the requested rectangle and * is still within its bounds */ while (!ret && (is_smaller(cam_rect, rect) || is_inside(cam_rect, rect)) && (cap.bounds.width > width || cap.bounds.height > height)) { width *= 2; height *= 2; cam_rect->width = width; cam_rect->height = height; /* * We do not know what capabilities the camera has to set up * left and top borders. We could try to be smarter in iterating * them, e.g., if camera current left is to the right of the * target left, set it to the middle point between the current * left and minimum left. But that would add too much * complexity: we would have to iterate each border separately. * Instead we just drop to the left and top bounds. */ if (cam_rect->left > rect->left) cam_rect->left = cap.bounds.left; if (cam_rect->left + cam_rect->width < rect->left + rect->width) cam_rect->width = rect->left + rect->width - cam_rect->left; if (cam_rect->top > rect->top) cam_rect->top = cap.bounds.top; if (cam_rect->top + cam_rect->height < rect->top + rect->height) cam_rect->height = rect->top + rect->height - cam_rect->top; v4l2_subdev_call(sd, video, s_crop, cam_crop); ret = client_g_rect(sd, cam_rect); dev_geo(dev, "Camera S_CROP %d for %dx%d@%d:%d\n", ret, cam_rect->width, cam_rect->height, cam_rect->left, cam_rect->top); } /* S_CROP must not modify the rectangle */ if (is_smaller(cam_rect, rect) || is_inside(cam_rect, rect)) { /* * The camera failed to configure a suitable cropping, * we cannot use the current rectangle, set to max */ *cam_rect = cap.bounds; v4l2_subdev_call(sd, video, s_crop, cam_crop); ret = client_g_rect(sd, cam_rect); dev_geo(dev, "Camera S_CROP %d for max %dx%d@%d:%d\n", ret, cam_rect->width, cam_rect->height, cam_rect->left, cam_rect->top); } if (!ret) { cam->rect = *cam_rect; update_subrect(cam); } return ret; } /* Iterative s_mbus_fmt, also updates cached client crop on success */ static 
int client_s_fmt(struct soc_camera_device *icd, struct v4l2_mbus_framefmt *mf, bool ceu_can_scale) { struct sh_mobile_ceu_cam *cam = icd->host_priv; struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct device *dev = icd->dev.parent; unsigned int width = mf->width, height = mf->height, tmp_w, tmp_h; unsigned int max_width, max_height; struct v4l2_cropcap cap; int ret; ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, s_mbus_fmt, mf); if (ret < 0) return ret; dev_geo(dev, "camera scaled to %ux%u\n", mf->width, mf->height); if ((width == mf->width && height == mf->height) || !ceu_can_scale) goto update_cache; cap.type = V4L2_BUF_TYPE_VIDEO_CAPTURE; ret = v4l2_subdev_call(sd, video, cropcap, &cap); if (ret < 0) return ret; max_width = min(cap.bounds.width, 2560); max_height = min(cap.bounds.height, 1920); /* Camera set a format, but geometry is not precise, try to improve */ tmp_w = mf->width; tmp_h = mf->height; /* width <= max_width && height <= max_height - guaranteed by try_fmt */ while ((width > tmp_w || height > tmp_h) && tmp_w < max_width && tmp_h < max_height) { tmp_w = min(2 * tmp_w, max_width); tmp_h = min(2 * tmp_h, max_height); mf->width = tmp_w; mf->height = tmp_h; ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, s_mbus_fmt, mf); dev_geo(dev, "Camera scaled to %ux%u\n", mf->width, mf->height); if (ret < 0) { /* This shouldn't happen */ dev_err(dev, "Client failed to set format: %d\n", ret); return ret; } } update_cache: /* Update cache */ ret = client_g_rect(sd, &cam->rect); if (ret < 0) return ret; update_subrect(cam); return 0; } /** * @width - on output: user width, mapped back to input * @height - on output: user height, mapped back to input * @mf - in- / output camera output window */ static int client_scale(struct soc_camera_device *icd, struct v4l2_mbus_framefmt *mf, unsigned int *width, unsigned int *height, bool ceu_can_scale) { struct sh_mobile_ceu_cam *cam = icd->host_priv; struct device *dev = 
icd->dev.parent; struct v4l2_mbus_framefmt mf_tmp = *mf; unsigned int scale_h, scale_v; int ret; /* * 5. Apply iterative camera S_FMT for camera user window (also updates * client crop cache and the imaginary sub-rectangle). */ ret = client_s_fmt(icd, &mf_tmp, ceu_can_scale); if (ret < 0) return ret; dev_geo(dev, "5: camera scaled to %ux%u\n", mf_tmp.width, mf_tmp.height); /* 6. Retrieve camera output window (g_fmt) */ /* unneeded - it is already in "mf_tmp" */ /* 7. Calculate new client scales. */ scale_h = calc_generic_scale(cam->rect.width, mf_tmp.width); scale_v = calc_generic_scale(cam->rect.height, mf_tmp.height); mf->width = mf_tmp.width; mf->height = mf_tmp.height; mf->colorspace = mf_tmp.colorspace; /* * 8. Calculate new CEU crop - apply camera scales to previously * updated "effective" crop. */ *width = scale_down(cam->subrect.width, scale_h); *height = scale_down(cam->subrect.height, scale_v); dev_geo(dev, "8: new client sub-window %ux%u\n", *width, *height); return 0; } /* * CEU can scale and crop, but we don't want to waste bandwidth and kill the * framerate by always requesting the maximum image from the client. See * Documentation/video4linux/sh_mobile_ceu_camera.txt for a description of * scaling and cropping algorithms and for the meaning of referenced here steps. 
*/ static int sh_mobile_ceu_set_crop(struct soc_camera_device *icd, struct v4l2_crop *a) { struct v4l2_rect *rect = &a->c; struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct v4l2_crop cam_crop; struct sh_mobile_ceu_cam *cam = icd->host_priv; struct v4l2_rect *cam_rect = &cam_crop.c; struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct device *dev = icd->dev.parent; struct v4l2_mbus_framefmt mf; unsigned int scale_cam_h, scale_cam_v, scale_ceu_h, scale_ceu_v, out_width, out_height; int interm_width, interm_height; u32 capsr, cflcr; int ret; dev_geo(dev, "S_CROP(%ux%u@%u:%u)\n", rect->width, rect->height, rect->left, rect->top); /* During camera cropping its output window can change too, stop CEU */ capsr = capture_save_reset(pcdev); dev_dbg(dev, "CAPSR 0x%x, CFLCR 0x%x\n", capsr, pcdev->cflcr); /* 1. - 2. Apply iterative camera S_CROP for new input window. */ ret = client_s_crop(icd, a, &cam_crop); if (ret < 0) return ret; dev_geo(dev, "1-2: camera cropped to %ux%u@%u:%u\n", cam_rect->width, cam_rect->height, cam_rect->left, cam_rect->top); /* On success cam_crop contains current camera crop */ /* 3. Retrieve camera output window */ ret = v4l2_subdev_call(sd, video, g_mbus_fmt, &mf); if (ret < 0) return ret; if (mf.width > 2560 || mf.height > 1920) return -EINVAL; /* 4. 
Calculate camera scales */ scale_cam_h = calc_generic_scale(cam_rect->width, mf.width); scale_cam_v = calc_generic_scale(cam_rect->height, mf.height); /* Calculate intermediate window */ interm_width = scale_down(rect->width, scale_cam_h); interm_height = scale_down(rect->height, scale_cam_v); if (interm_width < icd->user_width) { u32 new_scale_h; new_scale_h = calc_generic_scale(rect->width, icd->user_width); mf.width = scale_down(cam_rect->width, new_scale_h); } if (interm_height < icd->user_height) { u32 new_scale_v; new_scale_v = calc_generic_scale(rect->height, icd->user_height); mf.height = scale_down(cam_rect->height, new_scale_v); } if (interm_width < icd->user_width || interm_height < icd->user_height) { ret = v4l2_device_call_until_err(sd->v4l2_dev, (int)icd, video, s_mbus_fmt, &mf); if (ret < 0) return ret; dev_geo(dev, "New camera output %ux%u\n", mf.width, mf.height); scale_cam_h = calc_generic_scale(cam_rect->width, mf.width); scale_cam_v = calc_generic_scale(cam_rect->height, mf.height); interm_width = scale_down(rect->width, scale_cam_h); interm_height = scale_down(rect->height, scale_cam_v); } /* Cache camera output window */ cam->width = mf.width; cam->height = mf.height; if (pcdev->image_mode) { out_width = min(interm_width, icd->user_width); out_height = min(interm_height, icd->user_height); } else { out_width = interm_width; out_height = interm_height; } /* * 5. Calculate CEU scales from camera scales from results of (5) and * the user window */ scale_ceu_h = calc_scale(interm_width, &out_width); scale_ceu_v = calc_scale(interm_height, &out_height); dev_geo(dev, "5: CEU scales %u:%u\n", scale_ceu_h, scale_ceu_v); /* Apply CEU scales. 
*/ cflcr = scale_ceu_h | (scale_ceu_v << 16); if (cflcr != pcdev->cflcr) { pcdev->cflcr = cflcr; ceu_write(pcdev, CFLCR, cflcr); } icd->user_width = out_width; icd->user_height = out_height; cam->ceu_left = scale_down(rect->left - cam_rect->left, scale_cam_h) & ~1; cam->ceu_top = scale_down(rect->top - cam_rect->top, scale_cam_v) & ~1; /* 6. Use CEU cropping to crop to the new window. */ sh_mobile_ceu_set_rect(icd); cam->subrect = *rect; dev_geo(dev, "6: CEU cropped to %ux%u@%u:%u\n", icd->user_width, icd->user_height, cam->ceu_left, cam->ceu_top); /* Restore capture. The CE bit can be cleared by the hardware */ if (pcdev->active) capsr |= 1; capture_restore(pcdev, capsr); /* Even if only camera cropping succeeded */ return ret; } static int sh_mobile_ceu_get_crop(struct soc_camera_device *icd, struct v4l2_crop *a) { struct sh_mobile_ceu_cam *cam = icd->host_priv; a->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; a->c = cam->subrect; return 0; } /* * Calculate real client output window by applying new scales to the current * client crop. New scales are calculated from the requested output format and * CEU crop, mapped backed onto the client input (subrect). */ static void calculate_client_output(struct soc_camera_device *icd, struct v4l2_pix_format *pix, struct v4l2_mbus_framefmt *mf) { struct sh_mobile_ceu_cam *cam = icd->host_priv; struct device *dev = icd->dev.parent; struct v4l2_rect *cam_subrect = &cam->subrect; unsigned int scale_v, scale_h; if (cam_subrect->width == cam->rect.width && cam_subrect->height == cam->rect.height) { /* No sub-cropping */ mf->width = pix->width; mf->height = pix->height; return; } /* 1.-2. Current camera scales and subwin - cached. */ dev_geo(dev, "2: subwin %ux%u@%u:%u\n", cam_subrect->width, cam_subrect->height, cam_subrect->left, cam_subrect->top); /* * 3. Calculate new combined scales from input sub-window to requested * user window. 
*/ /* * TODO: CEU cannot scale images larger than VGA to smaller than SubQCIF * (128x96) or larger than VGA */ scale_h = calc_generic_scale(cam_subrect->width, pix->width); scale_v = calc_generic_scale(cam_subrect->height, pix->height); dev_geo(dev, "3: scales %u:%u\n", scale_h, scale_v); /* * 4. Calculate client output window by applying combined scales to real * input window. */ mf->width = scale_down(cam->rect.width, scale_h); mf->height = scale_down(cam->rect.height, scale_v); } /* Similar to set_crop multistage iterative algorithm */ static int sh_mobile_ceu_set_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; struct sh_mobile_ceu_cam *cam = icd->host_priv; struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_mbus_framefmt mf; struct device *dev = icd->dev.parent; __u32 pixfmt = pix->pixelformat; const struct soc_camera_format_xlate *xlate; /* Keep Compiler Happy */ unsigned int ceu_sub_width = 0, ceu_sub_height = 0; u16 scale_v, scale_h; int ret; bool image_mode; enum v4l2_field field; dev_geo(dev, "S_FMT(pix=0x%x, %ux%u)\n", pixfmt, pix->width, pix->height); switch (pix->field) { default: pix->field = V4L2_FIELD_NONE; /* fall-through */ case V4L2_FIELD_INTERLACED_TB: case V4L2_FIELD_INTERLACED_BT: case V4L2_FIELD_NONE: field = pix->field; break; case V4L2_FIELD_INTERLACED: field = V4L2_FIELD_INTERLACED_TB; break; } xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); if (!xlate) { dev_warn(dev, "Format %x not found\n", pixfmt); return -EINVAL; } /* 1.-4. 
Calculate client output geometry */ calculate_client_output(icd, &f->fmt.pix, &mf); mf.field = pix->field; mf.colorspace = pix->colorspace; mf.code = xlate->code; switch (pixfmt) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: image_mode = true; break; default: image_mode = false; } dev_geo(dev, "4: request camera output %ux%u\n", mf.width, mf.height); /* 5. - 9. */ ret = client_scale(icd, &mf, &ceu_sub_width, &ceu_sub_height, image_mode && V4L2_FIELD_NONE == field); dev_geo(dev, "5-9: client scale return %d\n", ret); /* Done with the camera. Now see if we can improve the result */ dev_geo(dev, "fmt %ux%u, requested %ux%u\n", mf.width, mf.height, pix->width, pix->height); if (ret < 0) return ret; if (mf.code != xlate->code) return -EINVAL; /* 9. Prepare CEU crop */ cam->width = mf.width; cam->height = mf.height; /* 10. Use CEU scaling to scale to the requested user window. */ /* We cannot scale up */ if (pix->width > ceu_sub_width) ceu_sub_width = pix->width; if (pix->height > ceu_sub_height) ceu_sub_height = pix->height; pix->colorspace = mf.colorspace; if (image_mode) { /* Scale pix->{width x height} down to width x height */ scale_h = calc_scale(ceu_sub_width, &pix->width); scale_v = calc_scale(ceu_sub_height, &pix->height); } else { pix->width = ceu_sub_width; pix->height = ceu_sub_height; scale_h = 0; scale_v = 0; } pcdev->cflcr = scale_h | (scale_v << 16); /* * We have calculated CFLCR, the actual configuration will be performed * in sh_mobile_ceu_set_bus_param() */ dev_geo(dev, "10: W: %u : 0x%x = %u, H: %u : 0x%x = %u\n", ceu_sub_width, scale_h, pix->width, ceu_sub_height, scale_v, pix->height); cam->code = xlate->code; icd->current_fmt = xlate; pcdev->field = field; pcdev->image_mode = image_mode; return 0; } static int sh_mobile_ceu_try_fmt(struct soc_camera_device *icd, struct v4l2_format *f) { const struct soc_camera_format_xlate *xlate; struct v4l2_pix_format *pix = &f->fmt.pix; struct v4l2_subdev *sd = 
soc_camera_to_subdev(icd); struct v4l2_mbus_framefmt mf; __u32 pixfmt = pix->pixelformat; int width, height; int ret; dev_geo(icd->dev.parent, "TRY_FMT(pix=0x%x, %ux%u)\n", pixfmt, pix->width, pix->height); xlate = soc_camera_xlate_by_fourcc(icd, pixfmt); if (!xlate) { dev_warn(icd->dev.parent, "Format %x not found\n", pixfmt); return -EINVAL; } /* FIXME: calculate using depth and bus width */ v4l_bound_align_image(&pix->width, 2, 2560, 1, &pix->height, 4, 1920, 2, 0); width = pix->width; height = pix->height; pix->bytesperline = soc_mbus_bytes_per_line(width, xlate->host_fmt); if ((int)pix->bytesperline < 0) return pix->bytesperline; pix->sizeimage = height * pix->bytesperline; /* limit to sensor capabilities */ mf.width = pix->width; mf.height = pix->height; mf.field = pix->field; mf.code = xlate->code; mf.colorspace = pix->colorspace; ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, try_mbus_fmt, &mf); if (ret < 0) return ret; pix->width = mf.width; pix->height = mf.height; pix->field = mf.field; pix->colorspace = mf.colorspace; switch (pixfmt) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: /* FIXME: check against rect_max after converting soc-camera */ /* We can scale precisely, need a bigger image from camera */ if (pix->width < width || pix->height < height) { /* * We presume, the sensor behaves sanely, i.e., if * requested a bigger rectangle, it will not return a * smaller one. */ mf.width = 2560; mf.height = 1920; ret = v4l2_device_call_until_err(sd->v4l2_dev, (long)icd, video, try_mbus_fmt, &mf); if (ret < 0) { /* Shouldn't actually happen... 
*/ dev_err(icd->dev.parent, "FIXME: client try_fmt() = %d\n", ret); return ret; } } /* We will scale exactly */ if (mf.width > width) pix->width = width; if (mf.height > height) pix->height = height; } dev_geo(icd->dev.parent, "%s(): return %d, fmt 0x%x, %ux%u\n", __func__, ret, pix->pixelformat, pix->width, pix->height); return ret; } static int sh_mobile_ceu_set_livecrop(struct soc_camera_device *icd, struct v4l2_crop *a) { struct v4l2_subdev *sd = soc_camera_to_subdev(icd); struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; u32 out_width = icd->user_width, out_height = icd->user_height; int ret; /* Freeze queue */ pcdev->frozen = 1; /* Wait for frame */ ret = wait_for_completion_interruptible(&pcdev->complete); /* Stop the client */ ret = v4l2_subdev_call(sd, video, s_stream, 0); if (ret < 0) dev_warn(icd->dev.parent, "Client failed to stop the stream: %d\n", ret); else /* Do the crop, if it fails, there's nothing more we can do */ sh_mobile_ceu_set_crop(icd, a); dev_geo(icd->dev.parent, "Output after crop: %ux%u\n", icd->user_width, icd->user_height); if (icd->user_width != out_width || icd->user_height != out_height) { struct v4l2_format f = { .type = V4L2_BUF_TYPE_VIDEO_CAPTURE, .fmt.pix = { .width = out_width, .height = out_height, .pixelformat = icd->current_fmt->host_fmt->fourcc, .field = pcdev->field, .colorspace = icd->colorspace, }, }; ret = sh_mobile_ceu_set_fmt(icd, &f); if (!ret && (out_width != f.fmt.pix.width || out_height != f.fmt.pix.height)) ret = -EINVAL; if (!ret) { icd->user_width = out_width; icd->user_height = out_height; ret = sh_mobile_ceu_set_bus_param(icd, icd->current_fmt->host_fmt->fourcc); } } /* Thaw the queue */ pcdev->frozen = 0; spin_lock_irq(&pcdev->lock); sh_mobile_ceu_capture(pcdev); spin_unlock_irq(&pcdev->lock); /* Start the client */ ret = v4l2_subdev_call(sd, video, s_stream, 1); return ret; } static unsigned int sh_mobile_ceu_poll(struct file *file, poll_table 
*pt) { struct soc_camera_device *icd = file->private_data; return vb2_poll(&icd->vb2_vidq, file, pt); } static int sh_mobile_ceu_querycap(struct soc_camera_host *ici, struct v4l2_capability *cap) { strlcpy(cap->card, "SuperH_Mobile_CEU", sizeof(cap->card)); cap->version = KERNEL_VERSION(0, 0, 5); cap->capabilities = V4L2_CAP_VIDEO_CAPTURE | V4L2_CAP_STREAMING; return 0; } static int sh_mobile_ceu_init_videobuf(struct vb2_queue *q, struct soc_camera_device *icd) { q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE; q->io_modes = VB2_MMAP | VB2_USERPTR; q->drv_priv = icd; q->ops = &sh_mobile_ceu_videobuf_ops; q->mem_ops = &vb2_dma_contig_memops; q->buf_struct_size = sizeof(struct sh_mobile_ceu_buffer); return vb2_queue_init(q); } static int sh_mobile_ceu_get_ctrl(struct soc_camera_device *icd, struct v4l2_control *ctrl) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; u32 val; switch (ctrl->id) { case V4L2_CID_SHARPNESS: val = ceu_read(pcdev, CLFCR); ctrl->value = val ^ 1; return 0; } return -ENOIOCTLCMD; } static int sh_mobile_ceu_set_ctrl(struct soc_camera_device *icd, struct v4l2_control *ctrl) { struct soc_camera_host *ici = to_soc_camera_host(icd->dev.parent); struct sh_mobile_ceu_dev *pcdev = ici->priv; switch (ctrl->id) { case V4L2_CID_SHARPNESS: switch (icd->current_fmt->host_fmt->fourcc) { case V4L2_PIX_FMT_NV12: case V4L2_PIX_FMT_NV21: case V4L2_PIX_FMT_NV16: case V4L2_PIX_FMT_NV61: ceu_write(pcdev, CLFCR, !ctrl->value); return 0; } return -EINVAL; } return -ENOIOCTLCMD; } static const struct v4l2_queryctrl sh_mobile_ceu_controls[] = { { .id = V4L2_CID_SHARPNESS, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Low-pass filter", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, }, }; static struct soc_camera_host_ops sh_mobile_ceu_host_ops = { .owner = THIS_MODULE, .add = sh_mobile_ceu_add_device, .remove = sh_mobile_ceu_remove_device, .get_formats = sh_mobile_ceu_get_formats, .put_formats = 
sh_mobile_ceu_put_formats, .get_crop = sh_mobile_ceu_get_crop, .set_crop = sh_mobile_ceu_set_crop, .set_livecrop = sh_mobile_ceu_set_livecrop, .set_fmt = sh_mobile_ceu_set_fmt, .try_fmt = sh_mobile_ceu_try_fmt, .set_ctrl = sh_mobile_ceu_set_ctrl, .get_ctrl = sh_mobile_ceu_get_ctrl, .poll = sh_mobile_ceu_poll, .querycap = sh_mobile_ceu_querycap, .set_bus_param = sh_mobile_ceu_set_bus_param, .init_videobuf2 = sh_mobile_ceu_init_videobuf, .controls = sh_mobile_ceu_controls, .num_controls = ARRAY_SIZE(sh_mobile_ceu_controls), }; struct bus_wait { struct notifier_block notifier; struct completion completion; struct device *dev; }; static int bus_notify(struct notifier_block *nb, unsigned long action, void *data) { struct device *dev = data; struct bus_wait *wait = container_of(nb, struct bus_wait, notifier); if (wait->dev != dev) return NOTIFY_DONE; switch (action) { case BUS_NOTIFY_UNBOUND_DRIVER: /* Protect from module unloading */ wait_for_completion(&wait->completion); return NOTIFY_OK; } return NOTIFY_DONE; } static int __devinit sh_mobile_ceu_probe(struct platform_device *pdev) { struct sh_mobile_ceu_dev *pcdev; struct resource *res; void __iomem *base; unsigned int irq; int err = 0; struct bus_wait wait = { .completion = COMPLETION_INITIALIZER_ONSTACK(wait.completion), .notifier.notifier_call = bus_notify, }; struct device *csi2; res = platform_get_resource(pdev, IORESOURCE_MEM, 0); irq = platform_get_irq(pdev, 0); if (!res || (int)irq <= 0) { dev_err(&pdev->dev, "Not enough CEU platform resources.\n"); err = -ENODEV; goto exit; } pcdev = kzalloc(sizeof(*pcdev), GFP_KERNEL); if (!pcdev) { dev_err(&pdev->dev, "Could not allocate pcdev\n"); err = -ENOMEM; goto exit; } INIT_LIST_HEAD(&pcdev->capture); spin_lock_init(&pcdev->lock); init_completion(&pcdev->complete); pcdev->pdata = pdev->dev.platform_data; if (!pcdev->pdata) { err = -EINVAL; dev_err(&pdev->dev, "CEU platform data not set.\n"); goto exit_kfree; } base = ioremap_nocache(res->start, resource_size(res)); 
if (!base) { err = -ENXIO; dev_err(&pdev->dev, "Unable to ioremap CEU registers.\n"); goto exit_kfree; } pcdev->irq = irq; pcdev->base = base; pcdev->video_limit = 0; /* only enabled if second resource exists */ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); if (res) { err = dma_declare_coherent_memory(&pdev->dev, res->start, res->start, resource_size(res), DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE); if (!err) { dev_err(&pdev->dev, "Unable to declare CEU memory.\n"); err = -ENXIO; goto exit_iounmap; } pcdev->video_limit = resource_size(res); } /* request irq */ err = request_irq(pcdev->irq, sh_mobile_ceu_irq, IRQF_DISABLED, dev_name(&pdev->dev), pcdev); if (err) { dev_err(&pdev->dev, "Unable to register CEU interrupt.\n"); goto exit_release_mem; } pm_suspend_ignore_children(&pdev->dev, true); pm_runtime_enable(&pdev->dev); pm_runtime_resume(&pdev->dev); pcdev->ici.priv = pcdev; pcdev->ici.v4l2_dev.dev = &pdev->dev; pcdev->ici.nr = pdev->id; pcdev->ici.drv_name = dev_name(&pdev->dev); pcdev->ici.ops = &sh_mobile_ceu_host_ops; /* CSI2 interfacing */ csi2 = pcdev->pdata->csi2_dev; if (csi2) { wait.dev = csi2; err = bus_register_notifier(&platform_bus_type, &wait.notifier); if (err < 0) goto exit_free_clk; /* * From this point the driver module will not unload, until * we complete the completion. */ if (!csi2->driver) { complete(&wait.completion); /* Either too late, or probing failed */ bus_unregister_notifier(&platform_bus_type, &wait.notifier); err = -ENXIO; goto exit_free_clk; } /* * The module is still loaded, in the worst case it is hanging * in device release on our completion. So, _now_ dereferencing * the "owner" is safe! 
*/ err = try_module_get(csi2->driver->owner); /* Let notifier complete, if it has been locked */ complete(&wait.completion); bus_unregister_notifier(&platform_bus_type, &wait.notifier); if (!err) { err = -ENODEV; goto exit_free_clk; } } pcdev->alloc_ctx = vb2_dma_contig_init_ctx(&pdev->dev); if (IS_ERR(pcdev->alloc_ctx)) { err = PTR_ERR(pcdev->alloc_ctx); goto exit_module_put; } err = soc_camera_host_register(&pcdev->ici); if (err) goto exit_free_ctx; return 0; exit_free_ctx: vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx); exit_module_put: if (csi2 && csi2->driver) module_put(csi2->driver->owner); exit_free_clk: pm_runtime_disable(&pdev->dev); free_irq(pcdev->irq, pcdev); exit_release_mem: if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) dma_release_declared_memory(&pdev->dev); exit_iounmap: iounmap(base); exit_kfree: kfree(pcdev); exit: return err; } static int __devexit sh_mobile_ceu_remove(struct platform_device *pdev) { struct soc_camera_host *soc_host = to_soc_camera_host(&pdev->dev); struct sh_mobile_ceu_dev *pcdev = container_of(soc_host, struct sh_mobile_ceu_dev, ici); struct device *csi2 = pcdev->pdata->csi2_dev; soc_camera_host_unregister(soc_host); pm_runtime_disable(&pdev->dev); free_irq(pcdev->irq, pcdev); if (platform_get_resource(pdev, IORESOURCE_MEM, 1)) dma_release_declared_memory(&pdev->dev); iounmap(pcdev->base); vb2_dma_contig_cleanup_ctx(pcdev->alloc_ctx); if (csi2 && csi2->driver) module_put(csi2->driver->owner); kfree(pcdev); return 0; } static int sh_mobile_ceu_runtime_nop(struct device *dev) { /* Runtime PM callback shared between ->runtime_suspend() * and ->runtime_resume(). Simply returns success. * * This driver re-initializes all registers after * pm_runtime_get_sync() anyway so there is no need * to save and restore registers here. 
*/ return 0; } static const struct dev_pm_ops sh_mobile_ceu_dev_pm_ops = { .runtime_suspend = sh_mobile_ceu_runtime_nop, .runtime_resume = sh_mobile_ceu_runtime_nop, }; static struct platform_driver sh_mobile_ceu_driver = { .driver = { .name = "sh_mobile_ceu", .pm = &sh_mobile_ceu_dev_pm_ops, }, .probe = sh_mobile_ceu_probe, .remove = __devexit_p(sh_mobile_ceu_remove), }; static int __init sh_mobile_ceu_init(void) { /* Whatever return code */ request_module("sh_mobile_csi2"); return platform_driver_register(&sh_mobile_ceu_driver); } static void __exit sh_mobile_ceu_exit(void) { platform_driver_unregister(&sh_mobile_ceu_driver); } module_init(sh_mobile_ceu_init); module_exit(sh_mobile_ceu_exit); MODULE_DESCRIPTION("SuperH Mobile CEU driver"); MODULE_AUTHOR("Magnus Damm"); MODULE_LICENSE("GPL"); MODULE_ALIAS("platform:sh_mobile_ceu");
gpl-2.0
humberos/android_kernel_samsung_aries
drivers/media/video/gspca/sonixj.c
2386
97019
/* * Sonix sn9c102p sn9c105 sn9c120 (jpeg) subdriver * * Copyright (C) 2009-2010 Jean-François Moine <http://moinejf.free.fr> * Copyright (C) 2005 Michel Xhaard mxhaard@magic.fr * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ #define MODULE_NAME "sonixj" #include <linux/input.h> #include "gspca.h" #include "jpeg.h" MODULE_AUTHOR("Jean-François Moine <http://moinejf.free.fr>"); MODULE_DESCRIPTION("GSPCA/SONIX JPEG USB Camera Driver"); MODULE_LICENSE("GPL"); /* controls */ enum e_ctrl { BRIGHTNESS, CONTRAST, COLORS, BLUE, RED, GAMMA, AUTOGAIN, HFLIP, VFLIP, SHARPNESS, ILLUM, FREQ, NCTRLS /* number of controls */ }; /* specific webcam descriptor */ struct sd { struct gspca_dev gspca_dev; /* !! 
must be the first item */ struct gspca_ctrl ctrls[NCTRLS]; atomic_t avg_lum; u32 exposure; struct work_struct work; struct workqueue_struct *work_thread; u32 pktsz; /* (used by pkt_scan) */ u16 npkt; s8 nchg; s8 short_mark; u8 quality; /* image quality */ #define QUALITY_MIN 25 #define QUALITY_MAX 90 #define QUALITY_DEF 70 u8 reg01; u8 reg17; u8 reg18; u8 flags; s8 ag_cnt; #define AG_CNT_START 13 u8 bridge; #define BRIDGE_SN9C102P 0 #define BRIDGE_SN9C105 1 #define BRIDGE_SN9C110 2 #define BRIDGE_SN9C120 3 u8 sensor; /* Type of image sensor chip */ u8 i2c_addr; u8 jpeg_hdr[JPEG_HDR_SZ]; }; enum sensors { SENSOR_ADCM1700, SENSOR_GC0307, SENSOR_HV7131R, SENSOR_MI0360, SENSOR_MI0360B, SENSOR_MO4000, SENSOR_MT9V111, SENSOR_OM6802, SENSOR_OV7630, SENSOR_OV7648, SENSOR_OV7660, SENSOR_PO1030, SENSOR_PO2030N, SENSOR_SOI768, SENSOR_SP80708, }; static void qual_upd(struct work_struct *work); /* device flags */ #define F_PDN_INV 0x01 /* inverse pin S_PWR_DN / sn_xxx tables */ #define F_ILLUM 0x02 /* presence of illuminator */ /* sn9c1xx definitions */ /* register 0x01 */ #define S_PWR_DN 0x01 /* sensor power down */ #define S_PDN_INV 0x02 /* inverse pin S_PWR_DN */ #define V_TX_EN 0x04 /* video transfer enable */ #define LED 0x08 /* output to pin LED */ #define SCL_SEL_OD 0x20 /* open-drain mode */ #define SYS_SEL_48M 0x40 /* system clock 0: 24MHz, 1: 48MHz */ /* register 0x17 */ #define MCK_SIZE_MASK 0x1f /* sensor master clock */ #define SEN_CLK_EN 0x20 /* enable sensor clock */ #define DEF_EN 0x80 /* defect pixel by 0: soft, 1: hard */ /* V4L2 controls supported by the driver */ static void setbrightness(struct gspca_dev *gspca_dev); static void setcontrast(struct gspca_dev *gspca_dev); static void setcolors(struct gspca_dev *gspca_dev); static void setredblue(struct gspca_dev *gspca_dev); static void setgamma(struct gspca_dev *gspca_dev); static void setautogain(struct gspca_dev *gspca_dev); static void sethvflip(struct gspca_dev *gspca_dev); static void 
setsharpness(struct gspca_dev *gspca_dev); static void setillum(struct gspca_dev *gspca_dev); static void setfreq(struct gspca_dev *gspca_dev); static const struct ctrl sd_ctrls[NCTRLS] = { [BRIGHTNESS] = { { .id = V4L2_CID_BRIGHTNESS, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Brightness", .minimum = 0, .maximum = 0xff, .step = 1, .default_value = 0x80, }, .set_control = setbrightness }, [CONTRAST] = { { .id = V4L2_CID_CONTRAST, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Contrast", .minimum = 0, #define CONTRAST_MAX 127 .maximum = CONTRAST_MAX, .step = 1, .default_value = 63, }, .set_control = setcontrast }, [COLORS] = { { .id = V4L2_CID_SATURATION, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Saturation", .minimum = 0, .maximum = 40, .step = 1, #define COLORS_DEF 25 .default_value = COLORS_DEF, }, .set_control = setcolors }, [BLUE] = { { .id = V4L2_CID_BLUE_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Blue Balance", .minimum = 24, .maximum = 40, .step = 1, .default_value = 32, }, .set_control = setredblue }, [RED] = { { .id = V4L2_CID_RED_BALANCE, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Red Balance", .minimum = 24, .maximum = 40, .step = 1, .default_value = 32, }, .set_control = setredblue }, [GAMMA] = { { .id = V4L2_CID_GAMMA, .type = V4L2_CTRL_TYPE_INTEGER, .name = "Gamma", .minimum = 0, .maximum = 40, .step = 1, #define GAMMA_DEF 20 .default_value = GAMMA_DEF, }, .set_control = setgamma }, [AUTOGAIN] = { { .id = V4L2_CID_AUTOGAIN, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Auto Gain", .minimum = 0, .maximum = 1, .step = 1, .default_value = 1 }, .set_control = setautogain }, [HFLIP] = { { .id = V4L2_CID_HFLIP, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Mirror", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, }, .set_control = sethvflip }, [VFLIP] = { { .id = V4L2_CID_VFLIP, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Vflip", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, }, .set_control = sethvflip }, [SHARPNESS] = { { .id = V4L2_CID_SHARPNESS, 
.type = V4L2_CTRL_TYPE_INTEGER, .name = "Sharpness", .minimum = 0, .maximum = 255, .step = 1, .default_value = 90, }, .set_control = setsharpness }, [ILLUM] = { { .id = V4L2_CID_ILLUMINATORS_1, .type = V4L2_CTRL_TYPE_BOOLEAN, .name = "Illuminator / infrared", .minimum = 0, .maximum = 1, .step = 1, .default_value = 0, }, .set_control = setillum }, /* ov7630/ov7648/ov7660 only */ [FREQ] = { { .id = V4L2_CID_POWER_LINE_FREQUENCY, .type = V4L2_CTRL_TYPE_MENU, .name = "Light frequency filter", .minimum = 0, .maximum = 2, /* 0: 0, 1: 50Hz, 2:60Hz */ .step = 1, .default_value = 1, }, .set_control = setfreq }, }; /* table of the disabled controls */ static const __u32 ctrl_dis[] = { [SENSOR_ADCM1700] = (1 << AUTOGAIN) | (1 << HFLIP) | (1 << VFLIP) | (1 << FREQ), [SENSOR_GC0307] = (1 << HFLIP) | (1 << VFLIP) | (1 << FREQ), [SENSOR_HV7131R] = (1 << HFLIP) | (1 << FREQ), [SENSOR_MI0360] = (1 << HFLIP) | (1 << VFLIP) | (1 << FREQ), [SENSOR_MI0360B] = (1 << HFLIP) | (1 << VFLIP) | (1 << FREQ), [SENSOR_MO4000] = (1 << HFLIP) | (1 << VFLIP) | (1 << FREQ), [SENSOR_MT9V111] = (1 << HFLIP) | (1 << VFLIP) | (1 << FREQ), [SENSOR_OM6802] = (1 << HFLIP) | (1 << VFLIP) | (1 << FREQ), [SENSOR_OV7630] = (1 << HFLIP), [SENSOR_OV7648] = (1 << HFLIP), [SENSOR_OV7660] = (1 << AUTOGAIN) | (1 << HFLIP) | (1 << VFLIP), [SENSOR_PO1030] = (1 << AUTOGAIN) | (1 << HFLIP) | (1 << VFLIP) | (1 << FREQ), [SENSOR_PO2030N] = (1 << AUTOGAIN) | (1 << FREQ), [SENSOR_SOI768] = (1 << AUTOGAIN) | (1 << HFLIP) | (1 << VFLIP) | (1 << FREQ), [SENSOR_SP80708] = (1 << AUTOGAIN) | (1 << HFLIP) | (1 << VFLIP) | (1 << FREQ), }; static const struct v4l2_pix_format cif_mode[] = { {352, 288, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 352, .sizeimage = 352 * 288 * 4 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0}, }; static const struct v4l2_pix_format vga_mode[] = { {160, 120, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 160, .sizeimage = 160 * 120 * 4 / 8 + 590, .colorspace = 
V4L2_COLORSPACE_JPEG, .priv = 2}, {320, 240, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 320, .sizeimage = 320 * 240 * 3 / 8 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 1}, {640, 480, V4L2_PIX_FMT_JPEG, V4L2_FIELD_NONE, .bytesperline = 640, /* Note 3 / 8 is not large enough, not even 5 / 8 is ?! */ .sizeimage = 640 * 480 * 3 / 4 + 590, .colorspace = V4L2_COLORSPACE_JPEG, .priv = 0}, }; static const u8 sn_adcm1700[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x43, 0x60, 0x00, 0x1a, 0x00, 0x00, 0x00, /* reg8 reg9 rega regb regc regd rege regf */ 0x80, 0x51, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x05, 0x01, 0x05, 0x16, 0x12, 0x42, /* reg18 reg19 reg1a reg1b */ 0x06, 0x00, 0x00, 0x00 }; static const u8 sn_gc0307[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x61, 0x62, 0x00, 0x1a, 0x00, 0x00, 0x00, /* reg8 reg9 rega regb regc regd rege regf */ 0x80, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x03, 0x01, 0x08, 0x28, 0x1e, 0x02, /* reg18 reg19 reg1a reg1b */ 0x06, 0x00, 0x00, 0x00 }; static const u8 sn_hv7131[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x03, 0x60, 0x00, 0x1a, 0x20, 0x20, 0x20, /* reg8 reg9 rega regb regc regd rege regf */ 0x81, 0x11, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x00, 0x01, 0x03, 0x28, 0x1e, 0x41, /* reg18 reg19 reg1a reg1b */ 0x0a, 0x00, 0x00, 0x00 }; static const u8 sn_mi0360[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x63, 0x40, 0x00, 0x1a, 0x20, 0x20, 0x20, /* reg8 reg9 rega regb regc regd rege regf */ 0x81, 0x5d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x00, 0x02, 0x0a, 0x28, 0x1e, 0x61, /* reg18 reg19 reg1a reg1b */ 0x06, 0x00, 0x00, 0x00 }; static const u8 sn_mi0360b[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 
reg5 reg6 reg7 */ 0x00, 0x61, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00, /* reg8 reg9 rega regb regc regd rege regf */ 0x81, 0x5d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x00, 0x02, 0x0a, 0x28, 0x1e, 0x40, /* reg18 reg19 reg1a reg1b */ 0x06, 0x00, 0x00, 0x00 }; static const u8 sn_mo4000[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x23, 0x60, 0x00, 0x1a, 0x00, 0x20, 0x18, /* reg8 reg9 rega regb regc regd rege regf */ 0x81, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x0b, 0x0f, 0x14, 0x28, 0x1e, 0x40, /* reg18 reg19 reg1a reg1b */ 0x08, 0x00, 0x00, 0x00 }; static const u8 sn_mt9v111[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x61, 0x40, 0x00, 0x1a, 0x20, 0x20, 0x20, /* reg8 reg9 rega regb regc regd rege regf */ 0x81, 0x5c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x00, 0x02, 0x1c, 0x28, 0x1e, 0x40, /* reg18 reg19 reg1a reg1b */ 0x06, 0x00, 0x00, 0x00 }; static const u8 sn_om6802[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x23, 0x72, 0x00, 0x1a, 0x20, 0x20, 0x19, /* reg8 reg9 rega regb regc regd rege regf */ 0x80, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x51, 0x01, 0x00, 0x28, 0x1e, 0x40, /* reg18 reg19 reg1a reg1b */ 0x05, 0x00, 0x00, 0x00 }; static const u8 sn_ov7630[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x21, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00, /* reg8 reg9 rega regb regc regd rege regf */ 0x81, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x04, 0x01, 0x0a, 0x28, 0x1e, 0xc2, /* reg18 reg19 reg1a reg1b */ 0x0b, 0x00, 0x00, 0x00 }; static const u8 sn_ov7648[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x63, 0x40, 0x00, 0x1a, 0x20, 0x20, 0x20, /* reg8 reg9 rega regb 
regc regd rege regf */ 0x81, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x00, 0x01, 0x00, 0x28, 0x1e, 0x00, /* reg18 reg19 reg1a reg1b */ 0x0b, 0x00, 0x00, 0x00 }; static const u8 sn_ov7660[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x61, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00, /* reg8 reg9 rega regb regc regd rege regf */ 0x81, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x01, 0x01, 0x08, 0x28, 0x1e, 0x20, /* reg18 reg19 reg1a reg1b */ 0x07, 0x00, 0x00, 0x00 }; static const u8 sn_po1030[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x21, 0x62, 0x00, 0x1a, 0x20, 0x20, 0x20, /* reg8 reg9 rega regb regc regd rege regf */ 0x81, 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x00, 0x06, 0x06, 0x28, 0x1e, 0x00, /* reg18 reg19 reg1a reg1b */ 0x07, 0x00, 0x00, 0x00 }; static const u8 sn_po2030n[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x63, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00, /* reg8 reg9 rega regb regc regd rege regf */ 0x81, 0x6e, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x00, 0x01, 0x14, 0x28, 0x1e, 0x00, /* reg18 reg19 reg1a reg1b */ 0x07, 0x00, 0x00, 0x00 }; static const u8 sn_soi768[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x21, 0x40, 0x00, 0x1a, 0x00, 0x00, 0x00, /* reg8 reg9 rega regb regc regd rege regf */ 0x81, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 reg12 reg13 reg14 reg15 reg16 reg17 */ 0x03, 0x00, 0x00, 0x01, 0x08, 0x28, 0x1e, 0x00, /* reg18 reg19 reg1a reg1b */ 0x07, 0x00, 0x00, 0x00 }; static const u8 sn_sp80708[0x1c] = { /* reg0 reg1 reg2 reg3 reg4 reg5 reg6 reg7 */ 0x00, 0x63, 0x60, 0x00, 0x1a, 0x20, 0x20, 0x20, /* reg8 reg9 rega regb regc regd rege regf */ 0x81, 0x18, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* reg10 reg11 
reg12 reg13 reg14 reg15 reg16 reg17 */
	0x03, 0x00, 0x00, 0x03, 0x04, 0x28, 0x1e, 0x00,
				/* reg18 reg19 reg1a reg1b */
	0x07, 0x00, 0x00, 0x00
};

/*
 * Sensor-specific bridge register blocks - !! index = SENSOR_xxx.
 * Each entry points at the 0x1c-byte sn_* table above that matches
 * the probed sensor.
 */
static const u8 *sn_tb[] = {
	[SENSOR_ADCM1700] = sn_adcm1700,
	[SENSOR_GC0307] = sn_gc0307,
	[SENSOR_HV7131R] = sn_hv7131,
	[SENSOR_MI0360] = sn_mi0360,
	[SENSOR_MI0360B] = sn_mi0360b,
	[SENSOR_MO4000] = sn_mo4000,
	[SENSOR_MT9V111] = sn_mt9v111,
	[SENSOR_OM6802] = sn_om6802,
	[SENSOR_OV7630] = sn_ov7630,
	[SENSOR_OV7648] = sn_ov7648,
	[SENSOR_OV7660] = sn_ov7660,
	[SENSOR_PO1030] = sn_po1030,
	[SENSOR_PO2030N] = sn_po2030n,
	[SENSOR_SOI768] = sn_soi768,
	[SENSOR_SP80708] = sn_sp80708,
};

/* default gamma table - 17 curve control points */
static const u8 gamma_def[17] = {
	0x00, 0x2d, 0x46, 0x5a, 0x6c, 0x7c, 0x8b, 0x99,
	0xa6, 0xb2, 0xbf, 0xca, 0xd5, 0xe0, 0xeb, 0xf5,
	0xff
};
/* gamma for sensor ADCM1700 */
static const u8 gamma_spec_0[17] = {
	0x0f, 0x39, 0x5a, 0x74, 0x86, 0x95, 0xa6, 0xb4,
	0xbd, 0xc4, 0xcc, 0xd4, 0xd5, 0xde, 0xe4, 0xed,
	0xf5
};
/* gamma for sensors HV7131R and MT9V111 */
static const u8 gamma_spec_1[17] = {
	0x08, 0x3a, 0x52, 0x65, 0x75, 0x83, 0x91, 0x9d,
	0xa9, 0xb4, 0xbe, 0xc8, 0xd2, 0xdb, 0xe4, 0xed,
	0xf5
};
/* gamma for sensor GC0307 */
static const u8 gamma_spec_2[17] = {
	0x14, 0x37, 0x50, 0x6a, 0x7c, 0x8d, 0x9d, 0xab,
	0xb5, 0xbf, 0xc2, 0xcb, 0xd1, 0xd6, 0xdb, 0xe1,
	0xeb
};
/* gamma for sensor SP80708 */
static const u8 gamma_spec_3[17] = {
	0x0a, 0x2d, 0x4e, 0x68, 0x7d, 0x8f, 0x9f, 0xab,
	0xb7, 0xc2, 0xcc, 0xd3, 0xd8, 0xde, 0xe2, 0xe5,
	0xe6
};

/* color matrix and offsets */
static const u8 reg84[] = {
	0x14, 0x00, 0x27, 0x00, 0x07, 0x00,	/* YR YG YB gains */
	0xe8, 0x0f, 0xda, 0x0f, 0x40, 0x00,	/* UR UG UB */
	0x3e, 0x00, 0xcd, 0x0f, 0xf7, 0x0f,	/* VR VG VB */
	0x00, 0x00, 0x00			/* YUV offsets */
};

/*
 * Pseudo first byte in the command tables below: the row is not a
 * real command but a sleep request, with the delay in milliseconds
 * in the second byte (cf. the "delay 20ms" rows further down).
 */
#define DELAY	0xdd

static const u8 adcm1700_sensor_init[][8] = {
	{0xa0, 0x51, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x10},
	{0xb0, 0x51, 0x04, 0x08, 0x00, 0x00, 0x00, 0x10}, /* reset */
	{DELAY, 0x80,
0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, {0xb0, 0x51, 0x04, 0x00, 0x00, 0x00, 0x00, 0x10}, {DELAY, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, {0xb0, 0x51, 0x0c, 0xe0, 0x2e, 0x00, 0x00, 0x10}, {0xb0, 0x51, 0x10, 0x02, 0x02, 0x00, 0x00, 0x10}, {0xb0, 0x51, 0x14, 0x0e, 0x0e, 0x00, 0x00, 0x10}, {0xb0, 0x51, 0x1c, 0x00, 0x80, 0x00, 0x00, 0x10}, {0xb0, 0x51, 0x20, 0x01, 0x00, 0x00, 0x00, 0x10}, {DELAY, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, {0xb0, 0x51, 0x04, 0x04, 0x00, 0x00, 0x00, 0x10}, {DELAY, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, {0xb0, 0x51, 0x04, 0x01, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x51, 0xfe, 0x10, 0x00, 0x00, 0x00, 0x10}, {0xb0, 0x51, 0x14, 0x01, 0x00, 0x00, 0x00, 0x10}, {0xb0, 0x51, 0x32, 0x00, 0x00, 0x00, 0x00, 0x10}, {} }; static const u8 adcm1700_sensor_param1[][8] = { {0xb0, 0x51, 0x26, 0xf9, 0x01, 0x00, 0x00, 0x10}, /* exposure? */ {0xd0, 0x51, 0x1e, 0x8e, 0x8e, 0x8e, 0x8e, 0x10}, {0xa0, 0x51, 0xfe, 0x01, 0x00, 0x00, 0x00, 0x10}, {0xb0, 0x51, 0x00, 0x02, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x51, 0xfe, 0x10, 0x00, 0x00, 0x00, 0x10}, {0xb0, 0x51, 0x32, 0x00, 0x72, 0x00, 0x00, 0x10}, {0xd0, 0x51, 0x1e, 0xbe, 0xd7, 0xe8, 0xbe, 0x10}, /* exposure? 
*/ {0xa0, 0x51, 0xfe, 0x01, 0x00, 0x00, 0x00, 0x10}, {0xb0, 0x51, 0x00, 0x02, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x51, 0xfe, 0x10, 0x00, 0x00, 0x00, 0x10}, {0xb0, 0x51, 0x32, 0x00, 0xa2, 0x00, 0x00, 0x10}, {} }; static const u8 gc0307_sensor_init[][8] = { {0xa0, 0x21, 0x43, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x44, 0xa2, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x01, 0x6a, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x02, 0x70, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x10, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x1d, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x11, 0x05, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x05, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x06, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x07, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x08, 0x02, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x09, 0x01, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x0a, 0xe8, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x0b, 0x02, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x0c, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x0d, 0x22, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x0e, 0x02, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x0f, 0xb2, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x12, 0x70, 0x00, 0x00, 0x00, 0x10}, {DELAY, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*delay 10ms*/ {0xa0, 0x21, 0x13, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x15, 0xb8, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x16, 0x13, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x17, 0x52, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x18, 0x50, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x1e, 0x0d, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x1f, 0x32, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x61, 0x90, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x63, 0x70, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x65, 0x98, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x67, 0x90, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x03, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x04, 0x96, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x45, 0x27, 0x00, 
0x00, 0x00, 0x10}, {0xa0, 0x21, 0x47, 0x2c, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x43, 0x47, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x44, 0xd8, 0x00, 0x00, 0x00, 0x10}, {} }; static const u8 gc0307_sensor_param1[][8] = { {0xa0, 0x21, 0x68, 0x13, 0x00, 0x00, 0x00, 0x10}, {0xd0, 0x21, 0x61, 0x80, 0x00, 0x80, 0x00, 0x10}, {0xc0, 0x21, 0x65, 0x80, 0x00, 0x80, 0x00, 0x10}, {0xc0, 0x21, 0x63, 0xa0, 0x00, 0xa6, 0x00, 0x10}, /*param3*/ {0xa0, 0x21, 0x01, 0x6e, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x21, 0x02, 0x88, 0x00, 0x00, 0x00, 0x10}, {} }; static const u8 hv7131r_sensor_init[][8] = { {0xc1, 0x11, 0x01, 0x08, 0x01, 0x00, 0x00, 0x10}, {0xb1, 0x11, 0x34, 0x17, 0x7f, 0x00, 0x00, 0x10}, {0xd1, 0x11, 0x40, 0xff, 0x7f, 0x7f, 0x7f, 0x10}, /* {0x91, 0x11, 0x44, 0x00, 0x00, 0x00, 0x00, 0x10}, */ {0xd1, 0x11, 0x10, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x11, 0x14, 0x01, 0xe2, 0x02, 0x82, 0x10}, /* {0x91, 0x11, 0x18, 0x00, 0x00, 0x00, 0x00, 0x10}, */ {0xa1, 0x11, 0x01, 0x08, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x11, 0x01, 0x08, 0x00, 0x00, 0x00, 0x10}, {0xc1, 0x11, 0x25, 0x00, 0x61, 0xa8, 0x00, 0x10}, {0xa1, 0x11, 0x30, 0x22, 0x00, 0x00, 0x00, 0x10}, {0xc1, 0x11, 0x31, 0x20, 0x2e, 0x20, 0x00, 0x10}, {0xc1, 0x11, 0x25, 0x00, 0xc3, 0x50, 0x00, 0x10}, {0xa1, 0x11, 0x30, 0x07, 0x00, 0x00, 0x00, 0x10}, /* gain14 */ {0xc1, 0x11, 0x31, 0x10, 0x10, 0x10, 0x00, 0x10}, /* r g b 101a10 */ {0xa1, 0x11, 0x01, 0x08, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x11, 0x20, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x11, 0x21, 0xd0, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x11, 0x22, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x11, 0x23, 0x09, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x11, 0x01, 0x08, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x11, 0x20, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x11, 0x21, 0xd0, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x11, 0x22, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x11, 0x23, 0x10, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x11, 0x01, 0x18, 0x00, 0x00, 0x00, 0x10}, /* set sensor clock */ {} }; static const u8 mi0360_sensor_init[][8] = { 
{0xb1, 0x5d, 0x07, 0x00, 0x02, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x0d, 0x00, 0x01, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x01, 0x00, 0x08, 0x00, 0x16, 0x10}, {0xd1, 0x5d, 0x03, 0x01, 0xe2, 0x02, 0x82, 0x10}, {0xd1, 0x5d, 0x05, 0x00, 0x09, 0x00, 0x53, 0x10}, {0xb1, 0x5d, 0x0d, 0x00, 0x02, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x10, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x14, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x16, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x18, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x32, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x20, 0x91, 0x01, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x22, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x24, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x26, 0x00, 0x00, 0x00, 0x24, 0x10}, {0xd1, 0x5d, 0x2f, 0xf7, 0xB0, 0x00, 0x04, 0x10}, {0xd1, 0x5d, 0x31, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x33, 0x00, 0x00, 0x01, 0x00, 0x10}, {0xb1, 0x5d, 0x3d, 0x06, 0x8f, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x40, 0x01, 0xe0, 0x00, 0xd1, 0x10}, {0xb1, 0x5d, 0x44, 0x00, 0x82, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x58, 0x00, 0x78, 0x00, 0x43, 0x10}, {0xd1, 0x5d, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x5c, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x5e, 0x00, 0x00, 0xa3, 0x1d, 0x10}, {0xb1, 0x5d, 0x62, 0x04, 0x11, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x20, 0x91, 0x01, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x20, 0x11, 0x01, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x09, 0x00, 0x64, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x2b, 0x00, 0xa0, 0x00, 0xb0, 0x10}, {0xd1, 0x5d, 0x2d, 0x00, 0xa0, 0x00, 0xa0, 0x10}, {0xb1, 0x5d, 0x0a, 0x00, 0x02, 0x00, 0x00, 0x10}, /* sensor clck ?2 */ {0xb1, 0x5d, 0x06, 0x00, 
0x30, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x05, 0x00, 0x0a, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x09, 0x02, 0x35, 0x00, 0x00, 0x10}, /* exposure 2 */ {0xd1, 0x5d, 0x2b, 0x00, 0xb9, 0x00, 0xe3, 0x10}, {0xd1, 0x5d, 0x2d, 0x00, 0x5f, 0x00, 0xb9, 0x10}, /* 42 */ /* {0xb1, 0x5d, 0x35, 0x00, 0x67, 0x00, 0x00, 0x10}, * gain orig */ /* {0xb1, 0x5d, 0x35, 0x00, 0x20, 0x00, 0x00, 0x10}, * gain */ {0xb1, 0x5d, 0x07, 0x00, 0x03, 0x00, 0x00, 0x10}, /* update */ {0xb1, 0x5d, 0x07, 0x00, 0x02, 0x00, 0x00, 0x10}, /* sensor on */ {} }; static const u8 mi0360b_sensor_init[][8] = { {0xb1, 0x5d, 0x07, 0x00, 0x02, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x0d, 0x00, 0x01, 0x00, 0x00, 0x10}, {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*delay 20ms*/ {0xb1, 0x5d, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x10}, {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /*delay 20ms*/ {0xd1, 0x5d, 0x01, 0x00, 0x08, 0x00, 0x16, 0x10}, {0xd1, 0x5d, 0x03, 0x01, 0xe2, 0x02, 0x82, 0x10}, {0xd1, 0x5d, 0x05, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x0d, 0x00, 0x02, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x10, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x14, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x16, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x18, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x1a, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x1c, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xb1, 0x5d, 0x32, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x20, 0x11, 0x01, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x22, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x24, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x26, 0x00, 0x00, 0x00, 0x24, 0x10}, {0xd1, 0x5d, 0x2f, 0xf7, 0xb0, 0x00, 0x04, 0x10}, {0xd1, 0x5d, 0x31, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x5d, 0x33, 0x00, 0x00, 0x01, 0x00, 0x10}, {0xb1, 0x5d, 0x3d, 0x06, 0x8f, 0x00, 0x00, 0x10}, 
{0xd1, 0x5d, 0x40, 0x01, 0xe0, 0x00, 0xd1, 0x10},
	{0xb1, 0x5d, 0x44, 0x00, 0x82, 0x00, 0x00, 0x10},
	{0xd1, 0x5d, 0x58, 0x00, 0x78, 0x00, 0x43, 0x10},
	{0xd1, 0x5d, 0x5a, 0x00, 0x00, 0x00, 0x00, 0x10},
	{0xd1, 0x5d, 0x5c, 0x00, 0x00, 0x00, 0x00, 0x10},
	{0xd1, 0x5d, 0x5e, 0x00, 0x00, 0xa3, 0x1d, 0x10},
	{0xb1, 0x5d, 0x62, 0x04, 0x11, 0x00, 0x00, 0x10},
	{0xb1, 0x5d, 0x20, 0x11, 0x01, 0x00, 0x00, 0x10},
	{0xb1, 0x5d, 0x20, 0x11, 0x01, 0x00, 0x00, 0x10},
	{0xb1, 0x5d, 0x09, 0x00, 0x64, 0x00, 0x00, 0x10},
	{0xd1, 0x5d, 0x2b, 0x00, 0x33, 0x00, 0xa0, 0x10},
	{0xd1, 0x5d, 0x2d, 0x00, 0xa0, 0x00, 0x33, 0x10},
	{}
};
/*
 * MI0360B second command group - blanking, exposure and gain values.
 * NOTE(review): by analogy with mi0360_sensor_init the *_param1 rows
 * appear to be sent after the main init - confirm against the sender.
 */
static const u8 mi0360b_sensor_param1[][8] = {
	{0xb1, 0x5d, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x10},
	{0xb1, 0x5d, 0x06, 0x00, 0x53, 0x00, 0x00, 0x10},
	{0xb1, 0x5d, 0x05, 0x00, 0x09, 0x00, 0x00, 0x10},
	{0xb1, 0x5d, 0x09, 0x02, 0x35, 0x00, 0x00, 0x10}, /* exposure 2 */
	{0xd1, 0x5d, 0x2b, 0x00, 0xd1, 0x01, 0xc9, 0x10},
	{0xd1, 0x5d, 0x2d, 0x00, 0xed, 0x00, 0xd1, 0x10},
	{0xb1, 0x5d, 0x07, 0x00, 0x03, 0x00, 0x00, 0x10}, /* update */
	{0xb1, 0x5d, 0x07, 0x00, 0x02, 0x00, 0x00, 0x10}, /* sensor on */
	{}
};
/* MO4000 sensor register setup - one 8-byte command row per register */
static const u8 mo4000_sensor_init[][8] = {
	{0xa1, 0x21, 0x01, 0x02, 0x00, 0x00, 0x00, 0x10},
	{0xa1, 0x21, 0x02, 0x00, 0x00, 0x00, 0x00, 0x10},
	{0xa1, 0x21, 0x03, 0x00, 0x00, 0x00, 0x00, 0x10},
	{0xa1, 0x21, 0x04, 0x00, 0x00, 0x00, 0x00, 0x10},
	{0xa1, 0x21, 0x05, 0x00, 0x00, 0x00, 0x00, 0x10},
	{0xa1, 0x21, 0x05, 0x04, 0x00, 0x00, 0x00, 0x10},
	{0xa1, 0x21, 0x06, 0x80, 0x00, 0x00, 0x00, 0x10},
	{0xa1, 0x21, 0x06, 0x81, 0x00, 0x00, 0x00, 0x10},
	{0xa1, 0x21, 0x0e, 0x00, 0x00, 0x00, 0x00, 0x10},
	{0xa1, 0x21, 0x11, 0x00, 0x00, 0x00, 0x00, 0x10},
	{0xa1, 0x21, 0x11, 0x20, 0x00, 0x00, 0x00, 0x10},
	{0xa1, 0x21, 0x11, 0x30, 0x00, 0x00, 0x00, 0x10},
	{0xa1, 0x21, 0x11, 0x38, 0x00, 0x00, 0x00, 0x10},
	{0xa1, 0x21, 0x11, 0x38, 0x00, 0x00, 0x00, 0x10},
	{0xa1, 0x21, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10},
	{0xa1, 0x21, 0x10, 0x00, 0x00, 0x00, 0x00, 0x10},
	{0xa1, 0x21, 0x0f, 0x20, 0x00, 0x00, 0x00, 0x10},
{0xa1, 0x21, 0x10, 0x20, 0x00, 0x00, 0x00, 0x10},
	{0xa1, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10},
	{0xa1, 0x21, 0x11, 0x38, 0x00, 0x00, 0x00, 0x10},
	{}
};
/*
 * MT9V111 sensor setup: reset, IFP/sensor core selection and capture
 * window geometry.  One 8-byte command row per register write; DELAY
 * rows request a sleep (ms in the second byte).
 */
static const u8 mt9v111_sensor_init[][8] = {
	{0xb1, 0x5c, 0x0d, 0x00, 0x01, 0x00, 0x00, 0x10}, /* reset? */
	{DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */
	{0xb1, 0x5c, 0x0d, 0x00, 0x00, 0x00, 0x00, 0x10},
	{0xb1, 0x5c, 0x01, 0x00, 0x01, 0x00, 0x00, 0x10}, /* IFP select */
	{0xb1, 0x5c, 0x08, 0x04, 0x80, 0x00, 0x00, 0x10}, /* output fmt ctrl */
	{0xb1, 0x5c, 0x06, 0x00, 0x00, 0x00, 0x00, 0x10}, /* op mode ctrl */
	{0xb1, 0x5c, 0x01, 0x00, 0x04, 0x00, 0x00, 0x10}, /* sensor select */
	{0xb1, 0x5c, 0x08, 0x00, 0x08, 0x00, 0x00, 0x10}, /* row start */
	{0xb1, 0x5c, 0x02, 0x00, 0x16, 0x00, 0x00, 0x10}, /* col start */
	{0xb1, 0x5c, 0x03, 0x01, 0xe7, 0x00, 0x00, 0x10}, /* window height */
	{0xb1, 0x5c, 0x04, 0x02, 0x87, 0x00, 0x00, 0x10}, /* window width */
	{0xb1, 0x5c, 0x07, 0x30, 0x02, 0x00, 0x00, 0x10}, /* output ctrl */
	{0xb1, 0x5c, 0x0c, 0x00, 0x00, 0x00, 0x00, 0x10}, /* shutter delay */
	{0xb1, 0x5c, 0x12, 0x00, 0xb0, 0x00, 0x00, 0x10}, /* zoom col start */
	{0xb1, 0x5c, 0x13, 0x00, 0x7c, 0x00, 0x00, 0x10}, /* zoom row start */
	{0xb1, 0x5c, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x10}, /* digital zoom */
	{0xb1, 0x5c, 0x20, 0x00, 0x00, 0x00, 0x00, 0x10}, /* read mode */
	{0xb1, 0x5c, 0x20, 0x00, 0x00, 0x00, 0x00, 0x10},
	{}
};
/* MT9V111 second command group - gains and blanking */
static const u8 mt9v111_sensor_param1[][8] = {
	{0xd1, 0x5c, 0x2b, 0x00, 0x33, 0x00, 0xad, 0x10}, /* G1 and B gains */
	{0xd1, 0x5c, 0x2d, 0x00, 0xad, 0x00, 0x33, 0x10}, /* R and G2 gains */
	{0xb1, 0x5c, 0x06, 0x00, 0x40, 0x00, 0x00, 0x10}, /* vert blanking */
	{0xb1, 0x5c, 0x05, 0x00, 0x09, 0x00, 0x00, 0x10}, /* horiz blanking */
	{0xb1, 0x5c, 0x35, 0x01, 0xc0, 0x00, 0x00, 0x10}, /* global gain */
	{}
};
/* OM6802 pre-init pair (exactly 2 rows, indexed by the sender) */
static const u8 om6802_init0[2][8] = {
/*fixme: variable*/
	{0xa0, 0x34, 0x29, 0x0e, 0x00, 0x00, 0x00, 0x10},
	{0xa0, 0x34, 0x23, 0xb0, 0x00, 0x00, 0x00, 0x10},
};
static const u8
om6802_sensor_init[][8] = { {0xa0, 0x34, 0xdf, 0x6d, 0x00, 0x00, 0x00, 0x10}, /* factory mode */ {0xa0, 0x34, 0xdd, 0x18, 0x00, 0x00, 0x00, 0x10}, /* output raw RGB */ {0xa0, 0x34, 0x5a, 0xc0, 0x00, 0x00, 0x00, 0x10}, /* {0xa0, 0x34, 0xfb, 0x11, 0x00, 0x00, 0x00, 0x10}, */ {0xa0, 0x34, 0xf0, 0x04, 0x00, 0x00, 0x00, 0x10}, /* auto-exposure speed (0) / white balance mode (auto RGB) */ /* {0xa0, 0x34, 0xf1, 0x02, 0x00, 0x00, 0x00, 0x10}, * set color mode */ /* {0xa0, 0x34, 0xfe, 0x5b, 0x00, 0x00, 0x00, 0x10}, * max AGC value in AE */ /* {0xa0, 0x34, 0xe5, 0x00, 0x00, 0x00, 0x00, 0x10}, * preset AGC */ /* {0xa0, 0x34, 0xe6, 0x00, 0x00, 0x00, 0x00, 0x10}, * preset brightness */ /* {0xa0, 0x34, 0xe7, 0x00, 0x00, 0x00, 0x00, 0x10}, * preset contrast */ /* {0xa0, 0x34, 0xe8, 0x31, 0x00, 0x00, 0x00, 0x10}, * preset gamma */ {0xa0, 0x34, 0xe9, 0x0f, 0x00, 0x00, 0x00, 0x10}, /* luminance mode (0x4f -> AutoExpo on) */ {0xa0, 0x34, 0xe4, 0xff, 0x00, 0x00, 0x00, 0x10}, /* preset shutter */ /* {0xa0, 0x34, 0xef, 0x00, 0x00, 0x00, 0x00, 0x10}, * auto frame rate */ /* {0xa0, 0x34, 0xfb, 0xee, 0x00, 0x00, 0x00, 0x10}, */ {0xa0, 0x34, 0x5d, 0x80, 0x00, 0x00, 0x00, 0x10}, {} }; static const u8 om6802_sensor_param1[][8] = { {0xa0, 0x34, 0x71, 0x84, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x34, 0x72, 0x05, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x34, 0x68, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xa0, 0x34, 0x69, 0x01, 0x00, 0x00, 0x00, 0x10}, {} }; static const u8 ov7630_sensor_init[][8] = { {0xa1, 0x21, 0x76, 0x01, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x12, 0xc8, 0x00, 0x00, 0x00, 0x10}, {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */ {0xa1, 0x21, 0x12, 0x48, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x12, 0xc8, 0x00, 0x00, 0x00, 0x10}, {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */ {0xa1, 0x21, 0x12, 0x48, 0x00, 0x00, 0x00, 0x10}, /* win: i2c_r from 00 to 80 */ {0xd1, 0x21, 0x03, 0x80, 0x10, 0x20, 0x80, 0x10}, {0xb1, 0x21, 0x0c, 0x20, 0x20, 0x00, 0x00, 0x10}, /* HDG: 
0x11 was 0x00 change to 0x01 for better exposure (15 fps instead of 30) 0x13 was 0xc0 change to 0xc3 for auto gain and exposure */ {0xd1, 0x21, 0x11, 0x01, 0x48, 0xc3, 0x00, 0x10}, {0xb1, 0x21, 0x15, 0x80, 0x03, 0x00, 0x00, 0x10}, {0xd1, 0x21, 0x17, 0x1b, 0xbd, 0x05, 0xf6, 0x10}, {0xa1, 0x21, 0x1b, 0x04, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x21, 0x1f, 0x00, 0x80, 0x80, 0x80, 0x10}, {0xd1, 0x21, 0x23, 0xde, 0x10, 0x8a, 0xa0, 0x10}, {0xc1, 0x21, 0x27, 0xca, 0xa2, 0x74, 0x00, 0x10}, {0xd1, 0x21, 0x2a, 0x88, 0x00, 0x88, 0x01, 0x10}, {0xc1, 0x21, 0x2e, 0x80, 0x00, 0x18, 0x00, 0x10}, {0xa1, 0x21, 0x21, 0x08, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x22, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x2e, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xb1, 0x21, 0x32, 0xc2, 0x08, 0x00, 0x00, 0x10}, {0xb1, 0x21, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x21, 0x60, 0x05, 0x40, 0x12, 0x57, 0x10}, {0xa1, 0x21, 0x64, 0x73, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x21, 0x65, 0x00, 0x55, 0x01, 0xac, 0x10}, {0xa1, 0x21, 0x69, 0x38, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x21, 0x6f, 0x1f, 0x01, 0x00, 0x10, 0x10}, {0xd1, 0x21, 0x73, 0x50, 0x20, 0x02, 0x01, 0x10}, {0xd1, 0x21, 0x77, 0xf3, 0x90, 0x98, 0x98, 0x10}, {0xc1, 0x21, 0x7b, 0x00, 0x4c, 0xf7, 0x00, 0x10}, {0xd1, 0x21, 0x17, 0x1b, 0xbd, 0x05, 0xf6, 0x10}, {0xa1, 0x21, 0x1b, 0x04, 0x00, 0x00, 0x00, 0x10}, {} }; static const u8 ov7630_sensor_param1[][8] = { {0xa1, 0x21, 0x12, 0x48, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x12, 0x48, 0x00, 0x00, 0x00, 0x10}, /*fixme: + 0x12, 0x04*/ /* {0xa1, 0x21, 0x75, 0x82, 0x00, 0x00, 0x00, 0x10}, * COMN * set by setvflip */ {0xa1, 0x21, 0x10, 0x32, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xb1, 0x21, 0x01, 0x80, 0x80, 0x00, 0x00, 0x10}, /* */ /* {0xa1, 0x21, 0x2a, 0x88, 0x00, 0x00, 0x00, 0x10}, * set by setfreq */ /* {0xa1, 0x21, 0x2b, 0x34, 0x00, 0x00, 0x00, 0x10}, * set by setfreq */ /* */ {0xa1, 0x21, 0x10, 0x83, 0x00, 0x00, 0x00, 0x10}, /* {0xb1, 0x21, 0x01, 0x88, 0x70, 0x00, 0x00, 0x10}, 
*/ {} }; static const u8 ov7648_sensor_init[][8] = { {0xa1, 0x21, 0x76, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10}, /* reset */ {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */ {0xa1, 0x21, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x21, 0x03, 0xa4, 0x30, 0x88, 0x00, 0x10}, {0xb1, 0x21, 0x11, 0x80, 0x08, 0x00, 0x00, 0x10}, {0xc1, 0x21, 0x13, 0xa0, 0x04, 0x84, 0x00, 0x10}, {0xd1, 0x21, 0x17, 0x1a, 0x02, 0xba, 0xf4, 0x10}, {0xa1, 0x21, 0x1b, 0x04, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x21, 0x1f, 0x41, 0xc0, 0x80, 0x80, 0x10}, {0xd1, 0x21, 0x23, 0xde, 0xa0, 0x80, 0x32, 0x10}, {0xd1, 0x21, 0x27, 0xfe, 0xa0, 0x00, 0x91, 0x10}, {0xd1, 0x21, 0x2b, 0x00, 0x88, 0x85, 0x80, 0x10}, {0xc1, 0x21, 0x2f, 0x9c, 0x00, 0xc4, 0x00, 0x10}, {0xd1, 0x21, 0x60, 0xa6, 0x60, 0x88, 0x12, 0x10}, {0xd1, 0x21, 0x64, 0x88, 0x00, 0x00, 0x94, 0x10}, {0xd1, 0x21, 0x68, 0x7a, 0x0c, 0x00, 0x00, 0x10}, {0xd1, 0x21, 0x6c, 0x11, 0x33, 0x22, 0x00, 0x10}, {0xd1, 0x21, 0x70, 0x11, 0x00, 0x10, 0x50, 0x10}, {0xd1, 0x21, 0x74, 0x20, 0x06, 0x00, 0xb5, 0x10}, {0xd1, 0x21, 0x78, 0x8a, 0x00, 0x00, 0x00, 0x10}, {0xb1, 0x21, 0x7c, 0x00, 0x43, 0x00, 0x00, 0x10}, {0xd1, 0x21, 0x21, 0x86, 0x00, 0xde, 0xa0, 0x10}, /* {0xd1, 0x21, 0x25, 0x80, 0x32, 0xfe, 0xa0, 0x10}, jfm done */ /* {0xd1, 0x21, 0x29, 0x00, 0x91, 0x00, 0x88, 0x10}, jfm done */ /* {0xb1, 0x21, 0x2d, 0x85, 0x00, 0x00, 0x00, 0x10}, set by setfreq */ {} }; static const u8 ov7648_sensor_param1[][8] = { /* {0xa1, 0x21, 0x12, 0x08, 0x00, 0x00, 0x00, 0x10}, jfm done */ /* {0xa1, 0x21, 0x75, 0x06, 0x00, 0x00, 0x00, 0x10}, * COMN * set by setvflip */ {0xa1, 0x21, 0x19, 0x02, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x10, 0x32, 0x00, 0x00, 0x00, 0x10}, /* {0xa1, 0x21, 0x16, 0x00, 0x00, 0x00, 0x00, 0x10}, jfm done */ /* {0xa1, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10}, * GAIN - def */ /* {0xb1, 0x21, 0x01, 0x6c, 0x6c, 0x00, 0x00, 0x10}, * B R - def: 80 */ /*...*/ {0xa1, 0x21, 0x11, 0x81, 0x00, 0x00, 0x00, 0x10}, /* 
CLKRC */ /* {0xa1, 0x21, 0x1e, 0x00, 0x00, 0x00, 0x00, 0x10}, jfm done */ /* {0xa1, 0x21, 0x16, 0x00, 0x00, 0x00, 0x00, 0x10}, jfm done */ /* {0xa1, 0x21, 0x2a, 0x91, 0x00, 0x00, 0x00, 0x10}, jfm done */ /* {0xa1, 0x21, 0x2b, 0x00, 0x00, 0x00, 0x00, 0x10}, jfm done */ /* {0xb1, 0x21, 0x01, 0x64, 0x84, 0x00, 0x00, 0x10}, * B R - def: 80 */ {} }; static const u8 ov7660_sensor_init[][8] = { {0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10}, /* reset SCCB */ {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */ {0xa1, 0x21, 0x12, 0x05, 0x00, 0x00, 0x00, 0x10}, /* Outformat = rawRGB */ {0xa1, 0x21, 0x13, 0xb8, 0x00, 0x00, 0x00, 0x10}, /* init COM8 */ {0xd1, 0x21, 0x00, 0x01, 0x74, 0x92, 0x00, 0x10}, /* GAIN BLUE RED VREF */ {0xd1, 0x21, 0x04, 0x00, 0x7d, 0x62, 0x00, 0x10}, /* COM 1 BAVE GEAVE AECHH */ {0xb1, 0x21, 0x08, 0x83, 0x01, 0x00, 0x00, 0x10}, /* RAVE COM2 */ {0xd1, 0x21, 0x0c, 0x00, 0x08, 0x04, 0x4f, 0x10}, /* COM 3 4 5 6 */ {0xd1, 0x21, 0x10, 0x7f, 0x40, 0x05, 0xff, 0x10}, /* AECH CLKRC COM7 COM8 */ {0xc1, 0x21, 0x14, 0x2c, 0x00, 0x02, 0x00, 0x10}, /* COM9 COM10 */ {0xd1, 0x21, 0x17, 0x10, 0x60, 0x02, 0x7b, 0x10}, /* HSTART HSTOP VSTRT VSTOP */ {0xa1, 0x21, 0x1b, 0x02, 0x00, 0x00, 0x00, 0x10}, /* PSHFT */ {0xb1, 0x21, 0x1e, 0x01, 0x0e, 0x00, 0x00, 0x10}, /* MVFP LAEC */ {0xd1, 0x21, 0x20, 0x07, 0x07, 0x07, 0x07, 0x10}, /* BOS GBOS GROS ROS (BGGR offset) */ /* {0xd1, 0x21, 0x24, 0x68, 0x58, 0xd4, 0x80, 0x10}, */ {0xd1, 0x21, 0x24, 0x78, 0x68, 0xd4, 0x80, 0x10}, /* AEW AEB VPT BBIAS */ {0xd1, 0x21, 0x28, 0x80, 0x30, 0x00, 0x00, 0x10}, /* GbBIAS RSVD EXHCH EXHCL */ {0xd1, 0x21, 0x2c, 0x80, 0x00, 0x00, 0x62, 0x10}, /* RBIAS ADVFL ASDVFH YAVE */ {0xc1, 0x21, 0x30, 0x08, 0x30, 0xb4, 0x00, 0x10}, /* HSYST HSYEN HREF */ {0xd1, 0x21, 0x33, 0x00, 0x07, 0x84, 0x00, 0x10}, /* reserved */ {0xd1, 0x21, 0x37, 0x0c, 0x02, 0x43, 0x00, 0x10}, /* ADC ACOM OFON TSLB */ {0xd1, 0x21, 0x3b, 0x02, 0x6c, 0x19, 0x0e, 0x10}, /* COM11 COM12 COM13 COM14 */ {0xd1, 0x21, 0x3f, 
0x41, 0xc1, 0x22, 0x08, 0x10}, /* EDGE COM15 COM16 COM17 */ {0xd1, 0x21, 0x43, 0xf0, 0x10, 0x78, 0xa8, 0x10}, /* reserved */ {0xd1, 0x21, 0x47, 0x60, 0x80, 0x00, 0x00, 0x10}, /* reserved */ {0xd1, 0x21, 0x4b, 0x00, 0x00, 0x00, 0x00, 0x10}, /* reserved */ {0xd1, 0x21, 0x4f, 0x46, 0x36, 0x0f, 0x17, 0x10}, /* MTX 1 2 3 4 */ {0xd1, 0x21, 0x53, 0x7f, 0x96, 0x40, 0x40, 0x10}, /* MTX 5 6 7 8 */ {0xb1, 0x21, 0x57, 0x40, 0x0f, 0x00, 0x00, 0x10}, /* MTX9 MTXS */ {0xd1, 0x21, 0x59, 0xba, 0x9a, 0x22, 0xb9, 0x10}, /* reserved */ {0xd1, 0x21, 0x5d, 0x9b, 0x10, 0xf0, 0x05, 0x10}, /* reserved */ {0xa1, 0x21, 0x61, 0x60, 0x00, 0x00, 0x00, 0x10}, /* reserved */ {0xd1, 0x21, 0x62, 0x00, 0x00, 0x50, 0x30, 0x10}, /* LCC1 LCC2 LCC3 LCC4 */ {0xa1, 0x21, 0x66, 0x00, 0x00, 0x00, 0x00, 0x10}, /* LCC5 */ {0xd1, 0x21, 0x67, 0x80, 0x7a, 0x90, 0x80, 0x10}, /* MANU */ {0xa1, 0x21, 0x6b, 0x0a, 0x00, 0x00, 0x00, 0x10}, /* band gap reference [0:3] DBLV */ {0xd1, 0x21, 0x6c, 0x30, 0x48, 0x80, 0x74, 0x10}, /* gamma curve */ {0xd1, 0x21, 0x70, 0x64, 0x60, 0x5c, 0x58, 0x10}, /* gamma curve */ {0xd1, 0x21, 0x74, 0x54, 0x4c, 0x40, 0x38, 0x10}, /* gamma curve */ {0xd1, 0x21, 0x78, 0x34, 0x30, 0x2f, 0x2b, 0x10}, /* gamma curve */ {0xd1, 0x21, 0x7c, 0x03, 0x07, 0x17, 0x34, 0x10}, /* gamma curve */ {0xd1, 0x21, 0x80, 0x41, 0x4d, 0x58, 0x63, 0x10}, /* gamma curve */ {0xd1, 0x21, 0x84, 0x6e, 0x77, 0x87, 0x95, 0x10}, /* gamma curve */ {0xc1, 0x21, 0x88, 0xaf, 0xc7, 0xdf, 0x00, 0x10}, /* gamma curve */ {0xc1, 0x21, 0x8b, 0x99, 0x99, 0xcf, 0x00, 0x10}, /* reserved */ {0xb1, 0x21, 0x92, 0x00, 0x00, 0x00, 0x00, 0x10}, /* DM_LNL/H */ /* not in all ms-win traces*/ {0xa1, 0x21, 0xa1, 0x00, 0x00, 0x00, 0x00, 0x10}, {} }; static const u8 ov7660_sensor_param1[][8] = { {0xa1, 0x21, 0x1e, 0x01, 0x00, 0x00, 0x00, 0x10}, /* MVFP */ /* bits[3..0]reserved */ {0xa1, 0x21, 0x1e, 0x01, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x03, 0x00, 0x00, 0x00, 0x00, 0x10}, /* VREF vertical frame ctrl */ {0xa1, 0x21, 0x03, 0x00, 0x00, 0x00, 
0x00, 0x10}, {0xa1, 0x21, 0x10, 0x20, 0x00, 0x00, 0x00, 0x10}, /* AECH 0x20 */ {0xa1, 0x21, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x10}, /* ADVFL */ {0xa1, 0x21, 0x2e, 0x00, 0x00, 0x00, 0x00, 0x10}, /* ADVFH */ {0xa1, 0x21, 0x00, 0x1f, 0x00, 0x00, 0x00, 0x10}, /* GAIN */ /* {0xb1, 0x21, 0x01, 0x78, 0x78, 0x00, 0x00, 0x10}, * BLUE */ /****** (some exchanges in the win trace) ******/ /*fixme:param2*/ {0xa1, 0x21, 0x93, 0x00, 0x00, 0x00, 0x00, 0x10},/* dummy line hight */ {0xa1, 0x21, 0x92, 0x25, 0x00, 0x00, 0x00, 0x10}, /* dummy line low */ {0xa1, 0x21, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x10}, /* EXHCH */ {0xa1, 0x21, 0x2b, 0x00, 0x00, 0x00, 0x00, 0x10}, /* EXHCL */ /* {0xa1, 0x21, 0x02, 0x90, 0x00, 0x00, 0x00, 0x10}, * RED */ /****** (some exchanges in the win trace) ******/ /******!! startsensor KO if changed !!****/ /*fixme: param3*/ {0xa1, 0x21, 0x93, 0x01, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x92, 0xff, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x2a, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x2b, 0xc3, 0x00, 0x00, 0x00, 0x10}, {} }; static const u8 po1030_sensor_init[][8] = { /* the sensor registers are described in m5602/m5602_po1030.h */ {0xa1, 0x6e, 0x3f, 0x20, 0x00, 0x00, 0x00, 0x10}, /* sensor reset */ {DELAY, 0x14, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 20ms */ {0xa1, 0x6e, 0x3f, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x3e, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x04, 0x02, 0xb1, 0x02, 0x39, 0x10}, {0xd1, 0x6e, 0x08, 0x00, 0x01, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x0c, 0x02, 0x7f, 0x01, 0xe0, 0x10}, {0xd1, 0x6e, 0x12, 0x03, 0x02, 0x00, 0x03, 0x10}, {0xd1, 0x6e, 0x16, 0x85, 0x40, 0x4a, 0x40, 0x10}, /* r/g1/b/g2 gains */ {0xc1, 0x6e, 0x1a, 0x00, 0x80, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x1d, 0x08, 0x03, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x23, 0x00, 0xb0, 0x00, 0x94, 0x10}, {0xd1, 0x6e, 0x27, 0x58, 0x00, 0x00, 0x00, 0x10}, {0xb1, 0x6e, 0x2b, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x2d, 0x14, 0x35, 0x61, 0x84, 0x10}, /* gamma corr */ {0xd1, 0x6e, 0x31, 
0xa2, 0xbd, 0xd8, 0xff, 0x10}, {0xd1, 0x6e, 0x35, 0x06, 0x1e, 0x12, 0x02, 0x10}, /* color matrix */ {0xd1, 0x6e, 0x39, 0xaa, 0x53, 0x37, 0xd5, 0x10}, {0xa1, 0x6e, 0x3d, 0xf2, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x3e, 0x00, 0x00, 0x80, 0x03, 0x10}, {0xd1, 0x6e, 0x42, 0x03, 0x00, 0x00, 0x00, 0x10}, {0xc1, 0x6e, 0x46, 0x00, 0x80, 0x80, 0x00, 0x10}, {0xd1, 0x6e, 0x4b, 0x02, 0xef, 0x08, 0xcd, 0x10}, {0xd1, 0x6e, 0x4f, 0x00, 0xd0, 0x00, 0xa0, 0x10}, {0xd1, 0x6e, 0x53, 0x01, 0xaa, 0x01, 0x40, 0x10}, {0xd1, 0x6e, 0x5a, 0x50, 0x04, 0x30, 0x03, 0x10}, /* raw rgb bayer */ {0xa1, 0x6e, 0x5e, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x5f, 0x10, 0x40, 0xff, 0x00, 0x10}, {0xd1, 0x6e, 0x63, 0x40, 0x40, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x67, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x6b, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x6f, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xc1, 0x6e, 0x73, 0x10, 0x80, 0xeb, 0x00, 0x10}, {} }; static const u8 po1030_sensor_param1[][8] = { /* from ms-win traces - these values change with auto gain/expo/wb.. 
*/ {0xa1, 0x6e, 0x1e, 0x03, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x1e, 0x03, 0x00, 0x00, 0x00, 0x10}, /* mean values */ {0xc1, 0x6e, 0x1a, 0x02, 0xd4, 0xa4, 0x00, 0x10}, /* integlines */ {0xa1, 0x6e, 0x15, 0x04, 0x00, 0x00, 0x00, 0x10}, /* global gain */ {0xc1, 0x6e, 0x16, 0x40, 0x40, 0x40, 0x00, 0x10}, /* r/g1/b gains */ {0xa1, 0x6e, 0x1d, 0x08, 0x00, 0x00, 0x00, 0x10}, /* control1 */ {0xa1, 0x6e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x10}, /* frameheight */ {0xa1, 0x6e, 0x07, 0xd5, 0x00, 0x00, 0x00, 0x10}, /* {0xc1, 0x6e, 0x16, 0x49, 0x40, 0x45, 0x00, 0x10}, */ {} }; static const u8 po2030n_sensor_init[][8] = { {0xa1, 0x6e, 0x1e, 0x1a, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x1f, 0x99, 0x00, 0x00, 0x00, 0x10}, {DELAY, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 10ms */ {0xa1, 0x6e, 0x1e, 0x0a, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x1f, 0x19, 0x00, 0x00, 0x00, 0x10}, {DELAY, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 10ms */ {0xa1, 0x6e, 0x20, 0x44, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x04, 0x03, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x05, 0x70, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x07, 0x25, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x08, 0x00, 0xd0, 0x00, 0x08, 0x10}, {0xd1, 0x6e, 0x0c, 0x03, 0x50, 0x01, 0xe8, 0x10}, {0xd1, 0x6e, 0x1d, 0x20, 0x0a, 0x19, 0x44, 0x10}, {0xd1, 0x6e, 0x21, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x25, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x29, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x31, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x35, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x39, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x3d, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x41, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x45, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x49, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x4d, 0x00, 0x00, 0x00, 0xed, 0x10}, {0xd1, 0x6e, 0x51, 0x17, 0x4a, 0x2f, 0xc0, 
0x10}, {0xd1, 0x6e, 0x55, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x59, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x5d, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x61, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x65, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x69, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x6d, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x71, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x75, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x79, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x7d, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x81, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x85, 0x00, 0x00, 0x00, 0x08, 0x10}, {0xd1, 0x6e, 0x89, 0x01, 0xe8, 0x00, 0x01, 0x10}, {0xa1, 0x6e, 0x8d, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x21, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x25, 0x00, 0x00, 0x00, 0x01, 0x10}, {0xd1, 0x6e, 0x29, 0xe6, 0x00, 0xbd, 0x03, 0x10}, {0xd1, 0x6e, 0x2d, 0x41, 0x38, 0x68, 0x40, 0x10}, {0xd1, 0x6e, 0x31, 0x2b, 0x00, 0x36, 0x00, 0x10}, {0xd1, 0x6e, 0x35, 0x30, 0x30, 0x08, 0x00, 0x10}, {0xd1, 0x6e, 0x39, 0x00, 0x00, 0x33, 0x06, 0x10}, {0xb1, 0x6e, 0x3d, 0x06, 0x02, 0x00, 0x00, 0x10}, {} }; static const u8 po2030n_sensor_param1[][8] = { {0xa1, 0x6e, 0x1a, 0x01, 0x00, 0x00, 0x00, 0x10}, {DELAY, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 8ms */ {0xa1, 0x6e, 0x1b, 0xf4, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x15, 0x04, 0x00, 0x00, 0x00, 0x10}, {0xd1, 0x6e, 0x16, 0x50, 0x40, 0x49, 0x40, 0x10}, /*param2*/ {0xa1, 0x6e, 0x1d, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x04, 0x03, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x05, 0x6f, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x06, 0x02, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x07, 0x25, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x6e, 0x15, 0x04, 0x00, 0x00, 0x00, 0x10}, {0xc1, 0x6e, 0x16, 0x52, 0x40, 0x48, 0x00, 0x10}, /*after start*/ {0xa1, 0x6e, 0x15, 0x0f, 0x00, 0x00, 0x00, 0x10}, {DELAY, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 5ms */ {0xa1, 0x6e, 0x1a, 0x05, 
0x00, 0x00, 0x00, 0x10}, {DELAY, 0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 5ms */ {0xa1, 0x6e, 0x1b, 0x53, 0x00, 0x00, 0x00, 0x10}, {} }; static const u8 soi768_sensor_init[][8] = { {0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10}, /* reset */ {DELAY, 0x60, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00}, /* delay 96ms */ {0xa1, 0x21, 0x12, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x13, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x0f, 0x03, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x19, 0x00, 0x00, 0x00, 0x00, 0x10}, {} }; static const u8 soi768_sensor_param1[][8] = { {0xa1, 0x21, 0x10, 0x10, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x2e, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x21, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xb1, 0x21, 0x01, 0x7f, 0x7f, 0x00, 0x00, 0x10}, /* */ /* {0xa1, 0x21, 0x2e, 0x00, 0x00, 0x00, 0x00, 0x10}, */ /* {0xa1, 0x21, 0x2d, 0x25, 0x00, 0x00, 0x00, 0x10}, */ {0xa1, 0x21, 0x2b, 0x00, 0x00, 0x00, 0x00, 0x10}, /* {0xb1, 0x21, 0x2d, 0x00, 0x00, 0x00, 0x00, 0x10}, */ {0xa1, 0x21, 0x02, 0x8d, 0x00, 0x00, 0x00, 0x10}, /* the next sequence should be used for auto gain */ {0xa1, 0x21, 0x00, 0x07, 0x00, 0x00, 0x00, 0x10}, /* global gain ? : 07 - change with 0x15 at the end */ {0xa1, 0x21, 0x10, 0x3f, 0x00, 0x00, 0x00, 0x10}, /* ???? : 063f */ {0xa1, 0x21, 0x04, 0x06, 0x00, 0x00, 0x00, 0x10}, {0xb1, 0x21, 0x2d, 0x00, 0x02, 0x00, 0x00, 0x10}, /* exposure ? 
: 0200 - change with 0x1e at the end */ {} }; static const u8 sp80708_sensor_init[][8] = { {0xa1, 0x18, 0x06, 0xf9, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x09, 0x1f, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x0a, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x0d, 0xc0, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x0c, 0x04, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x0f, 0x0f, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x10, 0x40, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x11, 0x4e, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x12, 0x53, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x15, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x18, 0x18, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x19, 0x18, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x1a, 0x10, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x1b, 0x10, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x1c, 0x28, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x1d, 0x02, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x1e, 0x10, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x26, 0x04, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x27, 0x1e, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x28, 0x5a, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x29, 0x28, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x2a, 0x78, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x2b, 0x01, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x2c, 0xf7, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x2d, 0x2d, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x2e, 0xd5, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x39, 0x42, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x3a, 0x67, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x3b, 0x87, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x3c, 0xa3, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x3d, 0xb0, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x3e, 0xbc, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x3f, 0xc8, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x40, 0xd4, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x41, 0xdf, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x42, 0xea, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x43, 0xf5, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x45, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xa1, 
0x18, 0x46, 0x60, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x47, 0x50, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x48, 0x30, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x49, 0x01, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x4d, 0xae, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x4e, 0x03, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x4f, 0x66, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x50, 0x1c, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x44, 0x10, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x4a, 0x30, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x51, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x52, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x53, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x54, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x55, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x56, 0x80, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x57, 0xe0, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x58, 0xc0, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x59, 0xab, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x5a, 0xa0, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x5b, 0x99, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x5c, 0x90, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x5e, 0x24, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x5f, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x60, 0x00, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x61, 0x73, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x63, 0x42, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x64, 0x42, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x65, 0x42, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x66, 0x24, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x67, 0x24, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x68, 0x08, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x2f, 0xc9, 0x00, 0x00, 0x00, 0x10}, {} }; static const u8 sp80708_sensor_param1[][8] = { {0xa1, 0x18, 0x0c, 0x04, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x0c, 0x04, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x03, 0x01, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x04, 0xa4, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x14, 0x3f, 0x00, 0x00, 0x00, 0x10}, {0xa1, 0x18, 0x5d, 0x80, 0x00, 0x00, 0x00, 0x10}, 
	{0xb1, 0x18, 0x11, 0x40, 0x40, 0x00, 0x00, 0x10},
	{}
};

/* per-sensor initialization sequences, indexed by the SENSOR_* id */
static const u8 (*sensor_init[])[8] = {
[SENSOR_ADCM1700] =	adcm1700_sensor_init,
[SENSOR_GC0307] =	gc0307_sensor_init,
[SENSOR_HV7131R] =	hv7131r_sensor_init,
[SENSOR_MI0360] =	mi0360_sensor_init,
[SENSOR_MI0360B] =	mi0360b_sensor_init,
[SENSOR_MO4000] =	mo4000_sensor_init,
[SENSOR_MT9V111] =	mt9v111_sensor_init,
[SENSOR_OM6802] =	om6802_sensor_init,
[SENSOR_OV7630] =	ov7630_sensor_init,
[SENSOR_OV7648] =	ov7648_sensor_init,
[SENSOR_OV7660] =	ov7660_sensor_init,
[SENSOR_PO1030] =	po1030_sensor_init,
[SENSOR_PO2030N] =	po2030n_sensor_init,
[SENSOR_SOI768] =	soi768_sensor_init,
[SENSOR_SP80708] =	sp80708_sensor_init,
};

/* read <len> bytes to gspca_dev->usb_buf */
/*
 * Reads bridge register <value> with a vendor IN control transfer.
 * A USB error is latched in gspca_dev->usb_err; once latched, this and
 * the other register/i2c helpers become no-ops, so a whole sequence of
 * accesses can be issued and the error checked once at the end.
 */
static void reg_r(struct gspca_dev *gspca_dev,
		  u16 value, int len)
{
	int ret;

	if (gspca_dev->usb_err < 0)
		return;
#ifdef GSPCA_DEBUG
	if (len > USB_BUF_SZ) {
		err("reg_r: buffer overflow");
		return;
	}
#endif
	ret = usb_control_msg(gspca_dev->dev,
			usb_rcvctrlpipe(gspca_dev->dev, 0),
			0,
			USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			value, 0,
			gspca_dev->usb_buf, len,
			500);
	/* NOTE(review): usb_buf[0] is traced even when the transfer failed */
	PDEBUG(D_USBI, "reg_r [%02x] -> %02x",
			value, gspca_dev->usb_buf[0]);
	if (ret < 0) {
		err("reg_r err %d", ret);
		gspca_dev->usb_err = ret;
	}
}

/* write one byte to bridge register <value> */
static void reg_w1(struct gspca_dev *gspca_dev,
		   u16 value,
		   u8 data)
{
	int ret;

	if (gspca_dev->usb_err < 0)
		return;
	PDEBUG(D_USBO, "reg_w1 [%04x] = %02x", value, data);
	gspca_dev->usb_buf[0] = data;
	ret = usb_control_msg(gspca_dev->dev,
			usb_sndctrlpipe(gspca_dev->dev, 0),
			0x08,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			value,
			0,
			gspca_dev->usb_buf, 1,
			500);
	if (ret < 0) {
		err("reg_w1 err %d", ret);
		gspca_dev->usb_err = ret;
	}
}

/* write <len> bytes from <buffer> starting at bridge register <value> */
static void reg_w(struct gspca_dev *gspca_dev,
		  u16 value,
		  const u8 *buffer,
		  int len)
{
	int ret;

	if (gspca_dev->usb_err < 0)
		return;
	PDEBUG(D_USBO, "reg_w [%04x] = %02x %02x ..",
			value, buffer[0], buffer[1]);
#ifdef GSPCA_DEBUG
	if (len > USB_BUF_SZ) {
		err("reg_w: buffer overflow");
		return;
	}
#endif
	memcpy(gspca_dev->usb_buf, buffer, len);
	ret = usb_control_msg(gspca_dev->dev,
			usb_sndctrlpipe(gspca_dev->dev, 0),
			0x08,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			value, 0,
			gspca_dev->usb_buf, len,
			500);
	if (ret < 0) {
		err("reg_w err %d", ret);
		gspca_dev->usb_err = ret;
	}
}

/* I2C write 1 byte */
/*
 * The bridge converts the 8 bytes written at control value 0x08 into an
 * i2c transaction: byte 0 is the command (assumed: speed select in the
 * low bit, byte count in bits 4..6 -- TODO confirm against the SN9C1xx
 * datasheet), byte 1 the slave address, byte 2 the sensor register,
 * bytes 3..6 the data, byte 7 is always 0x10.
 */
static void i2c_w1(struct gspca_dev *gspca_dev, u8 reg, u8 val)
{
	struct sd *sd = (struct sd *) gspca_dev;
	int ret;

	if (gspca_dev->usb_err < 0)
		return;
	PDEBUG(D_USBO, "i2c_w1 [%02x] = %02x", reg, val);
	switch (sd->sensor) {
	case SENSOR_ADCM1700:
	case SENSOR_OM6802:
	case SENSOR_GC0307:		/* i2c command = a0 (100 kHz) */
		gspca_dev->usb_buf[0] = 0x80 | (2 << 4);
		break;
	default:			/* i2c command = a1 (400 kHz) */
		gspca_dev->usb_buf[0] = 0x81 | (2 << 4);
		break;
	}
	gspca_dev->usb_buf[1] = sd->i2c_addr;
	gspca_dev->usb_buf[2] = reg;
	gspca_dev->usb_buf[3] = val;
	gspca_dev->usb_buf[4] = 0;
	gspca_dev->usb_buf[5] = 0;
	gspca_dev->usb_buf[6] = 0;
	gspca_dev->usb_buf[7] = 0x10;
	ret = usb_control_msg(gspca_dev->dev,
			usb_sndctrlpipe(gspca_dev->dev, 0),
			0x08,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			0x08,			/* value = i2c */
			0,
			gspca_dev->usb_buf, 8,
			500);
	if (ret < 0) {
		err("i2c_w1 err %d", ret);
		gspca_dev->usb_err = ret;
	}
}

/* I2C write 8 bytes */
/* Sends one raw 8-byte i2c command block (same layout as in i2c_w1). */
static void i2c_w8(struct gspca_dev *gspca_dev,
		   const u8 *buffer)
{
	int ret;

	if (gspca_dev->usb_err < 0)
		return;
	PDEBUG(D_USBO, "i2c_w8 [%02x] = %02x ..",
			buffer[2], buffer[3]);
	memcpy(gspca_dev->usb_buf, buffer, 8);
	ret = usb_control_msg(gspca_dev->dev,
			usb_sndctrlpipe(gspca_dev->dev, 0),
			0x08,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			0x08, 0,		/* value, index */
			gspca_dev->usb_buf, 8, 500);
	msleep(2);	/* give the bridge time to run the i2c transfer */
	if (ret < 0) {
		err("i2c_w8 err %d", ret);
		gspca_dev->usb_err = ret;
	}
}

/* sensor read 'len' (1..5) bytes in gspca_dev->usb_buf */
static void i2c_r(struct gspca_dev *gspca_dev, u8 reg, int len)
{
	struct sd *sd = (struct sd *) gspca_dev;
	u8 mode[8];

	switch (sd->sensor) {
	case SENSOR_ADCM1700:
	case SENSOR_OM6802:
	case
SENSOR_GC0307:			/* i2c command = a0 (100 kHz) */
		mode[0] = 0x80 | 0x10;
		break;
	default:			/* i2c command = 91 (400 kHz) */
		mode[0] = 0x81 | 0x10;
		break;
	}
	/* first command: latch the sensor register address to read from */
	mode[1] = sd->i2c_addr;
	mode[2] = reg;
	mode[3] = 0;
	mode[4] = 0;
	mode[5] = 0;
	mode[6] = 0;
	mode[7] = 0x10;
	i2c_w8(gspca_dev, mode);
	msleep(2);
	/* second command: read <len> bytes; result lands in bridge reg 0x0a */
	mode[0] = (mode[0] & 0x81) | (len << 4) | 0x02;
	mode[2] = 0;
	i2c_w8(gspca_dev, mode);
	msleep(2);
	reg_r(gspca_dev, 0x0a, 5);
}

/*
 * Send a {}-terminated sequence of 8-byte i2c commands.
 * An entry whose first byte is DELAY is not sent: its second byte is
 * a sleep duration in ms instead.
 */
static void i2c_w_seq(struct gspca_dev *gspca_dev,
			const u8 (*data)[8])
{
	while ((*data)[0] != 0) {
		if ((*data)[0] != DELAY)
			i2c_w8(gspca_dev, *data);
		else
			msleep((*data)[1]);
		data++;
	}
}

/* check the ID of the hv7131 sensor */
/* this sequence is needed because it activates the sensor */
static void hv7131r_probe(struct gspca_dev *gspca_dev)
{
	i2c_w1(gspca_dev, 0x02, 0);		/* sensor wakeup */
	msleep(10);
	reg_w1(gspca_dev, 0x02, 0x66);		/* Gpio on */
	msleep(10);
	i2c_r(gspca_dev, 0, 5);			/* read sensor id */
	if (gspca_dev->usb_buf[0] == 0x02	/* chip ID (02 is R) */
	    && gspca_dev->usb_buf[1] == 0x09
	    && gspca_dev->usb_buf[2] == 0x01) {
		PDEBUG(D_PROBE, "Sensor HV7131R found");
		return;
	}
	warn("Erroneous HV7131R ID 0x%02x 0x%02x 0x%02x",
		gspca_dev->usb_buf[0], gspca_dev->usb_buf[1],
		gspca_dev->usb_buf[2]);
}

/*
 * Distinguish the mi0360 / mi0360b / mt9v111 sensors by reading their
 * 16-bit chip id with the probe sequences below; sd->sensor is updated
 * when the id identifies a different chip than the one assumed.
 */
static void mi0360_probe(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	int i, j;
	u16 val = 0;
	static const u8 probe_tb[][4][8] = {
	    {				/* mi0360 */
		{0xb0, 0x5d, 0x07, 0x00, 0x02, 0x00, 0x00, 0x10},
		{0x90, 0x5d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10},
		{0xa2, 0x5d, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10},
		{0xb0, 0x5d, 0x07, 0x00, 0x00, 0x00, 0x00, 0x10}
	    },
	    {				/* mt9v111 */
		{0xb0, 0x5c, 0x01, 0x00, 0x04, 0x00, 0x00, 0x10},
		{0x90, 0x5c, 0x36, 0x00, 0x00, 0x00, 0x00, 0x10},
		{0xa2, 0x5c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x10},
		{}
	    },
	};

	for (i = 0; i < ARRAY_SIZE(probe_tb); i++) {
		reg_w1(gspca_dev, 0x17, 0x62);
		reg_w1(gspca_dev, 0x01, 0x08);
		for (j = 0; j < 3; j++)
			i2c_w8(gspca_dev, probe_tb[i][j]);
		msleep(2);
		reg_r(gspca_dev, 0x0a, 5);
		val =
(gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4];
		if (probe_tb[i][3][0] != 0)
			i2c_w8(gspca_dev, probe_tb[i][3]);
		reg_w1(gspca_dev, 0x01, 0x29);
		reg_w1(gspca_dev, 0x17, 0x42);
		if (val != 0xffff)	/* got a valid id -> stop probing */
			break;
	}
	if (gspca_dev->usb_err < 0)
		return;
	switch (val) {
	case 0x8221:
		PDEBUG(D_PROBE, "Sensor mi0360b");
		sd->sensor = SENSOR_MI0360B;
		break;
	case 0x823a:
		PDEBUG(D_PROBE, "Sensor mt9v111");
		sd->sensor = SENSOR_MT9V111;
		break;
	case 0x8243:
		PDEBUG(D_PROBE, "Sensor mi0360");
		break;
	default:
		PDEBUG(D_PROBE, "Unknown sensor %04x - forced to mi0360", val);
		break;
	}
}

/*
 * A device declared with an ov7630 may actually carry a soi768
 * (product id 0x7628): read the id at i2c address 0x21 and switch
 * sd->sensor accordingly.
 */
static void ov7630_probe(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	u16 val;

	/* check ov76xx */
	reg_w1(gspca_dev, 0x17, 0x62);
	reg_w1(gspca_dev, 0x01, 0x08);
	sd->i2c_addr = 0x21;
	i2c_r(gspca_dev, 0x0a, 2);
	val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4];
	reg_w1(gspca_dev, 0x01, 0x29);
	reg_w1(gspca_dev, 0x17, 0x42);
	if (gspca_dev->usb_err < 0)
		return;
	if (val == 0x7628) {			/* soi768 */
		sd->sensor = SENSOR_SOI768;
/*fixme: only valid for 0c45:613e?*/
		gspca_dev->cam.input_flags =
				V4L2_IN_ST_VFLIP | V4L2_IN_ST_HFLIP;
		PDEBUG(D_PROBE, "Sensor soi768");
		return;
	}
	PDEBUG(D_PROBE, "Sensor ov%04x", val);
}

/*
 * A device declared with an ov7648 may actually carry a po1030: try
 * the ov76xx id at i2c address 0x21 first, then the po1030 id at 0x6e.
 */
static void ov7648_probe(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;
	u16 val;

	/* check ov76xx */
	reg_w1(gspca_dev, 0x17, 0x62);
	reg_w1(gspca_dev, 0x01, 0x08);
	sd->i2c_addr = 0x21;
	i2c_r(gspca_dev, 0x0a, 2);
	val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4];
	reg_w1(gspca_dev, 0x01, 0x29);
	reg_w1(gspca_dev, 0x17, 0x42);
	if ((val & 0xff00) == 0x7600) {		/* ov76xx */
		PDEBUG(D_PROBE, "Sensor ov%04x", val);
		return;
	}

	/* check po1030 */
	reg_w1(gspca_dev, 0x17, 0x62);
	reg_w1(gspca_dev, 0x01, 0x08);
	sd->i2c_addr = 0x6e;
	i2c_r(gspca_dev, 0x00, 2);
	val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4];
	reg_w1(gspca_dev, 0x01, 0x29);
	reg_w1(gspca_dev, 0x17, 0x42);
	if (gspca_dev->usb_err < 0)
		return;
	if (val == 0x1030) {			/* po1030 */
		PDEBUG(D_PROBE,
"Sensor po1030"); sd->sensor = SENSOR_PO1030; return; } err("Unknown sensor %04x", val); } /* 0c45:6142 sensor may be po2030n, gc0305 or gc0307 */ static void po2030n_probe(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u16 val; /* check gc0307 */ reg_w1(gspca_dev, 0x17, 0x62); reg_w1(gspca_dev, 0x01, 0x08); reg_w1(gspca_dev, 0x02, 0x22); sd->i2c_addr = 0x21; i2c_r(gspca_dev, 0x00, 1); val = gspca_dev->usb_buf[4]; reg_w1(gspca_dev, 0x01, 0x29); /* reset */ reg_w1(gspca_dev, 0x17, 0x42); if (val == 0x99) { /* gc0307 (?) */ PDEBUG(D_PROBE, "Sensor gc0307"); sd->sensor = SENSOR_GC0307; return; } /* check po2030n */ reg_w1(gspca_dev, 0x17, 0x62); reg_w1(gspca_dev, 0x01, 0x0a); sd->i2c_addr = 0x6e; i2c_r(gspca_dev, 0x00, 2); val = (gspca_dev->usb_buf[3] << 8) | gspca_dev->usb_buf[4]; reg_w1(gspca_dev, 0x01, 0x29); reg_w1(gspca_dev, 0x17, 0x42); if (gspca_dev->usb_err < 0) return; if (val == 0x2030) { PDEBUG(D_PROBE, "Sensor po2030n"); /* sd->sensor = SENSOR_PO2030N; */ } else { err("Unknown sensor ID %04x", val); } } /* this function is called at probe time */ static int sd_config(struct gspca_dev *gspca_dev, const struct usb_device_id *id) { struct sd *sd = (struct sd *) gspca_dev; struct cam *cam; sd->bridge = id->driver_info >> 16; sd->sensor = id->driver_info >> 8; sd->flags = id->driver_info; cam = &gspca_dev->cam; if (sd->sensor == SENSOR_ADCM1700) { cam->cam_mode = cif_mode; cam->nmodes = ARRAY_SIZE(cif_mode); } else { cam->cam_mode = vga_mode; cam->nmodes = ARRAY_SIZE(vga_mode); } cam->npkt = 24; /* 24 packets per ISOC message */ cam->ctrls = sd->ctrls; sd->ag_cnt = -1; sd->quality = QUALITY_DEF; /* if USB 1.1, let some bandwidth for the audio device */ if (gspca_dev->audio && gspca_dev->dev->speed < USB_SPEED_HIGH) gspca_dev->nbalt--; INIT_WORK(&sd->work, qual_upd); return 0; } /* this function is called at probe and resume time */ static int sd_init(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; const u8 
*sn9c1xx; u8 regGpio[] = { 0x29, 0x70 }; /* no audio */ u8 regF1; /* setup a selector by bridge */ reg_w1(gspca_dev, 0xf1, 0x01); reg_r(gspca_dev, 0x00, 1); reg_w1(gspca_dev, 0xf1, 0x00); reg_r(gspca_dev, 0x00, 1); /* get sonix chip id */ regF1 = gspca_dev->usb_buf[0]; if (gspca_dev->usb_err < 0) return gspca_dev->usb_err; PDEBUG(D_PROBE, "Sonix chip id: %02x", regF1); if (gspca_dev->audio) regGpio[1] |= 0x04; /* with audio */ switch (sd->bridge) { case BRIDGE_SN9C102P: case BRIDGE_SN9C105: if (regF1 != 0x11) return -ENODEV; break; default: /* case BRIDGE_SN9C110: */ /* case BRIDGE_SN9C120: */ if (regF1 != 0x12) return -ENODEV; } switch (sd->sensor) { case SENSOR_MI0360: mi0360_probe(gspca_dev); break; case SENSOR_OV7630: ov7630_probe(gspca_dev); break; case SENSOR_OV7648: ov7648_probe(gspca_dev); break; case SENSOR_PO2030N: po2030n_probe(gspca_dev); break; } switch (sd->bridge) { case BRIDGE_SN9C102P: reg_w1(gspca_dev, 0x02, regGpio[1]); break; default: reg_w(gspca_dev, 0x01, regGpio, 2); break; } if (sd->sensor == SENSOR_OM6802) sd->ctrls[SHARPNESS].def = 0x10; /* Note we do not disable the sensor clock here (power saving mode), as that also disables the button on the cam. 
*/ reg_w1(gspca_dev, 0xf1, 0x00); /* set the i2c address */ sn9c1xx = sn_tb[sd->sensor]; sd->i2c_addr = sn9c1xx[9]; gspca_dev->ctrl_dis = ctrl_dis[sd->sensor]; if (!(sd->flags & F_ILLUM)) gspca_dev->ctrl_dis |= (1 << ILLUM); return gspca_dev->usb_err; } static u32 setexposure(struct gspca_dev *gspca_dev, u32 expo) { struct sd *sd = (struct sd *) gspca_dev; switch (sd->sensor) { case SENSOR_GC0307: { int a, b; /* expo = 0..255 -> a = 19..43 */ a = 19 + expo * 25 / 256; i2c_w1(gspca_dev, 0x68, a); a -= 12; b = a * a * 4; /* heuristic */ i2c_w1(gspca_dev, 0x03, b >> 8); i2c_w1(gspca_dev, 0x04, b); break; } case SENSOR_HV7131R: { u8 Expodoit[] = { 0xc1, 0x11, 0x25, 0x00, 0x00, 0x00, 0x00, 0x16 }; Expodoit[3] = expo >> 16; Expodoit[4] = expo >> 8; Expodoit[5] = expo; i2c_w8(gspca_dev, Expodoit); break; } case SENSOR_MI0360: case SENSOR_MI0360B: { u8 expoMi[] = /* exposure 0x0635 -> 4 fp/s 0x10 */ { 0xb1, 0x5d, 0x09, 0x00, 0x00, 0x00, 0x00, 0x16 }; static const u8 doit[] = /* update sensor */ { 0xb1, 0x5d, 0x07, 0x00, 0x03, 0x00, 0x00, 0x10 }; static const u8 sensorgo[] = /* sensor on */ { 0xb1, 0x5d, 0x07, 0x00, 0x02, 0x00, 0x00, 0x10 }; if (expo > 0x0635) expo = 0x0635; else if (expo < 0x0001) expo = 0x0001; expoMi[3] = expo >> 8; expoMi[4] = expo; i2c_w8(gspca_dev, expoMi); i2c_w8(gspca_dev, doit); i2c_w8(gspca_dev, sensorgo); break; } case SENSOR_MO4000: { u8 expoMof[] = { 0xa1, 0x21, 0x0f, 0x00, 0x00, 0x00, 0x00, 0x10 }; u8 expoMo10[] = { 0xa1, 0x21, 0x10, 0x00, 0x00, 0x00, 0x00, 0x10 }; static const u8 gainMo[] = { 0xa1, 0x21, 0x00, 0x10, 0x00, 0x00, 0x00, 0x1d }; if (expo > 0x1fff) expo = 0x1fff; else if (expo < 0x0001) expo = 0x0001; expoMof[3] = (expo & 0x03fc) >> 2; i2c_w8(gspca_dev, expoMof); expoMo10[3] = ((expo & 0x1c00) >> 10) | ((expo & 0x0003) << 4); i2c_w8(gspca_dev, expoMo10); i2c_w8(gspca_dev, gainMo); PDEBUG(D_FRAM, "set exposure %d", ((expoMo10[3] & 0x07) << 10) | (expoMof[3] << 2) | ((expoMo10[3] & 0x30) >> 4)); break; } case SENSOR_MT9V111: { u8 
expo_c1[] = { 0xb1, 0x5c, 0x09, 0x00, 0x00, 0x00, 0x00, 0x10 }; if (expo > 0x0390) expo = 0x0390; else if (expo < 0x0060) expo = 0x0060; expo_c1[3] = expo >> 8; expo_c1[4] = expo; i2c_w8(gspca_dev, expo_c1); break; } case SENSOR_OM6802: { u8 gainOm[] = { 0xa0, 0x34, 0xe5, 0x00, 0x00, 0x00, 0x00, 0x10 }; /* preset AGC - works when AutoExpo = off */ if (expo > 0x03ff) expo = 0x03ff; if (expo < 0x0001) expo = 0x0001; gainOm[3] = expo >> 2; i2c_w8(gspca_dev, gainOm); reg_w1(gspca_dev, 0x96, expo >> 5); PDEBUG(D_FRAM, "set exposure %d", gainOm[3]); break; } } return expo; } static void setbrightness(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; unsigned int expo; int brightness; u8 k2; brightness = sd->ctrls[BRIGHTNESS].val; k2 = (brightness - 0x80) >> 2; switch (sd->sensor) { case SENSOR_ADCM1700: if (k2 > 0x1f) k2 = 0; /* only positive Y offset */ break; case SENSOR_HV7131R: expo = brightness << 12; if (expo > 0x002dc6c0) expo = 0x002dc6c0; else if (expo < 0x02a0) expo = 0x02a0; sd->exposure = setexposure(gspca_dev, expo); break; case SENSOR_MI0360: case SENSOR_MO4000: expo = brightness << 4; sd->exposure = setexposure(gspca_dev, expo); break; case SENSOR_MI0360B: expo = brightness << 2; sd->exposure = setexposure(gspca_dev, expo); break; case SENSOR_GC0307: expo = brightness; sd->exposure = setexposure(gspca_dev, expo); return; /* don't set the Y offset */ case SENSOR_MT9V111: expo = brightness << 2; sd->exposure = setexposure(gspca_dev, expo); return; /* don't set the Y offset */ case SENSOR_OM6802: expo = brightness << 2; sd->exposure = setexposure(gspca_dev, expo); k2 = brightness >> 3; break; } reg_w1(gspca_dev, 0x96, k2); /* color matrix Y offset */ } static void setcontrast(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 k2; u8 contrast[6]; k2 = sd->ctrls[CONTRAST].val * 0x30 / (CONTRAST_MAX + 1) + 0x10; /* 10..40 */ contrast[0] = (k2 + 1) / 2; /* red */ contrast[1] = 0; contrast[2] = k2; /* green */ 
contrast[3] = 0; contrast[4] = (k2 + 1) / 5; /* blue */ contrast[5] = 0; reg_w(gspca_dev, 0x84, contrast, sizeof contrast); } static void setcolors(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int i, v, colors; const s16 *uv; u8 reg8a[12]; /* U & V gains */ static const s16 uv_com[6] = { /* same as reg84 in signed decimal */ -24, -38, 64, /* UR UG UB */ 62, -51, -9 /* VR VG VB */ }; static const s16 uv_mi0360b[6] = { -20, -38, 64, /* UR UG UB */ 60, -51, -9 /* VR VG VB */ }; colors = sd->ctrls[COLORS].val; if (sd->sensor == SENSOR_MI0360B) uv = uv_mi0360b; else uv = uv_com; for (i = 0; i < 6; i++) { v = uv[i] * colors / COLORS_DEF; reg8a[i * 2] = v; reg8a[i * 2 + 1] = (v >> 8) & 0x0f; } reg_w(gspca_dev, 0x8a, reg8a, sizeof reg8a); } static void setredblue(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w1(gspca_dev, 0x05, sd->ctrls[RED].val); /* reg_w1(gspca_dev, 0x07, 32); */ reg_w1(gspca_dev, 0x06, sd->ctrls[BLUE].val); } static void setgamma(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int i, val; u8 gamma[17]; const u8 *gamma_base; static const u8 delta[17] = { 0x00, 0x14, 0x1c, 0x1c, 0x1c, 0x1c, 0x1b, 0x1a, 0x18, 0x13, 0x10, 0x0e, 0x08, 0x07, 0x04, 0x02, 0x00 }; switch (sd->sensor) { case SENSOR_ADCM1700: gamma_base = gamma_spec_0; break; case SENSOR_HV7131R: case SENSOR_MI0360B: case SENSOR_MT9V111: gamma_base = gamma_spec_1; break; case SENSOR_GC0307: gamma_base = gamma_spec_2; break; case SENSOR_SP80708: gamma_base = gamma_spec_3; break; default: gamma_base = gamma_def; break; } val = sd->ctrls[GAMMA].val; for (i = 0; i < sizeof gamma; i++) gamma[i] = gamma_base[i] + delta[i] * (val - GAMMA_DEF) / 32; reg_w(gspca_dev, 0x20, gamma, sizeof gamma); } static void setautogain(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (gspca_dev->ctrl_dis & (1 << AUTOGAIN)) return; switch (sd->sensor) { case SENSOR_OV7630: case SENSOR_OV7648: { u8 comb; if 
(sd->sensor == SENSOR_OV7630) comb = 0xc0; else comb = 0xa0; if (sd->ctrls[AUTOGAIN].val) comb |= 0x03; i2c_w1(&sd->gspca_dev, 0x13, comb); return; } } if (sd->ctrls[AUTOGAIN].val) sd->ag_cnt = AG_CNT_START; else sd->ag_cnt = -1; } static void sethvflip(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; u8 comn; switch (sd->sensor) { case SENSOR_HV7131R: comn = 0x18; /* clkdiv = 1, ablcen = 1 */ if (sd->ctrls[VFLIP].val) comn |= 0x01; i2c_w1(gspca_dev, 0x01, comn); /* sctra */ break; case SENSOR_OV7630: comn = 0x02; if (!sd->ctrls[VFLIP].val) comn |= 0x80; i2c_w1(gspca_dev, 0x75, comn); break; case SENSOR_OV7648: comn = 0x06; if (sd->ctrls[VFLIP].val) comn |= 0x80; i2c_w1(gspca_dev, 0x75, comn); break; case SENSOR_PO2030N: /* Reg. 0x1E: Timing Generator Control Register 2 (Tgcontrol2) * (reset value: 0x0A) * bit7: HM: Horizontal Mirror: 0: disable, 1: enable * bit6: VM: Vertical Mirror: 0: disable, 1: enable * bit5: ST: Shutter Selection: 0: electrical, 1: mechanical * bit4: FT: Single Frame Transfer: 0: disable, 1: enable * bit3-0: X */ comn = 0x0a; if (sd->ctrls[HFLIP].val) comn |= 0x80; if (sd->ctrls[VFLIP].val) comn |= 0x40; i2c_w1(&sd->gspca_dev, 0x1e, comn); break; } } static void setsharpness(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; reg_w1(gspca_dev, 0x99, sd->ctrls[SHARPNESS].val); } static void setillum(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (gspca_dev->ctrl_dis & (1 << ILLUM)) return; switch (sd->sensor) { case SENSOR_ADCM1700: reg_w1(gspca_dev, 0x02, /* gpio */ sd->ctrls[ILLUM].val ? 0x64 : 0x60); break; case SENSOR_MT9V111: reg_w1(gspca_dev, 0x02, sd->ctrls[ILLUM].val ? 
0x77 : 0x74);
		/* should have been: */
/*		0x55 : 0x54);	 * 370i */
/*		0x66 : 0x64);	 * Clip */
		break;
	}
}

/*
 * Program the power-line frequency (banding / flicker) filter.
 * ctrls[FREQ].val: 0 = disabled, 1 = 50 Hz, 2 = 60 Hz.
 * The ov7660 uses its COM8/0x3b registers; ov7630/ov7648 use
 * registers 0x2a/0x2b/0x2d instead.
 */
static void setfreq(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	if (gspca_dev->ctrl_dis & (1 << FREQ))
		return;
	if (sd->sensor == SENSOR_OV7660) {
		u8 com8;

		com8 = 0xdf;		/* auto gain/wb/expo */
		switch (sd->ctrls[FREQ].val) {
		case 0: /* Banding filter disabled */
			i2c_w1(gspca_dev, 0x13, com8 | 0x20);
			break;
		case 1: /* 50 hz */
			i2c_w1(gspca_dev, 0x13, com8);
			i2c_w1(gspca_dev, 0x3b, 0x0a);
			break;
		case 2: /* 60 hz */
			i2c_w1(gspca_dev, 0x13, com8);
			i2c_w1(gspca_dev, 0x3b, 0x02);
			break;
		}
	} else {
		u8 reg2a = 0, reg2b = 0, reg2d = 0;

		/* Get reg2a / reg2d base values */
		switch (sd->sensor) {
		case SENSOR_OV7630:
			reg2a = 0x08;
			reg2d = 0x01;
			break;
		case SENSOR_OV7648:
			reg2a = 0x11;
			reg2d = 0x81;
			break;
		}

		switch (sd->ctrls[FREQ].val) {
		case 0: /* Banding filter disabled */
			break;
		case 1: /* 50 hz (filter on and framerate adj) */
			reg2a |= 0x80;
			reg2b = 0xac;
			reg2d |= 0x04;
			break;
		case 2: /* 60 hz (filter on, no framerate adj) */
			reg2a |= 0x80;
			reg2d |= 0x04;
			break;
		}
		i2c_w1(gspca_dev, 0x2a, reg2a);
		i2c_w1(gspca_dev, 0x2b, reg2b);
		i2c_w1(gspca_dev, 0x2d, reg2d);
	}
}

/*
 * Upload both 64-byte JPEG quantization tables (QT0 at value 0x0100,
 * QT1 at 0x0140) and toggle bit 0x40 of bridge register 0x18 --
 * presumably the active-table select; confirm against the bridge
 * datasheet.
 */
static void setjpegqual(struct gspca_dev *gspca_dev)
{
	struct sd *sd = (struct sd *) gspca_dev;

	jpeg_set_qual(sd->jpeg_hdr, sd->quality);
#if USB_BUF_SZ < 64
#error "No room enough in usb_buf for quantization table"
#endif
	memcpy(gspca_dev->usb_buf, &sd->jpeg_hdr[JPEG_QT0_OFFSET], 64);
	usb_control_msg(gspca_dev->dev,
			usb_sndctrlpipe(gspca_dev->dev, 0),
			0x08,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			0x0100, 0,
			gspca_dev->usb_buf, 64, 500);
	memcpy(gspca_dev->usb_buf, &sd->jpeg_hdr[JPEG_QT1_OFFSET], 64);
	usb_control_msg(gspca_dev->dev,
			usb_sndctrlpipe(gspca_dev->dev, 0),
			0x08,
			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_INTERFACE,
			0x0140, 0,
			gspca_dev->usb_buf, 64, 500);
	sd->reg18 ^= 0x40;
	reg_w1(gspca_dev, 0x18, sd->reg18);
}

/* JPEG quality update */
/* This function is executed
from a work queue. */ static void qual_upd(struct work_struct *work) { struct sd *sd = container_of(work, struct sd, work); struct gspca_dev *gspca_dev = &sd->gspca_dev; mutex_lock(&gspca_dev->usb_lock); PDEBUG(D_STREAM, "qual_upd %d%%", sd->quality); setjpegqual(gspca_dev); mutex_unlock(&gspca_dev->usb_lock); } /* -- start the camera -- */ static int sd_start(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int i; u8 reg01, reg17; u8 reg0102[2]; const u8 *sn9c1xx; const u8 (*init)[8]; const u8 *reg9a; int mode; static const u8 reg9a_def[] = {0x00, 0x40, 0x20, 0x00, 0x00, 0x00}; static const u8 reg9a_spec[] = {0x00, 0x40, 0x38, 0x30, 0x00, 0x20}; static const u8 regd4[] = {0x60, 0x00, 0x00}; static const u8 C0[] = { 0x2d, 0x2d, 0x3a, 0x05, 0x04, 0x3f }; static const u8 CA[] = { 0x28, 0xd8, 0x14, 0xec }; static const u8 CA_adcm1700[] = { 0x14, 0xec, 0x0a, 0xf6 }; static const u8 CA_po2030n[] = { 0x1e, 0xe2, 0x14, 0xec }; static const u8 CE[] = { 0x32, 0xdd, 0x2d, 0xdd }; /* MI0360 */ static const u8 CE_gc0307[] = { 0x32, 0xce, 0x2d, 0xd3 }; static const u8 CE_ov76xx[] = { 0x32, 0xdd, 0x32, 0xdd }; static const u8 CE_po2030n[] = { 0x14, 0xe7, 0x1e, 0xdd }; /* create the JPEG header */ jpeg_define(sd->jpeg_hdr, gspca_dev->height, gspca_dev->width, 0x21); /* JPEG 422 */ /* initialize the bridge */ sn9c1xx = sn_tb[sd->sensor]; /* sensor clock already enabled in sd_init */ /* reg_w1(gspca_dev, 0xf1, 0x00); */ reg01 = sn9c1xx[1]; if (sd->flags & F_PDN_INV) reg01 ^= S_PDN_INV; /* power down inverted */ reg_w1(gspca_dev, 0x01, reg01); /* configure gpio */ reg0102[0] = reg01; reg0102[1] = sn9c1xx[2]; if (gspca_dev->audio) reg0102[1] |= 0x04; /* keep the audio connection */ reg_w(gspca_dev, 0x01, reg0102, 2); reg_w(gspca_dev, 0x08, &sn9c1xx[8], 2); reg_w(gspca_dev, 0x17, &sn9c1xx[0x17], 5); switch (sd->sensor) { case SENSOR_GC0307: case SENSOR_OV7660: case SENSOR_PO1030: case SENSOR_PO2030N: case SENSOR_SOI768: case SENSOR_SP80708: reg9a = reg9a_spec; 
break; default: reg9a = reg9a_def; break; } reg_w(gspca_dev, 0x9a, reg9a, 6); reg_w(gspca_dev, 0xd4, regd4, sizeof regd4); reg_w(gspca_dev, 0x03, &sn9c1xx[3], 0x0f); reg17 = sn9c1xx[0x17]; switch (sd->sensor) { case SENSOR_GC0307: msleep(50); /*fixme: is it useful? */ break; case SENSOR_OM6802: msleep(10); reg_w1(gspca_dev, 0x02, 0x73); reg17 |= SEN_CLK_EN; reg_w1(gspca_dev, 0x17, reg17); reg_w1(gspca_dev, 0x01, 0x22); msleep(100); reg01 = SCL_SEL_OD | S_PDN_INV; reg17 &= MCK_SIZE_MASK; reg17 |= 0x04; /* clock / 4 */ break; } reg01 |= SYS_SEL_48M; reg_w1(gspca_dev, 0x01, reg01); reg17 |= SEN_CLK_EN; reg_w1(gspca_dev, 0x17, reg17); reg01 &= ~S_PWR_DN; /* sensor power on */ reg_w1(gspca_dev, 0x01, reg01); reg01 &= ~SYS_SEL_48M; reg_w1(gspca_dev, 0x01, reg01); switch (sd->sensor) { case SENSOR_HV7131R: hv7131r_probe(gspca_dev); /*fixme: is it useful? */ break; case SENSOR_OM6802: msleep(10); reg_w1(gspca_dev, 0x01, reg01); i2c_w8(gspca_dev, om6802_init0[0]); i2c_w8(gspca_dev, om6802_init0[1]); msleep(15); reg_w1(gspca_dev, 0x02, 0x71); msleep(150); break; case SENSOR_SP80708: msleep(100); reg_w1(gspca_dev, 0x02, 0x62); break; } /* initialize the sensor */ i2c_w_seq(gspca_dev, sensor_init[sd->sensor]); reg_w1(gspca_dev, 0x15, sn9c1xx[0x15]); reg_w1(gspca_dev, 0x16, sn9c1xx[0x16]); reg_w1(gspca_dev, 0x12, sn9c1xx[0x12]); reg_w1(gspca_dev, 0x13, sn9c1xx[0x13]); reg_w1(gspca_dev, 0x18, sn9c1xx[0x18]); if (sd->sensor == SENSOR_ADCM1700) { reg_w1(gspca_dev, 0xd2, 0x3a); /* AE_H_SIZE = 116 */ reg_w1(gspca_dev, 0xd3, 0x30); /* AE_V_SIZE = 96 */ } else { reg_w1(gspca_dev, 0xd2, 0x6a); /* AE_H_SIZE = 212 */ reg_w1(gspca_dev, 0xd3, 0x50); /* AE_V_SIZE = 160 */ } reg_w1(gspca_dev, 0xc6, 0x00); reg_w1(gspca_dev, 0xc7, 0x00); if (sd->sensor == SENSOR_ADCM1700) { reg_w1(gspca_dev, 0xc8, 0x2c); /* AW_H_STOP = 352 */ reg_w1(gspca_dev, 0xc9, 0x24); /* AW_V_STOP = 288 */ } else { reg_w1(gspca_dev, 0xc8, 0x50); /* AW_H_STOP = 640 */ reg_w1(gspca_dev, 0xc9, 0x3c); /* AW_V_STOP = 480 */ } 
reg_w1(gspca_dev, 0x18, sn9c1xx[0x18]); switch (sd->sensor) { case SENSOR_OM6802: /* case SENSOR_OV7648: * fixme: sometimes */ break; default: reg17 |= DEF_EN; break; } reg_w1(gspca_dev, 0x17, reg17); reg_w1(gspca_dev, 0x05, 0x00); /* red */ reg_w1(gspca_dev, 0x07, 0x00); /* green */ reg_w1(gspca_dev, 0x06, 0x00); /* blue */ reg_w1(gspca_dev, 0x14, sn9c1xx[0x14]); setgamma(gspca_dev); /*fixme: 8 times with all zeroes and 1 or 2 times with normal values */ for (i = 0; i < 8; i++) reg_w(gspca_dev, 0x84, reg84, sizeof reg84); switch (sd->sensor) { case SENSOR_ADCM1700: case SENSOR_OV7660: case SENSOR_SP80708: reg_w1(gspca_dev, 0x9a, 0x05); break; case SENSOR_GC0307: case SENSOR_MT9V111: case SENSOR_MI0360B: reg_w1(gspca_dev, 0x9a, 0x07); break; case SENSOR_OV7630: case SENSOR_OV7648: reg_w1(gspca_dev, 0x9a, 0x0a); break; case SENSOR_PO2030N: case SENSOR_SOI768: reg_w1(gspca_dev, 0x9a, 0x06); break; default: reg_w1(gspca_dev, 0x9a, 0x08); break; } setsharpness(gspca_dev); reg_w(gspca_dev, 0x84, reg84, sizeof reg84); reg_w1(gspca_dev, 0x05, 0x20); /* red */ reg_w1(gspca_dev, 0x07, 0x20); /* green */ reg_w1(gspca_dev, 0x06, 0x20); /* blue */ init = NULL; mode = gspca_dev->cam.cam_mode[gspca_dev->curr_mode].priv; reg01 |= SYS_SEL_48M | V_TX_EN; reg17 &= ~MCK_SIZE_MASK; reg17 |= 0x02; /* clock / 2 */ switch (sd->sensor) { case SENSOR_ADCM1700: init = adcm1700_sensor_param1; break; case SENSOR_GC0307: init = gc0307_sensor_param1; break; case SENSOR_HV7131R: case SENSOR_MI0360: if (mode) reg01 |= SYS_SEL_48M; /* 320x240: clk 48Mhz */ else reg01 &= ~SYS_SEL_48M; /* 640x480: clk 24Mhz */ reg17 &= ~MCK_SIZE_MASK; reg17 |= 0x01; /* clock / 1 */ break; case SENSOR_MI0360B: init = mi0360b_sensor_param1; break; case SENSOR_MO4000: if (mode) { /* if 320x240 */ reg01 &= ~SYS_SEL_48M; /* clk 24Mz */ reg17 &= ~MCK_SIZE_MASK; reg17 |= 0x01; /* clock / 1 */ } break; case SENSOR_MT9V111: init = mt9v111_sensor_param1; break; case SENSOR_OM6802: init = om6802_sensor_param1; if (!mode) { /* 
if 640x480 */ reg17 &= ~MCK_SIZE_MASK; reg17 |= 0x04; /* clock / 4 */ } break; case SENSOR_OV7630: init = ov7630_sensor_param1; break; case SENSOR_OV7648: init = ov7648_sensor_param1; reg17 &= ~MCK_SIZE_MASK; reg17 |= 0x01; /* clock / 1 */ break; case SENSOR_OV7660: init = ov7660_sensor_param1; break; case SENSOR_PO1030: init = po1030_sensor_param1; break; case SENSOR_PO2030N: init = po2030n_sensor_param1; break; case SENSOR_SOI768: init = soi768_sensor_param1; break; case SENSOR_SP80708: init = sp80708_sensor_param1; break; } /* more sensor initialization - param1 */ if (init != NULL) { i2c_w_seq(gspca_dev, init); /* init = NULL; */ } reg_w(gspca_dev, 0xc0, C0, 6); switch (sd->sensor) { case SENSOR_ADCM1700: case SENSOR_GC0307: case SENSOR_SOI768: reg_w(gspca_dev, 0xca, CA_adcm1700, 4); break; case SENSOR_PO2030N: reg_w(gspca_dev, 0xca, CA_po2030n, 4); break; default: reg_w(gspca_dev, 0xca, CA, 4); break; } switch (sd->sensor) { case SENSOR_ADCM1700: case SENSOR_OV7630: case SENSOR_OV7648: case SENSOR_OV7660: case SENSOR_SOI768: reg_w(gspca_dev, 0xce, CE_ov76xx, 4); break; case SENSOR_GC0307: reg_w(gspca_dev, 0xce, CE_gc0307, 4); break; case SENSOR_PO2030N: reg_w(gspca_dev, 0xce, CE_po2030n, 4); break; default: reg_w(gspca_dev, 0xce, CE, 4); /* ?? 
{0x1e, 0xdd, 0x2d, 0xe7} */ break; } /* here change size mode 0 -> VGA; 1 -> CIF */ sd->reg18 = sn9c1xx[0x18] | (mode << 4) | 0x40; reg_w1(gspca_dev, 0x18, sd->reg18); setjpegqual(gspca_dev); reg_w1(gspca_dev, 0x17, reg17); reg_w1(gspca_dev, 0x01, reg01); sd->reg01 = reg01; sd->reg17 = reg17; sethvflip(gspca_dev); setbrightness(gspca_dev); setcontrast(gspca_dev); setcolors(gspca_dev); setautogain(gspca_dev); setfreq(gspca_dev); sd->pktsz = sd->npkt = 0; sd->nchg = sd->short_mark = 0; sd->work_thread = create_singlethread_workqueue(MODULE_NAME); return gspca_dev->usb_err; } static void sd_stopN(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; static const u8 stophv7131[] = { 0xa1, 0x11, 0x02, 0x09, 0x00, 0x00, 0x00, 0x10 }; static const u8 stopmi0360[] = { 0xb1, 0x5d, 0x07, 0x00, 0x00, 0x00, 0x00, 0x10 }; static const u8 stopov7648[] = { 0xa1, 0x21, 0x76, 0x20, 0x00, 0x00, 0x00, 0x10 }; static const u8 stopsoi768[] = { 0xa1, 0x21, 0x12, 0x80, 0x00, 0x00, 0x00, 0x10 }; u8 reg01; u8 reg17; reg01 = sd->reg01; reg17 = sd->reg17 & ~SEN_CLK_EN; switch (sd->sensor) { case SENSOR_ADCM1700: case SENSOR_GC0307: case SENSOR_PO2030N: case SENSOR_SP80708: reg01 |= LED; reg_w1(gspca_dev, 0x01, reg01); reg01 &= ~(LED | V_TX_EN); reg_w1(gspca_dev, 0x01, reg01); /* reg_w1(gspca_dev, 0x02, 0x??); * LED off ? */ break; case SENSOR_HV7131R: reg01 &= ~V_TX_EN; reg_w1(gspca_dev, 0x01, reg01); i2c_w8(gspca_dev, stophv7131); break; case SENSOR_MI0360: case SENSOR_MI0360B: reg01 &= ~V_TX_EN; reg_w1(gspca_dev, 0x01, reg01); /* reg_w1(gspca_dev, 0x02, 0x40); * LED off ? 
*/ i2c_w8(gspca_dev, stopmi0360); break; case SENSOR_MT9V111: case SENSOR_OM6802: case SENSOR_PO1030: reg01 &= ~V_TX_EN; reg_w1(gspca_dev, 0x01, reg01); break; case SENSOR_OV7630: case SENSOR_OV7648: reg01 &= ~V_TX_EN; reg_w1(gspca_dev, 0x01, reg01); i2c_w8(gspca_dev, stopov7648); break; case SENSOR_OV7660: reg01 &= ~V_TX_EN; reg_w1(gspca_dev, 0x01, reg01); break; case SENSOR_SOI768: i2c_w8(gspca_dev, stopsoi768); break; } reg01 |= SCL_SEL_OD; reg_w1(gspca_dev, 0x01, reg01); reg01 |= S_PWR_DN; /* sensor power down */ reg_w1(gspca_dev, 0x01, reg01); reg_w1(gspca_dev, 0x17, reg17); reg01 &= ~SYS_SEL_48M; /* clock 24MHz */ reg_w1(gspca_dev, 0x01, reg01); reg01 |= LED; reg_w1(gspca_dev, 0x01, reg01); /* Don't disable sensor clock as that disables the button on the cam */ /* reg_w1(gspca_dev, 0xf1, 0x01); */ } /* called on streamoff with alt==0 and on disconnect */ /* the usb_lock is held at entry - restore on exit */ static void sd_stop0(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; if (sd->work_thread != NULL) { mutex_unlock(&gspca_dev->usb_lock); destroy_workqueue(sd->work_thread); mutex_lock(&gspca_dev->usb_lock); sd->work_thread = NULL; } } static void do_autogain(struct gspca_dev *gspca_dev) { struct sd *sd = (struct sd *) gspca_dev; int delta; int expotimes; u8 luma_mean = 130; u8 luma_delta = 20; /* Thanks S., without your advice, autobright should not work :) */ if (sd->ag_cnt < 0) return; if (--sd->ag_cnt >= 0) return; sd->ag_cnt = AG_CNT_START; delta = atomic_read(&sd->avg_lum); PDEBUG(D_FRAM, "mean lum %d", delta); if (delta < luma_mean - luma_delta || delta > luma_mean + luma_delta) { switch (sd->sensor) { case SENSOR_GC0307: expotimes = sd->exposure; expotimes += (luma_mean - delta) >> 6; if (expotimes < 0) expotimes = 0; sd->exposure = setexposure(gspca_dev, (unsigned int) expotimes); break; case SENSOR_HV7131R: expotimes = sd->exposure >> 8; expotimes += (luma_mean - delta) >> 4; if (expotimes < 0) expotimes = 0; sd->exposure = 
setexposure(gspca_dev, (unsigned int) (expotimes << 8)); break; case SENSOR_OM6802: case SENSOR_MT9V111: expotimes = sd->exposure; expotimes += (luma_mean - delta) >> 2; if (expotimes < 0) expotimes = 0; sd->exposure = setexposure(gspca_dev, (unsigned int) expotimes); setredblue(gspca_dev); break; default: /* case SENSOR_MO4000: */ /* case SENSOR_MI0360: */ /* case SENSOR_MI0360B: */ expotimes = sd->exposure; expotimes += (luma_mean - delta) >> 6; if (expotimes < 0) expotimes = 0; sd->exposure = setexposure(gspca_dev, (unsigned int) expotimes); setredblue(gspca_dev); break; } } } /* set the average luminosity from an isoc marker */ static void set_lum(struct sd *sd, u8 *data) { int avg_lum; /* w0 w1 w2 * w3 w4 w5 * w6 w7 w8 */ avg_lum = (data[27] << 8) + data[28] /* w3 */ + (data[31] << 8) + data[32] /* w5 */ + (data[23] << 8) + data[24] /* w1 */ + (data[35] << 8) + data[36] /* w7 */ + (data[29] << 10) + (data[30] << 2); /* w4 * 4 */ avg_lum >>= 10; atomic_set(&sd->avg_lum, avg_lum); } /* scan the URB packets */ /* This function is run at interrupt level. */ static void sd_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* isoc packet */ int len) /* iso packet length */ { struct sd *sd = (struct sd *) gspca_dev; int i, new_qual; /* * A frame ends on the marker * ff ff 00 c4 c4 96 .. * which is 62 bytes long and is followed by various information * including statuses and luminosity. * * A marker may be splitted on two packets. * * The 6th byte of a marker contains the bits: * 0x08: USB full * 0xc0: frame sequence * When the bit 'USB full' is set, the frame must be discarded; * this is also the case when the 2 bytes before the marker are * not the JPEG end of frame ('ff d9'). 
*/ /*fixme: assumption about the following code: * - there can be only one marker in a packet */ /* skip the remaining bytes of a short marker */ i = sd->short_mark; if (i != 0) { sd->short_mark = 0; if (i < 0 /* if 'ff' at end of previous packet */ && data[0] == 0xff && data[1] == 0x00) goto marker_found; if (data[0] == 0xff && data[1] == 0xff) { i = 0; goto marker_found; } len -= i; if (len <= 0) return; data += i; } /* count the packets and their size */ sd->npkt++; sd->pktsz += len; /* search backwards if there is a marker in the packet */ for (i = len - 1; --i >= 0; ) { if (data[i] != 0xff) { i--; continue; } if (data[i + 1] == 0xff) { /* (there may be 'ff ff' inside a marker) */ if (i + 2 >= len || data[i + 2] == 0x00) goto marker_found; } } /* no marker found */ /* add the JPEG header if first fragment */ if (data[len - 1] == 0xff) sd->short_mark = -1; if (gspca_dev->last_packet_type == LAST_PACKET) gspca_frame_add(gspca_dev, FIRST_PACKET, sd->jpeg_hdr, JPEG_HDR_SZ); gspca_frame_add(gspca_dev, INTER_PACKET, data, len); return; /* marker found */ /* if some error, discard the frame and decrease the quality */ marker_found: new_qual = 0; if (i > 2) { if (data[i - 2] != 0xff || data[i - 1] != 0xd9) { gspca_dev->last_packet_type = DISCARD_PACKET; new_qual = -3; } } else if (i + 6 < len) { if (data[i + 6] & 0x08) { gspca_dev->last_packet_type = DISCARD_PACKET; new_qual = -5; } } gspca_frame_add(gspca_dev, LAST_PACKET, data, i); /* compute the filling rate and a new JPEG quality */ if (new_qual == 0) { int r; r = (sd->pktsz * 100) / (sd->npkt * gspca_dev->urb[0]->iso_frame_desc[0].length); if (r >= 85) new_qual = -3; else if (r < 75) new_qual = 2; } if (new_qual != 0) { sd->nchg += new_qual; if (sd->nchg < -6 || sd->nchg >= 12) { sd->nchg = 0; new_qual += sd->quality; if (new_qual < QUALITY_MIN) new_qual = QUALITY_MIN; else if (new_qual > QUALITY_MAX) new_qual = QUALITY_MAX; if (new_qual != sd->quality) { sd->quality = new_qual; queue_work(sd->work_thread, 
&sd->work); } } } else { sd->nchg = 0; } sd->pktsz = sd->npkt = 0; /* if the marker is smaller than 62 bytes, * memorize the number of bytes to skip in the next packet */ if (i + 62 > len) { /* no more usable data */ sd->short_mark = i + 62 - len; return; } if (sd->ag_cnt >= 0) set_lum(sd, data + i); /* if more data, start a new frame */ i += 62; if (i < len) { data += i; len -= i; gspca_frame_add(gspca_dev, FIRST_PACKET, sd->jpeg_hdr, JPEG_HDR_SZ); gspca_frame_add(gspca_dev, INTER_PACKET, data, len); } } static int sd_get_jcomp(struct gspca_dev *gspca_dev, struct v4l2_jpegcompression *jcomp) { struct sd *sd = (struct sd *) gspca_dev; memset(jcomp, 0, sizeof *jcomp); jcomp->quality = sd->quality; jcomp->jpeg_markers = V4L2_JPEG_MARKER_DHT | V4L2_JPEG_MARKER_DQT; return 0; } static int sd_querymenu(struct gspca_dev *gspca_dev, struct v4l2_querymenu *menu) { switch (menu->id) { case V4L2_CID_POWER_LINE_FREQUENCY: switch (menu->index) { case 0: /* V4L2_CID_POWER_LINE_FREQUENCY_DISABLED */ strcpy((char *) menu->name, "NoFliker"); return 0; case 1: /* V4L2_CID_POWER_LINE_FREQUENCY_50HZ */ strcpy((char *) menu->name, "50 Hz"); return 0; case 2: /* V4L2_CID_POWER_LINE_FREQUENCY_60HZ */ strcpy((char *) menu->name, "60 Hz"); return 0; } break; } return -EINVAL; } #if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE) static int sd_int_pkt_scan(struct gspca_dev *gspca_dev, u8 *data, /* interrupt packet data */ int len) /* interrupt packet length */ { int ret = -EINVAL; if (len == 1 && data[0] == 1) { input_report_key(gspca_dev->input_dev, KEY_CAMERA, 1); input_sync(gspca_dev->input_dev); input_report_key(gspca_dev->input_dev, KEY_CAMERA, 0); input_sync(gspca_dev->input_dev); ret = 0; } return ret; } #endif /* sub-driver description */ static const struct sd_desc sd_desc = { .name = MODULE_NAME, .ctrls = sd_ctrls, .nctrls = NCTRLS, .config = sd_config, .init = sd_init, .start = sd_start, .stopN = sd_stopN, .stop0 = sd_stop0, .pkt_scan = sd_pkt_scan, .dq_callback = 
do_autogain, .get_jcomp = sd_get_jcomp, .querymenu = sd_querymenu, #if defined(CONFIG_INPUT) || defined(CONFIG_INPUT_MODULE) .int_pkt_scan = sd_int_pkt_scan, #endif }; /* -- module initialisation -- */ #define BS(bridge, sensor) \ .driver_info = (BRIDGE_ ## bridge << 16) \ | (SENSOR_ ## sensor << 8) #define BSF(bridge, sensor, flags) \ .driver_info = (BRIDGE_ ## bridge << 16) \ | (SENSOR_ ## sensor << 8) \ | (flags) static const struct usb_device_id device_table[] = { {USB_DEVICE(0x0458, 0x7025), BS(SN9C120, MI0360)}, {USB_DEVICE(0x0458, 0x702e), BS(SN9C120, OV7660)}, {USB_DEVICE(0x045e, 0x00f5), BSF(SN9C105, OV7660, F_PDN_INV)}, {USB_DEVICE(0x045e, 0x00f7), BSF(SN9C105, OV7660, F_PDN_INV)}, {USB_DEVICE(0x0471, 0x0327), BS(SN9C105, MI0360)}, {USB_DEVICE(0x0471, 0x0328), BS(SN9C105, MI0360)}, {USB_DEVICE(0x0471, 0x0330), BS(SN9C105, MI0360)}, {USB_DEVICE(0x06f8, 0x3004), BS(SN9C105, OV7660)}, {USB_DEVICE(0x06f8, 0x3008), BS(SN9C105, OV7660)}, /* {USB_DEVICE(0x0c45, 0x603a), BS(SN9C102P, OV7648)}, */ {USB_DEVICE(0x0c45, 0x6040), BS(SN9C102P, HV7131R)}, /* {USB_DEVICE(0x0c45, 0x607a), BS(SN9C102P, OV7648)}, */ /* {USB_DEVICE(0x0c45, 0x607b), BS(SN9C102P, OV7660)}, */ {USB_DEVICE(0x0c45, 0x607c), BS(SN9C102P, HV7131R)}, /* {USB_DEVICE(0x0c45, 0x607e), BS(SN9C102P, OV7630)}, */ {USB_DEVICE(0x0c45, 0x60c0), BSF(SN9C105, MI0360, F_ILLUM)}, /* or MT9V111 */ /* {USB_DEVICE(0x0c45, 0x60c2), BS(SN9C105, P1030xC)}, */ /* {USB_DEVICE(0x0c45, 0x60c8), BS(SN9C105, OM6802)}, */ /* {USB_DEVICE(0x0c45, 0x60cc), BS(SN9C105, HV7131GP)}, */ {USB_DEVICE(0x0c45, 0x60ce), BS(SN9C105, SP80708)}, {USB_DEVICE(0x0c45, 0x60ec), BS(SN9C105, MO4000)}, /* {USB_DEVICE(0x0c45, 0x60ef), BS(SN9C105, ICM105C)}, */ /* {USB_DEVICE(0x0c45, 0x60fa), BS(SN9C105, OV7648)}, */ /* {USB_DEVICE(0x0c45, 0x60f2), BS(SN9C105, OV7660)}, */ {USB_DEVICE(0x0c45, 0x60fb), BS(SN9C105, OV7660)}, {USB_DEVICE(0x0c45, 0x60fc), BS(SN9C105, HV7131R)}, {USB_DEVICE(0x0c45, 0x60fe), BS(SN9C105, OV7630)}, {USB_DEVICE(0x0c45, 
0x6100), BS(SN9C120, MI0360)}, /*sn9c128*/ {USB_DEVICE(0x0c45, 0x6102), BS(SN9C120, PO2030N)}, /* /GC0305*/ /* {USB_DEVICE(0x0c45, 0x6108), BS(SN9C120, OM6802)}, */ {USB_DEVICE(0x0c45, 0x610a), BS(SN9C120, OV7648)}, /*sn9c128*/ {USB_DEVICE(0x0c45, 0x610b), BS(SN9C120, OV7660)}, /*sn9c128*/ {USB_DEVICE(0x0c45, 0x610c), BS(SN9C120, HV7131R)}, /*sn9c128*/ {USB_DEVICE(0x0c45, 0x610e), BS(SN9C120, OV7630)}, /*sn9c128*/ /* {USB_DEVICE(0x0c45, 0x610f), BS(SN9C120, S5K53BEB)}, */ /* {USB_DEVICE(0x0c45, 0x6122), BS(SN9C110, ICM105C)}, */ /* {USB_DEVICE(0x0c45, 0x6123), BS(SN9C110, SanyoCCD)}, */ {USB_DEVICE(0x0c45, 0x6128), BS(SN9C120, OM6802)}, /*sn9c325?*/ /*bw600.inf:*/ {USB_DEVICE(0x0c45, 0x612a), BS(SN9C120, OV7648)}, /*sn9c325?*/ {USB_DEVICE(0x0c45, 0x612b), BS(SN9C110, ADCM1700)}, {USB_DEVICE(0x0c45, 0x612c), BS(SN9C110, MO4000)}, {USB_DEVICE(0x0c45, 0x612e), BS(SN9C110, OV7630)}, /* {USB_DEVICE(0x0c45, 0x612f), BS(SN9C110, ICM105C)}, */ {USB_DEVICE(0x0c45, 0x6130), BS(SN9C120, MI0360)}, /* or MT9V111 / MI0360B */ /* {USB_DEVICE(0x0c45, 0x6132), BS(SN9C120, OV7670)}, */ {USB_DEVICE(0x0c45, 0x6138), BS(SN9C120, MO4000)}, {USB_DEVICE(0x0c45, 0x613a), BS(SN9C120, OV7648)}, {USB_DEVICE(0x0c45, 0x613b), BS(SN9C120, OV7660)}, {USB_DEVICE(0x0c45, 0x613c), BS(SN9C120, HV7131R)}, {USB_DEVICE(0x0c45, 0x613e), BS(SN9C120, OV7630)}, {USB_DEVICE(0x0c45, 0x6142), BS(SN9C120, PO2030N)}, /*sn9c120b*/ /* or GC0305 / GC0307 */ {USB_DEVICE(0x0c45, 0x6143), BS(SN9C120, SP80708)}, /*sn9c120b*/ {USB_DEVICE(0x0c45, 0x6148), BS(SN9C120, OM6802)}, /*sn9c120b*/ {USB_DEVICE(0x0c45, 0x614a), BSF(SN9C120, ADCM1700, F_ILLUM)}, /* {USB_DEVICE(0x0c45, 0x614c), BS(SN9C120, GC0306)}, */ /*sn9c120b*/ {} }; MODULE_DEVICE_TABLE(usb, device_table); /* -- device connect -- */ static int sd_probe(struct usb_interface *intf, const struct usb_device_id *id) { return gspca_dev_probe(intf, id, &sd_desc, sizeof(struct sd), THIS_MODULE); } static struct usb_driver sd_driver = { .name = MODULE_NAME, .id_table = 
device_table, .probe = sd_probe, .disconnect = gspca_disconnect, #ifdef CONFIG_PM .suspend = gspca_suspend, .resume = gspca_resume, #endif }; /* -- module insert / remove -- */ static int __init sd_mod_init(void) { return usb_register(&sd_driver); } static void __exit sd_mod_exit(void) { usb_deregister(&sd_driver); } module_init(sd_mod_init); module_exit(sd_mod_exit);
gpl-2.0
janztec/empc-arpi-linux
arch/x86/kernel/sys_x86_64.c
2898
4770
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>
#include <asm/ia32.h>
#include <asm/syscalls.h>

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 *
 * Returns the alignment mask to apply to mmap addresses, or 0 when no
 * extra alignment is wanted (feature disabled for this bitness, or the
 * task is not randomizing its address space).
 */
static unsigned long get_align_mask(void)
{
	/* handle 32- and 64-bit case with a single conditional */
	if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
		return 0;

	/* only align when the layout is randomized anyway */
	if (!(current->flags & PF_RANDOMIZE))
		return 0;

	return va_align.mask;
}

/* Round @addr up to the next boundary demanded by get_align_mask(). */
unsigned long align_vdso_addr(unsigned long addr)
{
	unsigned long align_mask = get_align_mask();
	return (addr + align_mask) & ~align_mask;
}

/*
 * Parse the "align_va_addr=" kernel command-line option:
 * "32"/"64" enable alignment for that bitness only, "on" for both,
 * "off" disables it.  Returns 1 when the option was consumed, 0 on an
 * unrecognized value.
 */
static int __init control_va_addr_alignment(char *str)
{
	/* guard against enabling this on other CPU families */
	if (va_align.flags < 0)
		return 1;

	if (*str == 0)
		return 1;

	if (*str == '=')
		str++;

	if (!strcmp(str, "32"))
		va_align.flags = ALIGN_VA_32;
	else if (!strcmp(str, "64"))
		va_align.flags = ALIGN_VA_64;
	else if (!strcmp(str, "off"))
		va_align.flags = 0;
	else if (!strcmp(str, "on"))
		va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
	else
		return 0;

	return 1;
}
__setup("align_va_addr", control_va_addr_alignment);

/*
 * The x86-64 mmap(2) entry point: reject a byte offset that is not
 * page-aligned, then hand off to the generic sys_mmap_pgoff() with the
 * offset converted to pages.
 */
SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
		unsigned long, prot, unsigned long, flags,
		unsigned long, fd, unsigned long, off)
{
	long error;
	error = -EINVAL;
	if (off & ~PAGE_MASK)
		goto out;

	error = sys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
out:
	return error;
}

/*
 * Compute the [begin, end) search window for an unmapped-area lookup.
 * MAP_32BIT requests (from a 64-bit task) are confined to the 1GB
 * window at [1GB, 2GB); everything else searches from the legacy mmap
 * base up to TASK_SIZE.
 */
static void find_start_end(unsigned long flags, unsigned long *begin,
			   unsigned long *end)
{
	if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
		unsigned long new_begin;
		/* This is usually needed to map code in small model,
		   so it needs to be in the first 31bit. Limit it to
		   that.  This means we need to move the unmapped base
		   down for this case. This can give conflicts with the
		   heap, but we assume that glibc malloc knows how to
		   fall back to mmap. Give it 1GB of playground for
		   now. -AK */
		*begin = 0x40000000;
		*end = 0x80000000;
		if (current->flags & PF_RANDOMIZE) {
			/* randomize the start within the first 32MB of the window */
			new_begin = randomize_range(*begin, *begin + 0x02000000, 0);
			if (new_begin)
				*begin = new_begin;
		}
	} else {
		*begin = current->mm->mmap_legacy_base;
		*end = TASK_SIZE;
	}
}

/*
 * Bottom-up unmapped-area search.  Honors MAP_FIXED, then tries the
 * caller's hint if it leaves a large enough hole, and finally falls
 * back to a generic vm_unmapped_area() scan over the window returned
 * by find_start_end().  File mappings get the AMD F15h I$ anti-alias
 * alignment via get_align_mask().
 */
unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info;
	unsigned long begin, end;

	if (flags & MAP_FIXED)
		return addr;

	find_start_end(flags, &begin, &end);

	if (len > end)
		return -ENOMEM;

	/* try the caller-supplied hint first */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (end - len >= addr &&
		    (!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = begin;
	info.high_limit = end;
	info.align_mask = filp ? get_align_mask() : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	return vm_unmapped_area(&info);
}

/*
 * Top-down unmapped-area search (the default modern layout): scan
 * downward from mm->mmap_base.  MAP_32BIT mappings and search failures
 * fall back to the bottom-up arch_get_unmapped_area() above.
 */
unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
			  const unsigned long len, const unsigned long pgoff,
			  const unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	unsigned long addr = addr0;
	struct vm_unmapped_area_info info;

	/* requested length too big for entire address space */
	if (len > TASK_SIZE)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		return addr;

	/* for MAP_32BIT mappings we force the legacy mmap base */
	if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
		goto bottomup;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr &&
				(!vma || addr + len <= vma->vm_start))
			return addr;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = filp ? get_align_mask() : 0;
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	/* a page-aligned result is a successful allocation, not an errno */
	if (!(addr & ~PAGE_MASK))
		return addr;
	VM_BUG_ON(addr != -ENOMEM);

bottomup:
	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}
gpl-2.0
PRJosh/kernel_samsung_mondrianwifi
mm/swap.c
3154
21023
/* * linux/mm/swap.c * * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds */ /* * This file contains the default values for the operation of the * Linux VM subsystem. Fine-tuning documentation can be found in * Documentation/sysctl/vm.txt. * Started 18.12.91 * Swap aging added 23.2.95, Stephen Tweedie. * Buffermem limits added 12.3.98, Rik van Riel. */ #include <linux/mm.h> #include <linux/sched.h> #include <linux/kernel_stat.h> #include <linux/swap.h> #include <linux/mman.h> #include <linux/pagemap.h> #include <linux/pagevec.h> #include <linux/init.h> #include <linux/export.h> #include <linux/mm_inline.h> #include <linux/percpu_counter.h> #include <linux/percpu.h> #include <linux/cpu.h> #include <linux/notifier.h> #include <linux/backing-dev.h> #include <linux/memcontrol.h> #include <linux/gfp.h> #include "internal.h" /* How many pages do we try to swap or page in/out together? */ int page_cluster; static DEFINE_PER_CPU(struct pagevec[NR_LRU_LISTS], lru_add_pvecs); static DEFINE_PER_CPU(struct pagevec, lru_rotate_pvecs); static DEFINE_PER_CPU(struct pagevec, lru_deactivate_pvecs); /* * This path almost never happens for VM activity - pages are normally * freed via pagevecs. But it gets used by networking. 
*/ static void __page_cache_release(struct page *page) { if (PageLRU(page)) { unsigned long flags; struct zone *zone = page_zone(page); spin_lock_irqsave(&zone->lru_lock, flags); VM_BUG_ON(!PageLRU(page)); __ClearPageLRU(page); del_page_from_lru_list(zone, page, page_off_lru(page)); spin_unlock_irqrestore(&zone->lru_lock, flags); } } static void __put_single_page(struct page *page) { __page_cache_release(page); free_hot_cold_page(page, 0); } static void __put_compound_page(struct page *page) { compound_page_dtor *dtor; __page_cache_release(page); dtor = get_compound_page_dtor(page); (*dtor)(page); } static void put_compound_page(struct page *page) { if (unlikely(PageTail(page))) { /* __split_huge_page_refcount can run under us */ struct page *page_head = compound_trans_head(page); if (likely(page != page_head && get_page_unless_zero(page_head))) { unsigned long flags; /* * page_head wasn't a dangling pointer but it * may not be a head page anymore by the time * we obtain the lock. That is ok as long as it * can't be freed from under us. */ flags = compound_lock_irqsave(page_head); if (unlikely(!PageTail(page))) { /* __split_huge_page_refcount run before us */ compound_unlock_irqrestore(page_head, flags); VM_BUG_ON(PageHead(page_head)); if (put_page_testzero(page_head)) __put_single_page(page_head); out_put_single: if (put_page_testzero(page)) __put_single_page(page); return; } VM_BUG_ON(page_head != page->first_page); /* * We can release the refcount taken by * get_page_unless_zero() now that * __split_huge_page_refcount() is blocked on * the compound_lock. 
*/ if (put_page_testzero(page_head)) VM_BUG_ON(1); /* __split_huge_page_refcount will wait now */ VM_BUG_ON(page_mapcount(page) <= 0); atomic_dec(&page->_mapcount); VM_BUG_ON(atomic_read(&page_head->_count) <= 0); VM_BUG_ON(atomic_read(&page->_count) != 0); compound_unlock_irqrestore(page_head, flags); if (put_page_testzero(page_head)) { if (PageHead(page_head)) __put_compound_page(page_head); else __put_single_page(page_head); } } else { /* page_head is a dangling pointer */ VM_BUG_ON(PageTail(page)); goto out_put_single; } } else if (put_page_testzero(page)) { if (PageHead(page)) __put_compound_page(page); else __put_single_page(page); } } void put_page(struct page *page) { if (unlikely(PageCompound(page))) put_compound_page(page); else if (put_page_testzero(page)) __put_single_page(page); } EXPORT_SYMBOL(put_page); /* * This function is exported but must not be called by anything other * than get_page(). It implements the slow path of get_page(). */ bool __get_page_tail(struct page *page) { /* * This takes care of get_page() if run on a tail page * returned by one of the get_user_pages/follow_page variants. * get_user_pages/follow_page itself doesn't need the compound * lock because it runs __get_page_tail_foll() under the * proper PT lock that already serializes against * split_huge_page(). */ unsigned long flags; bool got = false; struct page *page_head = compound_trans_head(page); if (likely(page != page_head && get_page_unless_zero(page_head))) { /* * page_head wasn't a dangling pointer but it * may not be a head page anymore by the time * we obtain the lock. That is ok as long as it * can't be freed from under us. 
*/ flags = compound_lock_irqsave(page_head); /* here __split_huge_page_refcount won't run anymore */ if (likely(PageTail(page))) { __get_page_tail_foll(page, false); got = true; } compound_unlock_irqrestore(page_head, flags); if (unlikely(!got)) put_page(page_head); } return got; } EXPORT_SYMBOL(__get_page_tail); /** * put_pages_list() - release a list of pages * @pages: list of pages threaded on page->lru * * Release a list of pages which are strung together on page.lru. Currently * used by read_cache_pages() and related error recovery code. */ void put_pages_list(struct list_head *pages) { while (!list_empty(pages)) { struct page *victim; victim = list_entry(pages->prev, struct page, lru); list_del(&victim->lru); page_cache_release(victim); } } EXPORT_SYMBOL(put_pages_list); static void pagevec_lru_move_fn(struct pagevec *pvec, void (*move_fn)(struct page *page, void *arg), void *arg) { int i; struct zone *zone = NULL; unsigned long flags = 0; for (i = 0; i < pagevec_count(pvec); i++) { struct page *page = pvec->pages[i]; struct zone *pagezone = page_zone(page); if (pagezone != zone) { if (zone) spin_unlock_irqrestore(&zone->lru_lock, flags); zone = pagezone; spin_lock_irqsave(&zone->lru_lock, flags); } (*move_fn)(page, arg); } if (zone) spin_unlock_irqrestore(&zone->lru_lock, flags); release_pages(pvec->pages, pvec->nr, pvec->cold); pagevec_reinit(pvec); } static void pagevec_move_tail_fn(struct page *page, void *arg) { int *pgmoved = arg; if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { enum lru_list lru = page_lru_base_type(page); struct lruvec *lruvec; lruvec = mem_cgroup_lru_move_lists(page_zone(page), page, lru, lru); list_move_tail(&page->lru, &lruvec->lists[lru]); (*pgmoved)++; } } /* * pagevec_move_tail() must be called with IRQ disabled. * Otherwise this may cause nasty races. 
*/ static void pagevec_move_tail(struct pagevec *pvec) { int pgmoved = 0; pagevec_lru_move_fn(pvec, pagevec_move_tail_fn, &pgmoved); __count_vm_events(PGROTATED, pgmoved); } /* * Writeback is about to end against a page which has been marked for immediate * reclaim. If it still appears to be reclaimable, move it to the tail of the * inactive list. */ void rotate_reclaimable_page(struct page *page) { if (!PageLocked(page) && !PageDirty(page) && !PageActive(page) && !PageUnevictable(page) && PageLRU(page)) { struct pagevec *pvec; unsigned long flags; page_cache_get(page); local_irq_save(flags); pvec = &__get_cpu_var(lru_rotate_pvecs); if (!pagevec_add(pvec, page)) pagevec_move_tail(pvec); local_irq_restore(flags); } } static void update_page_reclaim_stat(struct zone *zone, struct page *page, int file, int rotated) { struct zone_reclaim_stat *reclaim_stat = &zone->reclaim_stat; struct zone_reclaim_stat *memcg_reclaim_stat; memcg_reclaim_stat = mem_cgroup_get_reclaim_stat_from_page(page); reclaim_stat->recent_scanned[file]++; if (rotated) reclaim_stat->recent_rotated[file]++; if (!memcg_reclaim_stat) return; memcg_reclaim_stat->recent_scanned[file]++; if (rotated) memcg_reclaim_stat->recent_rotated[file]++; } static void __activate_page(struct page *page, void *arg) { struct zone *zone = page_zone(page); if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { int file = page_is_file_cache(page); int lru = page_lru_base_type(page); del_page_from_lru_list(zone, page, lru); SetPageActive(page); lru += LRU_ACTIVE; add_page_to_lru_list(zone, page, lru); __count_vm_event(PGACTIVATE); update_page_reclaim_stat(zone, page, file, 1); } } #ifdef CONFIG_SMP static DEFINE_PER_CPU(struct pagevec, activate_page_pvecs); static void activate_page_drain(int cpu) { struct pagevec *pvec = &per_cpu(activate_page_pvecs, cpu); if (pagevec_count(pvec)) pagevec_lru_move_fn(pvec, __activate_page, NULL); } void activate_page(struct page *page) { if (PageLRU(page) && !PageActive(page) 
&& !PageUnevictable(page)) { struct pagevec *pvec = &get_cpu_var(activate_page_pvecs); page_cache_get(page); if (!pagevec_add(pvec, page)) pagevec_lru_move_fn(pvec, __activate_page, NULL); put_cpu_var(activate_page_pvecs); } } #else static inline void activate_page_drain(int cpu) { } void activate_page(struct page *page) { struct zone *zone = page_zone(page); spin_lock_irq(&zone->lru_lock); __activate_page(page, NULL); spin_unlock_irq(&zone->lru_lock); } #endif /* * Mark a page as having seen activity. * * inactive,unreferenced -> inactive,referenced * inactive,referenced -> active,unreferenced * active,unreferenced -> active,referenced */ void mark_page_accessed(struct page *page) { if (!PageActive(page) && !PageUnevictable(page) && PageReferenced(page) && PageLRU(page)) { activate_page(page); ClearPageReferenced(page); } else if (!PageReferenced(page)) { SetPageReferenced(page); } } EXPORT_SYMBOL(mark_page_accessed); void __lru_cache_add(struct page *page, enum lru_list lru) { struct pagevec *pvec = &get_cpu_var(lru_add_pvecs)[lru]; page_cache_get(page); if (!pagevec_add(pvec, page)) __pagevec_lru_add(pvec, lru); put_cpu_var(lru_add_pvecs); } EXPORT_SYMBOL(__lru_cache_add); /** * lru_cache_add_lru - add a page to a page list * @page: the page to be added to the LRU. * @lru: the LRU list to which the page is added. */ void lru_cache_add_lru(struct page *page, enum lru_list lru) { if (PageActive(page)) { VM_BUG_ON(PageUnevictable(page)); ClearPageActive(page); } else if (PageUnevictable(page)) { VM_BUG_ON(PageActive(page)); ClearPageUnevictable(page); } VM_BUG_ON(PageLRU(page) || PageActive(page) || PageUnevictable(page)); __lru_cache_add(page, lru); } /** * add_page_to_unevictable_list - add a page to the unevictable list * @page: the page to be added to the unevictable list * * Add page directly to its zone's unevictable list. To avoid races with * tasks that might be making the page evictable, through eg. 
munlock, * munmap or exit, while it's not on the lru, we want to add the page * while it's locked or otherwise "invisible" to other tasks. This is * difficult to do when using the pagevec cache, so bypass that. */ void add_page_to_unevictable_list(struct page *page) { struct zone *zone = page_zone(page); spin_lock_irq(&zone->lru_lock); SetPageUnevictable(page); SetPageLRU(page); add_page_to_lru_list(zone, page, LRU_UNEVICTABLE); spin_unlock_irq(&zone->lru_lock); } /* * If the page can not be invalidated, it is moved to the * inactive list to speed up its reclaim. It is moved to the * head of the list, rather than the tail, to give the flusher * threads some time to write it out, as this is much more * effective than the single-page writeout from reclaim. * * If the page isn't page_mapped and dirty/writeback, the page * could reclaim asap using PG_reclaim. * * 1. active, mapped page -> none * 2. active, dirty/writeback page -> inactive, head, PG_reclaim * 3. inactive, mapped page -> none * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim * 5. inactive, clean -> inactive, tail * 6. Others -> none * * In 4, why it moves inactive's head, the VM expects the page would * be write it out by flusher threads as this is much more effective * than the single-page writeout from reclaim. */ static void lru_deactivate_fn(struct page *page, void *arg) { int lru, file; bool active; struct zone *zone = page_zone(page); if (!PageLRU(page)) return; if (PageUnevictable(page)) return; /* Some processes are using the page */ if (page_mapped(page)) return; active = PageActive(page); file = page_is_file_cache(page); lru = page_lru_base_type(page); del_page_from_lru_list(zone, page, lru + active); ClearPageActive(page); ClearPageReferenced(page); add_page_to_lru_list(zone, page, lru); if (PageWriteback(page) || PageDirty(page)) { /* * PG_reclaim could be raced with end_page_writeback * It can make readahead confusing. 
But race window * is _really_ small and it's non-critical problem. */ SetPageReclaim(page); } else { struct lruvec *lruvec; /* * The page's writeback ends up during pagevec * We moves tha page into tail of inactive. */ lruvec = mem_cgroup_lru_move_lists(zone, page, lru, lru); list_move_tail(&page->lru, &lruvec->lists[lru]); __count_vm_event(PGROTATED); } if (active) __count_vm_event(PGDEACTIVATE); update_page_reclaim_stat(zone, page, file, 0); } /* * Drain pages out of the cpu's pagevecs. * Either "cpu" is the current CPU, and preemption has already been * disabled; or "cpu" is being hot-unplugged, and is already dead. */ void lru_add_drain_cpu(int cpu) { struct pagevec *pvecs = per_cpu(lru_add_pvecs, cpu); struct pagevec *pvec; int lru; for_each_lru(lru) { pvec = &pvecs[lru - LRU_BASE]; if (pagevec_count(pvec)) __pagevec_lru_add(pvec, lru); } pvec = &per_cpu(lru_rotate_pvecs, cpu); if (pagevec_count(pvec)) { unsigned long flags; /* No harm done if a racing interrupt already did this */ local_irq_save(flags); pagevec_move_tail(pvec); local_irq_restore(flags); } pvec = &per_cpu(lru_deactivate_pvecs, cpu); if (pagevec_count(pvec)) pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); activate_page_drain(cpu); } /** * deactivate_page - forcefully deactivate a page * @page: page to deactivate * * This function hints the VM that @page is a good reclaim candidate, * for example if its invalidation fails due to the page being dirty * or under writeback. */ void deactivate_page(struct page *page) { /* * In a workload with many unevictable page such as mprotect, unevictable * page deactivation for accelerating reclaim is pointless. 
*/ if (PageUnevictable(page)) return; if (likely(get_page_unless_zero(page))) { struct pagevec *pvec = &get_cpu_var(lru_deactivate_pvecs); if (!pagevec_add(pvec, page)) pagevec_lru_move_fn(pvec, lru_deactivate_fn, NULL); put_cpu_var(lru_deactivate_pvecs); } } void lru_add_drain(void) { lru_add_drain_cpu(get_cpu()); put_cpu(); } static void lru_add_drain_per_cpu(struct work_struct *dummy) { lru_add_drain(); } /* * Returns 0 for success */ int lru_add_drain_all(void) { return schedule_on_each_cpu(lru_add_drain_per_cpu); } /* * Batched page_cache_release(). Decrement the reference count on all the * passed pages. If it fell to zero then remove the page from the LRU and * free it. * * Avoid taking zone->lru_lock if possible, but if it is taken, retain it * for the remainder of the operation. * * The locking in this function is against shrink_inactive_list(): we recheck * the page count inside the lock to see whether shrink_inactive_list() * grabbed the page via the LRU. If it did, give up: shrink_inactive_list() * will free it. 
*/ void release_pages(struct page **pages, int nr, int cold) { int i; LIST_HEAD(pages_to_free); struct zone *zone = NULL; unsigned long uninitialized_var(flags); for (i = 0; i < nr; i++) { struct page *page = pages[i]; if (unlikely(PageCompound(page))) { if (zone) { spin_unlock_irqrestore(&zone->lru_lock, flags); zone = NULL; } put_compound_page(page); continue; } if (!put_page_testzero(page)) continue; if (PageLRU(page)) { struct zone *pagezone = page_zone(page); if (pagezone != zone) { if (zone) spin_unlock_irqrestore(&zone->lru_lock, flags); zone = pagezone; spin_lock_irqsave(&zone->lru_lock, flags); } VM_BUG_ON(!PageLRU(page)); __ClearPageLRU(page); del_page_from_lru_list(zone, page, page_off_lru(page)); } list_add(&page->lru, &pages_to_free); } if (zone) spin_unlock_irqrestore(&zone->lru_lock, flags); free_hot_cold_page_list(&pages_to_free, cold); } EXPORT_SYMBOL(release_pages); /* * The pages which we're about to release may be in the deferred lru-addition * queues. That would prevent them from really being freed right now. That's * OK from a correctness point of view but is inefficient - those pages may be * cache-warm and we want to give them back to the page allocator ASAP. * * So __pagevec_release() will drain those queues here. __pagevec_lru_add() * and __pagevec_lru_add_active() call release_pages() directly to avoid * mutual recursion. 
*/ void __pagevec_release(struct pagevec *pvec) { lru_add_drain(); release_pages(pvec->pages, pagevec_count(pvec), pvec->cold); pagevec_reinit(pvec); } EXPORT_SYMBOL(__pagevec_release); #ifdef CONFIG_TRANSPARENT_HUGEPAGE /* used by __split_huge_page_refcount() */ void lru_add_page_tail(struct zone* zone, struct page *page, struct page *page_tail) { int uninitialized_var(active); enum lru_list lru; const int file = 0; VM_BUG_ON(!PageHead(page)); VM_BUG_ON(PageCompound(page_tail)); VM_BUG_ON(PageLRU(page_tail)); VM_BUG_ON(NR_CPUS != 1 && !spin_is_locked(&zone->lru_lock)); SetPageLRU(page_tail); if (page_evictable(page_tail, NULL)) { if (PageActive(page)) { SetPageActive(page_tail); active = 1; lru = LRU_ACTIVE_ANON; } else { active = 0; lru = LRU_INACTIVE_ANON; } } else { SetPageUnevictable(page_tail); lru = LRU_UNEVICTABLE; } if (likely(PageLRU(page))) list_add_tail(&page_tail->lru, &page->lru); else { struct list_head *list_head; /* * Head page has not yet been counted, as an hpage, * so we must account for each subpage individually. * * Use the standard add function to put page_tail on the list, * but then correct its position so they all end up in order. */ add_page_to_lru_list(zone, page_tail, lru); list_head = page_tail->lru.prev; list_move_tail(&page_tail->lru, list_head); } if (!PageUnevictable(page)) update_page_reclaim_stat(zone, page_tail, file, active); } #endif /* CONFIG_TRANSPARENT_HUGEPAGE */ static void __pagevec_lru_add_fn(struct page *page, void *arg) { enum lru_list lru = (enum lru_list)arg; struct zone *zone = page_zone(page); int file = is_file_lru(lru); int active = is_active_lru(lru); VM_BUG_ON(PageActive(page)); VM_BUG_ON(PageUnevictable(page)); VM_BUG_ON(PageLRU(page)); SetPageLRU(page); if (active) SetPageActive(page); add_page_to_lru_list(zone, page, lru); update_page_reclaim_stat(zone, page, file, active); } /* * Add the passed pages to the LRU, then drop the caller's refcount * on them. Reinitialises the caller's pagevec. 
*/ void __pagevec_lru_add(struct pagevec *pvec, enum lru_list lru) { VM_BUG_ON(is_unevictable_lru(lru)); pagevec_lru_move_fn(pvec, __pagevec_lru_add_fn, (void *)lru); } EXPORT_SYMBOL(__pagevec_lru_add); /** * pagevec_lookup - gang pagecache lookup * @pvec: Where the resulting pages are placed * @mapping: The address_space to search * @start: The starting page index * @nr_pages: The maximum number of pages * * pagevec_lookup() will search for and return a group of up to @nr_pages pages * in the mapping. The pages are placed in @pvec. pagevec_lookup() takes a * reference against the pages in @pvec. * * The search returns a group of mapping-contiguous pages with ascending * indexes. There may be holes in the indices due to not-present pages. * * pagevec_lookup() returns the number of pages which were found. */ unsigned pagevec_lookup(struct pagevec *pvec, struct address_space *mapping, pgoff_t start, unsigned nr_pages) { pvec->nr = find_get_pages(mapping, start, nr_pages, pvec->pages); return pagevec_count(pvec); } EXPORT_SYMBOL(pagevec_lookup); unsigned pagevec_lookup_tag(struct pagevec *pvec, struct address_space *mapping, pgoff_t *index, int tag, unsigned nr_pages) { pvec->nr = find_get_pages_tag(mapping, index, tag, nr_pages, pvec->pages); return pagevec_count(pvec); } EXPORT_SYMBOL(pagevec_lookup_tag); /* * Perform any setup for the swap system */ void __init swap_setup(void) { unsigned long megs = totalram_pages >> (20 - PAGE_SHIFT); #ifdef CONFIG_SWAP bdi_init(swapper_space.backing_dev_info); #endif /* Use a smaller cluster for small-memory machines */ if (megs < 16) page_cluster = 2; else page_cluster = 3; /* * Right now other parts of the system means that we * _really_ don't want to cluster much more */ }
gpl-2.0
armani-dev/android_kernel_xiaomi_armani_OLD
fs/hpfs/dir.c
4178
8787
/* * linux/fs/hpfs/dir.c * * Mikulas Patocka (mikulas@artax.karlin.mff.cuni.cz), 1998-1999 * * directory VFS functions */ #include <linux/slab.h> #include "hpfs_fn.h" static int hpfs_dir_release(struct inode *inode, struct file *filp) { hpfs_lock(inode->i_sb); hpfs_del_pos(inode, &filp->f_pos); /*hpfs_write_if_changed(inode);*/ hpfs_unlock(inode->i_sb); return 0; } /* This is slow, but it's not used often */ static loff_t hpfs_dir_lseek(struct file *filp, loff_t off, int whence) { loff_t new_off = off + (whence == 1 ? filp->f_pos : 0); loff_t pos; struct quad_buffer_head qbh; struct inode *i = filp->f_path.dentry->d_inode; struct hpfs_inode_info *hpfs_inode = hpfs_i(i); struct super_block *s = i->i_sb; /* Somebody else will have to figure out what to do here */ if (whence == SEEK_DATA || whence == SEEK_HOLE) return -EINVAL; hpfs_lock(s); /*printk("dir lseek\n");*/ if (new_off == 0 || new_off == 1 || new_off == 11 || new_off == 12 || new_off == 13) goto ok; mutex_lock(&i->i_mutex); pos = ((loff_t) hpfs_de_as_down_as_possible(s, hpfs_inode->i_dno) << 4) + 1; while (pos != new_off) { if (map_pos_dirent(i, &pos, &qbh)) hpfs_brelse4(&qbh); else goto fail; if (pos == 12) goto fail; } mutex_unlock(&i->i_mutex); ok: hpfs_unlock(s); return filp->f_pos = new_off; fail: mutex_unlock(&i->i_mutex); /*printk("illegal lseek: %016llx\n", new_off);*/ hpfs_unlock(s); return -ESPIPE; } static int hpfs_readdir(struct file *filp, void *dirent, filldir_t filldir) { struct inode *inode = filp->f_path.dentry->d_inode; struct hpfs_inode_info *hpfs_inode = hpfs_i(inode); struct quad_buffer_head qbh; struct hpfs_dirent *de; int lc; long old_pos; unsigned char *tempname; int c1, c2 = 0; int ret = 0; hpfs_lock(inode->i_sb); if (hpfs_sb(inode->i_sb)->sb_chk) { if (hpfs_chk_sectors(inode->i_sb, inode->i_ino, 1, "dir_fnode")) { ret = -EFSERROR; goto out; } if (hpfs_chk_sectors(inode->i_sb, hpfs_inode->i_dno, 4, "dir_dnode")) { ret = -EFSERROR; goto out; } } if (hpfs_sb(inode->i_sb)->sb_chk >= 2) 
{ struct buffer_head *bh; struct fnode *fno; int e = 0; if (!(fno = hpfs_map_fnode(inode->i_sb, inode->i_ino, &bh))) { ret = -EIOERROR; goto out; } if (!fno->dirflag) { e = 1; hpfs_error(inode->i_sb, "not a directory, fnode %08lx", (unsigned long)inode->i_ino); } if (hpfs_inode->i_dno != le32_to_cpu(fno->u.external[0].disk_secno)) { e = 1; hpfs_error(inode->i_sb, "corrupted inode: i_dno == %08x, fnode -> dnode == %08x", hpfs_inode->i_dno, le32_to_cpu(fno->u.external[0].disk_secno)); } brelse(bh); if (e) { ret = -EFSERROR; goto out; } } lc = hpfs_sb(inode->i_sb)->sb_lowercase; if (filp->f_pos == 12) { /* diff -r requires this (note, that diff -r */ filp->f_pos = 13; /* also fails on msdos filesystem in 2.0) */ goto out; } if (filp->f_pos == 13) { ret = -ENOENT; goto out; } while (1) { again: /* This won't work when cycle is longer than number of dirents accepted by filldir, but what can I do? maybe killall -9 ls helps */ if (hpfs_sb(inode->i_sb)->sb_chk) if (hpfs_stop_cycles(inode->i_sb, filp->f_pos, &c1, &c2, "hpfs_readdir")) { ret = -EFSERROR; goto out; } if (filp->f_pos == 12) goto out; if (filp->f_pos == 3 || filp->f_pos == 4 || filp->f_pos == 5) { printk("HPFS: warning: pos==%d\n",(int)filp->f_pos); goto out; } if (filp->f_pos == 0) { if (filldir(dirent, ".", 1, filp->f_pos, inode->i_ino, DT_DIR) < 0) goto out; filp->f_pos = 11; } if (filp->f_pos == 11) { if (filldir(dirent, "..", 2, filp->f_pos, hpfs_inode->i_parent_dir, DT_DIR) < 0) goto out; filp->f_pos = 1; } if (filp->f_pos == 1) { filp->f_pos = ((loff_t) hpfs_de_as_down_as_possible(inode->i_sb, hpfs_inode->i_dno) << 4) + 1; hpfs_add_pos(inode, &filp->f_pos); filp->f_version = inode->i_version; } old_pos = filp->f_pos; if (!(de = map_pos_dirent(inode, &filp->f_pos, &qbh))) { ret = -EIOERROR; goto out; } if (de->first || de->last) { if (hpfs_sb(inode->i_sb)->sb_chk) { if (de->first && !de->last && (de->namelen != 2 || de ->name[0] != 1 || de->name[1] != 1)) hpfs_error(inode->i_sb, "hpfs_readdir: bad ^A^A 
entry; pos = %08lx", old_pos); if (de->last && (de->namelen != 1 || de ->name[0] != 255)) hpfs_error(inode->i_sb, "hpfs_readdir: bad \\377 entry; pos = %08lx", old_pos); } hpfs_brelse4(&qbh); goto again; } tempname = hpfs_translate_name(inode->i_sb, de->name, de->namelen, lc, de->not_8x3); if (filldir(dirent, tempname, de->namelen, old_pos, le32_to_cpu(de->fnode), DT_UNKNOWN) < 0) { filp->f_pos = old_pos; if (tempname != de->name) kfree(tempname); hpfs_brelse4(&qbh); goto out; } if (tempname != de->name) kfree(tempname); hpfs_brelse4(&qbh); } out: hpfs_unlock(inode->i_sb); return ret; } /* * lookup. Search the specified directory for the specified name, set * *result to the corresponding inode. * * lookup uses the inode number to tell read_inode whether it is reading * the inode of a directory or a file -- file ino's are odd, directory * ino's are even. read_inode avoids i/o for file inodes; everything * needed is up here in the directory. (And file fnodes are out in * the boondocks.) * * - M.P.: this is over, sometimes we've got to read file's fnode for eas * inode numbers are just fnode sector numbers; iget lock is used * to tell read_inode to read fnode or not. */ struct dentry *hpfs_lookup(struct inode *dir, struct dentry *dentry, struct nameidata *nd) { const unsigned char *name = dentry->d_name.name; unsigned len = dentry->d_name.len; struct quad_buffer_head qbh; struct hpfs_dirent *de; ino_t ino; int err; struct inode *result = NULL; struct hpfs_inode_info *hpfs_result; hpfs_lock(dir->i_sb); if ((err = hpfs_chk_name(name, &len))) { if (err == -ENAMETOOLONG) { hpfs_unlock(dir->i_sb); return ERR_PTR(-ENAMETOOLONG); } goto end_add; } /* * '.' and '..' will never be passed here. */ de = map_dirent(dir, hpfs_i(dir)->i_dno, name, len, NULL, &qbh); /* * This is not really a bailout, just means file not found. */ if (!de) goto end; /* * Get inode number, what we're after. */ ino = le32_to_cpu(de->fnode); /* * Go find or make an inode. 
*/ result = iget_locked(dir->i_sb, ino); if (!result) { hpfs_error(dir->i_sb, "hpfs_lookup: can't get inode"); goto bail1; } if (result->i_state & I_NEW) { hpfs_init_inode(result); if (de->directory) hpfs_read_inode(result); else if (le32_to_cpu(de->ea_size) && hpfs_sb(dir->i_sb)->sb_eas) hpfs_read_inode(result); else { result->i_mode |= S_IFREG; result->i_mode &= ~0111; result->i_op = &hpfs_file_iops; result->i_fop = &hpfs_file_ops; set_nlink(result, 1); } unlock_new_inode(result); } hpfs_result = hpfs_i(result); if (!de->directory) hpfs_result->i_parent_dir = dir->i_ino; if (de->has_acl || de->has_xtd_perm) if (!(dir->i_sb->s_flags & MS_RDONLY)) { hpfs_error(result->i_sb, "ACLs or XPERM found. This is probably HPFS386. This driver doesn't support it now. Send me some info on these structures"); goto bail1; } /* * Fill in the info from the directory if this is a newly created * inode. */ if (!result->i_ctime.tv_sec) { if (!(result->i_ctime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->creation_date)))) result->i_ctime.tv_sec = 1; result->i_ctime.tv_nsec = 0; result->i_mtime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->write_date)); result->i_mtime.tv_nsec = 0; result->i_atime.tv_sec = local_to_gmt(dir->i_sb, le32_to_cpu(de->read_date)); result->i_atime.tv_nsec = 0; hpfs_result->i_ea_size = le32_to_cpu(de->ea_size); if (!hpfs_result->i_ea_mode && de->read_only) result->i_mode &= ~0222; if (!de->directory) { if (result->i_size == -1) { result->i_size = le32_to_cpu(de->file_size); result->i_data.a_ops = &hpfs_aops; hpfs_i(result)->mmu_private = result->i_size; /* * i_blocks should count the fnode and any anodes. * We count 1 for the fnode and don't bother about * anodes -- the disk heads are on the directory band * and we want them to stay there. */ result->i_blocks = 1 + ((result->i_size + 511) >> 9); } } } hpfs_brelse4(&qbh); /* * Made it. */ end: end_add: hpfs_unlock(dir->i_sb); d_add(dentry, result); return NULL; /* * Didn't. 
*/ bail1: hpfs_brelse4(&qbh); /*bail:*/ hpfs_unlock(dir->i_sb); return ERR_PTR(-ENOENT); } const struct file_operations hpfs_dir_ops = { .llseek = hpfs_dir_lseek, .read = generic_read_dir, .readdir = hpfs_readdir, .release = hpfs_dir_release, .fsync = hpfs_file_fsync, };
gpl-2.0
yank555-lu/Hammerhead-3.4-kitkat-mr2
sound/soc/fsl/mpc5200_dma.c
4946
14971
/* * Freescale MPC5200 PSC DMA * ALSA SoC Platform driver * * Copyright (C) 2008 Secret Lab Technologies Ltd. * Copyright (C) 2009 Jon Smirl, Digispeaker */ #include <linux/module.h> #include <linux/of_device.h> #include <linux/dma-mapping.h> #include <linux/slab.h> #include <linux/of_platform.h> #include <sound/soc.h> #include <sysdev/bestcomm/bestcomm.h> #include <sysdev/bestcomm/gen_bd.h> #include <asm/mpc52xx_psc.h> #include "mpc5200_dma.h" /* * Interrupt handlers */ static irqreturn_t psc_dma_status_irq(int irq, void *_psc_dma) { struct psc_dma *psc_dma = _psc_dma; struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs; u16 isr; isr = in_be16(&regs->mpc52xx_psc_isr); /* Playback underrun error */ if (psc_dma->playback.active && (isr & MPC52xx_PSC_IMR_TXEMP)) psc_dma->stats.underrun_count++; /* Capture overrun error */ if (psc_dma->capture.active && (isr & MPC52xx_PSC_IMR_ORERR)) psc_dma->stats.overrun_count++; out_8(&regs->command, MPC52xx_PSC_RST_ERR_STAT); return IRQ_HANDLED; } /** * psc_dma_bcom_enqueue_next_buffer - Enqueue another audio buffer * @s: pointer to stream private data structure * * Enqueues another audio period buffer into the bestcomm queue. * * Note: The routine must only be called when there is space available in * the queue. 
Otherwise the enqueue will fail and the audio ring buffer * will get out of sync */ static void psc_dma_bcom_enqueue_next_buffer(struct psc_dma_stream *s) { struct bcom_bd *bd; /* Prepare and enqueue the next buffer descriptor */ bd = bcom_prepare_next_buffer(s->bcom_task); bd->status = s->period_bytes; bd->data[0] = s->runtime->dma_addr + (s->period_next * s->period_bytes); bcom_submit_next_buffer(s->bcom_task, NULL); /* Update for next period */ s->period_next = (s->period_next + 1) % s->runtime->periods; } /* Bestcomm DMA irq handler */ static irqreturn_t psc_dma_bcom_irq(int irq, void *_psc_dma_stream) { struct psc_dma_stream *s = _psc_dma_stream; spin_lock(&s->psc_dma->lock); /* For each finished period, dequeue the completed period buffer * and enqueue a new one in it's place. */ while (bcom_buffer_done(s->bcom_task)) { bcom_retrieve_buffer(s->bcom_task, NULL, NULL); s->period_current = (s->period_current+1) % s->runtime->periods; s->period_count++; psc_dma_bcom_enqueue_next_buffer(s); } spin_unlock(&s->psc_dma->lock); /* If the stream is active, then also inform the PCM middle layer * of the period finished event. */ if (s->active) snd_pcm_period_elapsed(s->stream); return IRQ_HANDLED; } static int psc_dma_hw_free(struct snd_pcm_substream *substream) { snd_pcm_set_runtime_buffer(substream, NULL); return 0; } /** * psc_dma_trigger: start and stop the DMA transfer. * * This function is called by ALSA to start, stop, pause, and resume the DMA * transfer of data. 
*/ static int psc_dma_trigger(struct snd_pcm_substream *substream, int cmd) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai); struct snd_pcm_runtime *runtime = substream->runtime; struct psc_dma_stream *s = to_psc_dma_stream(substream, psc_dma); struct mpc52xx_psc __iomem *regs = psc_dma->psc_regs; u16 imr; unsigned long flags; int i; switch (cmd) { case SNDRV_PCM_TRIGGER_START: dev_dbg(psc_dma->dev, "START: stream=%i fbits=%u ps=%u #p=%u\n", substream->pstr->stream, runtime->frame_bits, (int)runtime->period_size, runtime->periods); s->period_bytes = frames_to_bytes(runtime, runtime->period_size); s->period_next = 0; s->period_current = 0; s->active = 1; s->period_count = 0; s->runtime = runtime; /* Fill up the bestcomm bd queue and enable DMA. * This will begin filling the PSC's fifo. */ spin_lock_irqsave(&psc_dma->lock, flags); if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE) bcom_gen_bd_rx_reset(s->bcom_task); else bcom_gen_bd_tx_reset(s->bcom_task); for (i = 0; i < runtime->periods; i++) if (!bcom_queue_full(s->bcom_task)) psc_dma_bcom_enqueue_next_buffer(s); bcom_enable(s->bcom_task); spin_unlock_irqrestore(&psc_dma->lock, flags); out_8(&regs->command, MPC52xx_PSC_RST_ERR_STAT); break; case SNDRV_PCM_TRIGGER_STOP: dev_dbg(psc_dma->dev, "STOP: stream=%i periods_count=%i\n", substream->pstr->stream, s->period_count); s->active = 0; spin_lock_irqsave(&psc_dma->lock, flags); bcom_disable(s->bcom_task); if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE) bcom_gen_bd_rx_reset(s->bcom_task); else bcom_gen_bd_tx_reset(s->bcom_task); spin_unlock_irqrestore(&psc_dma->lock, flags); break; default: dev_dbg(psc_dma->dev, "unhandled trigger: stream=%i cmd=%i\n", substream->pstr->stream, cmd); return -EINVAL; } /* Update interrupt enable settings */ imr = 0; if (psc_dma->playback.active) imr |= MPC52xx_PSC_IMR_TXEMP; if (psc_dma->capture.active) imr |= MPC52xx_PSC_IMR_ORERR; 
out_be16(&regs->isr_imr.imr, psc_dma->imr | imr); return 0; } /* --------------------------------------------------------------------- * The PSC DMA 'ASoC platform' driver * * Can be referenced by an 'ASoC machine' driver * This driver only deals with the audio bus; it doesn't have any * interaction with the attached codec */ static const struct snd_pcm_hardware psc_dma_hardware = { .info = SNDRV_PCM_INFO_MMAP | SNDRV_PCM_INFO_MMAP_VALID | SNDRV_PCM_INFO_INTERLEAVED | SNDRV_PCM_INFO_BLOCK_TRANSFER | SNDRV_PCM_INFO_BATCH, .formats = SNDRV_PCM_FMTBIT_S8 | SNDRV_PCM_FMTBIT_S16_BE | SNDRV_PCM_FMTBIT_S24_BE | SNDRV_PCM_FMTBIT_S32_BE, .rate_min = 8000, .rate_max = 48000, .channels_min = 1, .channels_max = 2, .period_bytes_max = 1024 * 1024, .period_bytes_min = 32, .periods_min = 2, .periods_max = 256, .buffer_bytes_max = 2 * 1024 * 1024, .fifo_size = 512, }; static int psc_dma_open(struct snd_pcm_substream *substream) { struct snd_pcm_runtime *runtime = substream->runtime; struct snd_soc_pcm_runtime *rtd = substream->private_data; struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai); struct psc_dma_stream *s; int rc; dev_dbg(psc_dma->dev, "psc_dma_open(substream=%p)\n", substream); if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE) s = &psc_dma->capture; else s = &psc_dma->playback; snd_soc_set_runtime_hwparams(substream, &psc_dma_hardware); rc = snd_pcm_hw_constraint_integer(runtime, SNDRV_PCM_HW_PARAM_PERIODS); if (rc < 0) { dev_err(substream->pcm->card->dev, "invalid buffer size\n"); return rc; } s->stream = substream; return 0; } static int psc_dma_close(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai); struct psc_dma_stream *s; dev_dbg(psc_dma->dev, "psc_dma_close(substream=%p)\n", substream); if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE) s = &psc_dma->capture; else s = &psc_dma->playback; if (!psc_dma->playback.active && 
!psc_dma->capture.active) { /* Disable all interrupts and reset the PSC */ out_be16(&psc_dma->psc_regs->isr_imr.imr, psc_dma->imr); out_8(&psc_dma->psc_regs->command, 4 << 4); /* reset error */ } s->stream = NULL; return 0; } static snd_pcm_uframes_t psc_dma_pointer(struct snd_pcm_substream *substream) { struct snd_soc_pcm_runtime *rtd = substream->private_data; struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai); struct psc_dma_stream *s; dma_addr_t count; if (substream->pstr->stream == SNDRV_PCM_STREAM_CAPTURE) s = &psc_dma->capture; else s = &psc_dma->playback; count = s->period_current * s->period_bytes; return bytes_to_frames(substream->runtime, count); } static int psc_dma_hw_params(struct snd_pcm_substream *substream, struct snd_pcm_hw_params *params) { snd_pcm_set_runtime_buffer(substream, &substream->dma_buffer); return 0; } static struct snd_pcm_ops psc_dma_ops = { .open = psc_dma_open, .close = psc_dma_close, .hw_free = psc_dma_hw_free, .ioctl = snd_pcm_lib_ioctl, .pointer = psc_dma_pointer, .trigger = psc_dma_trigger, .hw_params = psc_dma_hw_params, }; static u64 psc_dma_dmamask = DMA_BIT_MASK(32); static int psc_dma_new(struct snd_soc_pcm_runtime *rtd) { struct snd_card *card = rtd->card->snd_card; struct snd_soc_dai *dai = rtd->cpu_dai; struct snd_pcm *pcm = rtd->pcm; struct psc_dma *psc_dma = snd_soc_dai_get_drvdata(rtd->cpu_dai); size_t size = psc_dma_hardware.buffer_bytes_max; int rc = 0; dev_dbg(rtd->platform->dev, "psc_dma_new(card=%p, dai=%p, pcm=%p)\n", card, dai, pcm); if (!card->dev->dma_mask) card->dev->dma_mask = &psc_dma_dmamask; if (!card->dev->coherent_dma_mask) card->dev->coherent_dma_mask = DMA_BIT_MASK(32); if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) { rc = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev, size, &pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->dma_buffer); if (rc) goto playback_alloc_err; } if (pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream) { rc = 
snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, pcm->card->dev, size, &pcm->streams[SNDRV_PCM_STREAM_CAPTURE].substream->dma_buffer); if (rc) goto capture_alloc_err; } if (rtd->codec->ac97) rtd->codec->ac97->private_data = psc_dma; return 0; capture_alloc_err: if (pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream) snd_dma_free_pages(&pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream->dma_buffer); playback_alloc_err: dev_err(card->dev, "Cannot allocate buffer(s)\n"); return -ENOMEM; } static void psc_dma_free(struct snd_pcm *pcm) { struct snd_soc_pcm_runtime *rtd = pcm->private_data; struct snd_pcm_substream *substream; int stream; dev_dbg(rtd->platform->dev, "psc_dma_free(pcm=%p)\n", pcm); for (stream = 0; stream < 2; stream++) { substream = pcm->streams[stream].substream; if (substream) { snd_dma_free_pages(&substream->dma_buffer); substream->dma_buffer.area = NULL; substream->dma_buffer.addr = 0; } } } static struct snd_soc_platform_driver mpc5200_audio_dma_platform = { .ops = &psc_dma_ops, .pcm_new = &psc_dma_new, .pcm_free = &psc_dma_free, }; static int mpc5200_hpcd_probe(struct platform_device *op) { phys_addr_t fifo; struct psc_dma *psc_dma; struct resource res; int size, irq, rc; const __be32 *prop; void __iomem *regs; int ret; /* Fetch the registers and IRQ of the PSC */ irq = irq_of_parse_and_map(op->dev.of_node, 0); if (of_address_to_resource(op->dev.of_node, 0, &res)) { dev_err(&op->dev, "Missing reg property\n"); return -ENODEV; } regs = ioremap(res.start, resource_size(&res)); if (!regs) { dev_err(&op->dev, "Could not map registers\n"); return -ENODEV; } /* Allocate and initialize the driver private data */ psc_dma = kzalloc(sizeof *psc_dma, GFP_KERNEL); if (!psc_dma) { ret = -ENOMEM; goto out_unmap; } /* Get the PSC ID */ prop = of_get_property(op->dev.of_node, "cell-index", &size); if (!prop || size < sizeof *prop) { ret = -ENODEV; goto out_free; } spin_lock_init(&psc_dma->lock); mutex_init(&psc_dma->mutex); psc_dma->id = be32_to_cpu(*prop); psc_dma->irq = 
irq; psc_dma->psc_regs = regs; psc_dma->fifo_regs = regs + sizeof *psc_dma->psc_regs; psc_dma->dev = &op->dev; psc_dma->playback.psc_dma = psc_dma; psc_dma->capture.psc_dma = psc_dma; snprintf(psc_dma->name, sizeof psc_dma->name, "PSC%u", psc_dma->id); /* Find the address of the fifo data registers and setup the * DMA tasks */ fifo = res.start + offsetof(struct mpc52xx_psc, buffer.buffer_32); psc_dma->capture.bcom_task = bcom_psc_gen_bd_rx_init(psc_dma->id, 10, fifo, 512); psc_dma->playback.bcom_task = bcom_psc_gen_bd_tx_init(psc_dma->id, 10, fifo); if (!psc_dma->capture.bcom_task || !psc_dma->playback.bcom_task) { dev_err(&op->dev, "Could not allocate bestcomm tasks\n"); ret = -ENODEV; goto out_free; } /* Disable all interrupts and reset the PSC */ out_be16(&psc_dma->psc_regs->isr_imr.imr, psc_dma->imr); /* reset receiver */ out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_RST_RX); /* reset transmitter */ out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_RST_TX); /* reset error */ out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_RST_ERR_STAT); /* reset mode */ out_8(&psc_dma->psc_regs->command, MPC52xx_PSC_SEL_MODE_REG_1); /* Set up mode register; * First write: RxRdy (FIFO Alarm) generates rx FIFO irq * Second write: register Normal mode for non loopback */ out_8(&psc_dma->psc_regs->mode, 0); out_8(&psc_dma->psc_regs->mode, 0); /* Set the TX and RX fifo alarm thresholds */ out_be16(&psc_dma->fifo_regs->rfalarm, 0x100); out_8(&psc_dma->fifo_regs->rfcntl, 0x4); out_be16(&psc_dma->fifo_regs->tfalarm, 0x100); out_8(&psc_dma->fifo_regs->tfcntl, 0x7); /* Lookup the IRQ numbers */ psc_dma->playback.irq = bcom_get_task_irq(psc_dma->playback.bcom_task); psc_dma->capture.irq = bcom_get_task_irq(psc_dma->capture.bcom_task); rc = request_irq(psc_dma->irq, &psc_dma_status_irq, IRQF_SHARED, "psc-dma-status", psc_dma); rc |= request_irq(psc_dma->capture.irq, &psc_dma_bcom_irq, IRQF_SHARED, "psc-dma-capture", &psc_dma->capture); rc |= request_irq(psc_dma->playback.irq, 
&psc_dma_bcom_irq, IRQF_SHARED, "psc-dma-playback", &psc_dma->playback); if (rc) { ret = -ENODEV; goto out_irq; } /* Save what we've done so it can be found again later */ dev_set_drvdata(&op->dev, psc_dma); /* Tell the ASoC OF helpers about it */ return snd_soc_register_platform(&op->dev, &mpc5200_audio_dma_platform); out_irq: free_irq(psc_dma->irq, psc_dma); free_irq(psc_dma->capture.irq, &psc_dma->capture); free_irq(psc_dma->playback.irq, &psc_dma->playback); out_free: kfree(psc_dma); out_unmap: iounmap(regs); return ret; } static int mpc5200_hpcd_remove(struct platform_device *op) { struct psc_dma *psc_dma = dev_get_drvdata(&op->dev); dev_dbg(&op->dev, "mpc5200_audio_dma_destroy()\n"); snd_soc_unregister_platform(&op->dev); bcom_gen_bd_rx_release(psc_dma->capture.bcom_task); bcom_gen_bd_tx_release(psc_dma->playback.bcom_task); /* Release irqs */ free_irq(psc_dma->irq, psc_dma); free_irq(psc_dma->capture.irq, &psc_dma->capture); free_irq(psc_dma->playback.irq, &psc_dma->playback); iounmap(psc_dma->psc_regs); kfree(psc_dma); dev_set_drvdata(&op->dev, NULL); return 0; } static struct of_device_id mpc5200_hpcd_match[] = { { .compatible = "fsl,mpc5200-pcm", }, {} }; MODULE_DEVICE_TABLE(of, mpc5200_hpcd_match); static struct platform_driver mpc5200_hpcd_of_driver = { .probe = mpc5200_hpcd_probe, .remove = mpc5200_hpcd_remove, .driver = { .owner = THIS_MODULE, .name = "mpc5200-pcm-audio", .of_match_table = mpc5200_hpcd_match, } }; module_platform_driver(mpc5200_hpcd_of_driver); MODULE_AUTHOR("Grant Likely <grant.likely@secretlab.ca>"); MODULE_DESCRIPTION("Freescale MPC5200 PSC in DMA mode ASoC Driver"); MODULE_LICENSE("GPL");
gpl-2.0
GalaxyTab4/android_kernel_samsung_matissewifi
drivers/net/ethernet/sfc/falcon_boards.c
4946
21951
/**************************************************************************** * Driver for Solarflare Solarstorm network controllers and boards * Copyright 2007-2010 Solarflare Communications Inc. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 as published * by the Free Software Foundation, incorporated herein by reference. */ #include <linux/rtnetlink.h> #include "net_driver.h" #include "phy.h" #include "efx.h" #include "nic.h" #include "workarounds.h" /* Macros for unpacking the board revision */ /* The revision info is in host byte order. */ #define FALCON_BOARD_TYPE(_rev) (_rev >> 8) #define FALCON_BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf) #define FALCON_BOARD_MINOR(_rev) (_rev & 0xf) /* Board types */ #define FALCON_BOARD_SFE4001 0x01 #define FALCON_BOARD_SFE4002 0x02 #define FALCON_BOARD_SFE4003 0x03 #define FALCON_BOARD_SFN4112F 0x52 /* Board temperature is about 15°C above ambient when air flow is * limited. The maximum acceptable ambient temperature varies * depending on the PHY specifications but the critical temperature * above which we should shut down to avoid damage is 80°C. */ #define FALCON_BOARD_TEMP_BIAS 15 #define FALCON_BOARD_TEMP_CRIT (80 + FALCON_BOARD_TEMP_BIAS) /* SFC4000 datasheet says: 'The maximum permitted junction temperature * is 125°C; the thermal design of the environment for the SFC4000 * should aim to keep this well below 100°C.' 
*/ #define FALCON_JUNC_TEMP_MIN 0 #define FALCON_JUNC_TEMP_MAX 90 #define FALCON_JUNC_TEMP_CRIT 125 /***************************************************************************** * Support for LM87 sensor chip used on several boards */ #define LM87_REG_TEMP_HW_INT_LOCK 0x13 #define LM87_REG_TEMP_HW_EXT_LOCK 0x14 #define LM87_REG_TEMP_HW_INT 0x17 #define LM87_REG_TEMP_HW_EXT 0x18 #define LM87_REG_TEMP_EXT1 0x26 #define LM87_REG_TEMP_INT 0x27 #define LM87_REG_ALARMS1 0x41 #define LM87_REG_ALARMS2 0x42 #define LM87_IN_LIMITS(nr, _min, _max) \ 0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min #define LM87_AIN_LIMITS(nr, _min, _max) \ 0x3B + (nr), _max, 0x1A + (nr), _min #define LM87_TEMP_INT_LIMITS(_min, _max) \ 0x39, _max, 0x3A, _min #define LM87_TEMP_EXT1_LIMITS(_min, _max) \ 0x37, _max, 0x38, _min #define LM87_ALARM_TEMP_INT 0x10 #define LM87_ALARM_TEMP_EXT1 0x20 #if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE) static int efx_poke_lm87(struct i2c_client *client, const u8 *reg_values) { while (*reg_values) { u8 reg = *reg_values++; u8 value = *reg_values++; int rc = i2c_smbus_write_byte_data(client, reg, value); if (rc) return rc; } return 0; } static const u8 falcon_lm87_common_regs[] = { LM87_REG_TEMP_HW_INT_LOCK, FALCON_BOARD_TEMP_CRIT, LM87_REG_TEMP_HW_INT, FALCON_BOARD_TEMP_CRIT, LM87_TEMP_EXT1_LIMITS(FALCON_JUNC_TEMP_MIN, FALCON_JUNC_TEMP_MAX), LM87_REG_TEMP_HW_EXT_LOCK, FALCON_JUNC_TEMP_CRIT, LM87_REG_TEMP_HW_EXT, FALCON_JUNC_TEMP_CRIT, 0 }; static int efx_init_lm87(struct efx_nic *efx, const struct i2c_board_info *info, const u8 *reg_values) { struct falcon_board *board = falcon_board(efx); struct i2c_client *client = i2c_new_device(&board->i2c_adap, info); int rc; if (!client) return -EIO; /* Read-to-clear alarm/interrupt status */ i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1); i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2); rc = efx_poke_lm87(client, reg_values); if (rc) goto err; rc = efx_poke_lm87(client, 
falcon_lm87_common_regs); if (rc) goto err; board->hwmon_client = client; return 0; err: i2c_unregister_device(client); return rc; } static void efx_fini_lm87(struct efx_nic *efx) { i2c_unregister_device(falcon_board(efx)->hwmon_client); } static int efx_check_lm87(struct efx_nic *efx, unsigned mask) { struct i2c_client *client = falcon_board(efx)->hwmon_client; bool temp_crit, elec_fault, is_failure; u16 alarms; s32 reg; /* If link is up then do not monitor temperature */ if (EFX_WORKAROUND_7884(efx) && efx->link_state.up) return 0; reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1); if (reg < 0) return reg; alarms = reg; reg = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2); if (reg < 0) return reg; alarms |= reg << 8; alarms &= mask; temp_crit = false; if (alarms & LM87_ALARM_TEMP_INT) { reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_INT); if (reg < 0) return reg; if (reg > FALCON_BOARD_TEMP_CRIT) temp_crit = true; } if (alarms & LM87_ALARM_TEMP_EXT1) { reg = i2c_smbus_read_byte_data(client, LM87_REG_TEMP_EXT1); if (reg < 0) return reg; if (reg > FALCON_JUNC_TEMP_CRIT) temp_crit = true; } elec_fault = alarms & ~(LM87_ALARM_TEMP_INT | LM87_ALARM_TEMP_EXT1); is_failure = temp_crit || elec_fault; if (alarms) netif_err(efx, hw, efx->net_dev, "LM87 detected a hardware %s (status %02x:%02x)" "%s%s%s%s\n", is_failure ? "failure" : "problem", alarms & 0xff, alarms >> 8, (alarms & LM87_ALARM_TEMP_INT) ? "; board is overheating" : "", (alarms & LM87_ALARM_TEMP_EXT1) ? "; controller is overheating" : "", temp_crit ? "; reached critical temperature" : "", elec_fault ? "; electrical fault" : ""); return is_failure ? 
-ERANGE : 0; } #else /* !CONFIG_SENSORS_LM87 */ static inline int efx_init_lm87(struct efx_nic *efx, const struct i2c_board_info *info, const u8 *reg_values) { return 0; } static inline void efx_fini_lm87(struct efx_nic *efx) { } static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask) { return 0; } #endif /* CONFIG_SENSORS_LM87 */ /***************************************************************************** * Support for the SFE4001 NIC. * * The SFE4001 does not power-up fully at reset due to its high power * consumption. We control its power via a PCA9539 I/O expander. * It also has a MAX6647 temperature monitor which we expose to * the lm90 driver. * * This also provides minimal support for reflashing the PHY, which is * initiated by resetting it with the FLASH_CFG_1 pin pulled down. * On SFE4001 rev A2 and later this is connected to the 3V3X output of * the IO-expander. * We represent reflash mode as PHY_MODE_SPECIAL and make it mutually * exclusive with the network device being open. 
*/ /************************************************************************** * Support for I2C IO Expander device on SFE4001 */ #define PCA9539 0x74 #define P0_IN 0x00 #define P0_OUT 0x02 #define P0_INVERT 0x04 #define P0_CONFIG 0x06 #define P0_EN_1V0X_LBN 0 #define P0_EN_1V0X_WIDTH 1 #define P0_EN_1V2_LBN 1 #define P0_EN_1V2_WIDTH 1 #define P0_EN_2V5_LBN 2 #define P0_EN_2V5_WIDTH 1 #define P0_EN_3V3X_LBN 3 #define P0_EN_3V3X_WIDTH 1 #define P0_EN_5V_LBN 4 #define P0_EN_5V_WIDTH 1 #define P0_SHORTEN_JTAG_LBN 5 #define P0_SHORTEN_JTAG_WIDTH 1 #define P0_X_TRST_LBN 6 #define P0_X_TRST_WIDTH 1 #define P0_DSP_RESET_LBN 7 #define P0_DSP_RESET_WIDTH 1 #define P1_IN 0x01 #define P1_OUT 0x03 #define P1_INVERT 0x05 #define P1_CONFIG 0x07 #define P1_AFE_PWD_LBN 0 #define P1_AFE_PWD_WIDTH 1 #define P1_DSP_PWD25_LBN 1 #define P1_DSP_PWD25_WIDTH 1 #define P1_RESERVED_LBN 2 #define P1_RESERVED_WIDTH 2 #define P1_SPARE_LBN 4 #define P1_SPARE_WIDTH 4 /* Temperature Sensor */ #define MAX664X_REG_RSL 0x02 #define MAX664X_REG_WLHO 0x0B static void sfe4001_poweroff(struct efx_nic *efx) { struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client; struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client; /* Turn off all power rails and disable outputs */ i2c_smbus_write_byte_data(ioexp_client, P0_OUT, 0xff); i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, 0xff); i2c_smbus_write_byte_data(ioexp_client, P0_CONFIG, 0xff); /* Clear any over-temperature alert */ i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL); } static int sfe4001_poweron(struct efx_nic *efx) { struct i2c_client *ioexp_client = falcon_board(efx)->ioexp_client; struct i2c_client *hwmon_client = falcon_board(efx)->hwmon_client; unsigned int i, j; int rc; u8 out; /* Clear any previous over-temperature alert */ rc = i2c_smbus_read_byte_data(hwmon_client, MAX664X_REG_RSL); if (rc < 0) return rc; /* Enable port 0 and port 1 outputs on IO expander */ rc = i2c_smbus_write_byte_data(ioexp_client, 
P0_CONFIG, 0x00); if (rc) return rc; rc = i2c_smbus_write_byte_data(ioexp_client, P1_CONFIG, 0xff & ~(1 << P1_SPARE_LBN)); if (rc) goto fail_on; /* If PHY power is on, turn it all off and wait 1 second to * ensure a full reset. */ rc = i2c_smbus_read_byte_data(ioexp_client, P0_OUT); if (rc < 0) goto fail_on; out = 0xff & ~((0 << P0_EN_1V2_LBN) | (0 << P0_EN_2V5_LBN) | (0 << P0_EN_3V3X_LBN) | (0 << P0_EN_5V_LBN) | (0 << P0_EN_1V0X_LBN)); if (rc != out) { netif_info(efx, hw, efx->net_dev, "power-cycling PHY\n"); rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out); if (rc) goto fail_on; schedule_timeout_uninterruptible(HZ); } for (i = 0; i < 20; ++i) { /* Turn on 1.2V, 2.5V, 3.3V and 5V power rails */ out = 0xff & ~((1 << P0_EN_1V2_LBN) | (1 << P0_EN_2V5_LBN) | (1 << P0_EN_3V3X_LBN) | (1 << P0_EN_5V_LBN) | (1 << P0_X_TRST_LBN)); if (efx->phy_mode & PHY_MODE_SPECIAL) out |= 1 << P0_EN_3V3X_LBN; rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out); if (rc) goto fail_on; msleep(10); /* Turn on 1V power rail */ out &= ~(1 << P0_EN_1V0X_LBN); rc = i2c_smbus_write_byte_data(ioexp_client, P0_OUT, out); if (rc) goto fail_on; netif_info(efx, hw, efx->net_dev, "waiting for DSP boot (attempt %d)...\n", i); /* In flash config mode, DSP does not turn on AFE, so * just wait 1 second. 
*/ if (efx->phy_mode & PHY_MODE_SPECIAL) { schedule_timeout_uninterruptible(HZ); return 0; } for (j = 0; j < 10; ++j) { msleep(100); /* Check DSP has asserted AFE power line */ rc = i2c_smbus_read_byte_data(ioexp_client, P1_IN); if (rc < 0) goto fail_on; if (rc & (1 << P1_AFE_PWD_LBN)) return 0; } } netif_info(efx, hw, efx->net_dev, "timed out waiting for DSP boot\n"); rc = -ETIMEDOUT; fail_on: sfe4001_poweroff(efx); return rc; } static ssize_t show_phy_flash_cfg(struct device *dev, struct device_attribute *attr, char *buf) { struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); return sprintf(buf, "%d\n", !!(efx->phy_mode & PHY_MODE_SPECIAL)); } static ssize_t set_phy_flash_cfg(struct device *dev, struct device_attribute *attr, const char *buf, size_t count) { struct efx_nic *efx = pci_get_drvdata(to_pci_dev(dev)); enum efx_phy_mode old_mode, new_mode; int err; rtnl_lock(); old_mode = efx->phy_mode; if (count == 0 || *buf == '0') new_mode = old_mode & ~PHY_MODE_SPECIAL; else new_mode = PHY_MODE_SPECIAL; if (!((old_mode ^ new_mode) & PHY_MODE_SPECIAL)) { err = 0; } else if (efx->state != STATE_RUNNING || netif_running(efx->net_dev)) { err = -EBUSY; } else { /* Reset the PHY, reconfigure the MAC and enable/disable * MAC stats accordingly. */ efx->phy_mode = new_mode; if (new_mode & PHY_MODE_SPECIAL) falcon_stop_nic_stats(efx); err = sfe4001_poweron(efx); if (!err) err = efx_reconfigure_port(efx); if (!(new_mode & PHY_MODE_SPECIAL)) falcon_start_nic_stats(efx); } rtnl_unlock(); return err ? 
err : count; } static DEVICE_ATTR(phy_flash_cfg, 0644, show_phy_flash_cfg, set_phy_flash_cfg); static void sfe4001_fini(struct efx_nic *efx) { struct falcon_board *board = falcon_board(efx); netif_info(efx, drv, efx->net_dev, "%s\n", __func__); device_remove_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg); sfe4001_poweroff(efx); i2c_unregister_device(board->ioexp_client); i2c_unregister_device(board->hwmon_client); } static int sfe4001_check_hw(struct efx_nic *efx) { struct falcon_nic_data *nic_data = efx->nic_data; s32 status; /* If XAUI link is up then do not monitor */ if (EFX_WORKAROUND_7884(efx) && !nic_data->xmac_poll_required) return 0; /* Check the powered status of the PHY. Lack of power implies that * the MAX6647 has shut down power to it, probably due to a temp. * alarm. Reading the power status rather than the MAX6647 status * directly because the later is read-to-clear and would thus * start to power up the PHY again when polled, causing us to blip * the power undesirably. * We know we can read from the IO expander because we did * it during power-on. Assume failure now is bad news. */ status = i2c_smbus_read_byte_data(falcon_board(efx)->ioexp_client, P1_IN); if (status >= 0 && (status & ((1 << P1_AFE_PWD_LBN) | (1 << P1_DSP_PWD25_LBN))) != 0) return 0; /* Use board power control, not PHY power control */ sfe4001_poweroff(efx); efx->phy_mode = PHY_MODE_OFF; return (status < 0) ? -EIO : -ERANGE; } static const struct i2c_board_info sfe4001_hwmon_info = { I2C_BOARD_INFO("max6647", 0x4e), }; /* This board uses an I2C expander to provider power to the PHY, which needs to * be turned on before the PHY can be used. 
* Context: Process context, rtnl lock held */ static int sfe4001_init(struct efx_nic *efx) { struct falcon_board *board = falcon_board(efx); int rc; #if defined(CONFIG_SENSORS_LM90) || defined(CONFIG_SENSORS_LM90_MODULE) board->hwmon_client = i2c_new_device(&board->i2c_adap, &sfe4001_hwmon_info); #else board->hwmon_client = i2c_new_dummy(&board->i2c_adap, sfe4001_hwmon_info.addr); #endif if (!board->hwmon_client) return -EIO; /* Raise board/PHY high limit from 85 to 90 degrees Celsius */ rc = i2c_smbus_write_byte_data(board->hwmon_client, MAX664X_REG_WLHO, 90); if (rc) goto fail_hwmon; board->ioexp_client = i2c_new_dummy(&board->i2c_adap, PCA9539); if (!board->ioexp_client) { rc = -EIO; goto fail_hwmon; } if (efx->phy_mode & PHY_MODE_SPECIAL) { /* PHY won't generate a 156.25 MHz clock and MAC stats fetch * will fail. */ falcon_stop_nic_stats(efx); } rc = sfe4001_poweron(efx); if (rc) goto fail_ioexp; rc = device_create_file(&efx->pci_dev->dev, &dev_attr_phy_flash_cfg); if (rc) goto fail_on; netif_info(efx, hw, efx->net_dev, "PHY is powered on\n"); return 0; fail_on: sfe4001_poweroff(efx); fail_ioexp: i2c_unregister_device(board->ioexp_client); fail_hwmon: i2c_unregister_device(board->hwmon_client); return rc; } /***************************************************************************** * Support for the SFE4002 * */ static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */ static const u8 sfe4002_lm87_regs[] = { LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */ LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */ LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */ LM87_IN_LIMITS(3, 0xac, 0xd4), /* 5V: 5.0V +/- 10% */ LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */ LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */ LM87_AIN_LIMITS(0, 0x98, 0xbb), /* AIN1: 1.66V +/- 10% */ LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */ LM87_TEMP_INT_LIMITS(0, 80 + FALCON_BOARD_TEMP_BIAS), LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX), 0 
}; static const struct i2c_board_info sfe4002_hwmon_info = { I2C_BOARD_INFO("lm87", 0x2e), .platform_data = &sfe4002_lm87_channel, }; /****************************************************************************/ /* LED allocations. Note that on rev A0 boards the schematic and the reality * differ: red and green are swapped. Below is the fixed (A1) layout (there * are only 3 A0 boards in existence, so no real reason to make this * conditional). */ #define SFE4002_FAULT_LED (2) /* Red */ #define SFE4002_RX_LED (0) /* Green */ #define SFE4002_TX_LED (1) /* Amber */ static void sfe4002_init_phy(struct efx_nic *efx) { /* Set the TX and RX LEDs to reflect status and activity, and the * fault LED off */ falcon_qt202x_set_led(efx, SFE4002_TX_LED, QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT); falcon_qt202x_set_led(efx, SFE4002_RX_LED, QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT); falcon_qt202x_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF); } static void sfe4002_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) { falcon_qt202x_set_led( efx, SFE4002_FAULT_LED, (mode == EFX_LED_ON) ? QUAKE_LED_ON : QUAKE_LED_OFF); } static int sfe4002_check_hw(struct efx_nic *efx) { struct falcon_board *board = falcon_board(efx); /* A0 board rev. 4002s report a temperature fault the whole time * (bad sensor) so we mask it out. */ unsigned alarm_mask = (board->major == 0 && board->minor == 0) ? 
~LM87_ALARM_TEMP_EXT1 : ~0; return efx_check_lm87(efx, alarm_mask); } static int sfe4002_init(struct efx_nic *efx) { return efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs); } /***************************************************************************** * Support for the SFN4112F * */ static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */ static const u8 sfn4112f_lm87_regs[] = { LM87_IN_LIMITS(0, 0x7c, 0x99), /* 2.5V: 1.8V +/- 10% */ LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */ LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */ LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */ LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */ LM87_AIN_LIMITS(1, 0x8a, 0xa9), /* AIN2: 1.5V +/- 10% */ LM87_TEMP_INT_LIMITS(0, 60 + FALCON_BOARD_TEMP_BIAS), LM87_TEMP_EXT1_LIMITS(0, FALCON_JUNC_TEMP_MAX), 0 }; static const struct i2c_board_info sfn4112f_hwmon_info = { I2C_BOARD_INFO("lm87", 0x2e), .platform_data = &sfn4112f_lm87_channel, }; #define SFN4112F_ACT_LED 0 #define SFN4112F_LINK_LED 1 static void sfn4112f_init_phy(struct efx_nic *efx) { falcon_qt202x_set_led(efx, SFN4112F_ACT_LED, QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT); falcon_qt202x_set_led(efx, SFN4112F_LINK_LED, QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT); } static void sfn4112f_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) { int reg; switch (mode) { case EFX_LED_OFF: reg = QUAKE_LED_OFF; break; case EFX_LED_ON: reg = QUAKE_LED_ON; break; default: reg = QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT; break; } falcon_qt202x_set_led(efx, SFN4112F_LINK_LED, reg); } static int sfn4112f_check_hw(struct efx_nic *efx) { /* Mask out unused sensors */ return efx_check_lm87(efx, ~0x48); } static int sfn4112f_init(struct efx_nic *efx) { return efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs); } /***************************************************************************** * Support for the SFE4003 * */ static u8 sfe4003_lm87_channel = 0x03; /* use AIN not FAN inputs */ static 
const u8 sfe4003_lm87_regs[] = { LM87_IN_LIMITS(0, 0x67, 0x7f), /* 2.5V: 1.5V +/- 10% */ LM87_IN_LIMITS(1, 0x4c, 0x5e), /* Vccp1: 1.2V +/- 10% */ LM87_IN_LIMITS(2, 0xac, 0xd4), /* 3.3V: 3.3V +/- 10% */ LM87_IN_LIMITS(4, 0xac, 0xe0), /* 12V: 10.8-14V */ LM87_IN_LIMITS(5, 0x3f, 0x4f), /* Vccp2: 1.0V +/- 10% */ LM87_TEMP_INT_LIMITS(0, 70 + FALCON_BOARD_TEMP_BIAS), 0 }; static const struct i2c_board_info sfe4003_hwmon_info = { I2C_BOARD_INFO("lm87", 0x2e), .platform_data = &sfe4003_lm87_channel, }; /* Board-specific LED info. */ #define SFE4003_RED_LED_GPIO 11 #define SFE4003_LED_ON 1 #define SFE4003_LED_OFF 0 static void sfe4003_set_id_led(struct efx_nic *efx, enum efx_led_mode mode) { struct falcon_board *board = falcon_board(efx); /* The LEDs were not wired to GPIOs before A3 */ if (board->minor < 3 && board->major == 0) return; falcon_txc_set_gpio_val( efx, SFE4003_RED_LED_GPIO, (mode == EFX_LED_ON) ? SFE4003_LED_ON : SFE4003_LED_OFF); } static void sfe4003_init_phy(struct efx_nic *efx) { struct falcon_board *board = falcon_board(efx); /* The LEDs were not wired to GPIOs before A3 */ if (board->minor < 3 && board->major == 0) return; falcon_txc_set_gpio_dir(efx, SFE4003_RED_LED_GPIO, TXC_GPIO_DIR_OUTPUT); falcon_txc_set_gpio_val(efx, SFE4003_RED_LED_GPIO, SFE4003_LED_OFF); } static int sfe4003_check_hw(struct efx_nic *efx) { struct falcon_board *board = falcon_board(efx); /* A0/A1/A2 board rev. 4003s report a temperature fault the whole time * (bad sensor) so we mask it out. */ unsigned alarm_mask = (board->major == 0 && board->minor <= 2) ? 
~LM87_ALARM_TEMP_EXT1 : ~0; return efx_check_lm87(efx, alarm_mask); } static int sfe4003_init(struct efx_nic *efx) { return efx_init_lm87(efx, &sfe4003_hwmon_info, sfe4003_lm87_regs); } static const struct falcon_board_type board_types[] = { { .id = FALCON_BOARD_SFE4001, .init = sfe4001_init, .init_phy = efx_port_dummy_op_void, .fini = sfe4001_fini, .set_id_led = tenxpress_set_id_led, .monitor = sfe4001_check_hw, }, { .id = FALCON_BOARD_SFE4002, .init = sfe4002_init, .init_phy = sfe4002_init_phy, .fini = efx_fini_lm87, .set_id_led = sfe4002_set_id_led, .monitor = sfe4002_check_hw, }, { .id = FALCON_BOARD_SFE4003, .init = sfe4003_init, .init_phy = sfe4003_init_phy, .fini = efx_fini_lm87, .set_id_led = sfe4003_set_id_led, .monitor = sfe4003_check_hw, }, { .id = FALCON_BOARD_SFN4112F, .init = sfn4112f_init, .init_phy = sfn4112f_init_phy, .fini = efx_fini_lm87, .set_id_led = sfn4112f_set_id_led, .monitor = sfn4112f_check_hw, }, }; int falcon_probe_board(struct efx_nic *efx, u16 revision_info) { struct falcon_board *board = falcon_board(efx); u8 type_id = FALCON_BOARD_TYPE(revision_info); int i; board->major = FALCON_BOARD_MAJOR(revision_info); board->minor = FALCON_BOARD_MINOR(revision_info); for (i = 0; i < ARRAY_SIZE(board_types); i++) if (board_types[i].id == type_id) board->type = &board_types[i]; if (board->type) { return 0; } else { netif_err(efx, probe, efx->net_dev, "unknown board type %d\n", type_id); return -ENODEV; } }
gpl-2.0
sssemil/10.5.A.0.230
net/sched/act_csum.c
5458
13537
/* * Checksum updating actions * * Copyright (c) 2010 Gregoire Baron <baronchon@n7mm.org> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #include <linux/types.h> #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/netlink.h> #include <net/netlink.h> #include <linux/rtnetlink.h> #include <linux/skbuff.h> #include <net/ip.h> #include <net/ipv6.h> #include <net/icmp.h> #include <linux/icmpv6.h> #include <linux/igmp.h> #include <net/tcp.h> #include <net/udp.h> #include <net/ip6_checksum.h> #include <net/act_api.h> #include <linux/tc_act/tc_csum.h> #include <net/tc_act/tc_csum.h> #define CSUM_TAB_MASK 15 static struct tcf_common *tcf_csum_ht[CSUM_TAB_MASK + 1]; static u32 csum_idx_gen; static DEFINE_RWLOCK(csum_lock); static struct tcf_hashinfo csum_hash_info = { .htab = tcf_csum_ht, .hmask = CSUM_TAB_MASK, .lock = &csum_lock, }; static const struct nla_policy csum_policy[TCA_CSUM_MAX + 1] = { [TCA_CSUM_PARMS] = { .len = sizeof(struct tc_csum), }, }; static int tcf_csum_init(struct nlattr *nla, struct nlattr *est, struct tc_action *a, int ovr, int bind) { struct nlattr *tb[TCA_CSUM_MAX + 1]; struct tc_csum *parm; struct tcf_common *pc; struct tcf_csum *p; int ret = 0, err; if (nla == NULL) return -EINVAL; err = nla_parse_nested(tb, TCA_CSUM_MAX, nla, csum_policy); if (err < 0) return err; if (tb[TCA_CSUM_PARMS] == NULL) return -EINVAL; parm = nla_data(tb[TCA_CSUM_PARMS]); pc = tcf_hash_check(parm->index, a, bind, &csum_hash_info); if (!pc) { pc = tcf_hash_create(parm->index, est, a, sizeof(*p), bind, &csum_idx_gen, &csum_hash_info); if (IS_ERR(pc)) return PTR_ERR(pc); p = to_tcf_csum(pc); ret = ACT_P_CREATED; } else { p = to_tcf_csum(pc); if (!ovr) { tcf_hash_release(pc, bind, &csum_hash_info); 
return -EEXIST; } } spin_lock_bh(&p->tcf_lock); p->tcf_action = parm->action; p->update_flags = parm->update_flags; spin_unlock_bh(&p->tcf_lock); if (ret == ACT_P_CREATED) tcf_hash_insert(pc, &csum_hash_info); return ret; } static int tcf_csum_cleanup(struct tc_action *a, int bind) { struct tcf_csum *p = a->priv; return tcf_hash_release(&p->common, bind, &csum_hash_info); } /** * tcf_csum_skb_nextlayer - Get next layer pointer * @skb: sk_buff to use * @ihl: previous summed headers length * @ipl: complete packet length * @jhl: next header length * * Check the expected next layer availability in the specified sk_buff. * Return the next layer pointer if pass, NULL otherwise. */ static void *tcf_csum_skb_nextlayer(struct sk_buff *skb, unsigned int ihl, unsigned int ipl, unsigned int jhl) { int ntkoff = skb_network_offset(skb); int hl = ihl + jhl; if (!pskb_may_pull(skb, ipl + ntkoff) || (ipl < hl) || (skb_cloned(skb) && !skb_clone_writable(skb, hl + ntkoff) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC))) return NULL; else return (void *)(skb_network_header(skb) + ihl); } static int tcf_csum_ipv4_icmp(struct sk_buff *skb, unsigned int ihl, unsigned int ipl) { struct icmphdr *icmph; icmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmph)); if (icmph == NULL) return 0; icmph->checksum = 0; skb->csum = csum_partial(icmph, ipl - ihl, 0); icmph->checksum = csum_fold(skb->csum); skb->ip_summed = CHECKSUM_NONE; return 1; } static int tcf_csum_ipv4_igmp(struct sk_buff *skb, unsigned int ihl, unsigned int ipl) { struct igmphdr *igmph; igmph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*igmph)); if (igmph == NULL) return 0; igmph->csum = 0; skb->csum = csum_partial(igmph, ipl - ihl, 0); igmph->csum = csum_fold(skb->csum); skb->ip_summed = CHECKSUM_NONE; return 1; } static int tcf_csum_ipv6_icmp(struct sk_buff *skb, struct ipv6hdr *ip6h, unsigned int ihl, unsigned int ipl) { struct icmp6hdr *icmp6h; icmp6h = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*icmp6h)); if (icmp6h == 
NULL) return 0; icmp6h->icmp6_cksum = 0; skb->csum = csum_partial(icmp6h, ipl - ihl, 0); icmp6h->icmp6_cksum = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ipl - ihl, IPPROTO_ICMPV6, skb->csum); skb->ip_summed = CHECKSUM_NONE; return 1; } static int tcf_csum_ipv4_tcp(struct sk_buff *skb, struct iphdr *iph, unsigned int ihl, unsigned int ipl) { struct tcphdr *tcph; tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph)); if (tcph == NULL) return 0; tcph->check = 0; skb->csum = csum_partial(tcph, ipl - ihl, 0); tcph->check = tcp_v4_check(ipl - ihl, iph->saddr, iph->daddr, skb->csum); skb->ip_summed = CHECKSUM_NONE; return 1; } static int tcf_csum_ipv6_tcp(struct sk_buff *skb, struct ipv6hdr *ip6h, unsigned int ihl, unsigned int ipl) { struct tcphdr *tcph; tcph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*tcph)); if (tcph == NULL) return 0; tcph->check = 0; skb->csum = csum_partial(tcph, ipl - ihl, 0); tcph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ipl - ihl, IPPROTO_TCP, skb->csum); skb->ip_summed = CHECKSUM_NONE; return 1; } static int tcf_csum_ipv4_udp(struct sk_buff *skb, struct iphdr *iph, unsigned int ihl, unsigned int ipl, int udplite) { struct udphdr *udph; u16 ul; /* * Support both UDP and UDPLITE checksum algorithms, Don't use * udph->len to get the real length without any protocol check, * UDPLITE uses udph->len for another thing, * Use iph->tot_len, or just ipl. 
*/ udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph)); if (udph == NULL) return 0; ul = ntohs(udph->len); if (udplite || udph->check) { udph->check = 0; if (udplite) { if (ul == 0) skb->csum = csum_partial(udph, ipl - ihl, 0); else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl)) skb->csum = csum_partial(udph, ul, 0); else goto ignore_obscure_skb; } else { if (ul != ipl - ihl) goto ignore_obscure_skb; skb->csum = csum_partial(udph, ul, 0); } udph->check = csum_tcpudp_magic(iph->saddr, iph->daddr, ul, iph->protocol, skb->csum); if (!udph->check) udph->check = CSUM_MANGLED_0; } skb->ip_summed = CHECKSUM_NONE; ignore_obscure_skb: return 1; } static int tcf_csum_ipv6_udp(struct sk_buff *skb, struct ipv6hdr *ip6h, unsigned int ihl, unsigned int ipl, int udplite) { struct udphdr *udph; u16 ul; /* * Support both UDP and UDPLITE checksum algorithms, Don't use * udph->len to get the real length without any protocol check, * UDPLITE uses udph->len for another thing, * Use ip6h->payload_len + sizeof(*ip6h) ... , or just ipl. */ udph = tcf_csum_skb_nextlayer(skb, ihl, ipl, sizeof(*udph)); if (udph == NULL) return 0; ul = ntohs(udph->len); udph->check = 0; if (udplite) { if (ul == 0) skb->csum = csum_partial(udph, ipl - ihl, 0); else if ((ul >= sizeof(*udph)) && (ul <= ipl - ihl)) skb->csum = csum_partial(udph, ul, 0); else goto ignore_obscure_skb; } else { if (ul != ipl - ihl) goto ignore_obscure_skb; skb->csum = csum_partial(udph, ul, 0); } udph->check = csum_ipv6_magic(&ip6h->saddr, &ip6h->daddr, ul, udplite ? IPPROTO_UDPLITE : IPPROTO_UDP, skb->csum); if (!udph->check) udph->check = CSUM_MANGLED_0; skb->ip_summed = CHECKSUM_NONE; ignore_obscure_skb: return 1; } static int tcf_csum_ipv4(struct sk_buff *skb, u32 update_flags) { struct iphdr *iph; int ntkoff; ntkoff = skb_network_offset(skb); if (!pskb_may_pull(skb, sizeof(*iph) + ntkoff)) goto fail; iph = ip_hdr(skb); switch (iph->frag_off & htons(IP_OFFSET) ? 
0 : iph->protocol) { case IPPROTO_ICMP: if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP) if (!tcf_csum_ipv4_icmp(skb, iph->ihl * 4, ntohs(iph->tot_len))) goto fail; break; case IPPROTO_IGMP: if (update_flags & TCA_CSUM_UPDATE_FLAG_IGMP) if (!tcf_csum_ipv4_igmp(skb, iph->ihl * 4, ntohs(iph->tot_len))) goto fail; break; case IPPROTO_TCP: if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP) if (!tcf_csum_ipv4_tcp(skb, iph, iph->ihl * 4, ntohs(iph->tot_len))) goto fail; break; case IPPROTO_UDP: if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP) if (!tcf_csum_ipv4_udp(skb, iph, iph->ihl * 4, ntohs(iph->tot_len), 0)) goto fail; break; case IPPROTO_UDPLITE: if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE) if (!tcf_csum_ipv4_udp(skb, iph, iph->ihl * 4, ntohs(iph->tot_len), 1)) goto fail; break; } if (update_flags & TCA_CSUM_UPDATE_FLAG_IPV4HDR) { if (skb_cloned(skb) && !skb_clone_writable(skb, sizeof(*iph) + ntkoff) && pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) goto fail; ip_send_check(iph); } return 1; fail: return 0; } static int tcf_csum_ipv6_hopopts(struct ipv6_opt_hdr *ip6xh, unsigned int ixhl, unsigned int *pl) { int off, len, optlen; unsigned char *xh = (void *)ip6xh; off = sizeof(*ip6xh); len = ixhl - off; while (len > 1) { switch (xh[off]) { case IPV6_TLV_PAD0: optlen = 1; break; case IPV6_TLV_JUMBO: optlen = xh[off + 1] + 2; if (optlen != 6 || len < 6 || (off & 3) != 2) /* wrong jumbo option length/alignment */ return 0; *pl = ntohl(*(__be32 *)(xh + off + 2)); goto done; default: optlen = xh[off + 1] + 2; if (optlen > len) /* ignore obscure options */ goto done; break; } off += optlen; len -= optlen; } done: return 1; } static int tcf_csum_ipv6(struct sk_buff *skb, u32 update_flags) { struct ipv6hdr *ip6h; struct ipv6_opt_hdr *ip6xh; unsigned int hl, ixhl; unsigned int pl; int ntkoff; u8 nexthdr; ntkoff = skb_network_offset(skb); hl = sizeof(*ip6h); if (!pskb_may_pull(skb, hl + ntkoff)) goto fail; ip6h = ipv6_hdr(skb); pl = ntohs(ip6h->payload_len); nexthdr = ip6h->nexthdr; do { 
switch (nexthdr) { case NEXTHDR_FRAGMENT: goto ignore_skb; case NEXTHDR_ROUTING: case NEXTHDR_HOP: case NEXTHDR_DEST: if (!pskb_may_pull(skb, hl + sizeof(*ip6xh) + ntkoff)) goto fail; ip6xh = (void *)(skb_network_header(skb) + hl); ixhl = ipv6_optlen(ip6xh); if (!pskb_may_pull(skb, hl + ixhl + ntkoff)) goto fail; if ((nexthdr == NEXTHDR_HOP) && !(tcf_csum_ipv6_hopopts(ip6xh, ixhl, &pl))) goto fail; nexthdr = ip6xh->nexthdr; hl += ixhl; break; case IPPROTO_ICMPV6: if (update_flags & TCA_CSUM_UPDATE_FLAG_ICMP) if (!tcf_csum_ipv6_icmp(skb, ip6h, hl, pl + sizeof(*ip6h))) goto fail; goto done; case IPPROTO_TCP: if (update_flags & TCA_CSUM_UPDATE_FLAG_TCP) if (!tcf_csum_ipv6_tcp(skb, ip6h, hl, pl + sizeof(*ip6h))) goto fail; goto done; case IPPROTO_UDP: if (update_flags & TCA_CSUM_UPDATE_FLAG_UDP) if (!tcf_csum_ipv6_udp(skb, ip6h, hl, pl + sizeof(*ip6h), 0)) goto fail; goto done; case IPPROTO_UDPLITE: if (update_flags & TCA_CSUM_UPDATE_FLAG_UDPLITE) if (!tcf_csum_ipv6_udp(skb, ip6h, hl, pl + sizeof(*ip6h), 1)) goto fail; goto done; default: goto ignore_skb; } } while (pskb_may_pull(skb, hl + 1 + ntkoff)); done: ignore_skb: return 1; fail: return 0; } static int tcf_csum(struct sk_buff *skb, const struct tc_action *a, struct tcf_result *res) { struct tcf_csum *p = a->priv; int action; u32 update_flags; spin_lock(&p->tcf_lock); p->tcf_tm.lastuse = jiffies; bstats_update(&p->tcf_bstats, skb); action = p->tcf_action; update_flags = p->update_flags; spin_unlock(&p->tcf_lock); if (unlikely(action == TC_ACT_SHOT)) goto drop; switch (skb->protocol) { case cpu_to_be16(ETH_P_IP): if (!tcf_csum_ipv4(skb, update_flags)) goto drop; break; case cpu_to_be16(ETH_P_IPV6): if (!tcf_csum_ipv6(skb, update_flags)) goto drop; break; } return action; drop: spin_lock(&p->tcf_lock); p->tcf_qstats.drops++; spin_unlock(&p->tcf_lock); return TC_ACT_SHOT; } static int tcf_csum_dump(struct sk_buff *skb, struct tc_action *a, int bind, int ref) { unsigned char *b = skb_tail_pointer(skb); struct 
tcf_csum *p = a->priv; struct tc_csum opt = { .update_flags = p->update_flags, .index = p->tcf_index, .action = p->tcf_action, .refcnt = p->tcf_refcnt - ref, .bindcnt = p->tcf_bindcnt - bind, }; struct tcf_t t; NLA_PUT(skb, TCA_CSUM_PARMS, sizeof(opt), &opt); t.install = jiffies_to_clock_t(jiffies - p->tcf_tm.install); t.lastuse = jiffies_to_clock_t(jiffies - p->tcf_tm.lastuse); t.expires = jiffies_to_clock_t(p->tcf_tm.expires); NLA_PUT(skb, TCA_CSUM_TM, sizeof(t), &t); return skb->len; nla_put_failure: nlmsg_trim(skb, b); return -1; } static struct tc_action_ops act_csum_ops = { .kind = "csum", .hinfo = &csum_hash_info, .type = TCA_ACT_CSUM, .capab = TCA_CAP_NONE, .owner = THIS_MODULE, .act = tcf_csum, .dump = tcf_csum_dump, .cleanup = tcf_csum_cleanup, .lookup = tcf_hash_search, .init = tcf_csum_init, .walk = tcf_generic_walker }; MODULE_DESCRIPTION("Checksum updating actions"); MODULE_LICENSE("GPL"); static int __init csum_init_module(void) { return tcf_register_action(&act_csum_ops); } static void __exit csum_cleanup_module(void) { tcf_unregister_action(&act_csum_ops); } module_init(csum_init_module); module_exit(csum_cleanup_module);
gpl-2.0
PsychoGame/android_kernel_lge_apq8064
arch/mips/jz4740/dma.c
7762
7628
/* * Copyright (C) 2010, Lars-Peter Clausen <lars@metafoo.de> * JZ4740 SoC DMA support * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the * Free Software Foundation; either version 2 of the License, or (at your * option) any later version. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/kernel.h> #include <linux/module.h> #include <linux/spinlock.h> #include <linux/interrupt.h> #include <linux/dma-mapping.h> #include <asm/mach-jz4740/dma.h> #include <asm/mach-jz4740/base.h> #define JZ_REG_DMA_SRC_ADDR(x) (0x00 + (x) * 0x20) #define JZ_REG_DMA_DST_ADDR(x) (0x04 + (x) * 0x20) #define JZ_REG_DMA_TRANSFER_COUNT(x) (0x08 + (x) * 0x20) #define JZ_REG_DMA_REQ_TYPE(x) (0x0C + (x) * 0x20) #define JZ_REG_DMA_STATUS_CTRL(x) (0x10 + (x) * 0x20) #define JZ_REG_DMA_CMD(x) (0x14 + (x) * 0x20) #define JZ_REG_DMA_DESC_ADDR(x) (0x18 + (x) * 0x20) #define JZ_REG_DMA_CTRL 0x300 #define JZ_REG_DMA_IRQ 0x304 #define JZ_REG_DMA_DOORBELL 0x308 #define JZ_REG_DMA_DOORBELL_SET 0x30C #define JZ_DMA_STATUS_CTRL_NO_DESC BIT(31) #define JZ_DMA_STATUS_CTRL_DESC_INV BIT(6) #define JZ_DMA_STATUS_CTRL_ADDR_ERR BIT(4) #define JZ_DMA_STATUS_CTRL_TRANSFER_DONE BIT(3) #define JZ_DMA_STATUS_CTRL_HALT BIT(2) #define JZ_DMA_STATUS_CTRL_COUNT_TERMINATE BIT(1) #define JZ_DMA_STATUS_CTRL_ENABLE BIT(0) #define JZ_DMA_CMD_SRC_INC BIT(23) #define JZ_DMA_CMD_DST_INC BIT(22) #define JZ_DMA_CMD_RDIL_MASK (0xf << 16) #define JZ_DMA_CMD_SRC_WIDTH_MASK (0x3 << 14) #define JZ_DMA_CMD_DST_WIDTH_MASK (0x3 << 12) #define JZ_DMA_CMD_INTERVAL_LENGTH_MASK (0x7 << 8) #define JZ_DMA_CMD_BLOCK_MODE BIT(7) #define JZ_DMA_CMD_DESC_VALID BIT(4) #define JZ_DMA_CMD_DESC_VALID_MODE BIT(3) #define JZ_DMA_CMD_VALID_IRQ_ENABLE BIT(2) #define JZ_DMA_CMD_TRANSFER_IRQ_ENABLE BIT(1) 
#define JZ_DMA_CMD_LINK_ENABLE BIT(0) #define JZ_DMA_CMD_FLAGS_OFFSET 22 #define JZ_DMA_CMD_RDIL_OFFSET 16 #define JZ_DMA_CMD_SRC_WIDTH_OFFSET 14 #define JZ_DMA_CMD_DST_WIDTH_OFFSET 12 #define JZ_DMA_CMD_TRANSFER_SIZE_OFFSET 8 #define JZ_DMA_CMD_MODE_OFFSET 7 #define JZ_DMA_CTRL_PRIORITY_MASK (0x3 << 8) #define JZ_DMA_CTRL_HALT BIT(3) #define JZ_DMA_CTRL_ADDRESS_ERROR BIT(2) #define JZ_DMA_CTRL_ENABLE BIT(0) static void __iomem *jz4740_dma_base; static spinlock_t jz4740_dma_lock; static inline uint32_t jz4740_dma_read(size_t reg) { return readl(jz4740_dma_base + reg); } static inline void jz4740_dma_write(size_t reg, uint32_t val) { writel(val, jz4740_dma_base + reg); } static inline void jz4740_dma_write_mask(size_t reg, uint32_t val, uint32_t mask) { uint32_t val2; val2 = jz4740_dma_read(reg); val2 &= ~mask; val2 |= val; jz4740_dma_write(reg, val2); } struct jz4740_dma_chan { unsigned int id; void *dev; const char *name; enum jz4740_dma_flags flags; uint32_t transfer_shift; jz4740_dma_complete_callback_t complete_cb; unsigned used:1; }; #define JZ4740_DMA_CHANNEL(_id) { .id = _id } struct jz4740_dma_chan jz4740_dma_channels[] = { JZ4740_DMA_CHANNEL(0), JZ4740_DMA_CHANNEL(1), JZ4740_DMA_CHANNEL(2), JZ4740_DMA_CHANNEL(3), JZ4740_DMA_CHANNEL(4), JZ4740_DMA_CHANNEL(5), }; struct jz4740_dma_chan *jz4740_dma_request(void *dev, const char *name) { unsigned int i; struct jz4740_dma_chan *dma = NULL; spin_lock(&jz4740_dma_lock); for (i = 0; i < ARRAY_SIZE(jz4740_dma_channels); ++i) { if (!jz4740_dma_channels[i].used) { dma = &jz4740_dma_channels[i]; dma->used = 1; break; } } spin_unlock(&jz4740_dma_lock); if (!dma) return NULL; dma->dev = dev; dma->name = name; return dma; } EXPORT_SYMBOL_GPL(jz4740_dma_request); void jz4740_dma_configure(struct jz4740_dma_chan *dma, const struct jz4740_dma_config *config) { uint32_t cmd; switch (config->transfer_size) { case JZ4740_DMA_TRANSFER_SIZE_2BYTE: dma->transfer_shift = 1; break; case JZ4740_DMA_TRANSFER_SIZE_4BYTE: 
dma->transfer_shift = 2; break; case JZ4740_DMA_TRANSFER_SIZE_16BYTE: dma->transfer_shift = 4; break; case JZ4740_DMA_TRANSFER_SIZE_32BYTE: dma->transfer_shift = 5; break; default: dma->transfer_shift = 0; break; } cmd = config->flags << JZ_DMA_CMD_FLAGS_OFFSET; cmd |= config->src_width << JZ_DMA_CMD_SRC_WIDTH_OFFSET; cmd |= config->dst_width << JZ_DMA_CMD_DST_WIDTH_OFFSET; cmd |= config->transfer_size << JZ_DMA_CMD_TRANSFER_SIZE_OFFSET; cmd |= config->mode << JZ_DMA_CMD_MODE_OFFSET; cmd |= JZ_DMA_CMD_TRANSFER_IRQ_ENABLE; jz4740_dma_write(JZ_REG_DMA_CMD(dma->id), cmd); jz4740_dma_write(JZ_REG_DMA_STATUS_CTRL(dma->id), 0); jz4740_dma_write(JZ_REG_DMA_REQ_TYPE(dma->id), config->request_type); } EXPORT_SYMBOL_GPL(jz4740_dma_configure); void jz4740_dma_set_src_addr(struct jz4740_dma_chan *dma, dma_addr_t src) { jz4740_dma_write(JZ_REG_DMA_SRC_ADDR(dma->id), src); } EXPORT_SYMBOL_GPL(jz4740_dma_set_src_addr); void jz4740_dma_set_dst_addr(struct jz4740_dma_chan *dma, dma_addr_t dst) { jz4740_dma_write(JZ_REG_DMA_DST_ADDR(dma->id), dst); } EXPORT_SYMBOL_GPL(jz4740_dma_set_dst_addr); void jz4740_dma_set_transfer_count(struct jz4740_dma_chan *dma, uint32_t count) { count >>= dma->transfer_shift; jz4740_dma_write(JZ_REG_DMA_TRANSFER_COUNT(dma->id), count); } EXPORT_SYMBOL_GPL(jz4740_dma_set_transfer_count); void jz4740_dma_set_complete_cb(struct jz4740_dma_chan *dma, jz4740_dma_complete_callback_t cb) { dma->complete_cb = cb; } EXPORT_SYMBOL_GPL(jz4740_dma_set_complete_cb); void jz4740_dma_free(struct jz4740_dma_chan *dma) { dma->dev = NULL; dma->complete_cb = NULL; dma->used = 0; } EXPORT_SYMBOL_GPL(jz4740_dma_free); void jz4740_dma_enable(struct jz4740_dma_chan *dma) { jz4740_dma_write_mask(JZ_REG_DMA_STATUS_CTRL(dma->id), JZ_DMA_STATUS_CTRL_NO_DESC | JZ_DMA_STATUS_CTRL_ENABLE, JZ_DMA_STATUS_CTRL_HALT | JZ_DMA_STATUS_CTRL_NO_DESC | JZ_DMA_STATUS_CTRL_ENABLE); jz4740_dma_write_mask(JZ_REG_DMA_CTRL, JZ_DMA_CTRL_ENABLE, JZ_DMA_CTRL_HALT | JZ_DMA_CTRL_ENABLE); } 
EXPORT_SYMBOL_GPL(jz4740_dma_enable); void jz4740_dma_disable(struct jz4740_dma_chan *dma) { jz4740_dma_write_mask(JZ_REG_DMA_STATUS_CTRL(dma->id), 0, JZ_DMA_STATUS_CTRL_ENABLE); } EXPORT_SYMBOL_GPL(jz4740_dma_disable); uint32_t jz4740_dma_get_residue(const struct jz4740_dma_chan *dma) { uint32_t residue; residue = jz4740_dma_read(JZ_REG_DMA_TRANSFER_COUNT(dma->id)); return residue << dma->transfer_shift; } EXPORT_SYMBOL_GPL(jz4740_dma_get_residue); static void jz4740_dma_chan_irq(struct jz4740_dma_chan *dma) { (void) jz4740_dma_read(JZ_REG_DMA_STATUS_CTRL(dma->id)); jz4740_dma_write_mask(JZ_REG_DMA_STATUS_CTRL(dma->id), 0, JZ_DMA_STATUS_CTRL_ENABLE | JZ_DMA_STATUS_CTRL_TRANSFER_DONE); if (dma->complete_cb) dma->complete_cb(dma, 0, dma->dev); } static irqreturn_t jz4740_dma_irq(int irq, void *dev_id) { uint32_t irq_status; unsigned int i; irq_status = readl(jz4740_dma_base + JZ_REG_DMA_IRQ); for (i = 0; i < 6; ++i) { if (irq_status & (1 << i)) jz4740_dma_chan_irq(&jz4740_dma_channels[i]); } return IRQ_HANDLED; } static int jz4740_dma_init(void) { unsigned int ret; jz4740_dma_base = ioremap(JZ4740_DMAC_BASE_ADDR, 0x400); if (!jz4740_dma_base) return -EBUSY; spin_lock_init(&jz4740_dma_lock); ret = request_irq(JZ4740_IRQ_DMAC, jz4740_dma_irq, 0, "DMA", NULL); if (ret) printk(KERN_ERR "JZ4740 DMA: Failed to request irq: %d\n", ret); return ret; } arch_initcall(jz4740_dma_init);
gpl-2.0
RolanDroid/lge_kernel_lproj
arch/h8300/kernel/sys_h8300.c
7762
1728
/* * linux/arch/h8300/kernel/sys_h8300.c * * This file contains various random system calls that * have a non-standard calling sequence on the H8/300 * platform. */ #include <linux/errno.h> #include <linux/sched.h> #include <linux/mm.h> #include <linux/smp.h> #include <linux/sem.h> #include <linux/msg.h> #include <linux/shm.h> #include <linux/stat.h> #include <linux/syscalls.h> #include <linux/mman.h> #include <linux/file.h> #include <linux/fs.h> #include <linux/ipc.h> #include <asm/setup.h> #include <asm/uaccess.h> #include <asm/cachectl.h> #include <asm/traps.h> #include <asm/unistd.h> /* sys_cacheflush -- no support. */ asmlinkage int sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len) { return -EINVAL; } asmlinkage int sys_getpagesize(void) { return PAGE_SIZE; } #if defined(CONFIG_SYSCALL_PRINT) asmlinkage void syscall_print(void *dummy,...) { struct pt_regs *regs = (struct pt_regs *) ((unsigned char *)&dummy-4); printk("call %06lx:%ld 1:%08lx,2:%08lx,3:%08lx,ret:%08lx\n", ((regs->pc)&0xffffff)-2,regs->orig_er0,regs->er1,regs->er2,regs->er3,regs->er0); } #endif /* * Do a system call from kernel instead of calling sys_execve so we * end up with proper pt_regs. */ int kernel_execve(const char *filename, const char *const argv[], const char *const envp[]) { register long res __asm__("er0"); register const char *const *_c __asm__("er3") = envp; register const char *const *_b __asm__("er2") = argv; register const char * _a __asm__("er1") = filename; __asm__ __volatile__ ("mov.l %1,er0\n\t" "trapa #0\n\t" : "=r" (res) : "g" (__NR_execve), "g" (_a), "g" (_b), "g" (_c) : "cc", "memory"); return res; }
gpl-2.0
GustavoRD78/78Kernel-Android-N-Developer-Preview
drivers/media/dvb/frontends/mb86a16.c
8018
46822
/* Fujitsu MB86A16 DVB-S/DSS DC Receiver driver Copyright (C) Manu Abraham (abraham.manu@gmail.com) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. */ #include <linux/init.h> #include <linux/kernel.h> #include <linux/module.h> #include <linux/moduleparam.h> #include <linux/slab.h> #include "dvb_frontend.h" #include "mb86a16.h" #include "mb86a16_priv.h" unsigned int verbose = 5; module_param(verbose, int, 0644); #define ABS(x) ((x) < 0 ? (-x) : (x)) struct mb86a16_state { struct i2c_adapter *i2c_adap; const struct mb86a16_config *config; struct dvb_frontend frontend; /* tuning parameters */ int frequency; int srate; /* Internal stuff */ int master_clk; int deci; int csel; int rsel; }; #define MB86A16_ERROR 0 #define MB86A16_NOTICE 1 #define MB86A16_INFO 2 #define MB86A16_DEBUG 3 #define dprintk(x, y, z, format, arg...) 
do { \ if (z) { \ if ((x > MB86A16_ERROR) && (x > y)) \ printk(KERN_ERR "%s: " format "\n", __func__, ##arg); \ else if ((x > MB86A16_NOTICE) && (x > y)) \ printk(KERN_NOTICE "%s: " format "\n", __func__, ##arg); \ else if ((x > MB86A16_INFO) && (x > y)) \ printk(KERN_INFO "%s: " format "\n", __func__, ##arg); \ else if ((x > MB86A16_DEBUG) && (x > y)) \ printk(KERN_DEBUG "%s: " format "\n", __func__, ##arg); \ } else { \ if (x > y) \ printk(format, ##arg); \ } \ } while (0) #define TRACE_IN dprintk(verbose, MB86A16_DEBUG, 1, "-->()") #define TRACE_OUT dprintk(verbose, MB86A16_DEBUG, 1, "()-->") static int mb86a16_write(struct mb86a16_state *state, u8 reg, u8 val) { int ret; u8 buf[] = { reg, val }; struct i2c_msg msg = { .addr = state->config->demod_address, .flags = 0, .buf = buf, .len = 2 }; dprintk(verbose, MB86A16_DEBUG, 1, "writing to [0x%02x],Reg[0x%02x],Data[0x%02x]", state->config->demod_address, buf[0], buf[1]); ret = i2c_transfer(state->i2c_adap, &msg, 1); return (ret != 1) ? 
-EREMOTEIO : 0; } static int mb86a16_read(struct mb86a16_state *state, u8 reg, u8 *val) { int ret; u8 b0[] = { reg }; u8 b1[] = { 0 }; struct i2c_msg msg[] = { { .addr = state->config->demod_address, .flags = 0, .buf = b0, .len = 1 }, { .addr = state->config->demod_address, .flags = I2C_M_RD, .buf = b1, .len = 1 } }; ret = i2c_transfer(state->i2c_adap, msg, 2); if (ret != 2) { dprintk(verbose, MB86A16_ERROR, 1, "read error(reg=0x%02x, ret=0x%i)", reg, ret); return -EREMOTEIO; } *val = b1[0]; return ret; } static int CNTM_set(struct mb86a16_state *state, unsigned char timint1, unsigned char timint2, unsigned char cnext) { unsigned char val; val = (timint1 << 4) | (timint2 << 2) | cnext; if (mb86a16_write(state, MB86A16_CNTMR, val) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int smrt_set(struct mb86a16_state *state, int rate) { int tmp ; int m ; unsigned char STOFS0, STOFS1; m = 1 << state->deci; tmp = (8192 * state->master_clk - 2 * m * rate * 8192 + state->master_clk / 2) / state->master_clk; STOFS0 = tmp & 0x0ff; STOFS1 = (tmp & 0xf00) >> 8; if (mb86a16_write(state, MB86A16_SRATE1, (state->deci << 2) | (state->csel << 1) | state->rsel) < 0) goto err; if (mb86a16_write(state, MB86A16_SRATE2, STOFS0) < 0) goto err; if (mb86a16_write(state, MB86A16_SRATE3, STOFS1) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -1; } static int srst(struct mb86a16_state *state) { if (mb86a16_write(state, MB86A16_RESET, 0x04) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int afcex_data_set(struct mb86a16_state *state, unsigned char AFCEX_L, unsigned char AFCEX_H) { if (mb86a16_write(state, MB86A16_AFCEXL, AFCEX_L) < 0) goto err; if (mb86a16_write(state, MB86A16_AFCEXH, AFCEX_H) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -1; } static int 
afcofs_data_set(struct mb86a16_state *state, unsigned char AFCEX_L, unsigned char AFCEX_H) { if (mb86a16_write(state, 0x58, AFCEX_L) < 0) goto err; if (mb86a16_write(state, 0x59, AFCEX_H) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int stlp_set(struct mb86a16_state *state, unsigned char STRAS, unsigned char STRBS) { if (mb86a16_write(state, MB86A16_STRFILTCOEF1, (STRBS << 3) | (STRAS)) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int Vi_set(struct mb86a16_state *state, unsigned char ETH, unsigned char VIA) { if (mb86a16_write(state, MB86A16_VISET2, 0x04) < 0) goto err; if (mb86a16_write(state, MB86A16_VISET3, 0xf5) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int initial_set(struct mb86a16_state *state) { if (stlp_set(state, 5, 7)) goto err; udelay(100); if (afcex_data_set(state, 0, 0)) goto err; udelay(100); if (afcofs_data_set(state, 0, 0)) goto err; udelay(100); if (mb86a16_write(state, MB86A16_CRLFILTCOEF1, 0x16) < 0) goto err; if (mb86a16_write(state, 0x2f, 0x21) < 0) goto err; if (mb86a16_write(state, MB86A16_VIMAG, 0x38) < 0) goto err; if (mb86a16_write(state, MB86A16_FAGCS1, 0x00) < 0) goto err; if (mb86a16_write(state, MB86A16_FAGCS2, 0x1c) < 0) goto err; if (mb86a16_write(state, MB86A16_FAGCS3, 0x20) < 0) goto err; if (mb86a16_write(state, MB86A16_FAGCS4, 0x1e) < 0) goto err; if (mb86a16_write(state, MB86A16_FAGCS5, 0x23) < 0) goto err; if (mb86a16_write(state, 0x54, 0xff) < 0) goto err; if (mb86a16_write(state, MB86A16_TSOUT, 0x00) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int S01T_set(struct mb86a16_state *state, unsigned char s1t, unsigned s0t) { if (mb86a16_write(state, 0x33, (s1t << 3) | s0t) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C 
transfer error"); return -EREMOTEIO; } static int EN_set(struct mb86a16_state *state, int cren, int afcen) { unsigned char val; val = 0x7a | (cren << 7) | (afcen << 2); if (mb86a16_write(state, 0x49, val) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int AFCEXEN_set(struct mb86a16_state *state, int afcexen, int smrt) { unsigned char AFCA ; if (smrt > 18875) AFCA = 4; else if (smrt > 9375) AFCA = 3; else if (smrt > 2250) AFCA = 2; else AFCA = 1; if (mb86a16_write(state, 0x2a, 0x02 | (afcexen << 5) | (AFCA << 2)) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int DAGC_data_set(struct mb86a16_state *state, unsigned char DAGCA, unsigned char DAGCW) { if (mb86a16_write(state, 0x2d, (DAGCA << 3) | DAGCW) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static void smrt_info_get(struct mb86a16_state *state, int rate) { if (rate >= 37501) { state->deci = 0; state->csel = 0; state->rsel = 0; } else if (rate >= 30001) { state->deci = 0; state->csel = 0; state->rsel = 1; } else if (rate >= 26251) { state->deci = 0; state->csel = 1; state->rsel = 0; } else if (rate >= 22501) { state->deci = 0; state->csel = 1; state->rsel = 1; } else if (rate >= 18751) { state->deci = 1; state->csel = 0; state->rsel = 0; } else if (rate >= 15001) { state->deci = 1; state->csel = 0; state->rsel = 1; } else if (rate >= 13126) { state->deci = 1; state->csel = 1; state->rsel = 0; } else if (rate >= 11251) { state->deci = 1; state->csel = 1; state->rsel = 1; } else if (rate >= 9376) { state->deci = 2; state->csel = 0; state->rsel = 0; } else if (rate >= 7501) { state->deci = 2; state->csel = 0; state->rsel = 1; } else if (rate >= 6563) { state->deci = 2; state->csel = 1; state->rsel = 0; } else if (rate >= 5626) { state->deci = 2; state->csel = 1; state->rsel = 1; } else if (rate >= 4688) { state->deci 
= 3; state->csel = 0; state->rsel = 0; } else if (rate >= 3751) { state->deci = 3; state->csel = 0; state->rsel = 1; } else if (rate >= 3282) { state->deci = 3; state->csel = 1; state->rsel = 0; } else if (rate >= 2814) { state->deci = 3; state->csel = 1; state->rsel = 1; } else if (rate >= 2344) { state->deci = 4; state->csel = 0; state->rsel = 0; } else if (rate >= 1876) { state->deci = 4; state->csel = 0; state->rsel = 1; } else if (rate >= 1641) { state->deci = 4; state->csel = 1; state->rsel = 0; } else if (rate >= 1407) { state->deci = 4; state->csel = 1; state->rsel = 1; } else if (rate >= 1172) { state->deci = 5; state->csel = 0; state->rsel = 0; } else if (rate >= 939) { state->deci = 5; state->csel = 0; state->rsel = 1; } else if (rate >= 821) { state->deci = 5; state->csel = 1; state->rsel = 0; } else { state->deci = 5; state->csel = 1; state->rsel = 1; } if (state->csel == 0) state->master_clk = 92000; else state->master_clk = 61333; } static int signal_det(struct mb86a16_state *state, int smrt, unsigned char *SIG) { int ret ; int smrtd ; int wait_sym ; u32 wait_t; unsigned char S[3] ; int i ; if (*SIG > 45) { if (CNTM_set(state, 2, 1, 2) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "CNTM set Error"); return -1; } wait_sym = 40000; } else { if (CNTM_set(state, 3, 1, 2) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "CNTM set Error"); return -1; } wait_sym = 80000; } for (i = 0; i < 3; i++) { if (i == 0) smrtd = smrt * 98 / 100; else if (i == 1) smrtd = smrt; else smrtd = smrt * 102 / 100; smrt_info_get(state, smrtd); smrt_set(state, smrtd); srst(state); wait_t = (wait_sym + 99 * smrtd / 100) / smrtd; if (wait_t == 0) wait_t = 1; msleep_interruptible(10); if (mb86a16_read(state, 0x37, &(S[i])) != 2) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } } if ((S[1] > S[0] * 112 / 100) && (S[1] > S[2] * 112 / 100)) { ret = 1; } else { ret = 0; } *SIG = S[1]; if (CNTM_set(state, 0, 1, 2) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "CNTM set 
Error"); return -1; } return ret; } static int rf_val_set(struct mb86a16_state *state, int f, int smrt, unsigned char R) { unsigned char C, F, B; int M; unsigned char rf_val[5]; int ack = -1; if (smrt > 37750) C = 1; else if (smrt > 18875) C = 2; else if (smrt > 5500) C = 3; else C = 4; if (smrt > 30500) F = 3; else if (smrt > 9375) F = 1; else if (smrt > 4625) F = 0; else F = 2; if (f < 1060) B = 0; else if (f < 1175) B = 1; else if (f < 1305) B = 2; else if (f < 1435) B = 3; else if (f < 1570) B = 4; else if (f < 1715) B = 5; else if (f < 1845) B = 6; else if (f < 1980) B = 7; else if (f < 2080) B = 8; else B = 9; M = f * (1 << R) / 2; rf_val[0] = 0x01 | (C << 3) | (F << 1); rf_val[1] = (R << 5) | ((M & 0x1f000) >> 12); rf_val[2] = (M & 0x00ff0) >> 4; rf_val[3] = ((M & 0x0000f) << 4) | B; /* Frequency Set */ if (mb86a16_write(state, 0x21, rf_val[0]) < 0) ack = 0; if (mb86a16_write(state, 0x22, rf_val[1]) < 0) ack = 0; if (mb86a16_write(state, 0x23, rf_val[2]) < 0) ack = 0; if (mb86a16_write(state, 0x24, rf_val[3]) < 0) ack = 0; if (mb86a16_write(state, 0x25, 0x01) < 0) ack = 0; if (ack == 0) { dprintk(verbose, MB86A16_ERROR, 1, "RF Setup - I2C transfer error"); return -EREMOTEIO; } return 0; } static int afcerr_chk(struct mb86a16_state *state) { unsigned char AFCM_L, AFCM_H ; int AFCM ; int afcm, afcerr ; if (mb86a16_read(state, 0x0e, &AFCM_L) != 2) goto err; if (mb86a16_read(state, 0x0f, &AFCM_H) != 2) goto err; AFCM = (AFCM_H << 8) + AFCM_L; if (AFCM > 2048) afcm = AFCM - 4096; else afcm = AFCM; afcerr = afcm * state->master_clk / 8192; return afcerr; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int dagcm_val_get(struct mb86a16_state *state) { int DAGCM; unsigned char DAGCM_H, DAGCM_L; if (mb86a16_read(state, 0x45, &DAGCM_L) != 2) goto err; if (mb86a16_read(state, 0x46, &DAGCM_H) != 2) goto err; DAGCM = (DAGCM_H << 8) + DAGCM_L; return DAGCM; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return 
-EREMOTEIO; } static int mb86a16_read_status(struct dvb_frontend *fe, fe_status_t *status) { u8 stat, stat2; struct mb86a16_state *state = fe->demodulator_priv; *status = 0; if (mb86a16_read(state, MB86A16_SIG1, &stat) != 2) goto err; if (mb86a16_read(state, MB86A16_SIG2, &stat2) != 2) goto err; if ((stat > 25) && (stat2 > 25)) *status |= FE_HAS_SIGNAL; if ((stat > 45) && (stat2 > 45)) *status |= FE_HAS_CARRIER; if (mb86a16_read(state, MB86A16_STATUS, &stat) != 2) goto err; if (stat & 0x01) *status |= FE_HAS_SYNC; if (stat & 0x01) *status |= FE_HAS_VITERBI; if (mb86a16_read(state, MB86A16_FRAMESYNC, &stat) != 2) goto err; if ((stat & 0x0f) && (*status & FE_HAS_VITERBI)) *status |= FE_HAS_LOCK; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int sync_chk(struct mb86a16_state *state, unsigned char *VIRM) { unsigned char val; int sync; if (mb86a16_read(state, 0x0d, &val) != 2) goto err; dprintk(verbose, MB86A16_INFO, 1, "Status = %02x,", val); sync = val & 0x01; *VIRM = (val & 0x1c) >> 2; return sync; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int freqerr_chk(struct mb86a16_state *state, int fTP, int smrt, int unit) { unsigned char CRM, AFCML, AFCMH; unsigned char temp1, temp2, temp3; int crm, afcm, AFCM; int crrerr, afcerr; /* kHz */ int frqerr; /* MHz */ int afcen, afcexen = 0; int R, M, fOSC, fOSC_OFS; if (mb86a16_read(state, 0x43, &CRM) != 2) goto err; if (CRM > 127) crm = CRM - 256; else crm = CRM; crrerr = smrt * crm / 256; if (mb86a16_read(state, 0x49, &temp1) != 2) goto err; afcen = (temp1 & 0x04) >> 2; if (afcen == 0) { if (mb86a16_read(state, 0x2a, &temp1) != 2) goto err; afcexen = (temp1 & 0x20) >> 5; } if (afcen == 1) { if (mb86a16_read(state, 0x0e, &AFCML) != 2) goto err; if (mb86a16_read(state, 0x0f, &AFCMH) != 2) goto err; } else if (afcexen == 1) { if (mb86a16_read(state, 0x2b, &AFCML) != 2) goto err; if (mb86a16_read(state, 0x2c, &AFCMH) != 2) goto 
err; } if ((afcen == 1) || (afcexen == 1)) { smrt_info_get(state, smrt); AFCM = ((AFCMH & 0x01) << 8) + AFCML; if (AFCM > 255) afcm = AFCM - 512; else afcm = AFCM; afcerr = afcm * state->master_clk / 8192; } else afcerr = 0; if (mb86a16_read(state, 0x22, &temp1) != 2) goto err; if (mb86a16_read(state, 0x23, &temp2) != 2) goto err; if (mb86a16_read(state, 0x24, &temp3) != 2) goto err; R = (temp1 & 0xe0) >> 5; M = ((temp1 & 0x1f) << 12) + (temp2 << 4) + (temp3 >> 4); if (R == 0) fOSC = 2 * M; else fOSC = M; fOSC_OFS = fOSC - fTP; if (unit == 0) { /* MHz */ if (crrerr + afcerr + fOSC_OFS * 1000 >= 0) frqerr = (crrerr + afcerr + fOSC_OFS * 1000 + 500) / 1000; else frqerr = (crrerr + afcerr + fOSC_OFS * 1000 - 500) / 1000; } else { /* kHz */ frqerr = crrerr + afcerr + fOSC_OFS * 1000; } return frqerr; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static unsigned char vco_dev_get(struct mb86a16_state *state, int smrt) { unsigned char R; if (smrt > 9375) R = 0; else R = 1; return R; } static void swp_info_get(struct mb86a16_state *state, int fOSC_start, int smrt, int v, int R, int swp_ofs, int *fOSC, int *afcex_freq, unsigned char *AFCEX_L, unsigned char *AFCEX_H) { int AFCEX ; int crnt_swp_freq ; crnt_swp_freq = fOSC_start * 1000 + v * swp_ofs; if (R == 0) *fOSC = (crnt_swp_freq + 1000) / 2000 * 2; else *fOSC = (crnt_swp_freq + 500) / 1000; if (*fOSC >= crnt_swp_freq) *afcex_freq = *fOSC * 1000 - crnt_swp_freq; else *afcex_freq = crnt_swp_freq - *fOSC * 1000; AFCEX = *afcex_freq * 8192 / state->master_clk; *AFCEX_L = AFCEX & 0x00ff; *AFCEX_H = (AFCEX & 0x0f00) >> 8; } static int swp_freq_calcuation(struct mb86a16_state *state, int i, int v, int *V, int vmax, int vmin, int SIGMIN, int fOSC, int afcex_freq, int swp_ofs, unsigned char *SIG1) { int swp_freq ; if ((i % 2 == 1) && (v <= vmax)) { /* positive v (case 1) */ if ((v - 1 == vmin) && (*(V + 30 + v) >= 0) && (*(V + 30 + v - 1) >= 0) && (*(V + 30 + v - 1) > *(V + 30 + v)) && (*(V + 
30 + v - 1) > SIGMIN)) { swp_freq = fOSC * 1000 + afcex_freq - swp_ofs; *SIG1 = *(V + 30 + v - 1); } else if ((v == vmax) && (*(V + 30 + v) >= 0) && (*(V + 30 + v - 1) >= 0) && (*(V + 30 + v) > *(V + 30 + v - 1)) && (*(V + 30 + v) > SIGMIN)) { /* (case 2) */ swp_freq = fOSC * 1000 + afcex_freq; *SIG1 = *(V + 30 + v); } else if ((*(V + 30 + v) > 0) && (*(V + 30 + v - 1) > 0) && (*(V + 30 + v - 2) > 0) && (*(V + 30 + v - 3) > 0) && (*(V + 30 + v - 1) > *(V + 30 + v)) && (*(V + 30 + v - 2) > *(V + 30 + v - 3)) && ((*(V + 30 + v - 1) > SIGMIN) || (*(V + 30 + v - 2) > SIGMIN))) { /* (case 3) */ if (*(V + 30 + v - 1) >= *(V + 30 + v - 2)) { swp_freq = fOSC * 1000 + afcex_freq - swp_ofs; *SIG1 = *(V + 30 + v - 1); } else { swp_freq = fOSC * 1000 + afcex_freq - swp_ofs * 2; *SIG1 = *(V + 30 + v - 2); } } else if ((v == vmax) && (*(V + 30 + v) >= 0) && (*(V + 30 + v - 1) >= 0) && (*(V + 30 + v - 2) >= 0) && (*(V + 30 + v) > *(V + 30 + v - 2)) && (*(V + 30 + v - 1) > *(V + 30 + v - 2)) && ((*(V + 30 + v) > SIGMIN) || (*(V + 30 + v - 1) > SIGMIN))) { /* (case 4) */ if (*(V + 30 + v) >= *(V + 30 + v - 1)) { swp_freq = fOSC * 1000 + afcex_freq; *SIG1 = *(V + 30 + v); } else { swp_freq = fOSC * 1000 + afcex_freq - swp_ofs; *SIG1 = *(V + 30 + v - 1); } } else { swp_freq = -1 ; } } else if ((i % 2 == 0) && (v >= vmin)) { /* Negative v (case 1) */ if ((*(V + 30 + v) > 0) && (*(V + 30 + v + 1) > 0) && (*(V + 30 + v + 2) > 0) && (*(V + 30 + v + 1) > *(V + 30 + v)) && (*(V + 30 + v + 1) > *(V + 30 + v + 2)) && (*(V + 30 + v + 1) > SIGMIN)) { swp_freq = fOSC * 1000 + afcex_freq + swp_ofs; *SIG1 = *(V + 30 + v + 1); } else if ((v + 1 == vmax) && (*(V + 30 + v) >= 0) && (*(V + 30 + v + 1) >= 0) && (*(V + 30 + v + 1) > *(V + 30 + v)) && (*(V + 30 + v + 1) > SIGMIN)) { /* (case 2) */ swp_freq = fOSC * 1000 + afcex_freq + swp_ofs; *SIG1 = *(V + 30 + v); } else if ((v == vmin) && (*(V + 30 + v) > 0) && (*(V + 30 + v + 1) > 0) && (*(V + 30 + v + 2) > 0) && (*(V + 30 + v) > *(V + 30 + v + 1)) 
&& (*(V + 30 + v) > *(V + 30 + v + 2)) && (*(V + 30 + v) > SIGMIN)) { /* (case 3) */ swp_freq = fOSC * 1000 + afcex_freq; *SIG1 = *(V + 30 + v); } else if ((*(V + 30 + v) >= 0) && (*(V + 30 + v + 1) >= 0) && (*(V + 30 + v + 2) >= 0) && (*(V + 30 + v + 3) >= 0) && (*(V + 30 + v + 1) > *(V + 30 + v)) && (*(V + 30 + v + 2) > *(V + 30 + v + 3)) && ((*(V + 30 + v + 1) > SIGMIN) || (*(V + 30 + v + 2) > SIGMIN))) { /* (case 4) */ if (*(V + 30 + v + 1) >= *(V + 30 + v + 2)) { swp_freq = fOSC * 1000 + afcex_freq + swp_ofs; *SIG1 = *(V + 30 + v + 1); } else { swp_freq = fOSC * 1000 + afcex_freq + swp_ofs * 2; *SIG1 = *(V + 30 + v + 2); } } else if ((*(V + 30 + v) >= 0) && (*(V + 30 + v + 1) >= 0) && (*(V + 30 + v + 2) >= 0) && (*(V + 30 + v + 3) >= 0) && (*(V + 30 + v) > *(V + 30 + v + 2)) && (*(V + 30 + v + 1) > *(V + 30 + v + 2)) && (*(V + 30 + v) > *(V + 30 + v + 3)) && (*(V + 30 + v + 1) > *(V + 30 + v + 3)) && ((*(V + 30 + v) > SIGMIN) || (*(V + 30 + v + 1) > SIGMIN))) { /* (case 5) */ if (*(V + 30 + v) >= *(V + 30 + v + 1)) { swp_freq = fOSC * 1000 + afcex_freq; *SIG1 = *(V + 30 + v); } else { swp_freq = fOSC * 1000 + afcex_freq + swp_ofs; *SIG1 = *(V + 30 + v + 1); } } else if ((v + 2 == vmin) && (*(V + 30 + v) >= 0) && (*(V + 30 + v + 1) >= 0) && (*(V + 30 + v + 2) >= 0) && (*(V + 30 + v + 1) > *(V + 30 + v)) && (*(V + 30 + v + 2) > *(V + 30 + v)) && ((*(V + 30 + v + 1) > SIGMIN) || (*(V + 30 + v + 2) > SIGMIN))) { /* (case 6) */ if (*(V + 30 + v + 1) >= *(V + 30 + v + 2)) { swp_freq = fOSC * 1000 + afcex_freq + swp_ofs; *SIG1 = *(V + 30 + v + 1); } else { swp_freq = fOSC * 1000 + afcex_freq + swp_ofs * 2; *SIG1 = *(V + 30 + v + 2); } } else if ((vmax == 0) && (vmin == 0) && (*(V + 30 + v) > SIGMIN)) { swp_freq = fOSC * 1000; *SIG1 = *(V + 30 + v); } else swp_freq = -1; } else swp_freq = -1; return swp_freq; } static void swp_info_get2(struct mb86a16_state *state, int smrt, int R, int swp_freq, int *afcex_freq, int *fOSC, unsigned char *AFCEX_L, unsigned char 
*AFCEX_H) { int AFCEX ; if (R == 0) *fOSC = (swp_freq + 1000) / 2000 * 2; else *fOSC = (swp_freq + 500) / 1000; if (*fOSC >= swp_freq) *afcex_freq = *fOSC * 1000 - swp_freq; else *afcex_freq = swp_freq - *fOSC * 1000; AFCEX = *afcex_freq * 8192 / state->master_clk; *AFCEX_L = AFCEX & 0x00ff; *AFCEX_H = (AFCEX & 0x0f00) >> 8; } static void afcex_info_get(struct mb86a16_state *state, int afcex_freq, unsigned char *AFCEX_L, unsigned char *AFCEX_H) { int AFCEX ; AFCEX = afcex_freq * 8192 / state->master_clk; *AFCEX_L = AFCEX & 0x00ff; *AFCEX_H = (AFCEX & 0x0f00) >> 8; } static int SEQ_set(struct mb86a16_state *state, unsigned char loop) { /* SLOCK0 = 0 */ if (mb86a16_write(state, 0x32, 0x02 | (loop << 2)) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } return 0; } static int iq_vt_set(struct mb86a16_state *state, unsigned char IQINV) { /* Viterbi Rate, IQ Settings */ if (mb86a16_write(state, 0x06, 0xdf | (IQINV << 5)) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } return 0; } static int FEC_srst(struct mb86a16_state *state) { if (mb86a16_write(state, MB86A16_RESET, 0x02) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } return 0; } static int S2T_set(struct mb86a16_state *state, unsigned char S2T) { if (mb86a16_write(state, 0x34, 0x70 | S2T) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } return 0; } static int S45T_set(struct mb86a16_state *state, unsigned char S4T, unsigned char S5T) { if (mb86a16_write(state, 0x35, 0x00 | (S5T << 4) | S4T) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } return 0; } static int mb86a16_set_fe(struct mb86a16_state *state) { u8 agcval, cnmval; int i, j; int fOSC = 0; int fOSC_start = 0; int wait_t; int fcp; int swp_ofs; int V[60]; u8 SIG1MIN; unsigned char CREN, AFCEN, AFCEXEN; unsigned char SIG1; unsigned char TIMINT1, TIMINT2, TIMEXT; unsigned char 
S0T, S1T; unsigned char S2T; /* unsigned char S2T, S3T; */ unsigned char S4T, S5T; unsigned char AFCEX_L, AFCEX_H; unsigned char R; unsigned char VIRM; unsigned char ETH, VIA; unsigned char junk; int loop; int ftemp; int v, vmax, vmin; int vmax_his, vmin_his; int swp_freq, prev_swp_freq[20]; int prev_freq_num; int signal_dupl; int afcex_freq; int signal; int afcerr; int temp_freq, delta_freq; int dagcm[4]; int smrt_d; /* int freq_err; */ int n; int ret = -1; int sync; dprintk(verbose, MB86A16_INFO, 1, "freq=%d Mhz, symbrt=%d Ksps", state->frequency, state->srate); fcp = 3000; swp_ofs = state->srate / 4; for (i = 0; i < 60; i++) V[i] = -1; for (i = 0; i < 20; i++) prev_swp_freq[i] = 0; SIG1MIN = 25; for (n = 0; ((n < 3) && (ret == -1)); n++) { SEQ_set(state, 0); iq_vt_set(state, 0); CREN = 0; AFCEN = 0; AFCEXEN = 1; TIMINT1 = 0; TIMINT2 = 1; TIMEXT = 2; S1T = 0; S0T = 0; if (initial_set(state) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "initial set failed"); return -1; } if (DAGC_data_set(state, 3, 2) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "DAGC data set error"); return -1; } if (EN_set(state, CREN, AFCEN) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "EN set error"); return -1; /* (0, 0) */ } if (AFCEXEN_set(state, AFCEXEN, state->srate) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error"); return -1; /* (1, smrt) = (1, symbolrate) */ } if (CNTM_set(state, TIMINT1, TIMINT2, TIMEXT) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "CNTM set error"); return -1; /* (0, 1, 2) */ } if (S01T_set(state, S1T, S0T) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "S01T set error"); return -1; /* (0, 0) */ } smrt_info_get(state, state->srate); if (smrt_set(state, state->srate) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "smrt info get error"); return -1; } R = vco_dev_get(state, state->srate); if (R == 1) fOSC_start = state->frequency; else if (R == 0) { if (state->frequency % 2 == 0) { fOSC_start = state->frequency; } else { fOSC_start = state->frequency + 1; if (fOSC_start > 2150) 
fOSC_start = state->frequency - 1; } } loop = 1; ftemp = fOSC_start * 1000; vmax = 0 ; while (loop == 1) { ftemp = ftemp + swp_ofs; vmax++; /* Upper bound */ if (ftemp > 2150000) { loop = 0; vmax--; } else { if ((ftemp == 2150000) || (ftemp - state->frequency * 1000 >= fcp + state->srate / 4)) loop = 0; } } loop = 1; ftemp = fOSC_start * 1000; vmin = 0 ; while (loop == 1) { ftemp = ftemp - swp_ofs; vmin--; /* Lower bound */ if (ftemp < 950000) { loop = 0; vmin++; } else { if ((ftemp == 950000) || (state->frequency * 1000 - ftemp >= fcp + state->srate / 4)) loop = 0; } } wait_t = (8000 + state->srate / 2) / state->srate; if (wait_t == 0) wait_t = 1; i = 0; j = 0; prev_freq_num = 0; loop = 1; signal = 0; vmax_his = 0; vmin_his = 0; v = 0; while (loop == 1) { swp_info_get(state, fOSC_start, state->srate, v, R, swp_ofs, &fOSC, &afcex_freq, &AFCEX_L, &AFCEX_H); udelay(100); if (rf_val_set(state, fOSC, state->srate, R) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "rf val set error"); return -1; } udelay(100); if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error"); return -1; } if (srst(state) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "srst error"); return -1; } msleep_interruptible(wait_t); if (mb86a16_read(state, 0x37, &SIG1) != 2) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -1; } V[30 + v] = SIG1 ; swp_freq = swp_freq_calcuation(state, i, v, V, vmax, vmin, SIG1MIN, fOSC, afcex_freq, swp_ofs, &SIG1); /* changed */ signal_dupl = 0; for (j = 0; j < prev_freq_num; j++) { if ((ABS(prev_swp_freq[j] - swp_freq)) < (swp_ofs * 3 / 2)) { signal_dupl = 1; dprintk(verbose, MB86A16_INFO, 1, "Probably Duplicate Signal, j = %d", j); } } if ((signal_dupl == 0) && (swp_freq > 0) && (ABS(swp_freq - state->frequency * 1000) < fcp + state->srate / 6)) { dprintk(verbose, MB86A16_DEBUG, 1, "------ Signal detect ------ [swp_freq=[%07d, srate=%05d]]", swp_freq, state->srate); prev_swp_freq[prev_freq_num] = swp_freq; 
prev_freq_num++; swp_info_get2(state, state->srate, R, swp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H); if (rf_val_set(state, fOSC, state->srate, R) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "rf val set error"); return -1; } if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error"); return -1; } signal = signal_det(state, state->srate, &SIG1); if (signal == 1) { dprintk(verbose, MB86A16_ERROR, 1, "***** Signal Found *****"); loop = 0; } else { dprintk(verbose, MB86A16_ERROR, 1, "!!!!! No signal !!!!!, try again..."); smrt_info_get(state, state->srate); if (smrt_set(state, state->srate) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "smrt set error"); return -1; } } } if (v > vmax) vmax_his = 1 ; if (v < vmin) vmin_his = 1 ; i++; if ((i % 2 == 1) && (vmax_his == 1)) i++; if ((i % 2 == 0) && (vmin_his == 1)) i++; if (i % 2 == 1) v = (i + 1) / 2; else v = -i / 2; if ((vmax_his == 1) && (vmin_his == 1)) loop = 0 ; } if (signal == 1) { dprintk(verbose, MB86A16_INFO, 1, " Start Freq Error Check"); S1T = 7 ; S0T = 1 ; CREN = 0 ; AFCEN = 1 ; AFCEXEN = 0 ; if (S01T_set(state, S1T, S0T) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "S01T set error"); return -1; } smrt_info_get(state, state->srate); if (smrt_set(state, state->srate) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "smrt set error"); return -1; } if (EN_set(state, CREN, AFCEN) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "EN set error"); return -1; } if (AFCEXEN_set(state, AFCEXEN, state->srate) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error"); return -1; } afcex_info_get(state, afcex_freq, &AFCEX_L, &AFCEX_H); if (afcofs_data_set(state, AFCEX_L, AFCEX_H) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "AFCOFS data set error"); return -1; } if (srst(state) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "srst error"); return -1; } /* delay 4~200 */ wait_t = 200000 / state->master_clk + 200000 / state->srate; msleep(wait_t); afcerr = afcerr_chk(state); if (afcerr == -1) 
return -1; swp_freq = fOSC * 1000 + afcerr ; AFCEXEN = 1 ; if (state->srate >= 1500) smrt_d = state->srate / 3; else smrt_d = state->srate / 2; smrt_info_get(state, smrt_d); if (smrt_set(state, smrt_d) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "smrt set error"); return -1; } if (AFCEXEN_set(state, AFCEXEN, smrt_d) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error"); return -1; } R = vco_dev_get(state, smrt_d); if (DAGC_data_set(state, 2, 0) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "DAGC data set error"); return -1; } for (i = 0; i < 3; i++) { temp_freq = swp_freq + (i - 1) * state->srate / 8; swp_info_get2(state, smrt_d, R, temp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H); if (rf_val_set(state, fOSC, smrt_d, R) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "rf val set error"); return -1; } if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error"); return -1; } wait_t = 200000 / state->master_clk + 40000 / smrt_d; msleep(wait_t); dagcm[i] = dagcm_val_get(state); } if ((dagcm[0] > dagcm[1]) && (dagcm[0] > dagcm[2]) && (dagcm[0] - dagcm[1] > 2 * (dagcm[2] - dagcm[1]))) { temp_freq = swp_freq - 2 * state->srate / 8; swp_info_get2(state, smrt_d, R, temp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H); if (rf_val_set(state, fOSC, smrt_d, R) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "rf val set error"); return -1; } if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "afcex data set"); return -1; } wait_t = 200000 / state->master_clk + 40000 / smrt_d; msleep(wait_t); dagcm[3] = dagcm_val_get(state); if (dagcm[3] > dagcm[1]) delta_freq = (dagcm[2] - dagcm[0] + dagcm[1] - dagcm[3]) * state->srate / 300; else delta_freq = 0; } else if ((dagcm[2] > dagcm[1]) && (dagcm[2] > dagcm[0]) && (dagcm[2] - dagcm[1] > 2 * (dagcm[0] - dagcm[1]))) { temp_freq = swp_freq + 2 * state->srate / 8; swp_info_get2(state, smrt_d, R, temp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H); if 
(rf_val_set(state, fOSC, smrt_d, R) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "rf val set"); return -1; } if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "afcex data set"); return -1; } wait_t = 200000 / state->master_clk + 40000 / smrt_d; msleep(wait_t); dagcm[3] = dagcm_val_get(state); if (dagcm[3] > dagcm[1]) delta_freq = (dagcm[2] - dagcm[0] + dagcm[3] - dagcm[1]) * state->srate / 300; else delta_freq = 0 ; } else { delta_freq = 0 ; } dprintk(verbose, MB86A16_INFO, 1, "SWEEP Frequency = %d", swp_freq); swp_freq += delta_freq; dprintk(verbose, MB86A16_INFO, 1, "Adjusting .., DELTA Freq = %d, SWEEP Freq=%d", delta_freq, swp_freq); if (ABS(state->frequency * 1000 - swp_freq) > 3800) { dprintk(verbose, MB86A16_INFO, 1, "NO -- SIGNAL !"); } else { S1T = 0; S0T = 3; CREN = 1; AFCEN = 0; AFCEXEN = 1; if (S01T_set(state, S1T, S0T) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "S01T set error"); return -1; } if (DAGC_data_set(state, 0, 0) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "DAGC data set error"); return -1; } R = vco_dev_get(state, state->srate); smrt_info_get(state, state->srate); if (smrt_set(state, state->srate) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "smrt set error"); return -1; } if (EN_set(state, CREN, AFCEN) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "EN set error"); return -1; } if (AFCEXEN_set(state, AFCEXEN, state->srate) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "AFCEXEN set error"); return -1; } swp_info_get2(state, state->srate, R, swp_freq, &afcex_freq, &fOSC, &AFCEX_L, &AFCEX_H); if (rf_val_set(state, fOSC, state->srate, R) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "rf val set error"); return -1; } if (afcex_data_set(state, AFCEX_L, AFCEX_H) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "afcex data set error"); return -1; } if (srst(state) < 0) { dprintk(verbose, MB86A16_ERROR, 1, "srst error"); return -1; } wait_t = 7 + (10000 + state->srate / 2) / state->srate; if (wait_t == 0) wait_t = 1; 
msleep_interruptible(wait_t); if (mb86a16_read(state, 0x37, &SIG1) != 2) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } if (SIG1 > 110) { S2T = 4; S4T = 1; S5T = 6; ETH = 4; VIA = 6; wait_t = 7 + (917504 + state->srate / 2) / state->srate; } else if (SIG1 > 105) { S2T = 4; S4T = 2; S5T = 8; ETH = 7; VIA = 2; wait_t = 7 + (1048576 + state->srate / 2) / state->srate; } else if (SIG1 > 85) { S2T = 5; S4T = 2; S5T = 8; ETH = 7; VIA = 2; wait_t = 7 + (1310720 + state->srate / 2) / state->srate; } else if (SIG1 > 65) { S2T = 6; S4T = 2; S5T = 8; ETH = 7; VIA = 2; wait_t = 7 + (1572864 + state->srate / 2) / state->srate; } else { S2T = 7; S4T = 2; S5T = 8; ETH = 7; VIA = 2; wait_t = 7 + (2097152 + state->srate / 2) / state->srate; } wait_t *= 2; /* FOS */ S2T_set(state, S2T); S45T_set(state, S4T, S5T); Vi_set(state, ETH, VIA); srst(state); msleep_interruptible(wait_t); sync = sync_chk(state, &VIRM); dprintk(verbose, MB86A16_INFO, 1, "-------- Viterbi=[%d] SYNC=[%d] ---------", VIRM, sync); if (VIRM) { if (VIRM == 4) { /* 5/6 */ if (SIG1 > 110) wait_t = (786432 + state->srate / 2) / state->srate; else wait_t = (1572864 + state->srate / 2) / state->srate; if (state->srate < 5000) /* FIXME ! , should be a long wait ! 
*/ msleep_interruptible(wait_t); else msleep_interruptible(wait_t); if (sync_chk(state, &junk) == 0) { iq_vt_set(state, 1); FEC_srst(state); } } /* 1/2, 2/3, 3/4, 7/8 */ if (SIG1 > 110) wait_t = (786432 + state->srate / 2) / state->srate; else wait_t = (1572864 + state->srate / 2) / state->srate; msleep_interruptible(wait_t); SEQ_set(state, 1); } else { dprintk(verbose, MB86A16_INFO, 1, "NO -- SYNC"); SEQ_set(state, 1); ret = -1; } } } else { dprintk(verbose, MB86A16_INFO, 1, "NO -- SIGNAL"); ret = -1; } sync = sync_chk(state, &junk); if (sync) { dprintk(verbose, MB86A16_INFO, 1, "******* SYNC *******"); freqerr_chk(state, state->frequency, state->srate, 1); ret = 0; break; } } mb86a16_read(state, 0x15, &agcval); mb86a16_read(state, 0x26, &cnmval); dprintk(verbose, MB86A16_INFO, 1, "AGC = %02x CNM = %02x", agcval, cnmval); return ret; } static int mb86a16_send_diseqc_msg(struct dvb_frontend *fe, struct dvb_diseqc_master_cmd *cmd) { struct mb86a16_state *state = fe->demodulator_priv; int i; u8 regs; if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA) < 0) goto err; if (mb86a16_write(state, MB86A16_DCCOUT, 0x00) < 0) goto err; if (mb86a16_write(state, MB86A16_TONEOUT2, 0x04) < 0) goto err; regs = 0x18; if (cmd->msg_len > 5 || cmd->msg_len < 4) return -EINVAL; for (i = 0; i < cmd->msg_len; i++) { if (mb86a16_write(state, regs, cmd->msg[i]) < 0) goto err; regs++; } i += 0x90; msleep_interruptible(10); if (mb86a16_write(state, MB86A16_DCC1, i) < 0) goto err; if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0) goto err; return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int mb86a16_send_diseqc_burst(struct dvb_frontend *fe, fe_sec_mini_cmd_t burst) { struct mb86a16_state *state = fe->demodulator_priv; switch (burst) { case SEC_MINI_A: if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA | MB86A16_DCC1_TBEN | MB86A16_DCC1_TBO) < 0) goto err; if (mb86a16_write(state, MB86A16_DCCOUT, 
MB86A16_DCCOUT_DISEN) < 0) goto err; break; case SEC_MINI_B: if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA | MB86A16_DCC1_TBEN) < 0) goto err; if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0) goto err; break; } return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int mb86a16_set_tone(struct dvb_frontend *fe, fe_sec_tone_mode_t tone) { struct mb86a16_state *state = fe->demodulator_priv; switch (tone) { case SEC_TONE_ON: if (mb86a16_write(state, MB86A16_TONEOUT2, 0x00) < 0) goto err; if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA | MB86A16_DCC1_CTOE) < 0) goto err; if (mb86a16_write(state, MB86A16_DCCOUT, MB86A16_DCCOUT_DISEN) < 0) goto err; break; case SEC_TONE_OFF: if (mb86a16_write(state, MB86A16_TONEOUT2, 0x04) < 0) goto err; if (mb86a16_write(state, MB86A16_DCC1, MB86A16_DCC1_DISTA) < 0) goto err; if (mb86a16_write(state, MB86A16_DCCOUT, 0x00) < 0) goto err; break; default: return -EINVAL; } return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static enum dvbfe_search mb86a16_search(struct dvb_frontend *fe) { struct dtv_frontend_properties *p = &fe->dtv_property_cache; struct mb86a16_state *state = fe->demodulator_priv; state->frequency = p->frequency / 1000; state->srate = p->symbol_rate / 1000; if (!mb86a16_set_fe(state)) { dprintk(verbose, MB86A16_ERROR, 1, "Successfully acquired LOCK"); return DVBFE_ALGO_SEARCH_SUCCESS; } dprintk(verbose, MB86A16_ERROR, 1, "Lock acquisition failed!"); return DVBFE_ALGO_SEARCH_FAILED; } static void mb86a16_release(struct dvb_frontend *fe) { struct mb86a16_state *state = fe->demodulator_priv; kfree(state); } static int mb86a16_init(struct dvb_frontend *fe) { return 0; } static int mb86a16_sleep(struct dvb_frontend *fe) { return 0; } static int mb86a16_read_ber(struct dvb_frontend *fe, u32 *ber) { u8 ber_mon, ber_tab, ber_lsb, ber_mid, ber_msb, ber_tim, ber_rst; u32 timer; struct mb86a16_state 
*state = fe->demodulator_priv; *ber = 0; if (mb86a16_read(state, MB86A16_BERMON, &ber_mon) != 2) goto err; if (mb86a16_read(state, MB86A16_BERTAB, &ber_tab) != 2) goto err; if (mb86a16_read(state, MB86A16_BERLSB, &ber_lsb) != 2) goto err; if (mb86a16_read(state, MB86A16_BERMID, &ber_mid) != 2) goto err; if (mb86a16_read(state, MB86A16_BERMSB, &ber_msb) != 2) goto err; /* BER monitor invalid when BER_EN = 0 */ if (ber_mon & 0x04) { /* coarse, fast calculation */ *ber = ber_tab & 0x1f; dprintk(verbose, MB86A16_DEBUG, 1, "BER coarse=[0x%02x]", *ber); if (ber_mon & 0x01) { /* * BER_SEL = 1, The monitored BER is the estimated * value with a Reed-Solomon decoder error amount at * the deinterleaver output. * monitored BER is expressed as a 20 bit output in total */ ber_rst = ber_mon >> 3; *ber = (((ber_msb << 8) | ber_mid) << 8) | ber_lsb; if (ber_rst == 0) timer = 12500000; if (ber_rst == 1) timer = 25000000; if (ber_rst == 2) timer = 50000000; if (ber_rst == 3) timer = 100000000; *ber /= timer; dprintk(verbose, MB86A16_DEBUG, 1, "BER fine=[0x%02x]", *ber); } else { /* * BER_SEL = 0, The monitored BER is the estimated * value with a Viterbi decoder error amount at the * QPSK demodulator output. 
* monitored BER is expressed as a 24 bit output in total */ ber_tim = ber_mon >> 1; *ber = (((ber_msb << 8) | ber_mid) << 8) | ber_lsb; if (ber_tim == 0) timer = 16; if (ber_tim == 1) timer = 24; *ber /= 2 ^ timer; dprintk(verbose, MB86A16_DEBUG, 1, "BER fine=[0x%02x]", *ber); } } return 0; err: dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } static int mb86a16_read_signal_strength(struct dvb_frontend *fe, u16 *strength) { u8 agcm = 0; struct mb86a16_state *state = fe->demodulator_priv; *strength = 0; if (mb86a16_read(state, MB86A16_AGCM, &agcm) != 2) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } *strength = ((0xff - agcm) * 100) / 256; dprintk(verbose, MB86A16_DEBUG, 1, "Signal strength=[%d %%]", (u8) *strength); *strength = (0xffff - 0xff) + agcm; return 0; } struct cnr { u8 cn_reg; u8 cn_val; }; static const struct cnr cnr_tab[] = { { 35, 2 }, { 40, 3 }, { 50, 4 }, { 60, 5 }, { 70, 6 }, { 80, 7 }, { 92, 8 }, { 103, 9 }, { 115, 10 }, { 138, 12 }, { 162, 15 }, { 180, 18 }, { 185, 19 }, { 189, 20 }, { 195, 22 }, { 199, 24 }, { 201, 25 }, { 202, 26 }, { 203, 27 }, { 205, 28 }, { 208, 30 } }; static int mb86a16_read_snr(struct dvb_frontend *fe, u16 *snr) { struct mb86a16_state *state = fe->demodulator_priv; int i = 0; int low_tide = 2, high_tide = 30, q_level; u8 cn; *snr = 0; if (mb86a16_read(state, 0x26, &cn) != 2) { dprintk(verbose, MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } for (i = 0; i < ARRAY_SIZE(cnr_tab); i++) { if (cn < cnr_tab[i].cn_reg) { *snr = cnr_tab[i].cn_val; break; } } q_level = (*snr * 100) / (high_tide - low_tide); dprintk(verbose, MB86A16_ERROR, 1, "SNR (Quality) = [%d dB], Level=%d %%", *snr, q_level); *snr = (0xffff - 0xff) + *snr; return 0; } static int mb86a16_read_ucblocks(struct dvb_frontend *fe, u32 *ucblocks) { u8 dist; struct mb86a16_state *state = fe->demodulator_priv; if (mb86a16_read(state, MB86A16_DISTMON, &dist) != 2) { dprintk(verbose, 
MB86A16_ERROR, 1, "I2C transfer error"); return -EREMOTEIO; } *ucblocks = dist; return 0; } static enum dvbfe_algo mb86a16_frontend_algo(struct dvb_frontend *fe) { return DVBFE_ALGO_CUSTOM; } static struct dvb_frontend_ops mb86a16_ops = { .delsys = { SYS_DVBS }, .info = { .name = "Fujitsu MB86A16 DVB-S", .frequency_min = 950000, .frequency_max = 2150000, .frequency_stepsize = 3000, .frequency_tolerance = 0, .symbol_rate_min = 1000000, .symbol_rate_max = 45000000, .symbol_rate_tolerance = 500, .caps = FE_CAN_FEC_1_2 | FE_CAN_FEC_2_3 | FE_CAN_FEC_3_4 | FE_CAN_FEC_5_6 | FE_CAN_FEC_7_8 | FE_CAN_QPSK | FE_CAN_FEC_AUTO }, .release = mb86a16_release, .get_frontend_algo = mb86a16_frontend_algo, .search = mb86a16_search, .init = mb86a16_init, .sleep = mb86a16_sleep, .read_status = mb86a16_read_status, .read_ber = mb86a16_read_ber, .read_signal_strength = mb86a16_read_signal_strength, .read_snr = mb86a16_read_snr, .read_ucblocks = mb86a16_read_ucblocks, .diseqc_send_master_cmd = mb86a16_send_diseqc_msg, .diseqc_send_burst = mb86a16_send_diseqc_burst, .set_tone = mb86a16_set_tone, }; struct dvb_frontend *mb86a16_attach(const struct mb86a16_config *config, struct i2c_adapter *i2c_adap) { u8 dev_id = 0; struct mb86a16_state *state = NULL; state = kmalloc(sizeof(struct mb86a16_state), GFP_KERNEL); if (state == NULL) goto error; state->config = config; state->i2c_adap = i2c_adap; mb86a16_read(state, 0x7f, &dev_id); if (dev_id != 0xfe) goto error; memcpy(&state->frontend.ops, &mb86a16_ops, sizeof(struct dvb_frontend_ops)); state->frontend.demodulator_priv = state; state->frontend.ops.set_voltage = state->config->set_voltage; return &state->frontend; error: kfree(state); return NULL; } EXPORT_SYMBOL(mb86a16_attach); MODULE_LICENSE("GPL"); MODULE_AUTHOR("Manu Abraham");
gpl-2.0
dumtara/android_kernel_yu_msm8916
arch/parisc/kernel/processor.c
11090
12168
/* * Initial setup-routines for HP 9000 based hardware. * * Copyright (C) 1991, 1992, 1995 Linus Torvalds * Modifications for PA-RISC (C) 1999-2008 Helge Deller <deller@gmx.de> * Modifications copyright 1999 SuSE GmbH (Philipp Rumpf) * Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net> * Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org> * Modifications copyright 2001 Ryan Bradetich <rbradetich@uswest.net> * * Initial PA-RISC Version: 04-23-1999 by Helge Deller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/delay.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/cpu.h> #include <asm/param.h> #include <asm/cache.h> #include <asm/hardware.h> /* for register_parisc_driver() stuff */ #include <asm/processor.h> #include <asm/page.h> #include <asm/pdc.h> #include <asm/pdcpat.h> #include <asm/irq.h> /* for struct irq_region */ #include <asm/parisc-device.h> struct system_cpuinfo_parisc boot_cpu_data __read_mostly; EXPORT_SYMBOL(boot_cpu_data); DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data); extern int update_cr16_clocksource(void); /* from time.c */ /* ** PARISC CPU driver - claim "device" and initialize CPU data structures. ** ** Consolidate per CPU initialization into (mostly) one module. 
** Monarch CPU will initialize boot_cpu_data which shouldn't ** change once the system has booted. ** ** The callback *should* do per-instance initialization of ** everything including the monarch. "Per CPU" init code in ** setup.c:start_parisc() has migrated here and start_parisc() ** will call register_parisc_driver(&cpu_driver) before calling do_inventory(). ** ** The goal of consolidating CPU initialization into one place is ** to make sure all CPUs get initialized the same way. ** The code path not shared is how PDC hands control of the CPU to the OS. ** The initialization of OS data structures is the same (done below). */ /** * init_cpu_profiler - enable/setup per cpu profiling hooks. * @cpunum: The processor instance. * * FIXME: doesn't do much yet... */ static void __cpuinit init_percpu_prof(unsigned long cpunum) { struct cpuinfo_parisc *p; p = &per_cpu(cpu_data, cpunum); p->prof_counter = 1; p->prof_multiplier = 1; } /** * processor_probe - Determine if processor driver should claim this device. * @dev: The device which has been found. * * Determine if processor driver should claim this chip (return 0) or not * (return 1). If so, initialize the chip and tell other partners in crime * they have work to do. */ static int __cpuinit processor_probe(struct parisc_device *dev) { unsigned long txn_addr; unsigned long cpuid; struct cpuinfo_parisc *p; #ifdef CONFIG_SMP if (num_online_cpus() >= nr_cpu_ids) { printk(KERN_INFO "num_online_cpus() >= nr_cpu_ids\n"); return 1; } #else if (boot_cpu_data.cpu_count > 0) { printk(KERN_INFO "CONFIG_SMP=n ignoring additional CPUs\n"); return 1; } #endif /* logical CPU ID and update global counter * May get overwritten by PAT code. 
*/ cpuid = boot_cpu_data.cpu_count; txn_addr = dev->hpa.start; /* for legacy PDC */ #ifdef CONFIG_64BIT if (is_pdc_pat()) { ulong status; unsigned long bytecnt; pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell; #undef USE_PAT_CPUID #ifdef USE_PAT_CPUID struct pdc_pat_cpu_num cpu_info; #endif pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL); if (!pa_pdc_cell) panic("couldn't allocate memory for PDC_PAT_CELL!"); status = pdc_pat_cell_module(&bytecnt, dev->pcell_loc, dev->mod_index, PA_VIEW, pa_pdc_cell); BUG_ON(PDC_OK != status); /* verify it's the same as what do_pat_inventory() found */ BUG_ON(dev->mod_info != pa_pdc_cell->mod_info); BUG_ON(dev->pmod_loc != pa_pdc_cell->mod_location); txn_addr = pa_pdc_cell->mod[0]; /* id_eid for IO sapic */ kfree(pa_pdc_cell); #ifdef USE_PAT_CPUID /* We need contiguous numbers for cpuid. Firmware's notion * of cpuid is for physical CPUs and we just don't care yet. * We'll care when we need to query PAT PDC about a CPU *after* * boot time (ie shutdown a CPU from an OS perspective). */ /* get the cpu number */ status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa.start); BUG_ON(PDC_OK != status); if (cpu_info.cpu_num >= NR_CPUS) { printk(KERN_WARNING "IGNORING CPU at 0x%x," " cpu_slot_id > NR_CPUS" " (%ld > %d)\n", dev->hpa.start, cpu_info.cpu_num, NR_CPUS); /* Ignore CPU since it will only crash */ boot_cpu_data.cpu_count--; return 1; } else { cpuid = cpu_info.cpu_num; } #endif } #endif p = &per_cpu(cpu_data, cpuid); boot_cpu_data.cpu_count++; /* initialize counters - CPU 0 gets it_value set in time_init() */ if (cpuid) memset(p, 0, sizeof(struct cpuinfo_parisc)); p->loops_per_jiffy = loops_per_jiffy; p->dev = dev; /* Save IODC data in case we need it */ p->hpa = dev->hpa.start; /* save CPU hpa */ p->cpuid = cpuid; /* save CPU id */ p->txn_addr = txn_addr; /* save CPU IRQ address */ #ifdef CONFIG_SMP /* ** FIXME: review if any other initialization is clobbered ** for boot_cpu by the above memset(). 
*/ init_percpu_prof(cpuid); #endif /* ** CONFIG_SMP: init_smp_config() will attempt to get CPUs into ** OS control. RENDEZVOUS is the default state - see mem_set above. ** p->state = STATE_RENDEZVOUS; */ #if 0 /* CPU 0 IRQ table is statically allocated/initialized */ if (cpuid) { struct irqaction actions[]; /* ** itimer and ipi IRQ handlers are statically initialized in ** arch/parisc/kernel/irq.c. ie Don't need to register them. */ actions = kmalloc(sizeof(struct irqaction)*MAX_CPU_IRQ, GFP_ATOMIC); if (!actions) { /* not getting it's own table, share with monarch */ actions = cpu_irq_actions[0]; } cpu_irq_actions[cpuid] = actions; } #endif /* * Bring this CPU up now! (ignore bootstrap cpuid == 0) */ #ifdef CONFIG_SMP if (cpuid) { set_cpu_present(cpuid, true); cpu_up(cpuid); } #endif /* If we've registered more than one cpu, * we'll use the jiffies clocksource since cr16 * is not synchronized between CPUs. */ update_cr16_clocksource(); return 0; } /** * collect_boot_cpu_data - Fill the boot_cpu_data structure. * * This function collects and stores the generic processor information * in the boot_cpu_data structure. */ void __init collect_boot_cpu_data(void) { memset(&boot_cpu_data, 0, sizeof(boot_cpu_data)); boot_cpu_data.cpu_hz = 100 * PAGE0->mem_10msec; /* Hz of this PARISC */ /* get CPU-Model Information... 
*/ #define p ((unsigned long *)&boot_cpu_data.pdc.model) if (pdc_model_info(&boot_cpu_data.pdc.model) == PDC_OK) printk(KERN_INFO "model %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]); #undef p if (pdc_model_versions(&boot_cpu_data.pdc.versions, 0) == PDC_OK) printk(KERN_INFO "vers %08lx\n", boot_cpu_data.pdc.versions); if (pdc_model_cpuid(&boot_cpu_data.pdc.cpuid) == PDC_OK) printk(KERN_INFO "CPUID vers %ld rev %ld (0x%08lx)\n", (boot_cpu_data.pdc.cpuid >> 5) & 127, boot_cpu_data.pdc.cpuid & 31, boot_cpu_data.pdc.cpuid); if (pdc_model_capabilities(&boot_cpu_data.pdc.capabilities) == PDC_OK) printk(KERN_INFO "capabilities 0x%lx\n", boot_cpu_data.pdc.capabilities); if (pdc_model_sysmodel(boot_cpu_data.pdc.sys_model_name) == PDC_OK) printk(KERN_INFO "model %s\n", boot_cpu_data.pdc.sys_model_name); boot_cpu_data.hversion = boot_cpu_data.pdc.model.hversion; boot_cpu_data.sversion = boot_cpu_data.pdc.model.sversion; boot_cpu_data.cpu_type = parisc_get_cpu_type(boot_cpu_data.hversion); boot_cpu_data.cpu_name = cpu_name_version[boot_cpu_data.cpu_type][0]; boot_cpu_data.family_name = cpu_name_version[boot_cpu_data.cpu_type][1]; } /** * init_per_cpu - Handle individual processor initializations. * @cpunum: logical processor number. * * This function handles initialization for *every* CPU * in the system: * * o Set "default" CPU width for trap handlers * * o Enable FP coprocessor * REVISIT: this could be done in the "code 22" trap handler. * (frowands idea - that way we know which processes need FP * registers saved on the interrupt stack.) * NEWS FLASH: wide kernels need FP coprocessor enabled to handle * formatted printing of %lx for example (double divides I think) * * o Enable CPU profiling hooks. 
*/ int __cpuinit init_per_cpu(int cpunum) { int ret; struct pdc_coproc_cfg coproc_cfg; set_firmware_width(); ret = pdc_coproc_cfg(&coproc_cfg); if(ret >= 0 && coproc_cfg.ccr_functional) { mtctl(coproc_cfg.ccr_functional, 10); /* 10 == Coprocessor Control Reg */ /* FWIW, FP rev/model is a more accurate way to determine ** CPU type. CPU rev/model has some ambiguous cases. */ per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision; per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model; printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n", cpunum, coproc_cfg.revision, coproc_cfg.model); /* ** store status register to stack (hopefully aligned) ** and clear the T-bit. */ asm volatile ("fstd %fr0,8(%sp)"); } else { printk(KERN_WARNING "WARNING: No FP CoProcessor?!" " (coproc_cfg.ccr_functional == 0x%lx, expected 0xc0)\n" #ifdef CONFIG_64BIT "Halting Machine - FP required\n" #endif , coproc_cfg.ccr_functional); #ifdef CONFIG_64BIT mdelay(100); /* previous chars get pushed to console */ panic("FP CoProc not reported"); #endif } /* FUTURE: Enable Performance Monitor : ccr bit 0x20 */ init_percpu_prof(cpunum); return ret; } /* * Display CPU info for all CPUs. */ int show_cpuinfo (struct seq_file *m, void *v) { unsigned long cpu; for_each_online_cpu(cpu) { const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); #ifdef CONFIG_SMP if (0 == cpuinfo->hpa) continue; #endif seq_printf(m, "processor\t: %lu\n" "cpu family\t: PA-RISC %s\n", cpu, boot_cpu_data.family_name); seq_printf(m, "cpu\t\t: %s\n", boot_cpu_data.cpu_name ); /* cpu MHz */ seq_printf(m, "cpu MHz\t\t: %d.%06d\n", boot_cpu_data.cpu_hz / 1000000, boot_cpu_data.cpu_hz % 1000000 ); seq_printf(m, "capabilities\t:"); if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS32) seq_printf(m, " os32"); if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS64) seq_printf(m, " os64"); seq_printf(m, "\n"); seq_printf(m, "model\t\t: %s\n" "model name\t: %s\n", boot_cpu_data.pdc.sys_model_name, cpuinfo->dev ? 
cpuinfo->dev->name : "Unknown"); seq_printf(m, "hversion\t: 0x%08x\n" "sversion\t: 0x%08x\n", boot_cpu_data.hversion, boot_cpu_data.sversion ); /* print cachesize info */ show_cache_info(m); seq_printf(m, "bogomips\t: %lu.%02lu\n", cpuinfo->loops_per_jiffy / (500000 / HZ), (cpuinfo->loops_per_jiffy / (5000 / HZ)) % 100); seq_printf(m, "software id\t: %ld\n\n", boot_cpu_data.pdc.model.sw_id); } return 0; } static const struct parisc_device_id processor_tbl[] = { { HPHW_NPROC, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, SVERSION_ANY_ID }, { 0, } }; static struct parisc_driver cpu_driver = { .name = "CPU", .id_table = processor_tbl, .probe = processor_probe }; /** * processor_init - Processor initialization procedure. * * Register this driver. */ void __init processor_init(void) { register_parisc_driver(&cpu_driver); }
gpl-2.0
sexmachine/msm
arch/parisc/kernel/processor.c
11090
12168
/* * Initial setup-routines for HP 9000 based hardware. * * Copyright (C) 1991, 1992, 1995 Linus Torvalds * Modifications for PA-RISC (C) 1999-2008 Helge Deller <deller@gmx.de> * Modifications copyright 1999 SuSE GmbH (Philipp Rumpf) * Modifications copyright 2000 Martin K. Petersen <mkp@mkp.net> * Modifications copyright 2000 Philipp Rumpf <prumpf@tux.org> * Modifications copyright 2001 Ryan Bradetich <rbradetich@uswest.net> * * Initial PA-RISC Version: 04-23-1999 by Helge Deller * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. * */ #include <linux/delay.h> #include <linux/init.h> #include <linux/mm.h> #include <linux/module.h> #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/cpu.h> #include <asm/param.h> #include <asm/cache.h> #include <asm/hardware.h> /* for register_parisc_driver() stuff */ #include <asm/processor.h> #include <asm/page.h> #include <asm/pdc.h> #include <asm/pdcpat.h> #include <asm/irq.h> /* for struct irq_region */ #include <asm/parisc-device.h> struct system_cpuinfo_parisc boot_cpu_data __read_mostly; EXPORT_SYMBOL(boot_cpu_data); DEFINE_PER_CPU(struct cpuinfo_parisc, cpu_data); extern int update_cr16_clocksource(void); /* from time.c */ /* ** PARISC CPU driver - claim "device" and initialize CPU data structures. ** ** Consolidate per CPU initialization into (mostly) one module. 
** Monarch CPU will initialize boot_cpu_data which shouldn't ** change once the system has booted. ** ** The callback *should* do per-instance initialization of ** everything including the monarch. "Per CPU" init code in ** setup.c:start_parisc() has migrated here and start_parisc() ** will call register_parisc_driver(&cpu_driver) before calling do_inventory(). ** ** The goal of consolidating CPU initialization into one place is ** to make sure all CPUs get initialized the same way. ** The code path not shared is how PDC hands control of the CPU to the OS. ** The initialization of OS data structures is the same (done below). */ /** * init_cpu_profiler - enable/setup per cpu profiling hooks. * @cpunum: The processor instance. * * FIXME: doesn't do much yet... */ static void __cpuinit init_percpu_prof(unsigned long cpunum) { struct cpuinfo_parisc *p; p = &per_cpu(cpu_data, cpunum); p->prof_counter = 1; p->prof_multiplier = 1; } /** * processor_probe - Determine if processor driver should claim this device. * @dev: The device which has been found. * * Determine if processor driver should claim this chip (return 0) or not * (return 1). If so, initialize the chip and tell other partners in crime * they have work to do. */ static int __cpuinit processor_probe(struct parisc_device *dev) { unsigned long txn_addr; unsigned long cpuid; struct cpuinfo_parisc *p; #ifdef CONFIG_SMP if (num_online_cpus() >= nr_cpu_ids) { printk(KERN_INFO "num_online_cpus() >= nr_cpu_ids\n"); return 1; } #else if (boot_cpu_data.cpu_count > 0) { printk(KERN_INFO "CONFIG_SMP=n ignoring additional CPUs\n"); return 1; } #endif /* logical CPU ID and update global counter * May get overwritten by PAT code. 
*/ cpuid = boot_cpu_data.cpu_count; txn_addr = dev->hpa.start; /* for legacy PDC */ #ifdef CONFIG_64BIT if (is_pdc_pat()) { ulong status; unsigned long bytecnt; pdc_pat_cell_mod_maddr_block_t *pa_pdc_cell; #undef USE_PAT_CPUID #ifdef USE_PAT_CPUID struct pdc_pat_cpu_num cpu_info; #endif pa_pdc_cell = kmalloc(sizeof (*pa_pdc_cell), GFP_KERNEL); if (!pa_pdc_cell) panic("couldn't allocate memory for PDC_PAT_CELL!"); status = pdc_pat_cell_module(&bytecnt, dev->pcell_loc, dev->mod_index, PA_VIEW, pa_pdc_cell); BUG_ON(PDC_OK != status); /* verify it's the same as what do_pat_inventory() found */ BUG_ON(dev->mod_info != pa_pdc_cell->mod_info); BUG_ON(dev->pmod_loc != pa_pdc_cell->mod_location); txn_addr = pa_pdc_cell->mod[0]; /* id_eid for IO sapic */ kfree(pa_pdc_cell); #ifdef USE_PAT_CPUID /* We need contiguous numbers for cpuid. Firmware's notion * of cpuid is for physical CPUs and we just don't care yet. * We'll care when we need to query PAT PDC about a CPU *after* * boot time (ie shutdown a CPU from an OS perspective). */ /* get the cpu number */ status = pdc_pat_cpu_get_number(&cpu_info, dev->hpa.start); BUG_ON(PDC_OK != status); if (cpu_info.cpu_num >= NR_CPUS) { printk(KERN_WARNING "IGNORING CPU at 0x%x," " cpu_slot_id > NR_CPUS" " (%ld > %d)\n", dev->hpa.start, cpu_info.cpu_num, NR_CPUS); /* Ignore CPU since it will only crash */ boot_cpu_data.cpu_count--; return 1; } else { cpuid = cpu_info.cpu_num; } #endif } #endif p = &per_cpu(cpu_data, cpuid); boot_cpu_data.cpu_count++; /* initialize counters - CPU 0 gets it_value set in time_init() */ if (cpuid) memset(p, 0, sizeof(struct cpuinfo_parisc)); p->loops_per_jiffy = loops_per_jiffy; p->dev = dev; /* Save IODC data in case we need it */ p->hpa = dev->hpa.start; /* save CPU hpa */ p->cpuid = cpuid; /* save CPU id */ p->txn_addr = txn_addr; /* save CPU IRQ address */ #ifdef CONFIG_SMP /* ** FIXME: review if any other initialization is clobbered ** for boot_cpu by the above memset(). 
*/ init_percpu_prof(cpuid); #endif /* ** CONFIG_SMP: init_smp_config() will attempt to get CPUs into ** OS control. RENDEZVOUS is the default state - see mem_set above. ** p->state = STATE_RENDEZVOUS; */ #if 0 /* CPU 0 IRQ table is statically allocated/initialized */ if (cpuid) { struct irqaction actions[]; /* ** itimer and ipi IRQ handlers are statically initialized in ** arch/parisc/kernel/irq.c. ie Don't need to register them. */ actions = kmalloc(sizeof(struct irqaction)*MAX_CPU_IRQ, GFP_ATOMIC); if (!actions) { /* not getting it's own table, share with monarch */ actions = cpu_irq_actions[0]; } cpu_irq_actions[cpuid] = actions; } #endif /* * Bring this CPU up now! (ignore bootstrap cpuid == 0) */ #ifdef CONFIG_SMP if (cpuid) { set_cpu_present(cpuid, true); cpu_up(cpuid); } #endif /* If we've registered more than one cpu, * we'll use the jiffies clocksource since cr16 * is not synchronized between CPUs. */ update_cr16_clocksource(); return 0; } /** * collect_boot_cpu_data - Fill the boot_cpu_data structure. * * This function collects and stores the generic processor information * in the boot_cpu_data structure. */ void __init collect_boot_cpu_data(void) { memset(&boot_cpu_data, 0, sizeof(boot_cpu_data)); boot_cpu_data.cpu_hz = 100 * PAGE0->mem_10msec; /* Hz of this PARISC */ /* get CPU-Model Information... 
*/ #define p ((unsigned long *)&boot_cpu_data.pdc.model) if (pdc_model_info(&boot_cpu_data.pdc.model) == PDC_OK) printk(KERN_INFO "model %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n", p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7], p[8]); #undef p if (pdc_model_versions(&boot_cpu_data.pdc.versions, 0) == PDC_OK) printk(KERN_INFO "vers %08lx\n", boot_cpu_data.pdc.versions); if (pdc_model_cpuid(&boot_cpu_data.pdc.cpuid) == PDC_OK) printk(KERN_INFO "CPUID vers %ld rev %ld (0x%08lx)\n", (boot_cpu_data.pdc.cpuid >> 5) & 127, boot_cpu_data.pdc.cpuid & 31, boot_cpu_data.pdc.cpuid); if (pdc_model_capabilities(&boot_cpu_data.pdc.capabilities) == PDC_OK) printk(KERN_INFO "capabilities 0x%lx\n", boot_cpu_data.pdc.capabilities); if (pdc_model_sysmodel(boot_cpu_data.pdc.sys_model_name) == PDC_OK) printk(KERN_INFO "model %s\n", boot_cpu_data.pdc.sys_model_name); boot_cpu_data.hversion = boot_cpu_data.pdc.model.hversion; boot_cpu_data.sversion = boot_cpu_data.pdc.model.sversion; boot_cpu_data.cpu_type = parisc_get_cpu_type(boot_cpu_data.hversion); boot_cpu_data.cpu_name = cpu_name_version[boot_cpu_data.cpu_type][0]; boot_cpu_data.family_name = cpu_name_version[boot_cpu_data.cpu_type][1]; } /** * init_per_cpu - Handle individual processor initializations. * @cpunum: logical processor number. * * This function handles initialization for *every* CPU * in the system: * * o Set "default" CPU width for trap handlers * * o Enable FP coprocessor * REVISIT: this could be done in the "code 22" trap handler. * (frowands idea - that way we know which processes need FP * registers saved on the interrupt stack.) * NEWS FLASH: wide kernels need FP coprocessor enabled to handle * formatted printing of %lx for example (double divides I think) * * o Enable CPU profiling hooks. 
*/ int __cpuinit init_per_cpu(int cpunum) { int ret; struct pdc_coproc_cfg coproc_cfg; set_firmware_width(); ret = pdc_coproc_cfg(&coproc_cfg); if(ret >= 0 && coproc_cfg.ccr_functional) { mtctl(coproc_cfg.ccr_functional, 10); /* 10 == Coprocessor Control Reg */ /* FWIW, FP rev/model is a more accurate way to determine ** CPU type. CPU rev/model has some ambiguous cases. */ per_cpu(cpu_data, cpunum).fp_rev = coproc_cfg.revision; per_cpu(cpu_data, cpunum).fp_model = coproc_cfg.model; printk(KERN_INFO "FP[%d] enabled: Rev %ld Model %ld\n", cpunum, coproc_cfg.revision, coproc_cfg.model); /* ** store status register to stack (hopefully aligned) ** and clear the T-bit. */ asm volatile ("fstd %fr0,8(%sp)"); } else { printk(KERN_WARNING "WARNING: No FP CoProcessor?!" " (coproc_cfg.ccr_functional == 0x%lx, expected 0xc0)\n" #ifdef CONFIG_64BIT "Halting Machine - FP required\n" #endif , coproc_cfg.ccr_functional); #ifdef CONFIG_64BIT mdelay(100); /* previous chars get pushed to console */ panic("FP CoProc not reported"); #endif } /* FUTURE: Enable Performance Monitor : ccr bit 0x20 */ init_percpu_prof(cpunum); return ret; } /* * Display CPU info for all CPUs. */ int show_cpuinfo (struct seq_file *m, void *v) { unsigned long cpu; for_each_online_cpu(cpu) { const struct cpuinfo_parisc *cpuinfo = &per_cpu(cpu_data, cpu); #ifdef CONFIG_SMP if (0 == cpuinfo->hpa) continue; #endif seq_printf(m, "processor\t: %lu\n" "cpu family\t: PA-RISC %s\n", cpu, boot_cpu_data.family_name); seq_printf(m, "cpu\t\t: %s\n", boot_cpu_data.cpu_name ); /* cpu MHz */ seq_printf(m, "cpu MHz\t\t: %d.%06d\n", boot_cpu_data.cpu_hz / 1000000, boot_cpu_data.cpu_hz % 1000000 ); seq_printf(m, "capabilities\t:"); if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS32) seq_printf(m, " os32"); if (boot_cpu_data.pdc.capabilities & PDC_MODEL_OS64) seq_printf(m, " os64"); seq_printf(m, "\n"); seq_printf(m, "model\t\t: %s\n" "model name\t: %s\n", boot_cpu_data.pdc.sys_model_name, cpuinfo->dev ? 
cpuinfo->dev->name : "Unknown"); seq_printf(m, "hversion\t: 0x%08x\n" "sversion\t: 0x%08x\n", boot_cpu_data.hversion, boot_cpu_data.sversion ); /* print cachesize info */ show_cache_info(m); seq_printf(m, "bogomips\t: %lu.%02lu\n", cpuinfo->loops_per_jiffy / (500000 / HZ), (cpuinfo->loops_per_jiffy / (5000 / HZ)) % 100); seq_printf(m, "software id\t: %ld\n\n", boot_cpu_data.pdc.model.sw_id); } return 0; } static const struct parisc_device_id processor_tbl[] = { { HPHW_NPROC, HVERSION_REV_ANY_ID, HVERSION_ANY_ID, SVERSION_ANY_ID }, { 0, } }; static struct parisc_driver cpu_driver = { .name = "CPU", .id_table = processor_tbl, .probe = processor_probe }; /** * processor_init - Processor initialization procedure. * * Register this driver. */ void __init processor_init(void) { register_parisc_driver(&cpu_driver); }
gpl-2.0
thornbirdblue/8974_kernel
tools/perf/scripts/perl/Perf-Trace-Util/Context.c
11602
3691
/* * This file was generated automatically by ExtUtils::ParseXS version 2.18_02 from the * contents of Context.xs. Do not edit this file, edit Context.xs instead. * * ANY CHANGES MADE HERE WILL BE LOST! * */ #line 1 "Context.xs" /* * Context.xs. XS interfaces for perf script. * * Copyright (C) 2009 Tom Zanussi <tzanussi@gmail.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ #include "EXTERN.h" #include "perl.h" #include "XSUB.h" #include "../../../perf.h" #include "../../../util/trace-event.h" #ifndef PERL_UNUSED_VAR # define PERL_UNUSED_VAR(var) if (0) var = var #endif #line 42 "Context.c" XS(XS_Perf__Trace__Context_common_pc); /* prototype to pass -Wmissing-prototypes */ XS(XS_Perf__Trace__Context_common_pc) { #ifdef dVAR dVAR; dXSARGS; #else dXSARGS; #endif if (items != 1) Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_pc", "context"); PERL_UNUSED_VAR(cv); /* -W */ { struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); int RETVAL; dXSTARG; RETVAL = common_pc(context); XSprePUSH; PUSHi((IV)RETVAL); } XSRETURN(1); } XS(XS_Perf__Trace__Context_common_flags); /* prototype to pass -Wmissing-prototypes */ XS(XS_Perf__Trace__Context_common_flags) { #ifdef dVAR dVAR; dXSARGS; #else dXSARGS; #endif if (items != 1) Perl_croak(aTHX_ "Usage: %s(%s)", 
"Perf::Trace::Context::common_flags", "context"); PERL_UNUSED_VAR(cv); /* -W */ { struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); int RETVAL; dXSTARG; RETVAL = common_flags(context); XSprePUSH; PUSHi((IV)RETVAL); } XSRETURN(1); } XS(XS_Perf__Trace__Context_common_lock_depth); /* prototype to pass -Wmissing-prototypes */ XS(XS_Perf__Trace__Context_common_lock_depth) { #ifdef dVAR dVAR; dXSARGS; #else dXSARGS; #endif if (items != 1) Perl_croak(aTHX_ "Usage: %s(%s)", "Perf::Trace::Context::common_lock_depth", "context"); PERL_UNUSED_VAR(cv); /* -W */ { struct scripting_context * context = INT2PTR(struct scripting_context *,SvIV(ST(0))); int RETVAL; dXSTARG; RETVAL = common_lock_depth(context); XSprePUSH; PUSHi((IV)RETVAL); } XSRETURN(1); } #ifdef __cplusplus extern "C" #endif XS(boot_Perf__Trace__Context); /* prototype to pass -Wmissing-prototypes */ XS(boot_Perf__Trace__Context) { #ifdef dVAR dVAR; dXSARGS; #else dXSARGS; #endif const char* file = __FILE__; PERL_UNUSED_VAR(cv); /* -W */ PERL_UNUSED_VAR(items); /* -W */ XS_VERSION_BOOTCHECK ; newXSproto("Perf::Trace::Context::common_pc", XS_Perf__Trace__Context_common_pc, file, "$"); newXSproto("Perf::Trace::Context::common_flags", XS_Perf__Trace__Context_common_flags, file, "$"); newXSproto("Perf::Trace::Context::common_lock_depth", XS_Perf__Trace__Context_common_lock_depth, file, "$"); if (PL_unitcheckav) call_list(PL_scopestack_ix, PL_unitcheckav); XSRETURN_YES; }
gpl-2.0
CitrusB/android_kernel_samsung_s6810p
arch/ia64/kernel/audit.c
15698
1117
#include <linux/init.h> #include <linux/types.h> #include <linux/audit.h> #include <asm/unistd.h> static unsigned dir_class[] = { #include <asm-generic/audit_dir_write.h> ~0U }; static unsigned read_class[] = { #include <asm-generic/audit_read.h> ~0U }; static unsigned write_class[] = { #include <asm-generic/audit_write.h> ~0U }; static unsigned chattr_class[] = { #include <asm-generic/audit_change_attr.h> ~0U }; static unsigned signal_class[] = { #include <asm-generic/audit_signal.h> ~0U }; int audit_classify_arch(int arch) { return 0; } int audit_classify_syscall(int abi, unsigned syscall) { switch(syscall) { case __NR_open: return 2; case __NR_openat: return 3; case __NR_execve: return 5; default: return 0; } } static int __init audit_classes_init(void) { audit_register_class(AUDIT_CLASS_WRITE, write_class); audit_register_class(AUDIT_CLASS_READ, read_class); audit_register_class(AUDIT_CLASS_DIR_WRITE, dir_class); audit_register_class(AUDIT_CLASS_CHATTR, chattr_class); audit_register_class(AUDIT_CLASS_SIGNAL, signal_class); return 0; } __initcall(audit_classes_init);
gpl-2.0
UNGLinux/UNGKernel
arch/x86/kernel/nmi.c
83
15752
/* * Copyright (C) 1991, 1992 Linus Torvalds * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs * Copyright (C) 2011 Don Zickus Red Hat, Inc. * * Pentium III FXSR, SSE support * Gareth Hughes <gareth@valinux.com>, May 2000 */ /* * Handle hardware traps and faults. */ #include <linux/spinlock.h> #include <linux/kprobes.h> #include <linux/kdebug.h> #include <linux/nmi.h> #include <linux/debugfs.h> #include <linux/delay.h> #include <linux/hardirq.h> #include <linux/slab.h> #include <linux/export.h> #if defined(CONFIG_EDAC) #include <linux/edac.h> #endif #include <linux/atomic.h> #include <asm/traps.h> #include <asm/mach_traps.h> #include <asm/nmi.h> #include <asm/x86_init.h> #define CREATE_TRACE_POINTS #include <trace/events/nmi.h> struct nmi_desc { spinlock_t lock; struct list_head head; }; static struct nmi_desc nmi_desc[NMI_MAX] = { { .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[0].lock), .head = LIST_HEAD_INIT(nmi_desc[0].head), }, { .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[1].lock), .head = LIST_HEAD_INIT(nmi_desc[1].head), }, { .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[2].lock), .head = LIST_HEAD_INIT(nmi_desc[2].head), }, { .lock = __SPIN_LOCK_UNLOCKED(&nmi_desc[3].lock), .head = LIST_HEAD_INIT(nmi_desc[3].head), }, }; struct nmi_stats { unsigned int normal; unsigned int unknown; unsigned int external; unsigned int swallow; }; static DEFINE_PER_CPU(struct nmi_stats, nmi_stats); static int ignore_nmis; int unknown_nmi_panic; /* * Prevent NMI reason port (0x61) being accessed simultaneously, can * only be used in NMI handler. 
*/ static DEFINE_RAW_SPINLOCK(nmi_reason_lock); static int __init setup_unknown_nmi_panic(char *str) { unknown_nmi_panic = 1; return 1; } __setup("unknown_nmi_panic", setup_unknown_nmi_panic); #define nmi_to_desc(type) (&nmi_desc[type]) static u64 nmi_longest_ns = 1 * NSEC_PER_MSEC; static int __init nmi_warning_debugfs(void) { debugfs_create_u64("nmi_longest_ns", 0644, arch_debugfs_dir, &nmi_longest_ns); return 0; } fs_initcall(nmi_warning_debugfs); static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2b) { struct nmi_desc *desc = nmi_to_desc(type); struct nmiaction *a; int handled=0; rcu_read_lock(); /* * NMIs are edge-triggered, which means if you have enough * of them concurrently, you can lose some because only one * can be latched at any given time. Walk the whole list * to handle those situations. */ list_for_each_entry_rcu(a, &desc->head, list) { u64 before, delta, whole_msecs; int remainder_ns, decimal_msecs, thishandled; before = local_clock(); thishandled = a->handler(type, regs); handled += thishandled; delta = local_clock() - before; trace_nmi_handler(a->handler, (int)delta, thishandled); if (delta < nmi_longest_ns) continue; nmi_longest_ns = delta; whole_msecs = delta; remainder_ns = do_div(whole_msecs, (1000 * 1000)); decimal_msecs = remainder_ns / 1000; printk_ratelimited(KERN_INFO "INFO: NMI handler (%ps) took too long to run: " "%lld.%03d msecs\n", a->handler, whole_msecs, decimal_msecs); } rcu_read_unlock(); /* return total number of NMI events handled */ return handled; } int __register_nmi_handler(unsigned int type, struct nmiaction *action) { struct nmi_desc *desc = nmi_to_desc(type); unsigned long flags; if (!action->handler) return -EINVAL; spin_lock_irqsave(&desc->lock, flags); /* * most handlers of type NMI_UNKNOWN never return because * they just assume the NMI is theirs. 
Just a sanity check * to manage expectations */ WARN_ON_ONCE(type == NMI_UNKNOWN && !list_empty(&desc->head)); WARN_ON_ONCE(type == NMI_SERR && !list_empty(&desc->head)); WARN_ON_ONCE(type == NMI_IO_CHECK && !list_empty(&desc->head)); /* * some handlers need to be executed first otherwise a fake * event confuses some handlers (kdump uses this flag) */ if (action->flags & NMI_FLAG_FIRST) list_add_rcu(&action->list, &desc->head); else list_add_tail_rcu(&action->list, &desc->head); spin_unlock_irqrestore(&desc->lock, flags); return 0; } EXPORT_SYMBOL(__register_nmi_handler); void unregister_nmi_handler(unsigned int type, const char *name) { struct nmi_desc *desc = nmi_to_desc(type); struct nmiaction *n; unsigned long flags; spin_lock_irqsave(&desc->lock, flags); list_for_each_entry_rcu(n, &desc->head, list) { /* * the name passed in to describe the nmi handler * is used as the lookup key */ if (!strcmp(n->name, name)) { WARN(in_nmi(), "Trying to free NMI (%s) from NMI context!\n", n->name); list_del_rcu(&n->list); break; } } spin_unlock_irqrestore(&desc->lock, flags); synchronize_rcu(); } EXPORT_SYMBOL_GPL(unregister_nmi_handler); static __kprobes void pci_serr_error(unsigned char reason, struct pt_regs *regs) { /* check to see if anyone registered against these types of errors */ if (nmi_handle(NMI_SERR, regs, false)) return; pr_emerg("NMI: PCI system error (SERR) for reason %02x on CPU %d.\n", reason, smp_processor_id()); /* * On some machines, PCI SERR line is used to report memory * errors. EDAC makes use of it. */ #if defined(CONFIG_EDAC) if (edac_handler_set()) { edac_atomic_assert_error(); return; } #endif if (panic_on_unrecovered_nmi) panic("NMI: Not continuing"); pr_emerg("Dazed and confused, but trying to continue\n"); /* Clear and disable the PCI SERR error line. 
*/ reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_SERR; outb(reason, NMI_REASON_PORT); } static __kprobes void io_check_error(unsigned char reason, struct pt_regs *regs) { unsigned long i; /* check to see if anyone registered against these types of errors */ if (nmi_handle(NMI_IO_CHECK, regs, false)) return; pr_emerg( "NMI: IOCK error (debug interrupt?) for reason %02x on CPU %d.\n", reason, smp_processor_id()); show_regs(regs); if (panic_on_io_nmi) panic("NMI IOCK error: Not continuing"); /* Re-enable the IOCK line, wait for a few seconds */ reason = (reason & NMI_REASON_CLEAR_MASK) | NMI_REASON_CLEAR_IOCHK; outb(reason, NMI_REASON_PORT); i = 20000; while (--i) { touch_nmi_watchdog(); udelay(100); } reason &= ~NMI_REASON_CLEAR_IOCHK; outb(reason, NMI_REASON_PORT); } static __kprobes void unknown_nmi_error(unsigned char reason, struct pt_regs *regs) { int handled; /* * Use 'false' as back-to-back NMIs are dealt with one level up. * Of course this makes having multiple 'unknown' handlers useless * as only the first one is ever run (unless it can actually determine * if it caused the NMI) */ handled = nmi_handle(NMI_UNKNOWN, regs, false); if (handled) { __this_cpu_add(nmi_stats.unknown, handled); return; } __this_cpu_add(nmi_stats.unknown, 1); pr_emerg("Uhhuh. NMI received for unknown reason %02x on CPU %d.\n", reason, smp_processor_id()); pr_emerg("Do you have a strange power saving mode enabled?\n"); if (unknown_nmi_panic || panic_on_unrecovered_nmi) panic("NMI: Not continuing"); pr_emerg("Dazed and confused, but trying to continue\n"); } static DEFINE_PER_CPU(bool, swallow_nmi); static DEFINE_PER_CPU(unsigned long, last_nmi_rip); static __kprobes void default_do_nmi(struct pt_regs *regs) { unsigned char reason = 0; int handled; bool b2b = false; /* * CPU-specific NMI must be processed before non-CPU-specific * NMI, otherwise we may lose it, because the CPU-specific * NMI can not be detected/processed on other CPUs. 
*/ /* * Back-to-back NMIs are interesting because they can either * be two NMI or more than two NMIs (any thing over two is dropped * due to NMI being edge-triggered). If this is the second half * of the back-to-back NMI, assume we dropped things and process * more handlers. Otherwise reset the 'swallow' NMI behaviour */ if (regs->ip == __this_cpu_read(last_nmi_rip)) b2b = true; else __this_cpu_write(swallow_nmi, false); __this_cpu_write(last_nmi_rip, regs->ip); handled = nmi_handle(NMI_LOCAL, regs, b2b); __this_cpu_add(nmi_stats.normal, handled); if (handled) { /* * There are cases when a NMI handler handles multiple * events in the current NMI. One of these events may * be queued for in the next NMI. Because the event is * already handled, the next NMI will result in an unknown * NMI. Instead lets flag this for a potential NMI to * swallow. */ if (handled > 1) __this_cpu_write(swallow_nmi, true); return; } /* Non-CPU-specific NMI: NMI sources can be processed on any CPU */ raw_spin_lock(&nmi_reason_lock); reason = x86_platform.get_nmi_reason(); if (reason & NMI_REASON_MASK) { if (reason & NMI_REASON_SERR) pci_serr_error(reason, regs); else if (reason & NMI_REASON_IOCHK) io_check_error(reason, regs); #ifdef CONFIG_X86_32 /* * Reassert NMI in case it became active * meanwhile as it's edge-triggered: */ reassert_nmi(); #endif __this_cpu_add(nmi_stats.external, 1); raw_spin_unlock(&nmi_reason_lock); return; } raw_spin_unlock(&nmi_reason_lock); /* * Only one NMI can be latched at a time. To handle * this we may process multiple nmi handlers at once to * cover the case where an NMI is dropped. The downside * to this approach is we may process an NMI prematurely, * while its real NMI is sitting latched. This will cause * an unknown NMI on the next run of the NMI processing. * * We tried to flag that condition above, by setting the * swallow_nmi flag when we process more than one event. 
* This condition is also only present on the second half * of a back-to-back NMI, so we flag that condition too. * * If both are true, we assume we already processed this * NMI previously and we swallow it. Otherwise we reset * the logic. * * There are scenarios where we may accidentally swallow * a 'real' unknown NMI. For example, while processing * a perf NMI another perf NMI comes in along with a * 'real' unknown NMI. These two NMIs get combined into * one (as descibed above). When the next NMI gets * processed, it will be flagged by perf as handled, but * noone will know that there was a 'real' unknown NMI sent * also. As a result it gets swallowed. Or if the first * perf NMI returns two events handled then the second * NMI will get eaten by the logic below, again losing a * 'real' unknown NMI. But this is the best we can do * for now. */ if (b2b && __this_cpu_read(swallow_nmi)) __this_cpu_add(nmi_stats.swallow, 1); else unknown_nmi_error(reason, regs); } /* * NMIs can hit breakpoints which will cause it to lose its * NMI context with the CPU when the breakpoint does an iret. */ #ifdef CONFIG_X86_32 /* * For i386, NMIs use the same stack as the kernel, and we can * add a workaround to the iret problem in C (preventing nested * NMIs if an NMI takes a trap). Simply have 3 states the NMI * can be in: * * 1) not running * 2) executing * 3) latched * * When no NMI is in progress, it is in the "not running" state. * When an NMI comes in, it goes into the "executing" state. * Normally, if another NMI is triggered, it does not interrupt * the running NMI and the HW will simply latch it so that when * the first NMI finishes, it will restart the second NMI. * (Note, the latch is binary, thus multiple NMIs triggering, * when one is running, are ignored. Only one NMI is restarted.) * * If an NMI hits a breakpoint that executes an iret, another * NMI can preempt it. We do not want to allow this new NMI * to run, but we want to execute it when the first one finishes. 
* We set the state to "latched", and the exit of the first NMI will * perform a dec_return, if the result is zero (NOT_RUNNING), then * it will simply exit the NMI handler. If not, the dec_return * would have set the state to NMI_EXECUTING (what we want it to * be when we are running). In this case, we simply jump back * to rerun the NMI handler again, and restart the 'latched' NMI. * * No trap (breakpoint or page fault) should be hit before nmi_restart, * thus there is no race between the first check of state for NOT_RUNNING * and setting it to NMI_EXECUTING. The HW will prevent nested NMIs * at this point. * * In case the NMI takes a page fault, we need to save off the CR2 * because the NMI could have preempted another page fault and corrupt * the CR2 that is about to be read. As nested NMIs must be restarted * and they can not take breakpoints or page faults, the update of the * CR2 must be done before converting the nmi state back to NOT_RUNNING. * Otherwise, there would be a race of another nested NMI coming in * after setting state to NOT_RUNNING but before updating the nmi_cr2. */ enum nmi_states { NMI_NOT_RUNNING = 0, NMI_EXECUTING, NMI_LATCHED, }; static DEFINE_PER_CPU(enum nmi_states, nmi_state); static DEFINE_PER_CPU(unsigned long, nmi_cr2); #define nmi_nesting_preprocess(regs) \ do { \ if (this_cpu_read(nmi_state) != NMI_NOT_RUNNING) { \ this_cpu_write(nmi_state, NMI_LATCHED); \ return; \ } \ this_cpu_write(nmi_state, NMI_EXECUTING); \ this_cpu_write(nmi_cr2, read_cr2()); \ } while (0); \ nmi_restart: #define nmi_nesting_postprocess() \ do { \ if (unlikely(this_cpu_read(nmi_cr2) != read_cr2())) \ write_cr2(this_cpu_read(nmi_cr2)); \ if (this_cpu_dec_return(nmi_state)) \ goto nmi_restart; \ } while (0) #else /* x86_64 */ /* * In x86_64 things are a bit more difficult. This has the same problem * where an NMI hitting a breakpoint that calls iret will remove the * NMI context, allowing a nested NMI to enter. 
What makes this more * difficult is that both NMIs and breakpoints have their own stack. * When a new NMI or breakpoint is executed, the stack is set to a fixed * point. If an NMI is nested, it will have its stack set at that same * fixed address that the first NMI had, and will start corrupting the * stack. This is handled in entry_64.S, but the same problem exists with * the breakpoint stack. * * If a breakpoint is being processed, and the debug stack is being used, * if an NMI comes in and also hits a breakpoint, the stack pointer * will be set to the same fixed address as the breakpoint that was * interrupted, causing that stack to be corrupted. To handle this case, * check if the stack that was interrupted is the debug stack, and if * so, change the IDT so that new breakpoints will use the current stack * and not switch to the fixed address. On return of the NMI, switch back * to the original IDT. */ static DEFINE_PER_CPU(int, update_debug_stack); static inline void nmi_nesting_preprocess(struct pt_regs *regs) { /* * If we interrupted a breakpoint, it is possible that * the nmi handler will have breakpoints too. We need to * change the IDT such that breakpoints that happen here * continue to use the NMI stack. 
*/ if (unlikely(is_debug_stack(regs->sp))) { debug_stack_set_zero(); this_cpu_write(update_debug_stack, 1); } } static inline void nmi_nesting_postprocess(void) { if (unlikely(this_cpu_read(update_debug_stack))) { debug_stack_reset(); this_cpu_write(update_debug_stack, 0); } } #endif dotraplinkage notrace __kprobes void do_nmi(struct pt_regs *regs, long error_code) { nmi_nesting_preprocess(regs); nmi_enter(); inc_irq_stat(__nmi_count); if (!ignore_nmis) default_do_nmi(regs); nmi_exit(); /* On i386, may loop back to preprocess */ nmi_nesting_postprocess(); } void stop_nmi(void) { ignore_nmis++; } void restart_nmi(void) { ignore_nmis--; } /* reset the back-to-back NMI logic */ void local_touch_nmi(void) { __this_cpu_write(last_nmi_rip, 0); } EXPORT_SYMBOL_GPL(local_touch_nmi);
gpl-2.0
klock-android/linux
drivers/clk/at91/clk-programmable.c
83
7039
/* * Copyright (C) 2013 Boris BREZILLON <b.brezillon@overkiz.com> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2 of the License, or * (at your option) any later version. * */ #include <linux/clk-provider.h> #include <linux/clkdev.h> #include <linux/clk/at91_pmc.h> #include <linux/of.h> #include <linux/of_address.h> #include <linux/io.h> #include <linux/wait.h> #include <linux/sched.h> #include "pmc.h" #define PROG_SOURCE_MAX 5 #define PROG_ID_MAX 7 #define PROG_STATUS_MASK(id) (1 << ((id) + 8)) #define PROG_PRES_MASK 0x7 #define PROG_MAX_RM9200_CSS 3 struct clk_programmable_layout { u8 pres_shift; u8 css_mask; u8 have_slck_mck; }; struct clk_programmable { struct clk_hw hw; struct at91_pmc *pmc; u8 id; const struct clk_programmable_layout *layout; }; #define to_clk_programmable(hw) container_of(hw, struct clk_programmable, hw) static unsigned long clk_programmable_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) { u32 pres; struct clk_programmable *prog = to_clk_programmable(hw); struct at91_pmc *pmc = prog->pmc; const struct clk_programmable_layout *layout = prog->layout; pres = (pmc_read(pmc, AT91_PMC_PCKR(prog->id)) >> layout->pres_shift) & PROG_PRES_MASK; return parent_rate >> pres; } static long clk_programmable_determine_rate(struct clk_hw *hw, unsigned long rate, unsigned long *best_parent_rate, struct clk_hw **best_parent_hw) { struct clk *parent = NULL; long best_rate = -EINVAL; unsigned long parent_rate; unsigned long tmp_rate; int shift; int i; for (i = 0; i < __clk_get_num_parents(hw->clk); i++) { parent = clk_get_parent_by_index(hw->clk, i); if (!parent) continue; parent_rate = __clk_get_rate(parent); for (shift = 0; shift < PROG_PRES_MASK; shift++) { tmp_rate = parent_rate >> shift; if (tmp_rate <= rate) break; } if (tmp_rate > rate) continue; if (best_rate < 0 || (rate - tmp_rate) < (rate - 
best_rate)) { best_rate = tmp_rate; *best_parent_rate = parent_rate; *best_parent_hw = __clk_get_hw(parent); } if (!best_rate) break; } return best_rate; } static int clk_programmable_set_parent(struct clk_hw *hw, u8 index) { struct clk_programmable *prog = to_clk_programmable(hw); const struct clk_programmable_layout *layout = prog->layout; struct at91_pmc *pmc = prog->pmc; u32 tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)) & ~layout->css_mask; if (layout->have_slck_mck) tmp &= AT91_PMC_CSSMCK_MCK; if (index > layout->css_mask) { if (index > PROG_MAX_RM9200_CSS && layout->have_slck_mck) { tmp |= AT91_PMC_CSSMCK_MCK; return 0; } else { return -EINVAL; } } pmc_write(pmc, AT91_PMC_PCKR(prog->id), tmp | index); return 0; } static u8 clk_programmable_get_parent(struct clk_hw *hw) { u32 tmp; u8 ret; struct clk_programmable *prog = to_clk_programmable(hw); struct at91_pmc *pmc = prog->pmc; const struct clk_programmable_layout *layout = prog->layout; tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)); ret = tmp & layout->css_mask; if (layout->have_slck_mck && (tmp & AT91_PMC_CSSMCK_MCK) && !ret) ret = PROG_MAX_RM9200_CSS + 1; return ret; } static int clk_programmable_set_rate(struct clk_hw *hw, unsigned long rate, unsigned long parent_rate) { struct clk_programmable *prog = to_clk_programmable(hw); struct at91_pmc *pmc = prog->pmc; const struct clk_programmable_layout *layout = prog->layout; unsigned long div = parent_rate / rate; int shift = 0; u32 tmp = pmc_read(pmc, AT91_PMC_PCKR(prog->id)) & ~(PROG_PRES_MASK << layout->pres_shift); if (!div) return -EINVAL; shift = fls(div) - 1; if (div != (1<<shift)) return -EINVAL; if (shift >= PROG_PRES_MASK) return -EINVAL; pmc_write(pmc, AT91_PMC_PCKR(prog->id), tmp | (shift << layout->pres_shift)); return 0; } static const struct clk_ops programmable_ops = { .recalc_rate = clk_programmable_recalc_rate, .determine_rate = clk_programmable_determine_rate, .get_parent = clk_programmable_get_parent, .set_parent = clk_programmable_set_parent, 
.set_rate = clk_programmable_set_rate, }; static struct clk * __init at91_clk_register_programmable(struct at91_pmc *pmc, const char *name, const char **parent_names, u8 num_parents, u8 id, const struct clk_programmable_layout *layout) { struct clk_programmable *prog; struct clk *clk = NULL; struct clk_init_data init; if (id > PROG_ID_MAX) return ERR_PTR(-EINVAL); prog = kzalloc(sizeof(*prog), GFP_KERNEL); if (!prog) return ERR_PTR(-ENOMEM); init.name = name; init.ops = &programmable_ops; init.parent_names = parent_names; init.num_parents = num_parents; init.flags = CLK_SET_RATE_GATE | CLK_SET_PARENT_GATE; prog->id = id; prog->layout = layout; prog->hw.init = &init; prog->pmc = pmc; clk = clk_register(NULL, &prog->hw); if (IS_ERR(clk)) kfree(prog); return clk; } static const struct clk_programmable_layout at91rm9200_programmable_layout = { .pres_shift = 2, .css_mask = 0x3, .have_slck_mck = 0, }; static const struct clk_programmable_layout at91sam9g45_programmable_layout = { .pres_shift = 2, .css_mask = 0x3, .have_slck_mck = 1, }; static const struct clk_programmable_layout at91sam9x5_programmable_layout = { .pres_shift = 4, .css_mask = 0x7, .have_slck_mck = 0, }; static void __init of_at91_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc, const struct clk_programmable_layout *layout) { int num; u32 id; int i; struct clk *clk; int num_parents; const char *parent_names[PROG_SOURCE_MAX]; const char *name; struct device_node *progclknp; num_parents = of_count_phandle_with_args(np, "clocks", "#clock-cells"); if (num_parents <= 0 || num_parents > PROG_SOURCE_MAX) return; for (i = 0; i < num_parents; ++i) { parent_names[i] = of_clk_get_parent_name(np, i); if (!parent_names[i]) return; } num = of_get_child_count(np); if (!num || num > (PROG_ID_MAX + 1)) return; for_each_child_of_node(np, progclknp) { if (of_property_read_u32(progclknp, "reg", &id)) continue; if (of_property_read_string(np, "clock-output-names", &name)) name = progclknp->name; clk = 
at91_clk_register_programmable(pmc, name, parent_names, num_parents, id, layout); if (IS_ERR(clk)) continue; of_clk_add_provider(progclknp, of_clk_src_simple_get, clk); } } void __init of_at91rm9200_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc) { of_at91_clk_prog_setup(np, pmc, &at91rm9200_programmable_layout); } void __init of_at91sam9g45_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc) { of_at91_clk_prog_setup(np, pmc, &at91sam9g45_programmable_layout); } void __init of_at91sam9x5_clk_prog_setup(struct device_node *np, struct at91_pmc *pmc) { of_at91_clk_prog_setup(np, pmc, &at91sam9x5_programmable_layout); }
gpl-2.0
shark147/k2ul-kernel
arch/arm/mach-msm/htc_watchdog_monitor.c
339
5389
/* arch/arm/mach-msm/htc_watchdog_monitor.c * * Copyright (C) 2011 HTC Corporation * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and * may be copied, distributed, and modified under those terms. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ #include <linux/kernel_stat.h> #include <linux/vmalloc.h> #include <linux/sched.h> #include <linux/tick.h> #include "htc_watchdog_monitor.h" #define MAX_PID 32768 #define NUM_BUSY_THREAD_CHECK 5 #ifndef arch_idle_time #define arch_idle_time(cpu) 0 #endif static unsigned int *prev_proc_stat; static int *curr_proc_delta; static struct task_struct **task_ptr_array; static struct htc_cpu_usage_stat old_cpu_stat; static spinlock_t lock; static u64 get_idle_time(int cpu) { u64 idle, idle_time = get_cpu_idle_time_us(cpu, NULL); if (idle_time == -1ULL) idle = kcpustat_cpu(cpu).cpustat[CPUTIME_IDLE]; else idle = usecs_to_cputime64(idle_time); return idle; } static u64 get_iowait_time(int cpu) { u64 iowait, iowait_time = get_cpu_iowait_time_us(cpu, NULL); if (iowait_time == -1ULL) iowait = kcpustat_cpu(cpu).cpustat[CPUTIME_IOWAIT]; else iowait = usecs_to_cputime64(iowait_time); return iowait; } static void get_all_cpu_stat(struct htc_cpu_usage_stat *cpu_stat) { int i; u64 user, nice, system, idle, iowait, irq, softirq, steal; u64 guest, guest_nice; if (!cpu_stat) return; user = nice = system = idle = iowait = irq = softirq = steal = 0; guest = guest_nice = 0; for_each_possible_cpu(i) { user += kcpustat_cpu(i).cpustat[CPUTIME_USER]; nice += kcpustat_cpu(i).cpustat[CPUTIME_NICE]; system += kcpustat_cpu(i).cpustat[CPUTIME_SYSTEM]; idle += get_idle_time(i); iowait += get_iowait_time(i); irq += kcpustat_cpu(i).cpustat[CPUTIME_IRQ]; softirq += 
kcpustat_cpu(i).cpustat[CPUTIME_SOFTIRQ]; steal += kcpustat_cpu(i).cpustat[CPUTIME_STEAL]; guest += kcpustat_cpu(i).cpustat[CPUTIME_GUEST]; guest_nice += kcpustat_cpu(i).cpustat[CPUTIME_GUEST_NICE]; } cpu_stat->user = user; cpu_stat->nice = nice; cpu_stat->system = system; cpu_stat->softirq = softirq; cpu_stat->irq = irq; cpu_stat->idle = idle; cpu_stat->iowait = iowait; cpu_stat->steal = steal; cpu_stat->guest = guest; cpu_stat->guest_nice = guest_nice; } void htc_watchdog_monitor_init(void) { spin_lock_init(&lock); prev_proc_stat = vmalloc(sizeof(int) * MAX_PID); curr_proc_delta = vmalloc(sizeof(int) * MAX_PID); task_ptr_array = vmalloc(sizeof(int) * MAX_PID); if (prev_proc_stat) memset(prev_proc_stat, 0, sizeof(int) * MAX_PID); if (curr_proc_delta) memset(curr_proc_delta, 0, sizeof(int) * MAX_PID); if (task_ptr_array) memset(task_ptr_array, 0, sizeof(int) * MAX_PID); get_all_cpu_stat(&old_cpu_stat); } static int findBiggestInRange(int *array, int max_limit_idx) { int largest_idx = 0, i; for (i = 0; i < MAX_PID; i++) { if (array[i] > array[largest_idx] && (max_limit_idx == -1 || array[i] < array[max_limit_idx])) largest_idx = i; } return largest_idx; } static void sorting(int *source, int *output) { int i; for (i = 0; i < NUM_BUSY_THREAD_CHECK; i++) { if (i == 0) output[i] = findBiggestInRange(source, -1); else output[i] = findBiggestInRange(source, output[i-1]); } } void htc_watchdog_top_stat(void) { struct task_struct *p; int top_loading[NUM_BUSY_THREAD_CHECK], i; unsigned long user_time, system_time, io_time; unsigned long irq_time, idle_time, delta_time; ulong flags; struct task_cputime cputime; struct htc_cpu_usage_stat new_cpu_stat; if (task_ptr_array == NULL || curr_proc_delta == NULL || prev_proc_stat == NULL) return; memset(curr_proc_delta, 0, sizeof(int) * MAX_PID); memset(task_ptr_array, 0, sizeof(int) * MAX_PID); printk(KERN_ERR"\n\n[%s] Start to dump:\n", __func__); spin_lock_irqsave(&lock, flags); get_all_cpu_stat(&new_cpu_stat); for_each_process(p) 
{ thread_group_cputime(p, &cputime); if (p->pid < MAX_PID) { curr_proc_delta[p->pid] = (cputime.utime + cputime.stime) - (prev_proc_stat[p->pid]); task_ptr_array[p->pid] = p; } } sorting(curr_proc_delta, top_loading); user_time = (unsigned long)((new_cpu_stat.user + new_cpu_stat.nice) - (old_cpu_stat.user + old_cpu_stat.nice)); system_time = (unsigned long)(new_cpu_stat.system - old_cpu_stat.system); io_time = (unsigned long)(new_cpu_stat.iowait - old_cpu_stat.iowait); irq_time = (unsigned long)((new_cpu_stat.irq + new_cpu_stat.softirq) - (old_cpu_stat.irq + old_cpu_stat.softirq)); idle_time = (unsigned long) ((new_cpu_stat.idle + new_cpu_stat.steal + new_cpu_stat.guest) - (old_cpu_stat.idle + old_cpu_stat.steal + old_cpu_stat.guest)); delta_time = user_time + system_time + io_time + irq_time + idle_time; printk(KERN_ERR"CPU\t\tPID\t\tName\n"); for (i = 0; i < NUM_BUSY_THREAD_CHECK; i++) { printk(KERN_ERR "%lu%%\t\t%d\t\t%s\n", curr_proc_delta[top_loading[i]] * 100 / delta_time, top_loading[i], task_ptr_array[top_loading[i]]->comm); } spin_unlock_irqrestore(&lock, flags); printk(KERN_ERR "\n"); }
gpl-2.0
Ankarrr/Linux-2.6.32.60-rts
arch/mips/math-emu/sp_simple.c
595
1851
/* IEEE754 floating point arithmetic * single precision */ /* * MIPS floating point support * Copyright (C) 1994-2000 Algorithmics Ltd. * http://www.algor.co.uk * * ######################################################################## * * This program is free software; you can distribute it and/or modify it * under the terms of the GNU General Public License (Version 2) as * published by the Free Software Foundation. * * This program is distributed in the hope it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * for more details. * * You should have received a copy of the GNU General Public License along * with this program; if not, write to the Free Software Foundation, Inc., * 59 Temple Place - Suite 330, Boston MA 02111-1307, USA. * * ######################################################################## */ #include "ieee754sp.h" int ieee754sp_finite(ieee754sp x) { return SPBEXP(x) != SP_EMAX + 1 + SP_EBIAS; } ieee754sp ieee754sp_copysign(ieee754sp x, ieee754sp y) { CLEARCX; SPSIGN(x) = SPSIGN(y); return x; } ieee754sp ieee754sp_neg(ieee754sp x) { COMPXSP; EXPLODEXSP; CLEARCX; FLUSHXSP; /* * Invert the sign ALWAYS to prevent an endless recursion on * pow() in libc. */ /* quick fix up */ SPSIGN(x) ^= 1; if (xc == IEEE754_CLASS_SNAN) { ieee754sp y = ieee754sp_indef(); SETCX(IEEE754_INVALID_OPERATION); SPSIGN(y) = SPSIGN(x); return ieee754sp_nanxcpt(y, "neg"); } return x; } ieee754sp ieee754sp_abs(ieee754sp x) { COMPXSP; EXPLODEXSP; CLEARCX; FLUSHXSP; /* Clear sign ALWAYS, irrespective of NaN */ SPSIGN(x) = 0; if (xc == IEEE754_CLASS_SNAN) { return ieee754sp_nanxcpt(ieee754sp_indef(), "abs"); } return x; }
gpl-2.0
eoghan2t9/android_kernel_oppo_n1_test
fs/lockd/clntxdr.c
1619
13943
/* * linux/fs/lockd/clntxdr.c * * XDR functions to encode/decode NLM version 3 RPC arguments and results. * NLM version 3 is backwards compatible with NLM versions 1 and 2. * * NLM client-side only. * * Copyright (C) 2010, Oracle. All rights reserved. */ #include <linux/types.h> #include <linux/sunrpc/xdr.h> #include <linux/sunrpc/clnt.h> #include <linux/sunrpc/stats.h> #include <linux/lockd/lockd.h> #define NLMDBG_FACILITY NLMDBG_XDR #if (NLMCLNT_OHSIZE > XDR_MAX_NETOBJ) # error "NLM host name cannot be larger than XDR_MAX_NETOBJ!" #endif /* * Declare the space requirements for NLM arguments and replies as * number of 32bit-words */ #define NLM_cookie_sz (1+(NLM_MAXCOOKIELEN>>2)) #define NLM_caller_sz (1+(NLMCLNT_OHSIZE>>2)) #define NLM_owner_sz (1+(NLMCLNT_OHSIZE>>2)) #define NLM_fhandle_sz (1+(NFS2_FHSIZE>>2)) #define NLM_lock_sz (3+NLM_caller_sz+NLM_owner_sz+NLM_fhandle_sz) #define NLM_holder_sz (4+NLM_owner_sz) #define NLM_testargs_sz (NLM_cookie_sz+1+NLM_lock_sz) #define NLM_lockargs_sz (NLM_cookie_sz+4+NLM_lock_sz) #define NLM_cancargs_sz (NLM_cookie_sz+2+NLM_lock_sz) #define NLM_unlockargs_sz (NLM_cookie_sz+NLM_lock_sz) #define NLM_testres_sz (NLM_cookie_sz+1+NLM_holder_sz) #define NLM_res_sz (NLM_cookie_sz+1) #define NLM_norep_sz (0) static s32 loff_t_to_s32(loff_t offset) { s32 res; if (offset >= NLM_OFFSET_MAX) res = NLM_OFFSET_MAX; else if (offset <= -NLM_OFFSET_MAX) res = -NLM_OFFSET_MAX; else res = offset; return res; } static void nlm_compute_offsets(const struct nlm_lock *lock, u32 *l_offset, u32 *l_len) { const struct file_lock *fl = &lock->fl; BUG_ON(fl->fl_start > NLM_OFFSET_MAX); BUG_ON(fl->fl_end > NLM_OFFSET_MAX && fl->fl_end != OFFSET_MAX); *l_offset = loff_t_to_s32(fl->fl_start); if (fl->fl_end == OFFSET_MAX) *l_len = 0; else *l_len = loff_t_to_s32(fl->fl_end - fl->fl_start + 1); } /* * Handle decode buffer overflows out-of-line. 
*/ static void print_overflow_msg(const char *func, const struct xdr_stream *xdr) { dprintk("lockd: %s prematurely hit the end of our receive buffer. " "Remaining buffer length is %tu words.\n", func, xdr->end - xdr->p); } /* * Encode/decode NLMv3 basic data types * * Basic NLMv3 data types are not defined in an IETF standards * document. X/Open has a description of these data types that * is useful. See Chapter 10 of "Protocols for Interworking: * XNFS, Version 3W". * * Not all basic data types have their own encoding and decoding * functions. For run-time efficiency, some data types are encoded * or decoded inline. */ static void encode_bool(struct xdr_stream *xdr, const int value) { __be32 *p; p = xdr_reserve_space(xdr, 4); *p = value ? xdr_one : xdr_zero; } static void encode_int32(struct xdr_stream *xdr, const s32 value) { __be32 *p; p = xdr_reserve_space(xdr, 4); *p = cpu_to_be32(value); } /* * typedef opaque netobj<MAXNETOBJ_SZ> */ static void encode_netobj(struct xdr_stream *xdr, const u8 *data, const unsigned int length) { __be32 *p; BUG_ON(length > XDR_MAX_NETOBJ); p = xdr_reserve_space(xdr, 4 + length); xdr_encode_opaque(p, data, length); } static int decode_netobj(struct xdr_stream *xdr, struct xdr_netobj *obj) { u32 length; __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) goto out_overflow; length = be32_to_cpup(p++); if (unlikely(length > XDR_MAX_NETOBJ)) goto out_size; obj->len = length; obj->data = (u8 *)p; return 0; out_size: dprintk("NFS: returned netobj was too long: %u\n", length); return -EIO; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } /* * netobj cookie; */ static void encode_cookie(struct xdr_stream *xdr, const struct nlm_cookie *cookie) { BUG_ON(cookie->len > NLM_MAXCOOKIELEN); encode_netobj(xdr, (u8 *)&cookie->data, cookie->len); } static int decode_cookie(struct xdr_stream *xdr, struct nlm_cookie *cookie) { u32 length; __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) goto 
out_overflow; length = be32_to_cpup(p++); /* apparently HPUX can return empty cookies */ if (length == 0) goto out_hpux; if (length > NLM_MAXCOOKIELEN) goto out_size; p = xdr_inline_decode(xdr, length); if (unlikely(p == NULL)) goto out_overflow; cookie->len = length; memcpy(cookie->data, p, length); return 0; out_hpux: cookie->len = 4; memset(cookie->data, 0, 4); return 0; out_size: dprintk("NFS: returned cookie was too long: %u\n", length); return -EIO; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } /* * netobj fh; */ static void encode_fh(struct xdr_stream *xdr, const struct nfs_fh *fh) { BUG_ON(fh->size != NFS2_FHSIZE); encode_netobj(xdr, (u8 *)&fh->data, NFS2_FHSIZE); } /* * enum nlm_stats { * LCK_GRANTED = 0, * LCK_DENIED = 1, * LCK_DENIED_NOLOCKS = 2, * LCK_BLOCKED = 3, * LCK_DENIED_GRACE_PERIOD = 4 * }; * * * struct nlm_stat { * nlm_stats stat; * }; * * NB: we don't swap bytes for the NLM status values. The upper * layers deal directly with the status value in network byte * order. 
*/ static void encode_nlm_stat(struct xdr_stream *xdr, const __be32 stat) { __be32 *p; WARN_ON_ONCE(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD); p = xdr_reserve_space(xdr, 4); *p = stat; } static int decode_nlm_stat(struct xdr_stream *xdr, __be32 *stat) { __be32 *p; p = xdr_inline_decode(xdr, 4); if (unlikely(p == NULL)) goto out_overflow; if (unlikely(ntohl(*p) > ntohl(nlm_lck_denied_grace_period))) goto out_enum; *stat = *p; return 0; out_enum: dprintk("%s: server returned invalid nlm_stats value: %u\n", __func__, be32_to_cpup(p)); return -EIO; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } /* * struct nlm_holder { * bool exclusive; * int uppid; * netobj oh; * unsigned l_offset; * unsigned l_len; * }; */ static void encode_nlm_holder(struct xdr_stream *xdr, const struct nlm_res *result) { const struct nlm_lock *lock = &result->lock; u32 l_offset, l_len; __be32 *p; encode_bool(xdr, lock->fl.fl_type == F_RDLCK); encode_int32(xdr, lock->svid); encode_netobj(xdr, lock->oh.data, lock->oh.len); p = xdr_reserve_space(xdr, 4 + 4); nlm_compute_offsets(lock, &l_offset, &l_len); *p++ = cpu_to_be32(l_offset); *p = cpu_to_be32(l_len); } static int decode_nlm_holder(struct xdr_stream *xdr, struct nlm_res *result) { struct nlm_lock *lock = &result->lock; struct file_lock *fl = &lock->fl; u32 exclusive, l_offset, l_len; int error; __be32 *p; s32 end; memset(lock, 0, sizeof(*lock)); locks_init_lock(fl); p = xdr_inline_decode(xdr, 4 + 4); if (unlikely(p == NULL)) goto out_overflow; exclusive = be32_to_cpup(p++); lock->svid = be32_to_cpup(p); fl->fl_pid = (pid_t)lock->svid; error = decode_netobj(xdr, &lock->oh); if (unlikely(error)) goto out; p = xdr_inline_decode(xdr, 4 + 4); if (unlikely(p == NULL)) goto out_overflow; fl->fl_flags = FL_POSIX; fl->fl_type = exclusive != 0 ? 
F_WRLCK : F_RDLCK; l_offset = be32_to_cpup(p++); l_len = be32_to_cpup(p); end = l_offset + l_len - 1; fl->fl_start = (loff_t)l_offset; if (l_len == 0 || end < 0) fl->fl_end = OFFSET_MAX; else fl->fl_end = (loff_t)end; error = 0; out: return error; out_overflow: print_overflow_msg(__func__, xdr); return -EIO; } /* * string caller_name<LM_MAXSTRLEN>; */ static void encode_caller_name(struct xdr_stream *xdr, const char *name) { /* NB: client-side does not set lock->len */ u32 length = strlen(name); __be32 *p; BUG_ON(length > NLM_MAXSTRLEN); p = xdr_reserve_space(xdr, 4 + length); xdr_encode_opaque(p, name, length); } /* * struct nlm_lock { * string caller_name<LM_MAXSTRLEN>; * netobj fh; * netobj oh; * int uppid; * unsigned l_offset; * unsigned l_len; * }; */ static void encode_nlm_lock(struct xdr_stream *xdr, const struct nlm_lock *lock) { u32 l_offset, l_len; __be32 *p; encode_caller_name(xdr, lock->caller); encode_fh(xdr, &lock->fh); encode_netobj(xdr, lock->oh.data, lock->oh.len); p = xdr_reserve_space(xdr, 4 + 4 + 4); *p++ = cpu_to_be32(lock->svid); nlm_compute_offsets(lock, &l_offset, &l_len); *p++ = cpu_to_be32(l_offset); *p = cpu_to_be32(l_len); } /* * NLMv3 XDR encode functions * * NLMv3 argument types are defined in Chapter 10 of The Open Group's * "Protocols for Interworking: XNFS, Version 3W". 
*/ /* * struct nlm_testargs { * netobj cookie; * bool exclusive; * struct nlm_lock alock; * }; */ static void nlm_xdr_enc_testargs(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nlm_args *args) { const struct nlm_lock *lock = &args->lock; encode_cookie(xdr, &args->cookie); encode_bool(xdr, lock->fl.fl_type == F_WRLCK); encode_nlm_lock(xdr, lock); } /* * struct nlm_lockargs { * netobj cookie; * bool block; * bool exclusive; * struct nlm_lock alock; * bool reclaim; * int state; * }; */ static void nlm_xdr_enc_lockargs(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nlm_args *args) { const struct nlm_lock *lock = &args->lock; encode_cookie(xdr, &args->cookie); encode_bool(xdr, args->block); encode_bool(xdr, lock->fl.fl_type == F_WRLCK); encode_nlm_lock(xdr, lock); encode_bool(xdr, args->reclaim); encode_int32(xdr, args->state); } /* * struct nlm_cancargs { * netobj cookie; * bool block; * bool exclusive; * struct nlm_lock alock; * }; */ static void nlm_xdr_enc_cancargs(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nlm_args *args) { const struct nlm_lock *lock = &args->lock; encode_cookie(xdr, &args->cookie); encode_bool(xdr, args->block); encode_bool(xdr, lock->fl.fl_type == F_WRLCK); encode_nlm_lock(xdr, lock); } /* * struct nlm_unlockargs { * netobj cookie; * struct nlm_lock alock; * }; */ static void nlm_xdr_enc_unlockargs(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nlm_args *args) { const struct nlm_lock *lock = &args->lock; encode_cookie(xdr, &args->cookie); encode_nlm_lock(xdr, lock); } /* * struct nlm_res { * netobj cookie; * nlm_stat stat; * }; */ static void nlm_xdr_enc_res(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nlm_res *result) { encode_cookie(xdr, &result->cookie); encode_nlm_stat(xdr, result->status); } /* * union nlm_testrply switch (nlm_stats stat) { * case LCK_DENIED: * struct nlm_holder holder; * default: * void; * }; * * struct nlm_testres { * netobj cookie; * nlm_testrply 
test_stat; * }; */ static void encode_nlm_testrply(struct xdr_stream *xdr, const struct nlm_res *result) { if (result->status == nlm_lck_denied) encode_nlm_holder(xdr, result); } static void nlm_xdr_enc_testres(struct rpc_rqst *req, struct xdr_stream *xdr, const struct nlm_res *result) { encode_cookie(xdr, &result->cookie); encode_nlm_stat(xdr, result->status); encode_nlm_testrply(xdr, result); } /* * NLMv3 XDR decode functions * * NLMv3 result types are defined in Chapter 10 of The Open Group's * "Protocols for Interworking: XNFS, Version 3W". */ /* * union nlm_testrply switch (nlm_stats stat) { * case LCK_DENIED: * struct nlm_holder holder; * default: * void; * }; * * struct nlm_testres { * netobj cookie; * nlm_testrply test_stat; * }; */ static int decode_nlm_testrply(struct xdr_stream *xdr, struct nlm_res *result) { int error; error = decode_nlm_stat(xdr, &result->status); if (unlikely(error)) goto out; if (result->status == nlm_lck_denied) error = decode_nlm_holder(xdr, result); out: return error; } static int nlm_xdr_dec_testres(struct rpc_rqst *req, struct xdr_stream *xdr, struct nlm_res *result) { int error; error = decode_cookie(xdr, &result->cookie); if (unlikely(error)) goto out; error = decode_nlm_testrply(xdr, result); out: return error; } /* * struct nlm_res { * netobj cookie; * nlm_stat stat; * }; */ static int nlm_xdr_dec_res(struct rpc_rqst *req, struct xdr_stream *xdr, struct nlm_res *result) { int error; error = decode_cookie(xdr, &result->cookie); if (unlikely(error)) goto out; error = decode_nlm_stat(xdr, &result->status); out: return error; } /* * For NLM, a void procedure really returns nothing */ #define nlm_xdr_dec_norep NULL #define PROC(proc, argtype, restype) \ [NLMPROC_##proc] = { \ .p_proc = NLMPROC_##proc, \ .p_encode = (kxdreproc_t)nlm_xdr_enc_##argtype, \ .p_decode = (kxdrdproc_t)nlm_xdr_dec_##restype, \ .p_arglen = NLM_##argtype##_sz, \ .p_replen = NLM_##restype##_sz, \ .p_statidx = NLMPROC_##proc, \ .p_name = #proc, \ } static 
struct rpc_procinfo nlm_procedures[] = { PROC(TEST, testargs, testres), PROC(LOCK, lockargs, res), PROC(CANCEL, cancargs, res), PROC(UNLOCK, unlockargs, res), PROC(GRANTED, testargs, res), PROC(TEST_MSG, testargs, norep), PROC(LOCK_MSG, lockargs, norep), PROC(CANCEL_MSG, cancargs, norep), PROC(UNLOCK_MSG, unlockargs, norep), PROC(GRANTED_MSG, testargs, norep), PROC(TEST_RES, testres, norep), PROC(LOCK_RES, res, norep), PROC(CANCEL_RES, res, norep), PROC(UNLOCK_RES, res, norep), PROC(GRANTED_RES, res, norep), }; static const struct rpc_version nlm_version1 = { .number = 1, .nrprocs = ARRAY_SIZE(nlm_procedures), .procs = nlm_procedures, }; static const struct rpc_version nlm_version3 = { .number = 3, .nrprocs = ARRAY_SIZE(nlm_procedures), .procs = nlm_procedures, }; static const struct rpc_version *nlm_versions[] = { [1] = &nlm_version1, [3] = &nlm_version3, #ifdef CONFIG_LOCKD_V4 [4] = &nlm_version4, #endif }; static struct rpc_stat nlm_rpc_stats; const struct rpc_program nlm_program = { .name = "lockd", .number = NLM_PROGRAM, .nrvers = ARRAY_SIZE(nlm_versions), .version = nlm_versions, .stats = &nlm_rpc_stats, };
gpl-2.0
lx324310/linux
drivers/hwmon/ab8500.c
1875
5505
/* * Copyright (C) ST-Ericsson 2010 - 2013 * Author: Martin Persson <martin.persson@stericsson.com> * Hongbo Zhang <hongbo.zhang@linaro.org> * License Terms: GNU General Public License v2 * * When the AB8500 thermal warning temperature is reached (threshold cannot * be changed by SW), an interrupt is set, and if no further action is taken * within a certain time frame, kernel_power_off will be called. * * When AB8500 thermal shutdown temperature is reached a hardware shutdown of * the AB8500 will occur. */ #include <linux/err.h> #include <linux/hwmon.h> #include <linux/hwmon-sysfs.h> #include <linux/mfd/abx500.h> #include <linux/mfd/abx500/ab8500-bm.h> #include <linux/mfd/abx500/ab8500-gpadc.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/power/ab8500.h> #include <linux/reboot.h> #include <linux/slab.h> #include <linux/sysfs.h> #include "abx500.h" #define DEFAULT_POWER_OFF_DELAY (HZ * 10) #define THERMAL_VCC 1800 #define PULL_UP_RESISTOR 47000 /* Number of monitored sensors should not greater than NUM_SENSORS */ #define NUM_MONITORED_SENSORS 4 struct ab8500_gpadc_cfg { const struct abx500_res_to_temp *temp_tbl; int tbl_sz; int vcc; int r_up; }; struct ab8500_temp { struct ab8500_gpadc *gpadc; struct ab8500_btemp *btemp; struct delayed_work power_off_work; struct ab8500_gpadc_cfg cfg; struct abx500_temp *abx500_data; }; /* * The hardware connection is like this: * VCC----[ R_up ]-----[ NTC ]----GND * where R_up is pull-up resistance, and GPADC measures voltage on NTC. * and res_to_temp table is strictly sorted by falling resistance values. 
*/ static int ab8500_voltage_to_temp(struct ab8500_gpadc_cfg *cfg, int v_ntc, int *temp) { int r_ntc, i = 0, tbl_sz = cfg->tbl_sz; const struct abx500_res_to_temp *tbl = cfg->temp_tbl; if (cfg->vcc < 0 || v_ntc >= cfg->vcc) return -EINVAL; r_ntc = v_ntc * cfg->r_up / (cfg->vcc - v_ntc); if (r_ntc > tbl[0].resist || r_ntc < tbl[tbl_sz - 1].resist) return -EINVAL; while (!(r_ntc <= tbl[i].resist && r_ntc > tbl[i + 1].resist) && i < tbl_sz - 2) i++; /* return milli-Celsius */ *temp = tbl[i].temp * 1000 + ((tbl[i + 1].temp - tbl[i].temp) * 1000 * (r_ntc - tbl[i].resist)) / (tbl[i + 1].resist - tbl[i].resist); return 0; } static int ab8500_read_sensor(struct abx500_temp *data, u8 sensor, int *temp) { int voltage, ret; struct ab8500_temp *ab8500_data = data->plat_data; if (sensor == BAT_CTRL) { *temp = ab8500_btemp_get_batctrl_temp(ab8500_data->btemp); } else if (sensor == BTEMP_BALL) { *temp = ab8500_btemp_get_temp(ab8500_data->btemp); } else { voltage = ab8500_gpadc_convert(ab8500_data->gpadc, sensor); if (voltage < 0) return voltage; ret = ab8500_voltage_to_temp(&ab8500_data->cfg, voltage, temp); if (ret < 0) return ret; } return 0; } static void ab8500_thermal_power_off(struct work_struct *work) { struct ab8500_temp *ab8500_data = container_of(work, struct ab8500_temp, power_off_work.work); struct abx500_temp *abx500_data = ab8500_data->abx500_data; dev_warn(&abx500_data->pdev->dev, "Power off due to critical temp\n"); kernel_power_off(); } static ssize_t ab8500_show_name(struct device *dev, struct device_attribute *devattr, char *buf) { return sprintf(buf, "ab8500\n"); } static ssize_t ab8500_show_label(struct device *dev, struct device_attribute *devattr, char *buf) { char *label; struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); int index = attr->index; switch (index) { case 1: label = "ext_adc1"; break; case 2: label = "ext_adc2"; break; case 3: label = "bat_temp"; break; case 4: label = "bat_ctrl"; break; default: return -EINVAL; } return 
sprintf(buf, "%s\n", label); } static int ab8500_temp_irq_handler(int irq, struct abx500_temp *data) { struct ab8500_temp *ab8500_data = data->plat_data; dev_warn(&data->pdev->dev, "Power off in %d s\n", DEFAULT_POWER_OFF_DELAY / HZ); schedule_delayed_work(&ab8500_data->power_off_work, DEFAULT_POWER_OFF_DELAY); return 0; } int abx500_hwmon_init(struct abx500_temp *data) { struct ab8500_temp *ab8500_data; ab8500_data = devm_kzalloc(&data->pdev->dev, sizeof(*ab8500_data), GFP_KERNEL); if (!ab8500_data) return -ENOMEM; ab8500_data->gpadc = ab8500_gpadc_get("ab8500-gpadc.0"); if (IS_ERR(ab8500_data->gpadc)) return PTR_ERR(ab8500_data->gpadc); ab8500_data->btemp = ab8500_btemp_get(); if (IS_ERR(ab8500_data->btemp)) return PTR_ERR(ab8500_data->btemp); INIT_DELAYED_WORK(&ab8500_data->power_off_work, ab8500_thermal_power_off); ab8500_data->cfg.vcc = THERMAL_VCC; ab8500_data->cfg.r_up = PULL_UP_RESISTOR; ab8500_data->cfg.temp_tbl = ab8500_temp_tbl_a_thermistor; ab8500_data->cfg.tbl_sz = ab8500_temp_tbl_a_size; data->plat_data = ab8500_data; /* * ADC_AUX1 and ADC_AUX2, connected to external NTC * BTEMP_BALL and BAT_CTRL, fixed usage */ data->gpadc_addr[0] = ADC_AUX1; data->gpadc_addr[1] = ADC_AUX2; data->gpadc_addr[2] = BTEMP_BALL; data->gpadc_addr[3] = BAT_CTRL; data->monitored_sensors = NUM_MONITORED_SENSORS; data->ops.read_sensor = ab8500_read_sensor; data->ops.irq_handler = ab8500_temp_irq_handler; data->ops.show_name = ab8500_show_name; data->ops.show_label = ab8500_show_label; data->ops.is_visible = NULL; return 0; } EXPORT_SYMBOL(abx500_hwmon_init); MODULE_AUTHOR("Hongbo Zhang <hongbo.zhang@linaro.org>"); MODULE_DESCRIPTION("AB8500 temperature driver"); MODULE_LICENSE("GPL");
gpl-2.0
cile381/s7_flat_kernel
fs/proc/interrupts.c
2131
1092
#include <linux/fs.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/irqnr.h> #include <linux/proc_fs.h> #include <linux/seq_file.h> /* * /proc/interrupts */ static void *int_seq_start(struct seq_file *f, loff_t *pos) { return (*pos <= nr_irqs) ? pos : NULL; } static void *int_seq_next(struct seq_file *f, void *v, loff_t *pos) { (*pos)++; if (*pos > nr_irqs) return NULL; return pos; } static void int_seq_stop(struct seq_file *f, void *v) { /* Nothing to do */ } static const struct seq_operations int_seq_ops = { .start = int_seq_start, .next = int_seq_next, .stop = int_seq_stop, .show = show_interrupts }; static int interrupts_open(struct inode *inode, struct file *filp) { return seq_open(filp, &int_seq_ops); } static const struct file_operations proc_interrupts_operations = { .open = interrupts_open, .read = seq_read, .llseek = seq_lseek, .release = seq_release, }; static int __init proc_interrupts_init(void) { proc_create("interrupts", 0, NULL, &proc_interrupts_operations); return 0; } fs_initcall(proc_interrupts_init);
gpl-2.0
MikeC84/mac_kernel_moto_shamu
drivers/staging/csr/csr_wifi_sme_serialize.c
2387
271192
/***************************************************************************** (c) Cambridge Silicon Radio Limited 2012 All rights reserved and confidential information of CSR Refer to LICENSE.txt included with this source for details on the license terms. *****************************************************************************/ /* Note: this is an auto-generated file. */ #include <linux/string.h> #include <linux/slab.h> #include "csr_msgconv.h" #include "csr_wifi_sme_prim.h" #include "csr_wifi_sme_serialize.h" void CsrWifiSmePfree(void *ptr) { kfree(ptr); } size_t CsrWifiSmeAdhocConfigSetReqSizeof(void *msg) { size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. Could be more efficient (Try 11) */ bufferSize += 2; /* u16 primitive->adHocConfig.atimWindowTu */ bufferSize += 2; /* u16 primitive->adHocConfig.beaconPeriodTu */ bufferSize += 2; /* u16 primitive->adHocConfig.joinOnlyAttempts */ bufferSize += 2; /* u16 primitive->adHocConfig.joinAttemptIntervalMs */ return bufferSize; } u8* CsrWifiSmeAdhocConfigSetReqSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeAdhocConfigSetReq *primitive = (CsrWifiSmeAdhocConfigSetReq *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->adHocConfig.atimWindowTu); CsrUint16Ser(ptr, len, (u16) primitive->adHocConfig.beaconPeriodTu); CsrUint16Ser(ptr, len, (u16) primitive->adHocConfig.joinOnlyAttempts); CsrUint16Ser(ptr, len, (u16) primitive->adHocConfig.joinAttemptIntervalMs); return(ptr); } void* CsrWifiSmeAdhocConfigSetReqDes(u8 *buffer, size_t length) { CsrWifiSmeAdhocConfigSetReq *primitive = kmalloc(sizeof(CsrWifiSmeAdhocConfigSetReq), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->adHocConfig.atimWindowTu, buffer, &offset); CsrUint16Des((u16 *) &primitive->adHocConfig.beaconPeriodTu, buffer, &offset); CsrUint16Des((u16 *) &primitive->adHocConfig.joinOnlyAttempts, buffer, 
&offset); CsrUint16Des((u16 *) &primitive->adHocConfig.joinAttemptIntervalMs, buffer, &offset); return primitive; } size_t CsrWifiSmeBlacklistReqSizeof(void *msg) { CsrWifiSmeBlacklistReq *primitive = (CsrWifiSmeBlacklistReq *) msg; size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. Could be more efficient (Try 13) */ bufferSize += 2; /* u16 primitive->interfaceTag */ bufferSize += 1; /* CsrWifiSmeListAction primitive->action */ bufferSize += 1; /* u8 primitive->setAddressCount */ { u16 i1; for (i1 = 0; i1 < primitive->setAddressCount; i1++) { bufferSize += 6; /* u8 primitive->setAddresses[i1].a[6] */ } } return bufferSize; } u8* CsrWifiSmeBlacklistReqSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeBlacklistReq *primitive = (CsrWifiSmeBlacklistReq *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag); CsrUint8Ser(ptr, len, (u8) primitive->action); CsrUint8Ser(ptr, len, (u8) primitive->setAddressCount); { u16 i1; for (i1 = 0; i1 < primitive->setAddressCount; i1++) { CsrMemCpySer(ptr, len, (const void *) primitive->setAddresses[i1].a, ((u16) (6))); } } return(ptr); } void* CsrWifiSmeBlacklistReqDes(u8 *buffer, size_t length) { CsrWifiSmeBlacklistReq *primitive = kmalloc(sizeof(CsrWifiSmeBlacklistReq), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset); CsrUint8Des((u8 *) &primitive->action, buffer, &offset); CsrUint8Des((u8 *) &primitive->setAddressCount, buffer, &offset); primitive->setAddresses = NULL; if (primitive->setAddressCount) { primitive->setAddresses = kmalloc(sizeof(CsrWifiMacAddress) * primitive->setAddressCount, GFP_KERNEL); } { u16 i1; for (i1 = 0; i1 < primitive->setAddressCount; i1++) { CsrMemCpyDes(primitive->setAddresses[i1].a, buffer, &offset, ((u16) (6))); } } return primitive; } void CsrWifiSmeBlacklistReqSerFree(void *voidPrimitivePointer) { 
CsrWifiSmeBlacklistReq *primitive = (CsrWifiSmeBlacklistReq *) voidPrimitivePointer; kfree(primitive->setAddresses); kfree(primitive); } size_t CsrWifiSmeCalibrationDataSetReqSizeof(void *msg) { CsrWifiSmeCalibrationDataSetReq *primitive = (CsrWifiSmeCalibrationDataSetReq *) msg; size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. Could be more efficient (Try 6) */ bufferSize += 2; /* u16 primitive->calibrationDataLength */ bufferSize += primitive->calibrationDataLength; /* u8 primitive->calibrationData */ return bufferSize; } u8* CsrWifiSmeCalibrationDataSetReqSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeCalibrationDataSetReq *primitive = (CsrWifiSmeCalibrationDataSetReq *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->calibrationDataLength); if (primitive->calibrationDataLength) { CsrMemCpySer(ptr, len, (const void *) primitive->calibrationData, ((u16) (primitive->calibrationDataLength))); } return(ptr); } void* CsrWifiSmeCalibrationDataSetReqDes(u8 *buffer, size_t length) { CsrWifiSmeCalibrationDataSetReq *primitive = kmalloc(sizeof(CsrWifiSmeCalibrationDataSetReq), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->calibrationDataLength, buffer, &offset); if (primitive->calibrationDataLength) { primitive->calibrationData = kmalloc(primitive->calibrationDataLength, GFP_KERNEL); CsrMemCpyDes(primitive->calibrationData, buffer, &offset, ((u16) (primitive->calibrationDataLength))); } else { primitive->calibrationData = NULL; } return primitive; } void CsrWifiSmeCalibrationDataSetReqSerFree(void *voidPrimitivePointer) { CsrWifiSmeCalibrationDataSetReq *primitive = (CsrWifiSmeCalibrationDataSetReq *) voidPrimitivePointer; kfree(primitive->calibrationData); kfree(primitive); } size_t CsrWifiSmeCcxConfigSetReqSizeof(void *msg) { size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 9) */ bufferSize += 2; /* u16 primitive->interfaceTag */ bufferSize += 1; /* u8 primitive->ccxConfig.keepAliveTimeMs */ bufferSize += 1; /* u8 primitive->ccxConfig.apRoamingEnabled */ bufferSize += 1; /* u8 primitive->ccxConfig.measurementsMask */ bufferSize += 1; /* u8 primitive->ccxConfig.ccxRadioMgtEnabled */ return bufferSize; } u8* CsrWifiSmeCcxConfigSetReqSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeCcxConfigSetReq *primitive = (CsrWifiSmeCcxConfigSetReq *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag); CsrUint8Ser(ptr, len, (u8) primitive->ccxConfig.keepAliveTimeMs); CsrUint8Ser(ptr, len, (u8) primitive->ccxConfig.apRoamingEnabled); CsrUint8Ser(ptr, len, (u8) primitive->ccxConfig.measurementsMask); CsrUint8Ser(ptr, len, (u8) primitive->ccxConfig.ccxRadioMgtEnabled); return(ptr); } void* CsrWifiSmeCcxConfigSetReqDes(u8 *buffer, size_t length) { CsrWifiSmeCcxConfigSetReq *primitive = kmalloc(sizeof(CsrWifiSmeCcxConfigSetReq), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset); CsrUint8Des((u8 *) &primitive->ccxConfig.keepAliveTimeMs, buffer, &offset); CsrUint8Des((u8 *) &primitive->ccxConfig.apRoamingEnabled, buffer, &offset); CsrUint8Des((u8 *) &primitive->ccxConfig.measurementsMask, buffer, &offset); CsrUint8Des((u8 *) &primitive->ccxConfig.ccxRadioMgtEnabled, buffer, &offset); return primitive; } size_t CsrWifiSmeCoexConfigSetReqSizeof(void *msg) { size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 29) */ bufferSize += 1; /* u8 primitive->coexConfig.coexEnableSchemeManagement */ bufferSize += 1; /* u8 primitive->coexConfig.coexPeriodicWakeHost */ bufferSize += 2; /* u16 primitive->coexConfig.coexTrafficBurstyLatencyMs */ bufferSize += 2; /* u16 primitive->coexConfig.coexTrafficContinuousLatencyMs */ bufferSize += 2; /* u16 primitive->coexConfig.coexObexBlackoutDurationMs */ bufferSize += 2; /* u16 primitive->coexConfig.coexObexBlackoutPeriodMs */ bufferSize += 2; /* u16 primitive->coexConfig.coexA2dpBrBlackoutDurationMs */ bufferSize += 2; /* u16 primitive->coexConfig.coexA2dpBrBlackoutPeriodMs */ bufferSize += 2; /* u16 primitive->coexConfig.coexA2dpEdrBlackoutDurationMs */ bufferSize += 2; /* u16 primitive->coexConfig.coexA2dpEdrBlackoutPeriodMs */ bufferSize += 2; /* u16 primitive->coexConfig.coexPagingBlackoutDurationMs */ bufferSize += 2; /* u16 primitive->coexConfig.coexPagingBlackoutPeriodMs */ bufferSize += 2; /* u16 primitive->coexConfig.coexInquiryBlackoutDurationMs */ bufferSize += 2; /* u16 primitive->coexConfig.coexInquiryBlackoutPeriodMs */ return bufferSize; } u8* CsrWifiSmeCoexConfigSetReqSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeCoexConfigSetReq *primitive = (CsrWifiSmeCoexConfigSetReq *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint8Ser(ptr, len, (u8) primitive->coexConfig.coexEnableSchemeManagement); CsrUint8Ser(ptr, len, (u8) primitive->coexConfig.coexPeriodicWakeHost); CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexTrafficBurstyLatencyMs); CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexTrafficContinuousLatencyMs); CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexObexBlackoutDurationMs); CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexObexBlackoutPeriodMs); CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexA2dpBrBlackoutDurationMs); CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexA2dpBrBlackoutPeriodMs); CsrUint16Ser(ptr, len, (u16) 
primitive->coexConfig.coexA2dpEdrBlackoutDurationMs); CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexA2dpEdrBlackoutPeriodMs); CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexPagingBlackoutDurationMs); CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexPagingBlackoutPeriodMs); CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexInquiryBlackoutDurationMs); CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexInquiryBlackoutPeriodMs); return(ptr); } void* CsrWifiSmeCoexConfigSetReqDes(u8 *buffer, size_t length) { CsrWifiSmeCoexConfigSetReq *primitive = kmalloc(sizeof(CsrWifiSmeCoexConfigSetReq), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint8Des((u8 *) &primitive->coexConfig.coexEnableSchemeManagement, buffer, &offset); CsrUint8Des((u8 *) &primitive->coexConfig.coexPeriodicWakeHost, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexTrafficBurstyLatencyMs, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexTrafficContinuousLatencyMs, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexObexBlackoutDurationMs, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexObexBlackoutPeriodMs, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexA2dpBrBlackoutDurationMs, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexA2dpBrBlackoutPeriodMs, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexA2dpEdrBlackoutDurationMs, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexA2dpEdrBlackoutPeriodMs, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexPagingBlackoutDurationMs, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexPagingBlackoutPeriodMs, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexInquiryBlackoutDurationMs, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexInquiryBlackoutPeriodMs, buffer, &offset); return 
primitive; } size_t CsrWifiSmeConnectReqSizeof(void *msg) { CsrWifiSmeConnectReq *primitive = (CsrWifiSmeConnectReq *) msg; size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. Could be more efficient (Try 57) */ bufferSize += 2; /* u16 primitive->interfaceTag */ bufferSize += 32; /* u8 primitive->connectionConfig.ssid.ssid[32] */ bufferSize += 1; /* u8 primitive->connectionConfig.ssid.length */ bufferSize += 6; /* u8 primitive->connectionConfig.bssid.a[6] */ bufferSize += 1; /* CsrWifiSmeBssType primitive->connectionConfig.bssType */ bufferSize += 1; /* CsrWifiSmeRadioIF primitive->connectionConfig.ifIndex */ bufferSize += 1; /* CsrWifiSme80211PrivacyMode primitive->connectionConfig.privacyMode */ bufferSize += 2; /* CsrWifiSmeAuthModeMask primitive->connectionConfig.authModeMask */ bufferSize += 2; /* CsrWifiSmeEncryptionMask primitive->connectionConfig.encryptionModeMask */ bufferSize += 2; /* u16 primitive->connectionConfig.mlmeAssociateReqInformationElementsLength */ bufferSize += primitive->connectionConfig.mlmeAssociateReqInformationElementsLength; /* u8 primitive->connectionConfig.mlmeAssociateReqInformationElements */ bufferSize += 1; /* CsrWifiSmeWmmQosInfoMask primitive->connectionConfig.wmmQosInfo */ bufferSize += 1; /* u8 primitive->connectionConfig.adhocJoinOnly */ bufferSize += 1; /* u8 primitive->connectionConfig.adhocChannel */ return bufferSize; } u8* CsrWifiSmeConnectReqSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeConnectReq *primitive = (CsrWifiSmeConnectReq *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag); CsrMemCpySer(ptr, len, (const void *) primitive->connectionConfig.ssid.ssid, ((u16) (32))); CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.ssid.length); CsrMemCpySer(ptr, len, (const void *) primitive->connectionConfig.bssid.a, ((u16) (6))); CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.bssType); CsrUint8Ser(ptr, len, (u8) 
primitive->connectionConfig.ifIndex); CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.privacyMode); CsrUint16Ser(ptr, len, (u16) primitive->connectionConfig.authModeMask); CsrUint16Ser(ptr, len, (u16) primitive->connectionConfig.encryptionModeMask); CsrUint16Ser(ptr, len, (u16) primitive->connectionConfig.mlmeAssociateReqInformationElementsLength); if (primitive->connectionConfig.mlmeAssociateReqInformationElementsLength) { CsrMemCpySer(ptr, len, (const void *) primitive->connectionConfig.mlmeAssociateReqInformationElements, ((u16) (primitive->connectionConfig.mlmeAssociateReqInformationElementsLength))); } CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.wmmQosInfo); CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.adhocJoinOnly); CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.adhocChannel); return(ptr); } void* CsrWifiSmeConnectReqDes(u8 *buffer, size_t length) { CsrWifiSmeConnectReq *primitive = kmalloc(sizeof(CsrWifiSmeConnectReq), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset); CsrMemCpyDes(primitive->connectionConfig.ssid.ssid, buffer, &offset, ((u16) (32))); CsrUint8Des((u8 *) &primitive->connectionConfig.ssid.length, buffer, &offset); CsrMemCpyDes(primitive->connectionConfig.bssid.a, buffer, &offset, ((u16) (6))); CsrUint8Des((u8 *) &primitive->connectionConfig.bssType, buffer, &offset); CsrUint8Des((u8 *) &primitive->connectionConfig.ifIndex, buffer, &offset); CsrUint8Des((u8 *) &primitive->connectionConfig.privacyMode, buffer, &offset); CsrUint16Des((u16 *) &primitive->connectionConfig.authModeMask, buffer, &offset); CsrUint16Des((u16 *) &primitive->connectionConfig.encryptionModeMask, buffer, &offset); CsrUint16Des((u16 *) &primitive->connectionConfig.mlmeAssociateReqInformationElementsLength, buffer, &offset); if (primitive->connectionConfig.mlmeAssociateReqInformationElementsLength) { 
primitive->connectionConfig.mlmeAssociateReqInformationElements = kmalloc(primitive->connectionConfig.mlmeAssociateReqInformationElementsLength, GFP_KERNEL); CsrMemCpyDes(primitive->connectionConfig.mlmeAssociateReqInformationElements, buffer, &offset, ((u16) (primitive->connectionConfig.mlmeAssociateReqInformationElementsLength))); } else { primitive->connectionConfig.mlmeAssociateReqInformationElements = NULL; } CsrUint8Des((u8 *) &primitive->connectionConfig.wmmQosInfo, buffer, &offset); CsrUint8Des((u8 *) &primitive->connectionConfig.adhocJoinOnly, buffer, &offset); CsrUint8Des((u8 *) &primitive->connectionConfig.adhocChannel, buffer, &offset); return primitive; } void CsrWifiSmeConnectReqSerFree(void *voidPrimitivePointer) { CsrWifiSmeConnectReq *primitive = (CsrWifiSmeConnectReq *) voidPrimitivePointer; kfree(primitive->connectionConfig.mlmeAssociateReqInformationElements); kfree(primitive); } size_t CsrWifiSmeHostConfigSetReqSizeof(void *msg) { size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 8) */ bufferSize += 2; /* u16 primitive->interfaceTag */ bufferSize += 1; /* CsrWifiSmeHostPowerMode primitive->hostConfig.powerMode */ bufferSize += 2; /* u16 primitive->hostConfig.applicationDataPeriodMs */ return bufferSize; } u8* CsrWifiSmeHostConfigSetReqSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeHostConfigSetReq *primitive = (CsrWifiSmeHostConfigSetReq *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag); CsrUint8Ser(ptr, len, (u8) primitive->hostConfig.powerMode); CsrUint16Ser(ptr, len, (u16) primitive->hostConfig.applicationDataPeriodMs); return(ptr); } void* CsrWifiSmeHostConfigSetReqDes(u8 *buffer, size_t length) { CsrWifiSmeHostConfigSetReq *primitive = kmalloc(sizeof(CsrWifiSmeHostConfigSetReq), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset); CsrUint8Des((u8 *) &primitive->hostConfig.powerMode, buffer, &offset); CsrUint16Des((u16 *) &primitive->hostConfig.applicationDataPeriodMs, buffer, &offset); return primitive; } size_t CsrWifiSmeKeyReqSizeof(void *msg) { size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 65) */ bufferSize += 2; /* u16 primitive->interfaceTag */ bufferSize += 1; /* CsrWifiSmeListAction primitive->action */ bufferSize += 1; /* CsrWifiSmeKeyType primitive->key.keyType */ bufferSize += 1; /* u8 primitive->key.keyIndex */ bufferSize += 1; /* u8 primitive->key.wepTxKey */ { u16 i2; for (i2 = 0; i2 < 8; i2++) { bufferSize += 2; /* u16 primitive->key.keyRsc[8] */ } } bufferSize += 1; /* u8 primitive->key.authenticator */ bufferSize += 6; /* u8 primitive->key.address.a[6] */ bufferSize += 1; /* u8 primitive->key.keyLength */ bufferSize += 32; /* u8 primitive->key.key[32] */ return bufferSize; } u8* CsrWifiSmeKeyReqSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeKeyReq *primitive = (CsrWifiSmeKeyReq *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag); CsrUint8Ser(ptr, len, (u8) primitive->action); CsrUint8Ser(ptr, len, (u8) primitive->key.keyType); CsrUint8Ser(ptr, len, (u8) primitive->key.keyIndex); CsrUint8Ser(ptr, len, (u8) primitive->key.wepTxKey); { u16 i2; for (i2 = 0; i2 < 8; i2++) { CsrUint16Ser(ptr, len, (u16) primitive->key.keyRsc[i2]); } } CsrUint8Ser(ptr, len, (u8) primitive->key.authenticator); CsrMemCpySer(ptr, len, (const void *) primitive->key.address.a, ((u16) (6))); CsrUint8Ser(ptr, len, (u8) primitive->key.keyLength); CsrMemCpySer(ptr, len, (const void *) primitive->key.key, ((u16) (32))); return(ptr); } void* CsrWifiSmeKeyReqDes(u8 *buffer, size_t length) { CsrWifiSmeKeyReq *primitive = kmalloc(sizeof(CsrWifiSmeKeyReq), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset); CsrUint8Des((u8 *) &primitive->action, buffer, &offset); CsrUint8Des((u8 *) &primitive->key.keyType, buffer, &offset); CsrUint8Des((u8 *) &primitive->key.keyIndex, buffer, &offset); CsrUint8Des((u8 *) &primitive->key.wepTxKey, buffer, &offset); { u16 i2; for (i2 = 0; i2 
< 8; i2++) { CsrUint16Des((u16 *) &primitive->key.keyRsc[i2], buffer, &offset); } } CsrUint8Des((u8 *) &primitive->key.authenticator, buffer, &offset); CsrMemCpyDes(primitive->key.address.a, buffer, &offset, ((u16) (6))); CsrUint8Des((u8 *) &primitive->key.keyLength, buffer, &offset); CsrMemCpyDes(primitive->key.key, buffer, &offset, ((u16) (32))); return primitive; } size_t CsrWifiSmeMibConfigSetReqSizeof(void *msg) { size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. Could be more efficient (Try 11) */ bufferSize += 1; /* u8 primitive->mibConfig.unifiFixMaxTxDataRate */ bufferSize += 1; /* u8 primitive->mibConfig.unifiFixTxDataRate */ bufferSize += 2; /* u16 primitive->mibConfig.dot11RtsThreshold */ bufferSize += 2; /* u16 primitive->mibConfig.dot11FragmentationThreshold */ bufferSize += 2; /* u16 primitive->mibConfig.dot11CurrentTxPowerLevel */ return bufferSize; } u8* CsrWifiSmeMibConfigSetReqSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeMibConfigSetReq *primitive = (CsrWifiSmeMibConfigSetReq *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint8Ser(ptr, len, (u8) primitive->mibConfig.unifiFixMaxTxDataRate); CsrUint8Ser(ptr, len, (u8) primitive->mibConfig.unifiFixTxDataRate); CsrUint16Ser(ptr, len, (u16) primitive->mibConfig.dot11RtsThreshold); CsrUint16Ser(ptr, len, (u16) primitive->mibConfig.dot11FragmentationThreshold); CsrUint16Ser(ptr, len, (u16) primitive->mibConfig.dot11CurrentTxPowerLevel); return(ptr); } void* CsrWifiSmeMibConfigSetReqDes(u8 *buffer, size_t length) { CsrWifiSmeMibConfigSetReq *primitive = kmalloc(sizeof(CsrWifiSmeMibConfigSetReq), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint8Des((u8 *) &primitive->mibConfig.unifiFixMaxTxDataRate, buffer, &offset); CsrUint8Des((u8 *) &primitive->mibConfig.unifiFixTxDataRate, buffer, &offset); CsrUint16Des((u16 *) &primitive->mibConfig.dot11RtsThreshold, buffer, &offset); CsrUint16Des((u16 *) 
&primitive->mibConfig.dot11FragmentationThreshold, buffer, &offset); CsrUint16Des((u16 *) &primitive->mibConfig.dot11CurrentTxPowerLevel, buffer, &offset); return primitive; } size_t CsrWifiSmeMibGetNextReqSizeof(void *msg) { CsrWifiSmeMibGetNextReq *primitive = (CsrWifiSmeMibGetNextReq *) msg; size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. Could be more efficient (Try 6) */ bufferSize += 2; /* u16 primitive->mibAttributeLength */ bufferSize += primitive->mibAttributeLength; /* u8 primitive->mibAttribute */ return bufferSize; } u8* CsrWifiSmeMibGetNextReqSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeMibGetNextReq *primitive = (CsrWifiSmeMibGetNextReq *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->mibAttributeLength); if (primitive->mibAttributeLength) { CsrMemCpySer(ptr, len, (const void *) primitive->mibAttribute, ((u16) (primitive->mibAttributeLength))); } return(ptr); } void* CsrWifiSmeMibGetNextReqDes(u8 *buffer, size_t length) { CsrWifiSmeMibGetNextReq *primitive = kmalloc(sizeof(CsrWifiSmeMibGetNextReq), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->mibAttributeLength, buffer, &offset); if (primitive->mibAttributeLength) { primitive->mibAttribute = kmalloc(primitive->mibAttributeLength, GFP_KERNEL); CsrMemCpyDes(primitive->mibAttribute, buffer, &offset, ((u16) (primitive->mibAttributeLength))); } else { primitive->mibAttribute = NULL; } return primitive; } void CsrWifiSmeMibGetNextReqSerFree(void *voidPrimitivePointer) { CsrWifiSmeMibGetNextReq *primitive = (CsrWifiSmeMibGetNextReq *) voidPrimitivePointer; kfree(primitive->mibAttribute); kfree(primitive); } size_t CsrWifiSmeMibGetReqSizeof(void *msg) { CsrWifiSmeMibGetReq *primitive = (CsrWifiSmeMibGetReq *) msg; size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 6) */ bufferSize += 2; /* u16 primitive->mibAttributeLength */ bufferSize += primitive->mibAttributeLength; /* u8 primitive->mibAttribute */ return bufferSize; } u8* CsrWifiSmeMibGetReqSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeMibGetReq *primitive = (CsrWifiSmeMibGetReq *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->mibAttributeLength); if (primitive->mibAttributeLength) { CsrMemCpySer(ptr, len, (const void *) primitive->mibAttribute, ((u16) (primitive->mibAttributeLength))); } return(ptr); } void* CsrWifiSmeMibGetReqDes(u8 *buffer, size_t length) { CsrWifiSmeMibGetReq *primitive = kmalloc(sizeof(CsrWifiSmeMibGetReq), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->mibAttributeLength, buffer, &offset); if (primitive->mibAttributeLength) { primitive->mibAttribute = kmalloc(primitive->mibAttributeLength, GFP_KERNEL); CsrMemCpyDes(primitive->mibAttribute, buffer, &offset, ((u16) (primitive->mibAttributeLength))); } else { primitive->mibAttribute = NULL; } return primitive; } void CsrWifiSmeMibGetReqSerFree(void *voidPrimitivePointer) { CsrWifiSmeMibGetReq *primitive = (CsrWifiSmeMibGetReq *) voidPrimitivePointer; kfree(primitive->mibAttribute); kfree(primitive); } size_t CsrWifiSmeMibSetReqSizeof(void *msg) { CsrWifiSmeMibSetReq *primitive = (CsrWifiSmeMibSetReq *) msg; size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 6) */ bufferSize += 2; /* u16 primitive->mibAttributeLength */ bufferSize += primitive->mibAttributeLength; /* u8 primitive->mibAttribute */ return bufferSize; } u8* CsrWifiSmeMibSetReqSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeMibSetReq *primitive = (CsrWifiSmeMibSetReq *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->mibAttributeLength); if (primitive->mibAttributeLength) { CsrMemCpySer(ptr, len, (const void *) primitive->mibAttribute, ((u16) (primitive->mibAttributeLength))); } return(ptr); } void* CsrWifiSmeMibSetReqDes(u8 *buffer, size_t length) { CsrWifiSmeMibSetReq *primitive = kmalloc(sizeof(CsrWifiSmeMibSetReq), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->mibAttributeLength, buffer, &offset); if (primitive->mibAttributeLength) { primitive->mibAttribute = kmalloc(primitive->mibAttributeLength, GFP_KERNEL); CsrMemCpyDes(primitive->mibAttribute, buffer, &offset, ((u16) (primitive->mibAttributeLength))); } else { primitive->mibAttribute = NULL; } return primitive; } void CsrWifiSmeMibSetReqSerFree(void *voidPrimitivePointer) { CsrWifiSmeMibSetReq *primitive = (CsrWifiSmeMibSetReq *) voidPrimitivePointer; kfree(primitive->mibAttribute); kfree(primitive); } size_t CsrWifiSmeMulticastAddressReqSizeof(void *msg) { CsrWifiSmeMulticastAddressReq *primitive = (CsrWifiSmeMulticastAddressReq *) msg; size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 13) */
    bufferSize += 2; /* u16 primitive->interfaceTag */
    bufferSize += 1; /* CsrWifiSmeListAction primitive->action */
    bufferSize += 1; /* u8 primitive->setAddressesCount */
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->setAddressesCount; i1++)
        {
            bufferSize += 6; /* u8 primitive->setAddresses[i1].a[6] */
        }
    }
    return bufferSize;
}


/* Serialise a CsrWifiSmeMulticastAddressReq into ptr; *len is set to the
 * number of bytes written.  Field order matches the Sizeof function above. */
u8* CsrWifiSmeMulticastAddressReqSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeMulticastAddressReq *primitive = (CsrWifiSmeMulticastAddressReq *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    CsrUint8Ser(ptr, len, (u8) primitive->action);
    CsrUint8Ser(ptr, len, (u8) primitive->setAddressesCount);
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->setAddressesCount; i1++)
        {
            CsrMemCpySer(ptr, len, (const void *) primitive->setAddresses[i1].a, ((u16) (6)));
        }
    }
    return(ptr);
}


/* Deserialise a CsrWifiSmeMulticastAddressReq from buffer.
 * NOTE(review): neither kmalloc() result is checked for NULL, and the
 * 'length' parameter is never used to bound the reads - the counts taken
 * from the buffer are trusted; confirm callers validate the input first. */
void* CsrWifiSmeMulticastAddressReqDes(u8 *buffer, size_t length)
{
    CsrWifiSmeMulticastAddressReq *primitive = kmalloc(sizeof(CsrWifiSmeMulticastAddressReq), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->action, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->setAddressesCount, buffer, &offset);
    primitive->setAddresses = NULL;
    if (primitive->setAddressesCount)
    {
        primitive->setAddresses = kmalloc(sizeof(CsrWifiMacAddress) * primitive->setAddressesCount, GFP_KERNEL);
    }
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->setAddressesCount; i1++)
        {
            CsrMemCpyDes(primitive->setAddresses[i1].a, buffer, &offset, ((u16) (6)));
        }
    }
    return primitive;
}


/* Free a deserialised CsrWifiSmeMulticastAddressReq and its address list. */
void CsrWifiSmeMulticastAddressReqSerFree(void *voidPrimitivePointer)
{
    CsrWifiSmeMulticastAddressReq *primitive = (CsrWifiSmeMulticastAddressReq *) voidPrimitivePointer;
    kfree(primitive->setAddresses);
    kfree(primitive);
}


/* Size in bytes of the serialised form of a CsrWifiSmePacketFilterSetReq. */
size_t CsrWifiSmePacketFilterSetReqSizeof(void *msg)
{
    CsrWifiSmePacketFilterSetReq *primitive = (CsrWifiSmePacketFilterSetReq *) msg;
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data. Could be more efficient (Try 13) */
    bufferSize += 2;                       /* u16 primitive->interfaceTag */
    bufferSize += 2;                       /* u16 primitive->filterLength */
    bufferSize += primitive->filterLength; /* u8 primitive->filter */
    bufferSize += 1;                       /* CsrWifiSmePacketFilterMode primitive->mode */
    bufferSize += 4;                       /* u8 primitive->arpFilterAddress.a[4] */
    return bufferSize;
}


/* Serialise a CsrWifiSmePacketFilterSetReq (variable-length filter blob). */
u8* CsrWifiSmePacketFilterSetReqSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmePacketFilterSetReq *primitive = (CsrWifiSmePacketFilterSetReq *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    CsrUint16Ser(ptr, len, (u16) primitive->filterLength);
    if (primitive->filterLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->filter, ((u16) (primitive->filterLength)));
    }
    CsrUint8Ser(ptr, len, (u8) primitive->mode);
    CsrMemCpySer(ptr, len, (const void *) primitive->arpFilterAddress.a, ((u16) (4)));
    return(ptr);
}


/* Deserialise a CsrWifiSmePacketFilterSetReq.
 * NOTE(review): kmalloc() results unchecked; 'length' never used as a bound. */
void* CsrWifiSmePacketFilterSetReqDes(u8 *buffer, size_t length)
{
    CsrWifiSmePacketFilterSetReq *primitive = kmalloc(sizeof(CsrWifiSmePacketFilterSetReq), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->filterLength, buffer, &offset);
    if (primitive->filterLength)
    {
        primitive->filter = kmalloc(primitive->filterLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->filter, buffer, &offset, ((u16) (primitive->filterLength)));
    }
    else
    {
        primitive->filter = NULL;
    }
    CsrUint8Des((u8 *) &primitive->mode, buffer, &offset);
    CsrMemCpyDes(primitive->arpFilterAddress.a, buffer, &offset, ((u16) (4)));
    return primitive;
}


/* Free a deserialised CsrWifiSmePacketFilterSetReq and its filter blob. */
void CsrWifiSmePacketFilterSetReqSerFree(void *voidPrimitivePointer)
{
    CsrWifiSmePacketFilterSetReq *primitive = (CsrWifiSmePacketFilterSetReq *) voidPrimitivePointer;
    kfree(primitive->filter);
    kfree(primitive);
}


/* Size in bytes of the serialised form of a CsrWifiSmePmkidReq. */
size_t CsrWifiSmePmkidReqSizeof(void *msg)
{
    CsrWifiSmePmkidReq *primitive = (CsrWifiSmePmkidReq *) msg;
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data. Could be more efficient (Try 29) */
    bufferSize += 2; /* u16 primitive->interfaceTag */
    bufferSize += 1; /* CsrWifiSmeListAction primitive->action */
    bufferSize += 1; /* u8 primitive->setPmkidsCount */
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->setPmkidsCount; i1++)
        {
            bufferSize += 6;  /* u8 primitive->setPmkids[i1].bssid.a[6] */
            bufferSize += 16; /* u8 primitive->setPmkids[i1].pmkid[16] */
        }
    }
    return bufferSize;
}


/* Serialise a CsrWifiSmePmkidReq (list of BSSID + PMKID pairs). */
u8* CsrWifiSmePmkidReqSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmePmkidReq *primitive = (CsrWifiSmePmkidReq *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    CsrUint8Ser(ptr, len, (u8) primitive->action);
    CsrUint8Ser(ptr, len, (u8) primitive->setPmkidsCount);
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->setPmkidsCount; i1++)
        {
            CsrMemCpySer(ptr, len, (const void *) primitive->setPmkids[i1].bssid.a, ((u16) (6)));
            CsrMemCpySer(ptr, len, (const void *) primitive->setPmkids[i1].pmkid, ((u16) (16)));
        }
    }
    return(ptr);
}


/* Deserialise a CsrWifiSmePmkidReq.
 * NOTE(review): kmalloc() results unchecked; 'length' never used as a bound. */
void* CsrWifiSmePmkidReqDes(u8 *buffer, size_t length)
{
    CsrWifiSmePmkidReq *primitive = kmalloc(sizeof(CsrWifiSmePmkidReq), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->action, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->setPmkidsCount, buffer, &offset);
    primitive->setPmkids = NULL;
    if (primitive->setPmkidsCount)
    {
        primitive->setPmkids = kmalloc(sizeof(CsrWifiSmePmkid) * primitive->setPmkidsCount, GFP_KERNEL);
    }
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->setPmkidsCount; i1++)
        {
            CsrMemCpyDes(primitive->setPmkids[i1].bssid.a, buffer, &offset, ((u16) (6)));
            CsrMemCpyDes(primitive->setPmkids[i1].pmkid, buffer, &offset, ((u16) (16)));
        }
    }
    return
primitive;
}


/* Free a deserialised CsrWifiSmePmkidReq and its PMKID list. */
void CsrWifiSmePmkidReqSerFree(void *voidPrimitivePointer)
{
    CsrWifiSmePmkidReq *primitive = (CsrWifiSmePmkidReq *) voidPrimitivePointer;
    kfree(primitive->setPmkids);
    kfree(primitive);
}


/* Size in bytes of the serialised form of a CsrWifiSmePowerConfigSetReq
 * (fixed size: the msg argument is not needed and not dereferenced). */
size_t CsrWifiSmePowerConfigSetReqSizeof(void *msg)
{
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data. Could be more efficient (Try 11) */
    bufferSize += 1; /* CsrWifiSmePowerSaveLevel primitive->powerConfig.powerSaveLevel */
    bufferSize += 2; /* u16 primitive->powerConfig.listenIntervalTu */
    bufferSize += 1; /* u8 primitive->powerConfig.rxDtims */
    bufferSize += 1; /* CsrWifiSmeD3AutoScanMode primitive->powerConfig.d3AutoScanMode */
    bufferSize += 1; /* u8 primitive->powerConfig.clientTrafficWindow */
    bufferSize += 1; /* u8 primitive->powerConfig.opportunisticPowerSave */
    bufferSize += 1; /* u8 primitive->powerConfig.noticeOfAbsence */
    return bufferSize;
}


/* Serialise a CsrWifiSmePowerConfigSetReq (fixed-size power-save config). */
u8* CsrWifiSmePowerConfigSetReqSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmePowerConfigSetReq *primitive = (CsrWifiSmePowerConfigSetReq *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.powerSaveLevel);
    CsrUint16Ser(ptr, len, (u16) primitive->powerConfig.listenIntervalTu);
    CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.rxDtims);
    CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.d3AutoScanMode);
    CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.clientTrafficWindow);
    CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.opportunisticPowerSave);
    CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.noticeOfAbsence);
    return(ptr);
}


/* Deserialise a CsrWifiSmePowerConfigSetReq.
 * NOTE(review): kmalloc() result unchecked; 'length' never used as a bound. */
void* CsrWifiSmePowerConfigSetReqDes(u8 *buffer, size_t length)
{
    CsrWifiSmePowerConfigSetReq *primitive = kmalloc(sizeof(CsrWifiSmePowerConfigSetReq), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->powerConfig.powerSaveLevel, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->powerConfig.listenIntervalTu, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->powerConfig.rxDtims, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->powerConfig.d3AutoScanMode, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->powerConfig.clientTrafficWindow, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->powerConfig.opportunisticPowerSave, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->powerConfig.noticeOfAbsence, buffer, &offset);
    return primitive;
}


/* Size in bytes of the serialised form of a CsrWifiSmeRoamingConfigSetReq
 * (fixed size: three roaming bands and three roam-scan configs). */
size_t CsrWifiSmeRoamingConfigSetReqSizeof(void *msg)
{
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data. Could be more efficient (Try 70) */
    bufferSize += 2; /* u16 primitive->interfaceTag */
    {
        u16 i2;
        for (i2 = 0; i2 < 3; i2++)
        {
            bufferSize += 2; /* s16 primitive->roamingConfig.roamingBands[i2].rssiHighThreshold */
            bufferSize += 2; /* s16 primitive->roamingConfig.roamingBands[i2].rssiLowThreshold */
            bufferSize += 2; /* s16 primitive->roamingConfig.roamingBands[i2].snrHighThreshold */
            bufferSize += 2; /* s16 primitive->roamingConfig.roamingBands[i2].snrLowThreshold */
        }
    }
    bufferSize += 1; /* u8 primitive->roamingConfig.disableSmoothRoaming */
    bufferSize += 1; /* u8 primitive->roamingConfig.disableRoamScans */
    bufferSize += 1; /* u8 primitive->roamingConfig.reconnectLimit */
    bufferSize += 2; /* u16 primitive->roamingConfig.reconnectLimitIntervalMs */
    {
        u16 i2;
        for (i2 = 0; i2 < 3; i2++)
        {
            bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].intervalSeconds */
            bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].validitySeconds */
            bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].minActiveChannelTimeTu */
            bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].maxActiveChannelTimeTu */
            bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].minPassiveChannelTimeTu */
            bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].maxPassiveChannelTimeTu */
        }
    }
    return bufferSize;
}


/* Serialise a CsrWifiSmeRoamingConfigSetReq.  The signed s16 thresholds are
 * written through a u16 cast (two's-complement round-trip with the Des side). */
u8* CsrWifiSmeRoamingConfigSetReqSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeRoamingConfigSetReq *primitive = (CsrWifiSmeRoamingConfigSetReq *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    {
        u16 i2;
        for (i2 = 0; i2 < 3; i2++)
        {
            CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamingBands[i2].rssiHighThreshold);
            CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamingBands[i2].rssiLowThreshold);
            CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamingBands[i2].snrHighThreshold);
            CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamingBands[i2].snrLowThreshold);
        }
    }
    CsrUint8Ser(ptr, len, (u8) primitive->roamingConfig.disableSmoothRoaming);
    CsrUint8Ser(ptr, len, (u8) primitive->roamingConfig.disableRoamScans);
    CsrUint8Ser(ptr, len, (u8) primitive->roamingConfig.reconnectLimit);
    CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.reconnectLimitIntervalMs);
    {
        u16 i2;
        for (i2 = 0; i2 < 3; i2++)
        {
            CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].intervalSeconds);
            CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].validitySeconds);
            CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].minActiveChannelTimeTu);
            CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].maxActiveChannelTimeTu);
            CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].minPassiveChannelTimeTu);
            CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].maxPassiveChannelTimeTu);
        }
    }
    return(ptr);
}


/* Deserialise a CsrWifiSmeRoamingConfigSetReq.
 * NOTE(review): kmalloc() result unchecked; 'length' never used as a bound. */
void* CsrWifiSmeRoamingConfigSetReqDes(u8 *buffer, size_t length)
{
    CsrWifiSmeRoamingConfigSetReq *primitive = kmalloc(sizeof(CsrWifiSmeRoamingConfigSetReq), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    {
        u16 i2;
        for (i2 = 0; i2 < 3; i2++)
        {
            CsrUint16Des((u16 *) &primitive->roamingConfig.roamingBands[i2].rssiHighThreshold, buffer, &offset);
            CsrUint16Des((u16 *)
&primitive->roamingConfig.roamingBands[i2].rssiLowThreshold, buffer, &offset);
            CsrUint16Des((u16 *) &primitive->roamingConfig.roamingBands[i2].snrHighThreshold, buffer, &offset);
            CsrUint16Des((u16 *) &primitive->roamingConfig.roamingBands[i2].snrLowThreshold, buffer, &offset);
        }
    }
    CsrUint8Des((u8 *) &primitive->roamingConfig.disableSmoothRoaming, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->roamingConfig.disableRoamScans, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->roamingConfig.reconnectLimit, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->roamingConfig.reconnectLimitIntervalMs, buffer, &offset);
    {
        u16 i2;
        for (i2 = 0; i2 < 3; i2++)
        {
            CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].intervalSeconds, buffer, &offset);
            CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].validitySeconds, buffer, &offset);
            CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].minActiveChannelTimeTu, buffer, &offset);
            CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].maxActiveChannelTimeTu, buffer, &offset);
            CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].minPassiveChannelTimeTu, buffer, &offset);
            CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].maxPassiveChannelTimeTu, buffer, &offset);
        }
    }
    return primitive;
}


/* Size in bytes of the serialised form of a CsrWifiSmeScanConfigSetReq
 * (four fixed scan configs plus a variable-length passive channel list). */
size_t CsrWifiSmeScanConfigSetReqSizeof(void *msg)
{
    CsrWifiSmeScanConfigSetReq *primitive = (CsrWifiSmeScanConfigSetReq *) msg;
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data.
       Could be more efficient (Try 63) */
    {
        u16 i2;
        for (i2 = 0; i2 < 4; i2++)
        {
            bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].intervalSeconds */
            bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].validitySeconds */
            bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].minActiveChannelTimeTu */
            bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].maxActiveChannelTimeTu */
            bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].minPassiveChannelTimeTu */
            bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].maxPassiveChannelTimeTu */
        }
    }
    bufferSize += 1; /* u8 primitive->scanConfig.disableAutonomousScans */
    bufferSize += 2; /* u16 primitive->scanConfig.maxResults */
    bufferSize += 1; /* s8 primitive->scanConfig.highRssiThreshold */
    bufferSize += 1; /* s8 primitive->scanConfig.lowRssiThreshold */
    bufferSize += 1; /* s8 primitive->scanConfig.deltaRssiThreshold */
    bufferSize += 1; /* s8 primitive->scanConfig.highSnrThreshold */
    bufferSize += 1; /* s8 primitive->scanConfig.lowSnrThreshold */
    bufferSize += 1; /* s8 primitive->scanConfig.deltaSnrThreshold */
    bufferSize += 2; /* u16 primitive->scanConfig.passiveChannelListCount */
    bufferSize += primitive->scanConfig.passiveChannelListCount; /* u8 primitive->scanConfig.passiveChannelList */
    return bufferSize;
}


/* Serialise a CsrWifiSmeScanConfigSetReq. */
u8* CsrWifiSmeScanConfigSetReqSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeScanConfigSetReq *primitive = (CsrWifiSmeScanConfigSetReq *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    {
        u16 i2;
        for (i2 = 0; i2 < 4; i2++)
        {
            CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.scanCfg[i2].intervalSeconds);
            CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.scanCfg[i2].validitySeconds);
            CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.scanCfg[i2].minActiveChannelTimeTu);
            CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.scanCfg[i2].maxActiveChannelTimeTu);
            CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.scanCfg[i2].minPassiveChannelTimeTu);
            CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.scanCfg[i2].maxPassiveChannelTimeTu);
        }
    }
    CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.disableAutonomousScans);
    CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.maxResults);
    CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.highRssiThreshold);
    CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.lowRssiThreshold);
    CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.deltaRssiThreshold);
    CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.highSnrThreshold);
    CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.lowSnrThreshold);
    CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.deltaSnrThreshold);
    CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.passiveChannelListCount);
    if (primitive->scanConfig.passiveChannelListCount)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->scanConfig.passiveChannelList, ((u16) (primitive->scanConfig.passiveChannelListCount)));
    }
    return(ptr);
}


/* Deserialise a CsrWifiSmeScanConfigSetReq.
 * NOTE(review): kmalloc() results unchecked; 'length' never used as a bound. */
void* CsrWifiSmeScanConfigSetReqDes(u8 *buffer, size_t length)
{
    CsrWifiSmeScanConfigSetReq *primitive = kmalloc(sizeof(CsrWifiSmeScanConfigSetReq), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    {
        u16 i2;
        for (i2 = 0; i2 < 4; i2++)
        {
            CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].intervalSeconds, buffer, &offset);
            CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].validitySeconds, buffer, &offset);
            CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].minActiveChannelTimeTu, buffer, &offset);
            CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].maxActiveChannelTimeTu, buffer, &offset);
            CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].minPassiveChannelTimeTu, buffer, &offset);
            CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].maxPassiveChannelTimeTu, buffer, &offset);
        }
    }
    CsrUint8Des((u8 *) &primitive->scanConfig.disableAutonomousScans, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->scanConfig.maxResults, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->scanConfig.highRssiThreshold, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->scanConfig.lowRssiThreshold, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->scanConfig.deltaRssiThreshold, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->scanConfig.highSnrThreshold, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->scanConfig.lowSnrThreshold, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->scanConfig.deltaSnrThreshold, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->scanConfig.passiveChannelListCount, buffer, &offset);
    if (primitive->scanConfig.passiveChannelListCount)
    {
        primitive->scanConfig.passiveChannelList = kmalloc(primitive->scanConfig.passiveChannelListCount, GFP_KERNEL);
        CsrMemCpyDes(primitive->scanConfig.passiveChannelList, buffer, &offset, ((u16) (primitive->scanConfig.passiveChannelListCount)));
    }
    else
    {
        primitive->scanConfig.passiveChannelList = NULL;
    }
    return primitive;
}


/* Free a deserialised CsrWifiSmeScanConfigSetReq and its channel list. */
void CsrWifiSmeScanConfigSetReqSerFree(void *voidPrimitivePointer)
{
    CsrWifiSmeScanConfigSetReq *primitive = (CsrWifiSmeScanConfigSetReq *) voidPrimitivePointer;
    kfree(primitive->scanConfig.passiveChannelList);
    kfree(primitive);
}


/* Size in bytes of the serialised form of a CsrWifiSmeScanFullReq. */
size_t CsrWifiSmeScanFullReqSizeof(void *msg)
{
    CsrWifiSmeScanFullReq *primitive = (CsrWifiSmeScanFullReq *) msg;
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data.
Could be more efficient (Try 52) */
    bufferSize += 1; /* u8 primitive->ssidCount */
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->ssidCount; i1++)
        {
            bufferSize += 32; /* u8 primitive->ssid[i1].ssid[32] */
            bufferSize += 1;  /* u8 primitive->ssid[i1].length */
        }
    }
    bufferSize += 6; /* u8 primitive->bssid.a[6] */
    bufferSize += 1; /* u8 primitive->forceScan */
    bufferSize += 1; /* CsrWifiSmeBssType primitive->bssType */
    bufferSize += 1; /* CsrWifiSmeScanType primitive->scanType */
    bufferSize += 2; /* u16 primitive->channelListCount */
    bufferSize += primitive->channelListCount; /* u8 primitive->channelList */
    bufferSize += 2; /* u16 primitive->probeIeLength */
    bufferSize += primitive->probeIeLength; /* u8 primitive->probeIe */
    return bufferSize;
}


/* Serialise a CsrWifiSmeScanFullReq (SSID list, channel list, probe IE). */
u8* CsrWifiSmeScanFullReqSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeScanFullReq *primitive = (CsrWifiSmeScanFullReq *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint8Ser(ptr, len, (u8) primitive->ssidCount);
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->ssidCount; i1++)
        {
            CsrMemCpySer(ptr, len, (const void *) primitive->ssid[i1].ssid, ((u16) (32)));
            CsrUint8Ser(ptr, len, (u8) primitive->ssid[i1].length);
        }
    }
    CsrMemCpySer(ptr, len, (const void *) primitive->bssid.a, ((u16) (6)));
    CsrUint8Ser(ptr, len, (u8) primitive->forceScan);
    CsrUint8Ser(ptr, len, (u8) primitive->bssType);
    CsrUint8Ser(ptr, len, (u8) primitive->scanType);
    CsrUint16Ser(ptr, len, (u16) primitive->channelListCount);
    if (primitive->channelListCount)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->channelList, ((u16) (primitive->channelListCount)));
    }
    CsrUint16Ser(ptr, len, (u16) primitive->probeIeLength);
    if (primitive->probeIeLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->probeIe, ((u16) (primitive->probeIeLength)));
    }
    return(ptr);
}


/* Deserialise a CsrWifiSmeScanFullReq.
 * NOTE(review): kmalloc() results unchecked; 'length' never used as a bound. */
void* CsrWifiSmeScanFullReqDes(u8 *buffer, size_t length)
{
    CsrWifiSmeScanFullReq *primitive = kmalloc(sizeof(CsrWifiSmeScanFullReq), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->ssidCount, buffer, &offset);
    primitive->ssid = NULL;
    if (primitive->ssidCount)
    {
        primitive->ssid = kmalloc(sizeof(CsrWifiSsid) * primitive->ssidCount, GFP_KERNEL);
    }
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->ssidCount; i1++)
        {
            CsrMemCpyDes(primitive->ssid[i1].ssid, buffer, &offset, ((u16) (32)));
            CsrUint8Des((u8 *) &primitive->ssid[i1].length, buffer, &offset);
        }
    }
    CsrMemCpyDes(primitive->bssid.a, buffer, &offset, ((u16) (6)));
    CsrUint8Des((u8 *) &primitive->forceScan, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->bssType, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->scanType, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->channelListCount, buffer, &offset);
    if (primitive->channelListCount)
    {
        primitive->channelList = kmalloc(primitive->channelListCount, GFP_KERNEL);
        CsrMemCpyDes(primitive->channelList, buffer, &offset, ((u16) (primitive->channelListCount)));
    }
    else
    {
        primitive->channelList = NULL;
    }
    CsrUint16Des((u16 *) &primitive->probeIeLength, buffer, &offset);
    if (primitive->probeIeLength)
    {
        primitive->probeIe = kmalloc(primitive->probeIeLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->probeIe, buffer, &offset, ((u16) (primitive->probeIeLength)));
    }
    else
    {
        primitive->probeIe = NULL;
    }
    return primitive;
}


/* Free a deserialised CsrWifiSmeScanFullReq and its three heap members. */
void CsrWifiSmeScanFullReqSerFree(void *voidPrimitivePointer)
{
    CsrWifiSmeScanFullReq *primitive = (CsrWifiSmeScanFullReq *) voidPrimitivePointer;
    kfree(primitive->ssid);
    kfree(primitive->channelList);
    kfree(primitive->probeIe);
    kfree(primitive);
}


/* Size in bytes of the serialised form of a CsrWifiSmeSmeStaConfigSetReq
 * (fixed size: the msg argument is not needed and not dereferenced). */
size_t CsrWifiSmeSmeStaConfigSetReqSizeof(void *msg)
{
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data.
Could be more efficient (Try 11) */
    bufferSize += 2; /* u16 primitive->interfaceTag */
    bufferSize += 1; /* u8 primitive->smeConfig.connectionQualityRssiChangeTrigger */
    bufferSize += 1; /* u8 primitive->smeConfig.connectionQualitySnrChangeTrigger */
    bufferSize += 1; /* CsrWifiSmeWmmModeMask primitive->smeConfig.wmmModeMask */
    bufferSize += 1; /* CsrWifiSmeRadioIF primitive->smeConfig.ifIndex */
    bufferSize += 1; /* u8 primitive->smeConfig.allowUnicastUseGroupCipher */
    bufferSize += 1; /* u8 primitive->smeConfig.enableOpportunisticKeyCaching */
    return bufferSize;
}


/* Serialise a CsrWifiSmeSmeStaConfigSetReq (fixed-size STA config). */
u8* CsrWifiSmeSmeStaConfigSetReqSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeSmeStaConfigSetReq *primitive = (CsrWifiSmeSmeStaConfigSetReq *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.connectionQualityRssiChangeTrigger);
    CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.connectionQualitySnrChangeTrigger);
    CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.wmmModeMask);
    CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.ifIndex);
    CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.allowUnicastUseGroupCipher);
    CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.enableOpportunisticKeyCaching);
    return(ptr);
}


/* Deserialise a CsrWifiSmeSmeStaConfigSetReq.
 * NOTE(review): kmalloc() result unchecked; 'length' never used as a bound. */
void* CsrWifiSmeSmeStaConfigSetReqDes(u8 *buffer, size_t length)
{
    CsrWifiSmeSmeStaConfigSetReq *primitive = kmalloc(sizeof(CsrWifiSmeSmeStaConfigSetReq), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->smeConfig.connectionQualityRssiChangeTrigger, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->smeConfig.connectionQualitySnrChangeTrigger, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->smeConfig.wmmModeMask, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->smeConfig.ifIndex, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->smeConfig.allowUnicastUseGroupCipher, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->smeConfig.enableOpportunisticKeyCaching, buffer, &offset);
    return primitive;
}


/* Size in bytes of the serialised form of a CsrWifiSmeTspecReq
 * (variable-length TSPEC and TCLAS blobs). */
size_t CsrWifiSmeTspecReqSizeof(void *msg)
{
    CsrWifiSmeTspecReq *primitive = (CsrWifiSmeTspecReq *) msg;
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data. Could be more efficient (Try 18) */
    bufferSize += 2; /* u16 primitive->interfaceTag */
    bufferSize += 1; /* CsrWifiSmeListAction primitive->action */
    bufferSize += 4; /* u32 primitive->transactionId */
    bufferSize += 1; /* u8 primitive->strict */
    bufferSize += 1; /* CsrWifiSmeTspecCtrlMask primitive->ctrlMask */
    bufferSize += 2; /* u16 primitive->tspecLength */
    bufferSize += primitive->tspecLength; /* u8 primitive->tspec */
    bufferSize += 2; /* u16 primitive->tclasLength */
    bufferSize += primitive->tclasLength; /* u8 primitive->tclas */
    return bufferSize;
}


/* Serialise a CsrWifiSmeTspecReq. */
u8* CsrWifiSmeTspecReqSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeTspecReq *primitive = (CsrWifiSmeTspecReq *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    CsrUint8Ser(ptr, len, (u8) primitive->action);
    CsrUint32Ser(ptr, len, (u32) primitive->transactionId);
    CsrUint8Ser(ptr, len, (u8) primitive->strict);
    CsrUint8Ser(ptr, len, (u8) primitive->ctrlMask);
    CsrUint16Ser(ptr, len, (u16) primitive->tspecLength);
    if (primitive->tspecLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->tspec, ((u16) (primitive->tspecLength)));
    }
    CsrUint16Ser(ptr, len, (u16) primitive->tclasLength);
    if (primitive->tclasLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->tclas, ((u16) (primitive->tclasLength)));
    }
    return(ptr);
}


/* Deserialise a CsrWifiSmeTspecReq.
 * NOTE(review): kmalloc() results unchecked; 'length' never used as a bound. */
void* CsrWifiSmeTspecReqDes(u8 *buffer, size_t length)
{
    CsrWifiSmeTspecReq *primitive = kmalloc(sizeof(CsrWifiSmeTspecReq), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->action, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->transactionId, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->strict, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->ctrlMask, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->tspecLength, buffer, &offset);
    if (primitive->tspecLength)
    {
        primitive->tspec = kmalloc(primitive->tspecLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->tspec, buffer, &offset, ((u16) (primitive->tspecLength)));
    }
    else
    {
        primitive->tspec = NULL;
    }
    CsrUint16Des((u16 *) &primitive->tclasLength, buffer, &offset);
    if (primitive->tclasLength)
    {
        primitive->tclas = kmalloc(primitive->tclasLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->tclas, buffer, &offset, ((u16) (primitive->tclasLength)));
    }
    else
    {
        primitive->tclas = NULL;
    }
    return primitive;
}


/* Free a deserialised CsrWifiSmeTspecReq and its TSPEC/TCLAS blobs. */
void CsrWifiSmeTspecReqSerFree(void *voidPrimitivePointer)
{
    CsrWifiSmeTspecReq *primitive = (CsrWifiSmeTspecReq *) voidPrimitivePointer;
    kfree(primitive->tspec);
    kfree(primitive->tclas);
    kfree(primitive);
}


/* Size in bytes of the serialised form of a CsrWifiSmeWifiFlightmodeReq. */
size_t CsrWifiSmeWifiFlightmodeReqSizeof(void *msg)
{
    CsrWifiSmeWifiFlightmodeReq *primitive = (CsrWifiSmeWifiFlightmodeReq *) msg;
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data.
Could be more efficient (Try 14) */
    bufferSize += 6; /* u8 primitive->address.a[6] */
    bufferSize += 2; /* u16 primitive->mibFilesCount */
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->mibFilesCount; i1++)
        {
            bufferSize += 2; /* u16 primitive->mibFiles[i1].length */
            bufferSize += primitive->mibFiles[i1].length; /* u8 primitive->mibFiles[i1].data */
        }
    }
    return bufferSize;
}


/* Serialise a CsrWifiSmeWifiFlightmodeReq (MAC address plus MIB file list). */
u8* CsrWifiSmeWifiFlightmodeReqSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeWifiFlightmodeReq *primitive = (CsrWifiSmeWifiFlightmodeReq *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrMemCpySer(ptr, len, (const void *) primitive->address.a, ((u16) (6)));
    CsrUint16Ser(ptr, len, (u16) primitive->mibFilesCount);
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->mibFilesCount; i1++)
        {
            CsrUint16Ser(ptr, len, (u16) primitive->mibFiles[i1].length);
            if (primitive->mibFiles[i1].length)
            {
                CsrMemCpySer(ptr, len, (const void *) primitive->mibFiles[i1].data, ((u16) (primitive->mibFiles[i1].length)));
            }
        }
    }
    return(ptr);
}


/* Deserialise a CsrWifiSmeWifiFlightmodeReq.
 * NOTE(review): kmalloc() results unchecked; 'length' never used as a bound. */
void* CsrWifiSmeWifiFlightmodeReqDes(u8 *buffer, size_t length)
{
    CsrWifiSmeWifiFlightmodeReq *primitive = kmalloc(sizeof(CsrWifiSmeWifiFlightmodeReq), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrMemCpyDes(primitive->address.a, buffer, &offset, ((u16) (6)));
    CsrUint16Des((u16 *) &primitive->mibFilesCount, buffer, &offset);
    primitive->mibFiles = NULL;
    if (primitive->mibFilesCount)
    {
        primitive->mibFiles = kmalloc(sizeof(CsrWifiSmeDataBlock) * primitive->mibFilesCount, GFP_KERNEL);
    }
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->mibFilesCount; i1++)
        {
            CsrUint16Des((u16 *) &primitive->mibFiles[i1].length, buffer, &offset);
            if (primitive->mibFiles[i1].length)
            {
                primitive->mibFiles[i1].data = kmalloc(primitive->mibFiles[i1].length, GFP_KERNEL);
                CsrMemCpyDes(primitive->mibFiles[i1].data, buffer, &offset, ((u16) (primitive->mibFiles[i1].length)));
            }
            else
            {
                primitive->mibFiles[i1].data = NULL;
            }
        }
    }
    return primitive;
}


/* Free a deserialised CsrWifiSmeWifiFlightmodeReq: each MIB file's data,
 * then the file array, then the primitive itself. */
void
CsrWifiSmeWifiFlightmodeReqSerFree(void *voidPrimitivePointer)
{
    CsrWifiSmeWifiFlightmodeReq *primitive = (CsrWifiSmeWifiFlightmodeReq *) voidPrimitivePointer;
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->mibFilesCount; i1++)
        {
            kfree(primitive->mibFiles[i1].data);
        }
    }
    kfree(primitive->mibFiles);
    kfree(primitive);
}


/* Size in bytes of the serialised form of a CsrWifiSmeWifiOnReq
 * (same wire layout as the flight-mode request above). */
size_t CsrWifiSmeWifiOnReqSizeof(void *msg)
{
    CsrWifiSmeWifiOnReq *primitive = (CsrWifiSmeWifiOnReq *) msg;
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data. Could be more efficient (Try 14) */
    bufferSize += 6; /* u8 primitive->address.a[6] */
    bufferSize += 2; /* u16 primitive->mibFilesCount */
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->mibFilesCount; i1++)
        {
            bufferSize += 2; /* u16 primitive->mibFiles[i1].length */
            bufferSize += primitive->mibFiles[i1].length; /* u8 primitive->mibFiles[i1].data */
        }
    }
    return bufferSize;
}


/* Serialise a CsrWifiSmeWifiOnReq. */
u8* CsrWifiSmeWifiOnReqSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeWifiOnReq *primitive = (CsrWifiSmeWifiOnReq *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrMemCpySer(ptr, len, (const void *) primitive->address.a, ((u16) (6)));
    CsrUint16Ser(ptr, len, (u16) primitive->mibFilesCount);
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->mibFilesCount; i1++)
        {
            CsrUint16Ser(ptr, len, (u16) primitive->mibFiles[i1].length);
            if (primitive->mibFiles[i1].length)
            {
                CsrMemCpySer(ptr, len, (const void *) primitive->mibFiles[i1].data, ((u16) (primitive->mibFiles[i1].length)));
            }
        }
    }
    return(ptr);
}


/* Deserialise a CsrWifiSmeWifiOnReq.
 * NOTE(review): kmalloc() results unchecked; 'length' never used as a bound. */
void* CsrWifiSmeWifiOnReqDes(u8 *buffer, size_t length)
{
    CsrWifiSmeWifiOnReq *primitive = kmalloc(sizeof(CsrWifiSmeWifiOnReq), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrMemCpyDes(primitive->address.a, buffer, &offset, ((u16) (6)));
    CsrUint16Des((u16 *) &primitive->mibFilesCount, buffer, &offset);
    primitive->mibFiles = NULL;
    if (primitive->mibFilesCount)
    {
        primitive->mibFiles = kmalloc(sizeof(CsrWifiSmeDataBlock) * primitive->mibFilesCount, GFP_KERNEL);
    }
    {
        u16 i1;
        for (i1 = 0;
             i1 < primitive->mibFilesCount; i1++)
        {
            CsrUint16Des((u16 *) &primitive->mibFiles[i1].length, buffer, &offset);
            if (primitive->mibFiles[i1].length)
            {
                primitive->mibFiles[i1].data = kmalloc(primitive->mibFiles[i1].length, GFP_KERNEL);
                CsrMemCpyDes(primitive->mibFiles[i1].data, buffer, &offset, ((u16) (primitive->mibFiles[i1].length)));
            }
            else
            {
                primitive->mibFiles[i1].data = NULL;
            }
        }
    }
    return primitive;
}


/* Free a deserialised CsrWifiSmeWifiOnReq and its MIB file blocks. */
void CsrWifiSmeWifiOnReqSerFree(void *voidPrimitivePointer)
{
    CsrWifiSmeWifiOnReq *primitive = (CsrWifiSmeWifiOnReq *) voidPrimitivePointer;
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->mibFilesCount; i1++)
        {
            kfree(primitive->mibFiles[i1].data);
        }
    }
    kfree(primitive->mibFiles);
    kfree(primitive);
}


/* Size in bytes of the serialised form of a CsrWifiSmeCloakedSsidsSetReq. */
size_t CsrWifiSmeCloakedSsidsSetReqSizeof(void *msg)
{
    CsrWifiSmeCloakedSsidsSetReq *primitive = (CsrWifiSmeCloakedSsidsSetReq *) msg;
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data. Could be more efficient (Try 37) */
    bufferSize += 1; /* u8 primitive->cloakedSsids.cloakedSsidsCount */
    {
        u16 i2;
        for (i2 = 0; i2 < primitive->cloakedSsids.cloakedSsidsCount; i2++)
        {
            bufferSize += 32; /* u8 primitive->cloakedSsids.cloakedSsids[i2].ssid[32] */
            bufferSize += 1;  /* u8 primitive->cloakedSsids.cloakedSsids[i2].length */
        }
    }
    return bufferSize;
}


/* Serialise a CsrWifiSmeCloakedSsidsSetReq. */
u8* CsrWifiSmeCloakedSsidsSetReqSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeCloakedSsidsSetReq *primitive = (CsrWifiSmeCloakedSsidsSetReq *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint8Ser(ptr, len, (u8) primitive->cloakedSsids.cloakedSsidsCount);
    {
        u16 i2;
        for (i2 = 0; i2 < primitive->cloakedSsids.cloakedSsidsCount; i2++)
        {
            CsrMemCpySer(ptr, len, (const void *) primitive->cloakedSsids.cloakedSsids[i2].ssid, ((u16) (32)));
            CsrUint8Ser(ptr, len, (u8) primitive->cloakedSsids.cloakedSsids[i2].length);
        }
    }
    return(ptr);
}


/* Deserialise a CsrWifiSmeCloakedSsidsSetReq.
 * NOTE(review): kmalloc() results unchecked; 'length' never used as a bound. */
void* CsrWifiSmeCloakedSsidsSetReqDes(u8 *buffer, size_t length)
{
    CsrWifiSmeCloakedSsidsSetReq *primitive = kmalloc(sizeof(CsrWifiSmeCloakedSsidsSetReq), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->cloakedSsids.cloakedSsidsCount, buffer, &offset);
    primitive->cloakedSsids.cloakedSsids = NULL;
    if (primitive->cloakedSsids.cloakedSsidsCount)
    {
        primitive->cloakedSsids.cloakedSsids = kmalloc(sizeof(CsrWifiSsid) * primitive->cloakedSsids.cloakedSsidsCount, GFP_KERNEL);
    }
    {
        u16 i2;
        for (i2 = 0; i2 < primitive->cloakedSsids.cloakedSsidsCount; i2++)
        {
            CsrMemCpyDes(primitive->cloakedSsids.cloakedSsids[i2].ssid, buffer, &offset, ((u16) (32)));
            CsrUint8Des((u8 *) &primitive->cloakedSsids.cloakedSsids[i2].length, buffer, &offset);
        }
    }
    return primitive;
}


/* Free a deserialised CsrWifiSmeCloakedSsidsSetReq and its SSID list. */
void CsrWifiSmeCloakedSsidsSetReqSerFree(void *voidPrimitivePointer)
{
    CsrWifiSmeCloakedSsidsSetReq *primitive = (CsrWifiSmeCloakedSsidsSetReq *) voidPrimitivePointer;
    kfree(primitive->cloakedSsids.cloakedSsids);
    kfree(primitive);
}


/* Size in bytes of the serialised form of a CsrWifiSmeSmeCommonConfigSetReq
 * (fixed size: the msg argument is not needed and not dereferenced). */
size_t CsrWifiSmeSmeCommonConfigSetReqSizeof(void *msg)
{
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data.
Could be more efficient (Try 8) */ bufferSize += 1; /* CsrWifiSme80211dTrustLevel primitive->deviceConfig.trustLevel */ bufferSize += 2; /* u8 primitive->deviceConfig.countryCode[2] */ bufferSize += 1; /* CsrWifiSmeFirmwareDriverInterface primitive->deviceConfig.firmwareDriverInterface */ bufferSize += 1; /* u8 primitive->deviceConfig.enableStrictDraftN */ return bufferSize; } u8* CsrWifiSmeSmeCommonConfigSetReqSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeSmeCommonConfigSetReq *primitive = (CsrWifiSmeSmeCommonConfigSetReq *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint8Ser(ptr, len, (u8) primitive->deviceConfig.trustLevel); CsrMemCpySer(ptr, len, (const void *) primitive->deviceConfig.countryCode, ((u16) (2))); CsrUint8Ser(ptr, len, (u8) primitive->deviceConfig.firmwareDriverInterface); CsrUint8Ser(ptr, len, (u8) primitive->deviceConfig.enableStrictDraftN); return(ptr); } void* CsrWifiSmeSmeCommonConfigSetReqDes(u8 *buffer, size_t length) { CsrWifiSmeSmeCommonConfigSetReq *primitive = kmalloc(sizeof(CsrWifiSmeSmeCommonConfigSetReq), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint8Des((u8 *) &primitive->deviceConfig.trustLevel, buffer, &offset); CsrMemCpyDes(primitive->deviceConfig.countryCode, buffer, &offset, ((u16) (2))); CsrUint8Des((u8 *) &primitive->deviceConfig.firmwareDriverInterface, buffer, &offset); CsrUint8Des((u8 *) &primitive->deviceConfig.enableStrictDraftN, buffer, &offset); return primitive; } size_t CsrWifiSmeWpsConfigurationReqSizeof(void *msg) { CsrWifiSmeWpsConfigurationReq *primitive = (CsrWifiSmeWpsConfigurationReq *) msg; size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 240) */ bufferSize += 1; /* u8 primitive->wpsConfig.wpsVersion */ bufferSize += 16; /* u8 primitive->wpsConfig.uuid[16] */ bufferSize += 32; /* u8 primitive->wpsConfig.deviceName[32] */ bufferSize += 1; /* u8 primitive->wpsConfig.deviceNameLength */ bufferSize += 64; /* u8 primitive->wpsConfig.manufacturer[64] */ bufferSize += 1; /* u8 primitive->wpsConfig.manufacturerLength */ bufferSize += 32; /* u8 primitive->wpsConfig.modelName[32] */ bufferSize += 1; /* u8 primitive->wpsConfig.modelNameLength */ bufferSize += 32; /* u8 primitive->wpsConfig.modelNumber[32] */ bufferSize += 1; /* u8 primitive->wpsConfig.modelNumberLength */ bufferSize += 32; /* u8 primitive->wpsConfig.serialNumber[32] */ bufferSize += 8; /* u8 primitive->wpsConfig.primDeviceType.deviceDetails[8] */ bufferSize += 1; /* u8 primitive->wpsConfig.secondaryDeviceTypeCount */ { u16 i2; for (i2 = 0; i2 < primitive->wpsConfig.secondaryDeviceTypeCount; i2++) { bufferSize += 8; /* u8 primitive->wpsConfig.secondaryDeviceType[i2].deviceDetails[8] */ } } bufferSize += 2; /* CsrWifiSmeWpsConfigTypeMask primitive->wpsConfig.configMethods */ bufferSize += 1; /* u8 primitive->wpsConfig.rfBands */ bufferSize += 4; /* u8 primitive->wpsConfig.osVersion[4] */ return bufferSize; } u8* CsrWifiSmeWpsConfigurationReqSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeWpsConfigurationReq *primitive = (CsrWifiSmeWpsConfigurationReq *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint8Ser(ptr, len, (u8) primitive->wpsConfig.wpsVersion); CsrMemCpySer(ptr, len, (const void *) primitive->wpsConfig.uuid, ((u16) (16))); CsrMemCpySer(ptr, len, (const void *) primitive->wpsConfig.deviceName, ((u16) (32))); CsrUint8Ser(ptr, len, (u8) primitive->wpsConfig.deviceNameLength); CsrMemCpySer(ptr, len, (const void *) primitive->wpsConfig.manufacturer, ((u16) (64))); CsrUint8Ser(ptr, len, (u8) primitive->wpsConfig.manufacturerLength); CsrMemCpySer(ptr, len, (const void *) 
/* Continuation of CsrWifiSmeWpsConfigurationReqSer: remaining fixed fields,
 * then the variable-length secondary device type list. */
primitive->wpsConfig.modelName, ((u16) (32)));
CsrUint8Ser(ptr, len, (u8) primitive->wpsConfig.modelNameLength);
CsrMemCpySer(ptr, len, (const void *) primitive->wpsConfig.modelNumber, ((u16) (32)));
CsrUint8Ser(ptr, len, (u8) primitive->wpsConfig.modelNumberLength);
CsrMemCpySer(ptr, len, (const void *) primitive->wpsConfig.serialNumber, ((u16) (32)));
CsrMemCpySer(ptr, len, (const void *) primitive->wpsConfig.primDeviceType.deviceDetails, ((u16) (8)));
CsrUint8Ser(ptr, len, (u8) primitive->wpsConfig.secondaryDeviceTypeCount);
{
    u16 i2;
    for (i2 = 0; i2 < primitive->wpsConfig.secondaryDeviceTypeCount; i2++)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->wpsConfig.secondaryDeviceType[i2].deviceDetails, ((u16) (8)));
    }
}
CsrUint16Ser(ptr, len, (u16) primitive->wpsConfig.configMethods);
CsrUint8Ser(ptr, len, (u8) primitive->wpsConfig.rfBands);
CsrMemCpySer(ptr, len, (const void *) primitive->wpsConfig.osVersion, ((u16) (4)));
return(ptr);
}

/* Decode a CsrWifiSmeWpsConfigurationReq; allocates the secondary device
 * type array only when the count is non-zero.
 * NOTE(review): kmalloc() results are unchecked throughout - NULL deref on
 * allocation failure. */
void* CsrWifiSmeWpsConfigurationReqDes(u8 *buffer, size_t length)
{
    CsrWifiSmeWpsConfigurationReq *primitive = kmalloc(sizeof(CsrWifiSmeWpsConfigurationReq), GFP_KERNEL);
    size_t offset;
    offset = 0;
    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->wpsConfig.wpsVersion, buffer, &offset);
    CsrMemCpyDes(primitive->wpsConfig.uuid, buffer, &offset, ((u16) (16)));
    CsrMemCpyDes(primitive->wpsConfig.deviceName, buffer, &offset, ((u16) (32)));
    CsrUint8Des((u8 *) &primitive->wpsConfig.deviceNameLength, buffer, &offset);
    CsrMemCpyDes(primitive->wpsConfig.manufacturer, buffer, &offset, ((u16) (64)));
    CsrUint8Des((u8 *) &primitive->wpsConfig.manufacturerLength, buffer, &offset);
    CsrMemCpyDes(primitive->wpsConfig.modelName, buffer, &offset, ((u16) (32)));
    CsrUint8Des((u8 *) &primitive->wpsConfig.modelNameLength, buffer, &offset);
    CsrMemCpyDes(primitive->wpsConfig.modelNumber, buffer, &offset, ((u16) (32)));
    CsrUint8Des((u8 *) &primitive->wpsConfig.modelNumberLength, buffer, &offset);
    CsrMemCpyDes(primitive->wpsConfig.serialNumber, buffer, &offset, ((u16) (32)));
    CsrMemCpyDes(primitive->wpsConfig.primDeviceType.deviceDetails, buffer, &offset, ((u16) (8)));
    CsrUint8Des((u8 *) &primitive->wpsConfig.secondaryDeviceTypeCount, buffer, &offset);
    primitive->wpsConfig.secondaryDeviceType = NULL;
    if (primitive->wpsConfig.secondaryDeviceTypeCount)
    {
        primitive->wpsConfig.secondaryDeviceType = kmalloc(sizeof(CsrWifiSmeWpsDeviceType) * primitive->wpsConfig.secondaryDeviceTypeCount, GFP_KERNEL);
    }
    {
        u16 i2;
        for (i2 = 0; i2 < primitive->wpsConfig.secondaryDeviceTypeCount; i2++)
        {
            CsrMemCpyDes(primitive->wpsConfig.secondaryDeviceType[i2].deviceDetails, buffer, &offset, ((u16) (8)));
        }
    }
    CsrUint16Des((u16 *) &primitive->wpsConfig.configMethods, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->wpsConfig.rfBands, buffer, &offset);
    CsrMemCpyDes(primitive->wpsConfig.osVersion, buffer, &offset, ((u16) (4)));
    return primitive;
}

/* Free a deserialised CsrWifiSmeWpsConfigurationReq and its device array. */
void CsrWifiSmeWpsConfigurationReqSerFree(void *voidPrimitivePointer)
{
    CsrWifiSmeWpsConfigurationReq *primitive = (CsrWifiSmeWpsConfigurationReq *) voidPrimitivePointer;
    kfree(primitive->wpsConfig.secondaryDeviceType);
    kfree(primitive);
}

/* Serialised size of a CsrWifiSmeSetReq: header + 4-byte length + payload. */
size_t CsrWifiSmeSetReqSizeof(void *msg)
{
    CsrWifiSmeSetReq *primitive = (CsrWifiSmeSetReq *) msg;
    size_t bufferSize = 2; /* Calculate the Size of the Serialised Data.
                              Could be more efficient (Try 8) */
    bufferSize += 4; /* u32 primitive->dataLength */
    bufferSize += primitive->dataLength; /* u8 primitive->data */
    return bufferSize;
}

/* Encode a CsrWifiSmeSetReq; payload is copied only when dataLength != 0.
 * NOTE(review): dataLength is serialised as u32 but the CsrMemCpySer call
 * below casts it to u16 - lengths above 65535 would be truncated; confirm
 * the upstream bound before relying on larger payloads. */
u8* CsrWifiSmeSetReqSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeSetReq *primitive = (CsrWifiSmeSetReq *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint32Ser(ptr, len, (u32) primitive->dataLength);
    if (primitive->dataLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->data, ((u16) (primitive->dataLength)));
    }
    return(ptr);
}

/* Decode a CsrWifiSmeSetReq; data is NULL when dataLength is zero.
 * NOTE(review): same u32-to-u16 cast as the serialiser above. */
void* CsrWifiSmeSetReqDes(u8 *buffer, size_t length)
{
    CsrWifiSmeSetReq *primitive = kmalloc(sizeof(CsrWifiSmeSetReq), GFP_KERNEL);
    size_t offset;
    offset = 0;
    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->dataLength, buffer, &offset);
    if (primitive->dataLength)
    {
        primitive->data = kmalloc(primitive->dataLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->data, buffer, &offset, ((u16) (primitive->dataLength)));
    }
    else
    {
        primitive->data = NULL;
    }
    return primitive;
}

/* Free a deserialised CsrWifiSmeSetReq and its payload. */
void CsrWifiSmeSetReqSerFree(void *voidPrimitivePointer)
{
    CsrWifiSmeSetReq *primitive = (CsrWifiSmeSetReq *) voidPrimitivePointer;
    kfree(primitive->data);
    kfree(primitive);
}

/* Serialised size of a CsrWifiSmeAdhocConfigGetCfm (all fixed fields). */
size_t CsrWifiSmeAdhocConfigGetCfmSizeof(void *msg)
{
    size_t bufferSize = 2; /* Calculate the Size of the Serialised Data.
Could be more efficient (Try 13) */
    bufferSize += 2; /* CsrResult primitive->status */
    bufferSize += 2; /* u16 primitive->adHocConfig.atimWindowTu */
    bufferSize += 2; /* u16 primitive->adHocConfig.beaconPeriodTu */
    bufferSize += 2; /* u16 primitive->adHocConfig.joinOnlyAttempts */
    bufferSize += 2; /* u16 primitive->adHocConfig.joinAttemptIntervalMs */
    return bufferSize;
}

/* Encode a CsrWifiSmeAdhocConfigGetCfm. */
u8* CsrWifiSmeAdhocConfigGetCfmSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeAdhocConfigGetCfm *primitive = (CsrWifiSmeAdhocConfigGetCfm *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->status);
    CsrUint16Ser(ptr, len, (u16) primitive->adHocConfig.atimWindowTu);
    CsrUint16Ser(ptr, len, (u16) primitive->adHocConfig.beaconPeriodTu);
    CsrUint16Ser(ptr, len, (u16) primitive->adHocConfig.joinOnlyAttempts);
    CsrUint16Ser(ptr, len, (u16) primitive->adHocConfig.joinAttemptIntervalMs);
    return(ptr);
}

/* Decode a CsrWifiSmeAdhocConfigGetCfm (fixed-size; no arrays to free).
 * NOTE(review): kmalloc() result unchecked, as elsewhere in this file. */
void* CsrWifiSmeAdhocConfigGetCfmDes(u8 *buffer, size_t length)
{
    CsrWifiSmeAdhocConfigGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeAdhocConfigGetCfm), GFP_KERNEL);
    size_t offset;
    offset = 0;
    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->adHocConfig.atimWindowTu, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->adHocConfig.beaconPeriodTu, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->adHocConfig.joinOnlyAttempts, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->adHocConfig.joinAttemptIntervalMs, buffer, &offset);
    return primitive;
}

/* Serialised size of a CsrWifiSmeAssociationCompleteInd; the variable parts
 * are the five frame/IE buffers, each preceded by a 2-byte length field. */
size_t CsrWifiSmeAssociationCompleteIndSizeof(void *msg)
{
    CsrWifiSmeAssociationCompleteInd *primitive = (CsrWifiSmeAssociationCompleteInd *) msg;
    size_t bufferSize = 2; /* Calculate the Size of the Serialised Data.
                              Could be more efficient (Try 98) */
    bufferSize += 2;  /* u16 primitive->interfaceTag */
    bufferSize += 2;  /* CsrResult primitive->status */
    bufferSize += 32; /* u8 primitive->connectionInfo.ssid.ssid[32] */
    bufferSize += 1;  /* u8 primitive->connectionInfo.ssid.length */
    bufferSize += 6;  /* u8 primitive->connectionInfo.bssid.a[6] */
    bufferSize += 1;  /* CsrWifiSme80211NetworkType primitive->connectionInfo.networkType80211 */
    bufferSize += 1;  /* u8 primitive->connectionInfo.channelNumber */
    bufferSize += 2;  /* u16 primitive->connectionInfo.channelFrequency */
    bufferSize += 2;  /* CsrWifiSmeAuthMode primitive->connectionInfo.authMode */
    bufferSize += 2;  /* CsrWifiSmeEncryption primitive->connectionInfo.pairwiseCipher */
    bufferSize += 2;  /* CsrWifiSmeEncryption primitive->connectionInfo.groupCipher */
    bufferSize += 1;  /* CsrWifiSmeRadioIF primitive->connectionInfo.ifIndex */
    bufferSize += 2;  /* u16 primitive->connectionInfo.atimWindowTu */
    bufferSize += 2;  /* u16 primitive->connectionInfo.beaconPeriodTu */
    bufferSize += 1;  /* u8 primitive->connectionInfo.reassociation */
    bufferSize += 2;  /* u16 primitive->connectionInfo.beaconFrameLength */
    bufferSize += primitive->connectionInfo.beaconFrameLength; /* u8 primitive->connectionInfo.beaconFrame */
    bufferSize += 2;  /* u16 primitive->connectionInfo.associationReqFrameLength */
    bufferSize += primitive->connectionInfo.associationReqFrameLength; /* u8 primitive->connectionInfo.associationReqFrame */
    bufferSize += 2;  /* u16 primitive->connectionInfo.associationRspFrameLength */
    bufferSize += primitive->connectionInfo.associationRspFrameLength; /* u8 primitive->connectionInfo.associationRspFrame */
    bufferSize += 2;  /* u16 primitive->connectionInfo.assocScanInfoElementsLength */
    bufferSize += primitive->connectionInfo.assocScanInfoElementsLength; /* u8 primitive->connectionInfo.assocScanInfoElements */
    bufferSize += 2;  /* u16 primitive->connectionInfo.assocReqCapabilities */
    bufferSize += 2;  /* u16 primitive->connectionInfo.assocReqListenIntervalTu */
    bufferSize += 6;  /* u8 primitive->connectionInfo.assocReqApAddress.a[6] */
    bufferSize += 2;  /* u16 primitive->connectionInfo.assocReqInfoElementsLength */
    bufferSize += primitive->connectionInfo.assocReqInfoElementsLength; /* u8 primitive->connectionInfo.assocReqInfoElements */
    bufferSize += 2;  /* CsrWifiSmeIEEE80211Result primitive->connectionInfo.assocRspResult */
    bufferSize += 2;  /* u16 primitive->connectionInfo.assocRspCapabilityInfo */
    bufferSize += 2;  /* u16 primitive->connectionInfo.assocRspAssociationId */
    bufferSize += 2;  /* u16 primitive->connectionInfo.assocRspInfoElementsLength */
    bufferSize += primitive->connectionInfo.assocRspInfoElementsLength; /* u8 primitive->connectionInfo.assocRspInfoElements */
    bufferSize += 2;  /* CsrWifiSmeIEEE80211Reason primitive->deauthReason */
    return bufferSize;
}

/* Encode a CsrWifiSmeAssociationCompleteInd; optional buffers are emitted
 * only when their length field is non-zero. */
u8* CsrWifiSmeAssociationCompleteIndSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeAssociationCompleteInd *primitive = (CsrWifiSmeAssociationCompleteInd *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    CsrUint16Ser(ptr, len, (u16) primitive->status);
    CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.ssid.ssid, ((u16) (32)));
    CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.ssid.length);
    CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.bssid.a, ((u16) (6)));
    CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.networkType80211);
    CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.channelNumber);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.channelFrequency);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.authMode);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.pairwiseCipher);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.groupCipher);
    CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.ifIndex);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.atimWindowTu);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.beaconPeriodTu);
    CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.reassociation);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.beaconFrameLength);
    if (primitive->connectionInfo.beaconFrameLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.beaconFrame, ((u16) (primitive->connectionInfo.beaconFrameLength)));
    }
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.associationReqFrameLength);
    if (primitive->connectionInfo.associationReqFrameLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.associationReqFrame, ((u16) (primitive->connectionInfo.associationReqFrameLength)));
    }
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.associationRspFrameLength);
    if (primitive->connectionInfo.associationRspFrameLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.associationRspFrame, ((u16) (primitive->connectionInfo.associationRspFrameLength)));
    }
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocScanInfoElementsLength);
    if (primitive->connectionInfo.assocScanInfoElementsLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocScanInfoElements, ((u16) (primitive->connectionInfo.assocScanInfoElementsLength)));
    }
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocReqCapabilities);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocReqListenIntervalTu);
    CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocReqApAddress.a, ((u16) (6)));
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocReqInfoElementsLength);
    if (primitive->connectionInfo.assocReqInfoElementsLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocReqInfoElements, ((u16) (primitive->connectionInfo.assocReqInfoElementsLength)));
    }
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspResult);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspCapabilityInfo);
/* Continuation of CsrWifiSmeAssociationCompleteIndSer: association response
 * fields, optional response IEs, and the deauth reason. */
CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspAssociationId);
CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspInfoElementsLength);
if (primitive->connectionInfo.assocRspInfoElementsLength)
{
    CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocRspInfoElements, ((u16) (primitive->connectionInfo.assocRspInfoElementsLength)));
}
CsrUint16Ser(ptr, len, (u16) primitive->deauthReason);
return(ptr);
}

/* Decode a CsrWifiSmeAssociationCompleteInd; each optional buffer is
 * allocated only when its length field is non-zero, otherwise set to NULL.
 * NOTE(review): kmalloc() results are unchecked throughout. */
void* CsrWifiSmeAssociationCompleteIndDes(u8 *buffer, size_t length)
{
    CsrWifiSmeAssociationCompleteInd *primitive = kmalloc(sizeof(CsrWifiSmeAssociationCompleteInd), GFP_KERNEL);
    size_t offset;
    offset = 0;
    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
    CsrMemCpyDes(primitive->connectionInfo.ssid.ssid, buffer, &offset, ((u16) (32)));
    CsrUint8Des((u8 *) &primitive->connectionInfo.ssid.length, buffer, &offset);
    CsrMemCpyDes(primitive->connectionInfo.bssid.a, buffer, &offset, ((u16) (6)));
    CsrUint8Des((u8 *) &primitive->connectionInfo.networkType80211, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->connectionInfo.channelNumber, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.channelFrequency, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.authMode, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.pairwiseCipher, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.groupCipher, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->connectionInfo.ifIndex, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.atimWindowTu, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.beaconPeriodTu, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->connectionInfo.reassociation, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.beaconFrameLength, buffer, &offset);
    if (primitive->connectionInfo.beaconFrameLength)
    {
        primitive->connectionInfo.beaconFrame = kmalloc(primitive->connectionInfo.beaconFrameLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->connectionInfo.beaconFrame, buffer, &offset, ((u16) (primitive->connectionInfo.beaconFrameLength)));
    }
    else
    {
        primitive->connectionInfo.beaconFrame = NULL;
    }
    CsrUint16Des((u16 *) &primitive->connectionInfo.associationReqFrameLength, buffer, &offset);
    if (primitive->connectionInfo.associationReqFrameLength)
    {
        primitive->connectionInfo.associationReqFrame = kmalloc(primitive->connectionInfo.associationReqFrameLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->connectionInfo.associationReqFrame, buffer, &offset, ((u16) (primitive->connectionInfo.associationReqFrameLength)));
    }
    else
    {
        primitive->connectionInfo.associationReqFrame = NULL;
    }
    CsrUint16Des((u16 *) &primitive->connectionInfo.associationRspFrameLength, buffer, &offset);
    if (primitive->connectionInfo.associationRspFrameLength)
    {
        primitive->connectionInfo.associationRspFrame = kmalloc(primitive->connectionInfo.associationRspFrameLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->connectionInfo.associationRspFrame, buffer, &offset, ((u16) (primitive->connectionInfo.associationRspFrameLength)));
    }
    else
    {
        primitive->connectionInfo.associationRspFrame = NULL;
    }
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocScanInfoElementsLength, buffer, &offset);
    if (primitive->connectionInfo.assocScanInfoElementsLength)
    {
        primitive->connectionInfo.assocScanInfoElements = kmalloc(primitive->connectionInfo.assocScanInfoElementsLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->connectionInfo.assocScanInfoElements, buffer, &offset, ((u16) (primitive->connectionInfo.assocScanInfoElementsLength)));
    }
    else
    {
        primitive->connectionInfo.assocScanInfoElements = NULL;
    }
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocReqCapabilities, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocReqListenIntervalTu, buffer, &offset);
    CsrMemCpyDes(primitive->connectionInfo.assocReqApAddress.a, buffer, &offset, ((u16) (6)));
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocReqInfoElementsLength, buffer, &offset);
    if (primitive->connectionInfo.assocReqInfoElementsLength)
    {
        primitive->connectionInfo.assocReqInfoElements = kmalloc(primitive->connectionInfo.assocReqInfoElementsLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->connectionInfo.assocReqInfoElements, buffer, &offset, ((u16) (primitive->connectionInfo.assocReqInfoElementsLength)));
    }
    else
    {
        primitive->connectionInfo.assocReqInfoElements = NULL;
    }
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspResult, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspCapabilityInfo, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspAssociationId, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspInfoElementsLength, buffer, &offset);
    if (primitive->connectionInfo.assocRspInfoElementsLength)
    {
        primitive->connectionInfo.assocRspInfoElements = kmalloc(primitive->connectionInfo.assocRspInfoElementsLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->connectionInfo.assocRspInfoElements, buffer, &offset, ((u16) (primitive->connectionInfo.assocRspInfoElementsLength)));
    }
    else
    {
        primitive->connectionInfo.assocRspInfoElements = NULL;
    }
    CsrUint16Des((u16 *) &primitive->deauthReason, buffer, &offset);
    return primitive;
}

/* Free a deserialised CsrWifiSmeAssociationCompleteInd and all six
 * optional buffers (kfree(NULL) is a no-op, so no guards needed). */
void CsrWifiSmeAssociationCompleteIndSerFree(void *voidPrimitivePointer)
{
    CsrWifiSmeAssociationCompleteInd *primitive = (CsrWifiSmeAssociationCompleteInd *) voidPrimitivePointer;
    kfree(primitive->connectionInfo.beaconFrame);
    kfree(primitive->connectionInfo.associationReqFrame);
    kfree(primitive->connectionInfo.associationRspFrame);
    kfree(primitive->connectionInfo.assocScanInfoElements);
    kfree(primitive->connectionInfo.assocReqInfoElements);
    kfree(primitive->connectionInfo.assocRspInfoElements);
    kfree(primitive);
}

/* Serialised size of a CsrWifiSmeAssociationStartInd (fixed fields only). */
size_t CsrWifiSmeAssociationStartIndSizeof(void *msg)
{
    size_t
bufferSize = 2; /* Calculate the Size of the Serialised Data.
                   Could be more efficient (Try 44) */
    bufferSize += 2;  /* u16 primitive->interfaceTag */
    bufferSize += 6;  /* u8 primitive->address.a[6] */
    bufferSize += 32; /* u8 primitive->ssid.ssid[32] */
    bufferSize += 1;  /* u8 primitive->ssid.length */
    return bufferSize;
}

/* Encode a CsrWifiSmeAssociationStartInd. */
u8* CsrWifiSmeAssociationStartIndSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeAssociationStartInd *primitive = (CsrWifiSmeAssociationStartInd *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    CsrMemCpySer(ptr, len, (const void *) primitive->address.a, ((u16) (6)));
    CsrMemCpySer(ptr, len, (const void *) primitive->ssid.ssid, ((u16) (32)));
    CsrUint8Ser(ptr, len, (u8) primitive->ssid.length);
    return(ptr);
}

/* Decode a CsrWifiSmeAssociationStartInd (fixed-size; kmalloc unchecked). */
void* CsrWifiSmeAssociationStartIndDes(u8 *buffer, size_t length)
{
    CsrWifiSmeAssociationStartInd *primitive = kmalloc(sizeof(CsrWifiSmeAssociationStartInd), GFP_KERNEL);
    size_t offset;
    offset = 0;
    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    CsrMemCpyDes(primitive->address.a, buffer, &offset, ((u16) (6)));
    CsrMemCpyDes(primitive->ssid.ssid, buffer, &offset, ((u16) (32)));
    CsrUint8Des((u8 *) &primitive->ssid.length, buffer, &offset);
    return primitive;
}

/* Serialised size of a CsrWifiSmeBlacklistCfm; variable part is the MAC
 * address list (6 bytes per entry). */
size_t CsrWifiSmeBlacklistCfmSizeof(void *msg)
{
    CsrWifiSmeBlacklistCfm *primitive = (CsrWifiSmeBlacklistCfm *) msg;
    size_t bufferSize = 2; /* Calculate the Size of the Serialised Data.
                              Could be more efficient (Try 15) */
    bufferSize += 2; /* u16 primitive->interfaceTag */
    bufferSize += 2; /* CsrResult primitive->status */
    bufferSize += 1; /* CsrWifiSmeListAction primitive->action */
    bufferSize += 1; /* u8 primitive->getAddressCount */
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->getAddressCount; i1++)
        {
            bufferSize += 6; /* u8 primitive->getAddresses[i1].a[6] */
        }
    }
    return bufferSize;
}

/* Encode a CsrWifiSmeBlacklistCfm. */
u8* CsrWifiSmeBlacklistCfmSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeBlacklistCfm *primitive = (CsrWifiSmeBlacklistCfm *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    CsrUint16Ser(ptr, len, (u16) primitive->status);
    CsrUint8Ser(ptr, len, (u8) primitive->action);
    CsrUint8Ser(ptr, len, (u8) primitive->getAddressCount);
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->getAddressCount; i1++)
        {
            CsrMemCpySer(ptr, len, (const void *) primitive->getAddresses[i1].a, ((u16) (6)));
        }
    }
    return(ptr);
}

/* Decode a CsrWifiSmeBlacklistCfm; allocates the address array only when
 * the count is non-zero (kmalloc result unchecked). */
void* CsrWifiSmeBlacklistCfmDes(u8 *buffer, size_t length)
{
    CsrWifiSmeBlacklistCfm *primitive = kmalloc(sizeof(CsrWifiSmeBlacklistCfm), GFP_KERNEL);
    size_t offset;
    offset = 0;
    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->action, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->getAddressCount, buffer, &offset);
    primitive->getAddresses = NULL;
    if (primitive->getAddressCount)
    {
        primitive->getAddresses = kmalloc(sizeof(CsrWifiMacAddress) * primitive->getAddressCount, GFP_KERNEL);
    }
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->getAddressCount; i1++)
        {
            CsrMemCpyDes(primitive->getAddresses[i1].a, buffer, &offset, ((u16) (6)));
        }
    }
    return primitive;
}

/* Free a deserialised CsrWifiSmeBlacklistCfm and its address array. */
void CsrWifiSmeBlacklistCfmSerFree(void *voidPrimitivePointer)
{
    CsrWifiSmeBlacklistCfm *primitive = (CsrWifiSmeBlacklistCfm *) voidPrimitivePointer;
    kfree(primitive->getAddresses);
    kfree(primitive);
}

/* Serialised size of a CsrWifiSmeCalibrationDataGetCfm: header + status +
 * 2-byte length + calibration payload. */
size_t CsrWifiSmeCalibrationDataGetCfmSizeof(void *msg)
{
    CsrWifiSmeCalibrationDataGetCfm *primitive = (CsrWifiSmeCalibrationDataGetCfm *) msg;
    size_t bufferSize = 2; /* Calculate the Size of the Serialised Data.
                              Could be more efficient (Try 8) */
    bufferSize += 2; /* CsrResult primitive->status */
    bufferSize += 2; /* u16 primitive->calibrationDataLength */
    bufferSize += primitive->calibrationDataLength; /* u8 primitive->calibrationData */
    return bufferSize;
}

/* Encode a CsrWifiSmeCalibrationDataGetCfm. */
u8* CsrWifiSmeCalibrationDataGetCfmSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeCalibrationDataGetCfm *primitive = (CsrWifiSmeCalibrationDataGetCfm *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->status);
    CsrUint16Ser(ptr, len, (u16) primitive->calibrationDataLength);
    if (primitive->calibrationDataLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->calibrationData, ((u16) (primitive->calibrationDataLength)));
    }
    return(ptr);
}

/* Decode a CsrWifiSmeCalibrationDataGetCfm; calibrationData is NULL when
 * the length is zero. */
void* CsrWifiSmeCalibrationDataGetCfmDes(u8 *buffer, size_t length)
{
    CsrWifiSmeCalibrationDataGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeCalibrationDataGetCfm), GFP_KERNEL);
    size_t offset;
    offset = 0;
    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->calibrationDataLength, buffer, &offset);
    if (primitive->calibrationDataLength)
    {
        primitive->calibrationData = kmalloc(primitive->calibrationDataLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->calibrationData, buffer, &offset, ((u16) (primitive->calibrationDataLength)));
    }
    else
    {
        primitive->calibrationData = NULL;
    }
    return primitive;
}

/* Free a deserialised CsrWifiSmeCalibrationDataGetCfm and its payload. */
void CsrWifiSmeCalibrationDataGetCfmSerFree(void *voidPrimitivePointer)
{
    CsrWifiSmeCalibrationDataGetCfm *primitive = (CsrWifiSmeCalibrationDataGetCfm *) voidPrimitivePointer;
    kfree(primitive->calibrationData);
    kfree(primitive);
}

/* Serialised size of a CsrWifiSmeCcxConfigGetCfm. */
size_t CsrWifiSmeCcxConfigGetCfmSizeof(void *msg)
{
    size_t bufferSize = 2; /* Calculate the Size of the Serialised Data.
Could be more efficient (Try 11) */
    bufferSize += 2; /* u16 primitive->interfaceTag */
    bufferSize += 2; /* CsrResult primitive->status */
    bufferSize += 1; /* u8 primitive->ccxConfig.keepAliveTimeMs */
    bufferSize += 1; /* u8 primitive->ccxConfig.apRoamingEnabled */
    bufferSize += 1; /* u8 primitive->ccxConfig.measurementsMask */
    bufferSize += 1; /* u8 primitive->ccxConfig.ccxRadioMgtEnabled */
    return bufferSize;
}

/* Encode a CsrWifiSmeCcxConfigGetCfm. */
u8* CsrWifiSmeCcxConfigGetCfmSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeCcxConfigGetCfm *primitive = (CsrWifiSmeCcxConfigGetCfm *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    CsrUint16Ser(ptr, len, (u16) primitive->status);
    CsrUint8Ser(ptr, len, (u8) primitive->ccxConfig.keepAliveTimeMs);
    CsrUint8Ser(ptr, len, (u8) primitive->ccxConfig.apRoamingEnabled);
    CsrUint8Ser(ptr, len, (u8) primitive->ccxConfig.measurementsMask);
    CsrUint8Ser(ptr, len, (u8) primitive->ccxConfig.ccxRadioMgtEnabled);
    return(ptr);
}

/* Decode a CsrWifiSmeCcxConfigGetCfm (fixed-size; kmalloc unchecked). */
void* CsrWifiSmeCcxConfigGetCfmDes(u8 *buffer, size_t length)
{
    CsrWifiSmeCcxConfigGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeCcxConfigGetCfm), GFP_KERNEL);
    size_t offset;
    offset = 0;
    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->ccxConfig.keepAliveTimeMs, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->ccxConfig.apRoamingEnabled, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->ccxConfig.measurementsMask, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->ccxConfig.ccxRadioMgtEnabled, buffer, &offset);
    return primitive;
}

/* Serialised size of a CsrWifiSmeCcxConfigSetCfm. */
size_t CsrWifiSmeCcxConfigSetCfmSizeof(void *msg)
{
    size_t bufferSize = 2; /* Calculate the Size of the Serialised Data.
                              Could be more efficient (Try 7) */
    bufferSize += 2; /* u16 primitive->interfaceTag */
    bufferSize += 2; /* CsrResult primitive->status */
    return bufferSize;
}

/* Encode a CsrWifiSmeCcxConfigSetCfm. */
u8* CsrWifiSmeCcxConfigSetCfmSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeCcxConfigSetCfm *primitive = (CsrWifiSmeCcxConfigSetCfm *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    CsrUint16Ser(ptr, len, (u16) primitive->status);
    return(ptr);
}

/* Decode a CsrWifiSmeCcxConfigSetCfm (fixed-size). */
void* CsrWifiSmeCcxConfigSetCfmDes(u8 *buffer, size_t length)
{
    CsrWifiSmeCcxConfigSetCfm *primitive = kmalloc(sizeof(CsrWifiSmeCcxConfigSetCfm), GFP_KERNEL);
    size_t offset;
    offset = 0;
    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
    return primitive;
}

/* Serialised size of a CsrWifiSmeCoexConfigGetCfm (all fixed fields). */
size_t CsrWifiSmeCoexConfigGetCfmSizeof(void *msg)
{
    size_t bufferSize = 2; /* Calculate the Size of the Serialised Data.
                              Could be more efficient (Try 31) */
    bufferSize += 2; /* CsrResult primitive->status */
    bufferSize += 1; /* u8 primitive->coexConfig.coexEnableSchemeManagement */
    bufferSize += 1; /* u8 primitive->coexConfig.coexPeriodicWakeHost */
    bufferSize += 2; /* u16 primitive->coexConfig.coexTrafficBurstyLatencyMs */
    bufferSize += 2; /* u16 primitive->coexConfig.coexTrafficContinuousLatencyMs */
    bufferSize += 2; /* u16 primitive->coexConfig.coexObexBlackoutDurationMs */
    bufferSize += 2; /* u16 primitive->coexConfig.coexObexBlackoutPeriodMs */
    bufferSize += 2; /* u16 primitive->coexConfig.coexA2dpBrBlackoutDurationMs */
    bufferSize += 2; /* u16 primitive->coexConfig.coexA2dpBrBlackoutPeriodMs */
    bufferSize += 2; /* u16 primitive->coexConfig.coexA2dpEdrBlackoutDurationMs */
    bufferSize += 2; /* u16 primitive->coexConfig.coexA2dpEdrBlackoutPeriodMs */
    bufferSize += 2; /* u16 primitive->coexConfig.coexPagingBlackoutDurationMs */
    bufferSize += 2; /* u16 primitive->coexConfig.coexPagingBlackoutPeriodMs */
    bufferSize += 2; /* u16 primitive->coexConfig.coexInquiryBlackoutDurationMs */
    bufferSize += 2; /* u16 primitive->coexConfig.coexInquiryBlackoutPeriodMs */
    return bufferSize;
}

/* Encode a CsrWifiSmeCoexConfigGetCfm. */
u8* CsrWifiSmeCoexConfigGetCfmSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeCoexConfigGetCfm *primitive = (CsrWifiSmeCoexConfigGetCfm *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->status);
    CsrUint8Ser(ptr, len, (u8) primitive->coexConfig.coexEnableSchemeManagement);
    CsrUint8Ser(ptr, len, (u8) primitive->coexConfig.coexPeriodicWakeHost);
    CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexTrafficBurstyLatencyMs);
    CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexTrafficContinuousLatencyMs);
    CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexObexBlackoutDurationMs);
    CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexObexBlackoutPeriodMs);
    CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexA2dpBrBlackoutDurationMs);
    CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexA2dpBrBlackoutPeriodMs);
    CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexA2dpEdrBlackoutDurationMs);
    CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexA2dpEdrBlackoutPeriodMs);
    CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexPagingBlackoutDurationMs);
    CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexPagingBlackoutPeriodMs);
    CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexInquiryBlackoutDurationMs);
    CsrUint16Ser(ptr, len, (u16) primitive->coexConfig.coexInquiryBlackoutPeriodMs);
    return(ptr);
}

/* Decode a CsrWifiSmeCoexConfigGetCfm (fixed-size; kmalloc unchecked). */
void* CsrWifiSmeCoexConfigGetCfmDes(u8 *buffer, size_t length)
{
    CsrWifiSmeCoexConfigGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeCoexConfigGetCfm), GFP_KERNEL);
    size_t offset;
    offset = 0;
    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->coexConfig.coexEnableSchemeManagement, buffer, &offset);
    CsrUint8Des((u8 *)
&primitive->coexConfig.coexPeriodicWakeHost, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexTrafficBurstyLatencyMs, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexTrafficContinuousLatencyMs, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexObexBlackoutDurationMs, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexObexBlackoutPeriodMs, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexA2dpBrBlackoutDurationMs, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexA2dpBrBlackoutPeriodMs, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexA2dpEdrBlackoutDurationMs, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexA2dpEdrBlackoutPeriodMs, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexPagingBlackoutDurationMs, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexPagingBlackoutPeriodMs, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexInquiryBlackoutDurationMs, buffer, &offset); CsrUint16Des((u16 *) &primitive->coexConfig.coexInquiryBlackoutPeriodMs, buffer, &offset); return primitive; } size_t CsrWifiSmeCoexInfoGetCfmSizeof(void *msg) { size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 24) */
    bufferSize += 2; /* CsrResult primitive->status */
    bufferSize += 1; /* u8 primitive->coexInfo.hasTrafficData */
    bufferSize += 1; /* CsrWifiSmeTrafficType primitive->coexInfo.currentTrafficType */
    bufferSize += 2; /* u16 primitive->coexInfo.currentPeriodMs */
    bufferSize += 1; /* CsrWifiSmePowerSaveLevel primitive->coexInfo.currentPowerSave */
    bufferSize += 2; /* u16 primitive->coexInfo.currentCoexPeriodMs */
    bufferSize += 2; /* u16 primitive->coexInfo.currentCoexLatencyMs */
    bufferSize += 1; /* u8 primitive->coexInfo.hasBtDevice */
    bufferSize += 4; /* u32 primitive->coexInfo.currentBlackoutDurationUs */
    bufferSize += 4; /* u32 primitive->coexInfo.currentBlackoutPeriodUs */
    bufferSize += 1; /* CsrWifiSmeCoexScheme primitive->coexInfo.currentCoexScheme */
    return bufferSize;
}

/*
 * Serialise a CsrWifiSmeCoexInfoGetCfm into the caller-provided buffer.
 * Field order must match ...Sizeof() and ...Des() for this primitive.
 * Returns ptr; *len is set to the number of bytes written.
 */
u8* CsrWifiSmeCoexInfoGetCfmSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeCoexInfoGetCfm *primitive = (CsrWifiSmeCoexInfoGetCfm *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->status);
    CsrUint8Ser(ptr, len, (u8) primitive->coexInfo.hasTrafficData);
    CsrUint8Ser(ptr, len, (u8) primitive->coexInfo.currentTrafficType);
    CsrUint16Ser(ptr, len, (u16) primitive->coexInfo.currentPeriodMs);
    CsrUint8Ser(ptr, len, (u8) primitive->coexInfo.currentPowerSave);
    CsrUint16Ser(ptr, len, (u16) primitive->coexInfo.currentCoexPeriodMs);
    CsrUint16Ser(ptr, len, (u16) primitive->coexInfo.currentCoexLatencyMs);
    CsrUint8Ser(ptr, len, (u8) primitive->coexInfo.hasBtDevice);
    CsrUint32Ser(ptr, len, (u32) primitive->coexInfo.currentBlackoutDurationUs);
    CsrUint32Ser(ptr, len, (u32) primitive->coexInfo.currentBlackoutPeriodUs);
    CsrUint8Ser(ptr, len, (u8) primitive->coexInfo.currentCoexScheme);
    return(ptr);
}

/*
 * Deserialise a CsrWifiSmeCoexInfoGetCfm (inverse of ...Ser()).
 * Returns a kmalloc'd primitive owned by the caller.
 * NOTE(review): kmalloc result is dereferenced without a NULL check;
 * 'length' is unused, so the read is not bounds-checked.
 */
void* CsrWifiSmeCoexInfoGetCfmDes(u8 *buffer, size_t length)
{
    CsrWifiSmeCoexInfoGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeCoexInfoGetCfm), GFP_KERNEL);
    size_t offset;
    offset = 0;
    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->coexInfo.hasTrafficData, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->coexInfo.currentTrafficType, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->coexInfo.currentPeriodMs, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->coexInfo.currentPowerSave, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->coexInfo.currentCoexPeriodMs, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->coexInfo.currentCoexLatencyMs, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->coexInfo.hasBtDevice, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->coexInfo.currentBlackoutDurationUs, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->coexInfo.currentBlackoutPeriodUs, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->coexInfo.currentCoexScheme, buffer, &offset);
    return primitive;
}

/* Compute the size in bytes of a serialised CsrWifiSmeConnectCfm. */
size_t CsrWifiSmeConnectCfmSizeof(void *msg)
{
    size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. Could be more efficient (Try 7) */
    bufferSize += 2; /* u16 primitive->interfaceTag */
    bufferSize += 2; /* CsrResult primitive->status */
    return bufferSize;
}

/* Serialise a CsrWifiSmeConnectCfm; returns ptr, *len = bytes written. */
u8* CsrWifiSmeConnectCfmSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeConnectCfm *primitive = (CsrWifiSmeConnectCfm *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    CsrUint16Ser(ptr, len, (u16) primitive->status);
    return(ptr);
}

/*
 * Deserialise a CsrWifiSmeConnectCfm; returns a kmalloc'd primitive
 * owned by the caller. NOTE(review): kmalloc result not NULL-checked.
 */
void* CsrWifiSmeConnectCfmDes(u8 *buffer, size_t length)
{
    CsrWifiSmeConnectCfm *primitive = kmalloc(sizeof(CsrWifiSmeConnectCfm), GFP_KERNEL);
    size_t offset;
    offset = 0;
    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
    return primitive;
}

/*
 * Compute the size in bytes of a serialised CsrWifiSmeConnectionConfigGetCfm.
 * Variable-length: includes mlmeAssociateReqInformationElementsLength bytes
 * of information elements.
 */
size_t CsrWifiSmeConnectionConfigGetCfmSizeof(void *msg)
{
    CsrWifiSmeConnectionConfigGetCfm *primitive = (CsrWifiSmeConnectionConfigGetCfm *) msg;
    size_t bufferSize = 2; /* 
Calculate the Size of the Serialised Data. Could be more efficient (Try 59) */
    bufferSize += 2;  /* u16 primitive->interfaceTag */
    bufferSize += 2;  /* CsrResult primitive->status */
    bufferSize += 32; /* u8 primitive->connectionConfig.ssid.ssid[32] */
    bufferSize += 1;  /* u8 primitive->connectionConfig.ssid.length */
    bufferSize += 6;  /* u8 primitive->connectionConfig.bssid.a[6] */
    bufferSize += 1;  /* CsrWifiSmeBssType primitive->connectionConfig.bssType */
    bufferSize += 1;  /* CsrWifiSmeRadioIF primitive->connectionConfig.ifIndex */
    bufferSize += 1;  /* CsrWifiSme80211PrivacyMode primitive->connectionConfig.privacyMode */
    bufferSize += 2;  /* CsrWifiSmeAuthModeMask primitive->connectionConfig.authModeMask */
    bufferSize += 2;  /* CsrWifiSmeEncryptionMask primitive->connectionConfig.encryptionModeMask */
    bufferSize += 2;  /* u16 primitive->connectionConfig.mlmeAssociateReqInformationElementsLength */
    bufferSize += primitive->connectionConfig.mlmeAssociateReqInformationElementsLength; /* u8 primitive->connectionConfig.mlmeAssociateReqInformationElements */
    bufferSize += 1;  /* CsrWifiSmeWmmQosInfoMask primitive->connectionConfig.wmmQosInfo */
    bufferSize += 1;  /* u8 primitive->connectionConfig.adhocJoinOnly */
    bufferSize += 1;  /* u8 primitive->connectionConfig.adhocChannel */
    return bufferSize;
}

/*
 * Serialise a CsrWifiSmeConnectionConfigGetCfm. The variable-length IE
 * block is emitted only when its length field is non-zero; field order
 * must match ...Sizeof() and ...Des(). Returns ptr; *len = bytes written.
 */
u8* CsrWifiSmeConnectionConfigGetCfmSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeConnectionConfigGetCfm *primitive = (CsrWifiSmeConnectionConfigGetCfm *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    CsrUint16Ser(ptr, len, (u16) primitive->status);
    CsrMemCpySer(ptr, len, (const void *) primitive->connectionConfig.ssid.ssid, ((u16) (32)));
    CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.ssid.length);
    CsrMemCpySer(ptr, len, (const void *) primitive->connectionConfig.bssid.a, ((u16) (6)));
    CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.bssType);
    CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.ifIndex);
    CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.privacyMode);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionConfig.authModeMask);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionConfig.encryptionModeMask);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionConfig.mlmeAssociateReqInformationElementsLength);
    if (primitive->connectionConfig.mlmeAssociateReqInformationElementsLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->connectionConfig.mlmeAssociateReqInformationElements, ((u16) (primitive->connectionConfig.mlmeAssociateReqInformationElementsLength)));
    }
    CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.wmmQosInfo);
    CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.adhocJoinOnly);
    CsrUint8Ser(ptr, len, (u8) primitive->connectionConfig.adhocChannel);
    return(ptr);
}

/*
 * Deserialise a CsrWifiSmeConnectionConfigGetCfm (inverse of ...Ser()).
 * Allocates the primitive and, when present, the IE buffer; both are
 * released by CsrWifiSmeConnectionConfigGetCfmSerFree().
 * NOTE(review): neither kmalloc result is NULL-checked before use.
 */
void* CsrWifiSmeConnectionConfigGetCfmDes(u8 *buffer, size_t length)
{
    CsrWifiSmeConnectionConfigGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeConnectionConfigGetCfm), GFP_KERNEL);
    size_t offset;
    offset = 0;
    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
    CsrMemCpyDes(primitive->connectionConfig.ssid.ssid, buffer, &offset, ((u16) (32)));
    CsrUint8Des((u8 *) &primitive->connectionConfig.ssid.length, buffer, &offset);
    CsrMemCpyDes(primitive->connectionConfig.bssid.a, buffer, &offset, ((u16) (6)));
    CsrUint8Des((u8 *) &primitive->connectionConfig.bssType, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->connectionConfig.ifIndex, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->connectionConfig.privacyMode, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionConfig.authModeMask, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionConfig.encryptionModeMask, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionConfig.mlmeAssociateReqInformationElementsLength, buffer, &offset);
    if (primitive->connectionConfig.mlmeAssociateReqInformationElementsLength)
    {
        primitive->connectionConfig.mlmeAssociateReqInformationElements = kmalloc(primitive->connectionConfig.mlmeAssociateReqInformationElementsLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->connectionConfig.mlmeAssociateReqInformationElements, buffer, &offset, ((u16) (primitive->connectionConfig.mlmeAssociateReqInformationElementsLength)));
    }
    else
    {
        primitive->connectionConfig.mlmeAssociateReqInformationElements = NULL;
    }
    CsrUint8Des((u8 *) &primitive->connectionConfig.wmmQosInfo, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->connectionConfig.adhocJoinOnly, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->connectionConfig.adhocChannel, buffer, &offset);
    return primitive;
}

/*
 * Free a CsrWifiSmeConnectionConfigGetCfm produced by ...Des(),
 * including its optional IE buffer (kfree(NULL) is a no-op).
 */
void CsrWifiSmeConnectionConfigGetCfmSerFree(void *voidPrimitivePointer)
{
    CsrWifiSmeConnectionConfigGetCfm *primitive = (CsrWifiSmeConnectionConfigGetCfm *) voidPrimitivePointer;
    kfree(primitive->connectionConfig.mlmeAssociateReqInformationElements);
    kfree(primitive);
}

/*
 * Compute the size in bytes of a serialised CsrWifiSmeConnectionInfoGetCfm.
 * Variable-length: six optional frame/IE buffers are counted by their
 * length fields.
 */
size_t CsrWifiSmeConnectionInfoGetCfmSizeof(void *msg)
{
    CsrWifiSmeConnectionInfoGetCfm *primitive = (CsrWifiSmeConnectionInfoGetCfm *) msg;
    size_t bufferSize = 2; /* Calculate the Size of the Serialised Data.
Could be more efficient (Try 96) */
    bufferSize += 2;  /* u16 primitive->interfaceTag */
    bufferSize += 2;  /* CsrResult primitive->status */
    bufferSize += 32; /* u8 primitive->connectionInfo.ssid.ssid[32] */
    bufferSize += 1;  /* u8 primitive->connectionInfo.ssid.length */
    bufferSize += 6;  /* u8 primitive->connectionInfo.bssid.a[6] */
    bufferSize += 1;  /* CsrWifiSme80211NetworkType primitive->connectionInfo.networkType80211 */
    bufferSize += 1;  /* u8 primitive->connectionInfo.channelNumber */
    bufferSize += 2;  /* u16 primitive->connectionInfo.channelFrequency */
    bufferSize += 2;  /* CsrWifiSmeAuthMode primitive->connectionInfo.authMode */
    bufferSize += 2;  /* CsrWifiSmeEncryption primitive->connectionInfo.pairwiseCipher */
    bufferSize += 2;  /* CsrWifiSmeEncryption primitive->connectionInfo.groupCipher */
    bufferSize += 1;  /* CsrWifiSmeRadioIF primitive->connectionInfo.ifIndex */
    bufferSize += 2;  /* u16 primitive->connectionInfo.atimWindowTu */
    bufferSize += 2;  /* u16 primitive->connectionInfo.beaconPeriodTu */
    bufferSize += 1;  /* u8 primitive->connectionInfo.reassociation */
    bufferSize += 2;  /* u16 primitive->connectionInfo.beaconFrameLength */
    bufferSize += primitive->connectionInfo.beaconFrameLength; /* u8 primitive->connectionInfo.beaconFrame */
    bufferSize += 2;  /* u16 primitive->connectionInfo.associationReqFrameLength */
    bufferSize += primitive->connectionInfo.associationReqFrameLength; /* u8 primitive->connectionInfo.associationReqFrame */
    bufferSize += 2;  /* u16 primitive->connectionInfo.associationRspFrameLength */
    bufferSize += primitive->connectionInfo.associationRspFrameLength; /* u8 primitive->connectionInfo.associationRspFrame */
    bufferSize += 2;  /* u16 primitive->connectionInfo.assocScanInfoElementsLength */
    bufferSize += primitive->connectionInfo.assocScanInfoElementsLength; /* u8 primitive->connectionInfo.assocScanInfoElements */
    bufferSize += 2;  /* u16 primitive->connectionInfo.assocReqCapabilities */
    bufferSize += 2;  /* u16 primitive->connectionInfo.assocReqListenIntervalTu */
    bufferSize += 6;  /* u8 primitive->connectionInfo.assocReqApAddress.a[6] */
    bufferSize += 2;  /* u16 primitive->connectionInfo.assocReqInfoElementsLength */
    bufferSize += primitive->connectionInfo.assocReqInfoElementsLength; /* u8 primitive->connectionInfo.assocReqInfoElements */
    bufferSize += 2;  /* CsrWifiSmeIEEE80211Result primitive->connectionInfo.assocRspResult */
    bufferSize += 2;  /* u16 primitive->connectionInfo.assocRspCapabilityInfo */
    bufferSize += 2;  /* u16 primitive->connectionInfo.assocRspAssociationId */
    bufferSize += 2;  /* u16 primitive->connectionInfo.assocRspInfoElementsLength */
    bufferSize += primitive->connectionInfo.assocRspInfoElementsLength; /* u8 primitive->connectionInfo.assocRspInfoElements */
    return bufferSize;
}

/*
 * Serialise a CsrWifiSmeConnectionInfoGetCfm. Each of the six optional
 * frame/IE buffers is emitted only when its length field is non-zero;
 * field order must match ...Sizeof() and ...Des().
 * Returns ptr; *len is set to the number of bytes written.
 */
u8* CsrWifiSmeConnectionInfoGetCfmSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeConnectionInfoGetCfm *primitive = (CsrWifiSmeConnectionInfoGetCfm *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    CsrUint16Ser(ptr, len, (u16) primitive->status);
    CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.ssid.ssid, ((u16) (32)));
    CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.ssid.length);
    CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.bssid.a, ((u16) (6)));
    CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.networkType80211);
    CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.channelNumber);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.channelFrequency);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.authMode);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.pairwiseCipher);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.groupCipher);
    CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.ifIndex);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.atimWindowTu);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.beaconPeriodTu);
    CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.reassociation);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.beaconFrameLength);
    if (primitive->connectionInfo.beaconFrameLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.beaconFrame, ((u16) (primitive->connectionInfo.beaconFrameLength)));
    }
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.associationReqFrameLength);
    if (primitive->connectionInfo.associationReqFrameLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.associationReqFrame, ((u16) (primitive->connectionInfo.associationReqFrameLength)));
    }
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.associationRspFrameLength);
    if (primitive->connectionInfo.associationRspFrameLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.associationRspFrame, ((u16) (primitive->connectionInfo.associationRspFrameLength)));
    }
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocScanInfoElementsLength);
    if (primitive->connectionInfo.assocScanInfoElementsLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocScanInfoElements, ((u16) (primitive->connectionInfo.assocScanInfoElementsLength)));
    }
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocReqCapabilities);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocReqListenIntervalTu);
    CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocReqApAddress.a, ((u16) (6)));
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocReqInfoElementsLength);
    if (primitive->connectionInfo.assocReqInfoElementsLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocReqInfoElements, ((u16) (primitive->connectionInfo.assocReqInfoElementsLength)));
    }
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspResult);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspCapabilityInfo);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspAssociationId);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspInfoElementsLength);
    if (primitive->connectionInfo.assocRspInfoElementsLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocRspInfoElements, ((u16) (primitive->connectionInfo.assocRspInfoElementsLength)));
    }
    return(ptr);
}

/*
 * Deserialise a CsrWifiSmeConnectionInfoGetCfm (inverse of ...Ser()).
 * Allocates the primitive plus up to six optional buffers; absent buffers
 * are set to NULL. All are released by ...SerFree().
 * NOTE(review): none of the kmalloc results are NULL-checked, and the
 * 'length' parameter is unused (no bounds checking of the input buffer).
 */
void* CsrWifiSmeConnectionInfoGetCfmDes(u8 *buffer, size_t length)
{
    CsrWifiSmeConnectionInfoGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeConnectionInfoGetCfm), GFP_KERNEL);
    size_t offset;
    offset = 0;
    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
    CsrMemCpyDes(primitive->connectionInfo.ssid.ssid, buffer, &offset, ((u16) (32)));
    CsrUint8Des((u8 *) &primitive->connectionInfo.ssid.length, buffer, &offset);
    CsrMemCpyDes(primitive->connectionInfo.bssid.a, buffer, &offset, ((u16) (6)));
    CsrUint8Des((u8 *) &primitive->connectionInfo.networkType80211, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->connectionInfo.channelNumber, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.channelFrequency, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.authMode, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.pairwiseCipher, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.groupCipher, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->connectionInfo.ifIndex, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.atimWindowTu, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.beaconPeriodTu, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->connectionInfo.reassociation, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.beaconFrameLength, buffer, &offset);
    if (primitive->connectionInfo.beaconFrameLength)
    {
        primitive->connectionInfo.beaconFrame = kmalloc(primitive->connectionInfo.beaconFrameLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->connectionInfo.beaconFrame, buffer, &offset, ((u16) (primitive->connectionInfo.beaconFrameLength)));
    }
    else
    {
        primitive->connectionInfo.beaconFrame = NULL;
    }
    CsrUint16Des((u16 *) &primitive->connectionInfo.associationReqFrameLength, buffer, &offset);
    if (primitive->connectionInfo.associationReqFrameLength)
    {
        primitive->connectionInfo.associationReqFrame = kmalloc(primitive->connectionInfo.associationReqFrameLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->connectionInfo.associationReqFrame, buffer, &offset, ((u16) (primitive->connectionInfo.associationReqFrameLength)));
    }
    else
    {
        primitive->connectionInfo.associationReqFrame = NULL;
    }
    CsrUint16Des((u16 *) &primitive->connectionInfo.associationRspFrameLength, buffer, &offset);
    if (primitive->connectionInfo.associationRspFrameLength)
    {
        primitive->connectionInfo.associationRspFrame = kmalloc(primitive->connectionInfo.associationRspFrameLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->connectionInfo.associationRspFrame, buffer, &offset, ((u16) (primitive->connectionInfo.associationRspFrameLength)));
    }
    else
    {
        primitive->connectionInfo.associationRspFrame = NULL;
    }
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocScanInfoElementsLength, buffer, &offset);
    if (primitive->connectionInfo.assocScanInfoElementsLength)
    {
        primitive->connectionInfo.assocScanInfoElements = kmalloc(primitive->connectionInfo.assocScanInfoElementsLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->connectionInfo.assocScanInfoElements, buffer, &offset, ((u16) (primitive->connectionInfo.assocScanInfoElementsLength)));
    }
    else
    {
        primitive->connectionInfo.assocScanInfoElements = NULL;
    }
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocReqCapabilities, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocReqListenIntervalTu, buffer, &offset);
    CsrMemCpyDes(primitive->connectionInfo.assocReqApAddress.a, buffer, &offset, ((u16) (6)));
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocReqInfoElementsLength, buffer, &offset);
    if (primitive->connectionInfo.assocReqInfoElementsLength)
    {
        primitive->connectionInfo.assocReqInfoElements = kmalloc(primitive->connectionInfo.assocReqInfoElementsLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->connectionInfo.assocReqInfoElements, buffer, &offset, ((u16) (primitive->connectionInfo.assocReqInfoElementsLength)));
    }
    else
    {
        primitive->connectionInfo.assocReqInfoElements = NULL;
    }
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspResult, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspCapabilityInfo, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspAssociationId, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspInfoElementsLength, buffer, &offset);
    if (primitive->connectionInfo.assocRspInfoElementsLength)
    {
        primitive->connectionInfo.assocRspInfoElements = kmalloc(primitive->connectionInfo.assocRspInfoElementsLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->connectionInfo.assocRspInfoElements, buffer, &offset, ((u16) (primitive->connectionInfo.assocRspInfoElementsLength)));
    }
    else
    {
        primitive->connectionInfo.assocRspInfoElements = NULL;
    }
    return primitive;
}

/*
 * Free a CsrWifiSmeConnectionInfoGetCfm produced by ...Des(), including
 * all six optional buffers (kfree(NULL) is a no-op).
 */
void CsrWifiSmeConnectionInfoGetCfmSerFree(void *voidPrimitivePointer)
{
    CsrWifiSmeConnectionInfoGetCfm *primitive = (CsrWifiSmeConnectionInfoGetCfm *) voidPrimitivePointer;
    kfree(primitive->connectionInfo.beaconFrame);
    kfree(primitive->connectionInfo.associationReqFrame);
    kfree(primitive->connectionInfo.associationRspFrame);
    kfree(primitive->connectionInfo.assocScanInfoElements);
    kfree(primitive->connectionInfo.assocReqInfoElements);
    kfree(primitive->connectionInfo.assocRspInfoElements);
    kfree(primitive);
}

/* Compute the size in bytes of a serialised CsrWifiSmeConnectionQualityInd. */
size_t CsrWifiSmeConnectionQualityIndSizeof(void *msg)
{
    size_t bufferSize = 2; /* Calculate the Size of the Serialised Data.
Could be more efficient (Try 9) */
    bufferSize += 2; /* u16 primitive->interfaceTag */
    bufferSize += 2; /* s16 primitive->linkQuality.unifiRssi */
    bufferSize += 2; /* s16 primitive->linkQuality.unifiSnr */
    return bufferSize;
}

/*
 * Serialise a CsrWifiSmeConnectionQualityInd.
 * NOTE(review): the s16 RSSI/SNR fields go through (u16) casts; the
 * two's-complement bit pattern is preserved across the wire and the
 * matching ...Des() casts them back the same way.
 */
u8* CsrWifiSmeConnectionQualityIndSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeConnectionQualityInd *primitive = (CsrWifiSmeConnectionQualityInd *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    CsrUint16Ser(ptr, len, (u16) primitive->linkQuality.unifiRssi);
    CsrUint16Ser(ptr, len, (u16) primitive->linkQuality.unifiSnr);
    return(ptr);
}

/*
 * Deserialise a CsrWifiSmeConnectionQualityInd; returns a kmalloc'd
 * primitive owned by the caller.
 * NOTE(review): kmalloc result not NULL-checked before use.
 */
void* CsrWifiSmeConnectionQualityIndDes(u8 *buffer, size_t length)
{
    CsrWifiSmeConnectionQualityInd *primitive = kmalloc(sizeof(CsrWifiSmeConnectionQualityInd), GFP_KERNEL);
    size_t offset;
    offset = 0;
    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->linkQuality.unifiRssi, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->linkQuality.unifiSnr, buffer, &offset);
    return primitive;
}

/* Compute the size in bytes of a serialised CsrWifiSmeConnectionStatsGetCfm. */
size_t CsrWifiSmeConnectionStatsGetCfmSizeof(void *msg)
{
    size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. Could be more efficient (Try 101) */
    bufferSize += 2; /* u16 primitive->interfaceTag */
    bufferSize += 2; /* CsrResult primitive->status */
    bufferSize += 1; /* u8 primitive->connectionStats.unifiTxDataRate */
    bufferSize += 1; /* u8 primitive->connectionStats.unifiRxDataRate */
    bufferSize += 4; /* u32 primitive->connectionStats.dot11RetryCount */
    bufferSize += 4; /* u32 primitive->connectionStats.dot11MultipleRetryCount */
    bufferSize += 4; /* u32 primitive->connectionStats.dot11AckFailureCount */
    bufferSize += 4; /* u32 primitive->connectionStats.dot11FrameDuplicateCount */
    bufferSize += 4; /* u32 primitive->connectionStats.dot11FcsErrorCount */
    bufferSize += 4; /* u32 primitive->connectionStats.dot11RtsSuccessCount */
    bufferSize += 4; /* u32 primitive->connectionStats.dot11RtsFailureCount */
    bufferSize += 4; /* u32 primitive->connectionStats.dot11FailedCount */
    bufferSize += 4; /* u32 primitive->connectionStats.dot11TransmittedFragmentCount */
    bufferSize += 4; /* u32 primitive->connectionStats.dot11TransmittedFrameCount */
    bufferSize += 4; /* u32 primitive->connectionStats.dot11WepExcludedCount */
    bufferSize += 4; /* u32 primitive->connectionStats.dot11WepIcvErrorCount */
    bufferSize += 4; /* u32 primitive->connectionStats.dot11WepUndecryptableCount */
    bufferSize += 4; /* u32 primitive->connectionStats.dot11MulticastReceivedFrameCount */
    bufferSize += 4; /* u32 primitive->connectionStats.dot11MulticastTransmittedFrameCount */
    bufferSize += 4; /* u32 primitive->connectionStats.dot11ReceivedFragmentCount */
    bufferSize += 4; /* u32 primitive->connectionStats.dot11Rsna4WayHandshakeFailures */
    bufferSize += 4; /* u32 primitive->connectionStats.dot11RsnaTkipCounterMeasuresInvoked */
    bufferSize += 4; /* u32 primitive->connectionStats.dot11RsnaStatsTkipLocalMicFailures */
    bufferSize += 4; /* u32 primitive->connectionStats.dot11RsnaStatsTkipReplays */
    bufferSize += 4; /* u32 primitive->connectionStats.dot11RsnaStatsTkipIcvErrors */
    bufferSize += 4; /* u32 primitive->connectionStats.dot11RsnaStatsCcmpReplays */
    bufferSize += 4; /* u32 primitive->connectionStats.dot11RsnaStatsCcmpDecryptErrors */
    return bufferSize;
}

/*
 * Serialise a CsrWifiSmeConnectionStatsGetCfm. Field order must match
 * ...Sizeof() and ...Des(). Returns ptr; *len = bytes written.
 */
u8* CsrWifiSmeConnectionStatsGetCfmSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeConnectionStatsGetCfm *primitive = (CsrWifiSmeConnectionStatsGetCfm *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    CsrUint16Ser(ptr, len, (u16) primitive->status);
    CsrUint8Ser(ptr, len, (u8) primitive->connectionStats.unifiTxDataRate);
    CsrUint8Ser(ptr, len, (u8) primitive->connectionStats.unifiRxDataRate);
    CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11RetryCount);
    CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11MultipleRetryCount);
    CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11AckFailureCount);
    CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11FrameDuplicateCount);
    CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11FcsErrorCount);
    CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11RtsSuccessCount);
    CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11RtsFailureCount);
    CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11FailedCount);
    CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11TransmittedFragmentCount);
    CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11TransmittedFrameCount);
    CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11WepExcludedCount);
    CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11WepIcvErrorCount);
    CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11WepUndecryptableCount);
    CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11MulticastReceivedFrameCount);
    CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11MulticastTransmittedFrameCount);
    CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11ReceivedFragmentCount);
    CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11Rsna4WayHandshakeFailures);
    CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11RsnaTkipCounterMeasuresInvoked);
    CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11RsnaStatsTkipLocalMicFailures);
    CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11RsnaStatsTkipReplays);
    CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11RsnaStatsTkipIcvErrors);
    CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11RsnaStatsCcmpReplays);
    CsrUint32Ser(ptr, len, (u32) primitive->connectionStats.dot11RsnaStatsCcmpDecryptErrors);
    return(ptr);
}

/*
 * Deserialise a CsrWifiSmeConnectionStatsGetCfm (inverse of ...Ser()).
 * Returns a kmalloc'd primitive owned by the caller.
 * NOTE(review): kmalloc result not NULL-checked; 'length' unused.
 */
void* CsrWifiSmeConnectionStatsGetCfmDes(u8 *buffer, size_t length)
{
    CsrWifiSmeConnectionStatsGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeConnectionStatsGetCfm), GFP_KERNEL);
    size_t offset;
    offset = 0;
    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->connectionStats.unifiTxDataRate, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->connectionStats.unifiRxDataRate, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->connectionStats.dot11RetryCount, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->connectionStats.dot11MultipleRetryCount, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->connectionStats.dot11AckFailureCount, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->connectionStats.dot11FrameDuplicateCount, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->connectionStats.dot11FcsErrorCount, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->connectionStats.dot11RtsSuccessCount, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->connectionStats.dot11RtsFailureCount, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->connectionStats.dot11FailedCount, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->connectionStats.dot11TransmittedFragmentCount, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->connectionStats.dot11TransmittedFrameCount, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->connectionStats.dot11WepExcludedCount, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->connectionStats.dot11WepIcvErrorCount, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->connectionStats.dot11WepUndecryptableCount, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->connectionStats.dot11MulticastReceivedFrameCount, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->connectionStats.dot11MulticastTransmittedFrameCount, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->connectionStats.dot11ReceivedFragmentCount, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->connectionStats.dot11Rsna4WayHandshakeFailures, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->connectionStats.dot11RsnaTkipCounterMeasuresInvoked, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->connectionStats.dot11RsnaStatsTkipLocalMicFailures, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->connectionStats.dot11RsnaStatsTkipReplays, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->connectionStats.dot11RsnaStatsTkipIcvErrors, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->connectionStats.dot11RsnaStatsCcmpReplays, buffer, &offset);
    CsrUint32Des((u32 *) &primitive->connectionStats.dot11RsnaStatsCcmpDecryptErrors, buffer, &offset);
    return primitive;
}

/* Compute the size in bytes of a serialised CsrWifiSmeDisconnectCfm. */
size_t CsrWifiSmeDisconnectCfmSizeof(void *msg)
{
    size_t bufferSize = 2; /* Calculate the Size of the Serialised Data.
Could be more efficient (Try 7) */ bufferSize += 2; /* u16 primitive->interfaceTag */ bufferSize += 2; /* CsrResult primitive->status */ return bufferSize; } u8* CsrWifiSmeDisconnectCfmSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeDisconnectCfm *primitive = (CsrWifiSmeDisconnectCfm *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag); CsrUint16Ser(ptr, len, (u16) primitive->status); return(ptr); } void* CsrWifiSmeDisconnectCfmDes(u8 *buffer, size_t length) { CsrWifiSmeDisconnectCfm *primitive = kmalloc(sizeof(CsrWifiSmeDisconnectCfm), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset); CsrUint16Des((u16 *) &primitive->status, buffer, &offset); return primitive; } size_t CsrWifiSmeHostConfigGetCfmSizeof(void *msg) { size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. Could be more efficient (Try 10) */ bufferSize += 2; /* u16 primitive->interfaceTag */ bufferSize += 2; /* CsrResult primitive->status */ bufferSize += 1; /* CsrWifiSmeHostPowerMode primitive->hostConfig.powerMode */ bufferSize += 2; /* u16 primitive->hostConfig.applicationDataPeriodMs */ return bufferSize; } u8* CsrWifiSmeHostConfigGetCfmSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeHostConfigGetCfm *primitive = (CsrWifiSmeHostConfigGetCfm *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag); CsrUint16Ser(ptr, len, (u16) primitive->status); CsrUint8Ser(ptr, len, (u8) primitive->hostConfig.powerMode); CsrUint16Ser(ptr, len, (u16) primitive->hostConfig.applicationDataPeriodMs); return(ptr); } void* CsrWifiSmeHostConfigGetCfmDes(u8 *buffer, size_t length) { CsrWifiSmeHostConfigGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeHostConfigGetCfm), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); 
CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset); CsrUint16Des((u16 *) &primitive->status, buffer, &offset); CsrUint8Des((u8 *) &primitive->hostConfig.powerMode, buffer, &offset); CsrUint16Des((u16 *) &primitive->hostConfig.applicationDataPeriodMs, buffer, &offset); return primitive; } size_t CsrWifiSmeHostConfigSetCfmSizeof(void *msg) { size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. Could be more efficient (Try 7) */ bufferSize += 2; /* u16 primitive->interfaceTag */ bufferSize += 2; /* CsrResult primitive->status */ return bufferSize; } u8* CsrWifiSmeHostConfigSetCfmSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeHostConfigSetCfm *primitive = (CsrWifiSmeHostConfigSetCfm *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag); CsrUint16Ser(ptr, len, (u16) primitive->status); return(ptr); } void* CsrWifiSmeHostConfigSetCfmDes(u8 *buffer, size_t length) { CsrWifiSmeHostConfigSetCfm *primitive = kmalloc(sizeof(CsrWifiSmeHostConfigSetCfm), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset); CsrUint16Des((u16 *) &primitive->status, buffer, &offset); return primitive; } size_t CsrWifiSmeIbssStationIndSizeof(void *msg) { size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 10) */
    bufferSize += 6; /* u8 primitive->address.a[6] */
    bufferSize += 1; /* u8 primitive->isconnected */
    return bufferSize;
}

/* Serialise a CsrWifiSmeIbssStationInd into ptr; *len receives bytes written. */
u8* CsrWifiSmeIbssStationIndSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeIbssStationInd *primitive = (CsrWifiSmeIbssStationInd *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrMemCpySer(ptr, len, (const void *) primitive->address.a, ((u16) (6)));
    CsrUint8Ser(ptr, len, (u8) primitive->isconnected);
    return(ptr);
}

/* Deserialise a CsrWifiSmeIbssStationInd from buffer.
 * NOTE(review): kmalloc result unchecked — OOM would dereference NULL. */
void* CsrWifiSmeIbssStationIndDes(u8 *buffer, size_t length)
{
    CsrWifiSmeIbssStationInd *primitive = kmalloc(sizeof(CsrWifiSmeIbssStationInd), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrMemCpyDes(primitive->address.a, buffer, &offset, ((u16) (6)));
    CsrUint8Des((u8 *) &primitive->isconnected, buffer, &offset);

    return primitive;
}

/* Size in bytes of a serialised CsrWifiSmeKeyCfm. */
size_t CsrWifiSmeKeyCfmSizeof(void *msg)
{
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data. Could be more efficient (Try 15) */
    bufferSize += 2; /* u16 primitive->interfaceTag */
    bufferSize += 2; /* CsrResult primitive->status */
    bufferSize += 1; /* CsrWifiSmeListAction primitive->action */
    bufferSize += 1; /* CsrWifiSmeKeyType primitive->keyType */
    bufferSize += 6; /* u8 primitive->peerMacAddress.a[6] */
    return bufferSize;
}

/* Serialise a CsrWifiSmeKeyCfm into ptr; *len receives bytes written. */
u8* CsrWifiSmeKeyCfmSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeKeyCfm *primitive = (CsrWifiSmeKeyCfm *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    CsrUint16Ser(ptr, len, (u16) primitive->status);
    CsrUint8Ser(ptr, len, (u8) primitive->action);
    CsrUint8Ser(ptr, len, (u8) primitive->keyType);
    CsrMemCpySer(ptr, len, (const void *) primitive->peerMacAddress.a, ((u16) (6)));
    return(ptr);
}

/* Deserialise a CsrWifiSmeKeyCfm from buffer. */
void* CsrWifiSmeKeyCfmDes(u8 *buffer, size_t length)
{
    CsrWifiSmeKeyCfm *primitive = kmalloc(sizeof(CsrWifiSmeKeyCfm), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->action, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->keyType, buffer, &offset);
    CsrMemCpyDes(primitive->peerMacAddress.a, buffer, &offset, ((u16) (6)));

    return primitive;
}

/* Size in bytes of a serialised CsrWifiSmeLinkQualityGetCfm. */
size_t CsrWifiSmeLinkQualityGetCfmSizeof(void *msg)
{
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data. Could be more efficient (Try 11) */
    bufferSize += 2; /* u16 primitive->interfaceTag */
    bufferSize += 2; /* CsrResult primitive->status */
    bufferSize += 2; /* s16 primitive->linkQuality.unifiRssi */
    bufferSize += 2; /* s16 primitive->linkQuality.unifiSnr */
    return bufferSize;
}

/* Serialise a CsrWifiSmeLinkQualityGetCfm into ptr; *len receives bytes written.
 * The signed RSSI/SNR values are carried in the u16 wire slots via cast. */
u8* CsrWifiSmeLinkQualityGetCfmSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeLinkQualityGetCfm *primitive = (CsrWifiSmeLinkQualityGetCfm *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    CsrUint16Ser(ptr, len, (u16) primitive->status);
    CsrUint16Ser(ptr, len, (u16) primitive->linkQuality.unifiRssi);
    CsrUint16Ser(ptr, len, (u16) primitive->linkQuality.unifiSnr);
    return(ptr);
}

/* Deserialise a CsrWifiSmeLinkQualityGetCfm from buffer. */
void* CsrWifiSmeLinkQualityGetCfmDes(u8 *buffer, size_t length)
{
    CsrWifiSmeLinkQualityGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeLinkQualityGetCfm), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->linkQuality.unifiRssi, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->linkQuality.unifiSnr, buffer, &offset);

    return primitive;
}

/* Size in bytes of a serialised CsrWifiSmeMediaStatusInd (variable-length:
 * depends on the frame/IE lengths carried in the connectionInfo). */
size_t CsrWifiSmeMediaStatusIndSizeof(void *msg)
{
    CsrWifiSmeMediaStatusInd *primitive = (CsrWifiSmeMediaStatusInd *) msg;
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data.
Could be more efficient (Try 99) */
    bufferSize += 2;  /* u16 primitive->interfaceTag */
    bufferSize += 1;  /* CsrWifiSmeMediaStatus primitive->mediaStatus */
    bufferSize += 32; /* u8 primitive->connectionInfo.ssid.ssid[32] */
    bufferSize += 1;  /* u8 primitive->connectionInfo.ssid.length */
    bufferSize += 6;  /* u8 primitive->connectionInfo.bssid.a[6] */
    bufferSize += 1;  /* CsrWifiSme80211NetworkType primitive->connectionInfo.networkType80211 */
    bufferSize += 1;  /* u8 primitive->connectionInfo.channelNumber */
    bufferSize += 2;  /* u16 primitive->connectionInfo.channelFrequency */
    bufferSize += 2;  /* CsrWifiSmeAuthMode primitive->connectionInfo.authMode */
    bufferSize += 2;  /* CsrWifiSmeEncryption primitive->connectionInfo.pairwiseCipher */
    bufferSize += 2;  /* CsrWifiSmeEncryption primitive->connectionInfo.groupCipher */
    bufferSize += 1;  /* CsrWifiSmeRadioIF primitive->connectionInfo.ifIndex */
    bufferSize += 2;  /* u16 primitive->connectionInfo.atimWindowTu */
    bufferSize += 2;  /* u16 primitive->connectionInfo.beaconPeriodTu */
    bufferSize += 1;  /* u8 primitive->connectionInfo.reassociation */
    bufferSize += 2;  /* u16 primitive->connectionInfo.beaconFrameLength */
    bufferSize += primitive->connectionInfo.beaconFrameLength; /* u8 primitive->connectionInfo.beaconFrame */
    bufferSize += 2;  /* u16 primitive->connectionInfo.associationReqFrameLength */
    bufferSize += primitive->connectionInfo.associationReqFrameLength; /* u8 primitive->connectionInfo.associationReqFrame */
    bufferSize += 2;  /* u16 primitive->connectionInfo.associationRspFrameLength */
    bufferSize += primitive->connectionInfo.associationRspFrameLength; /* u8 primitive->connectionInfo.associationRspFrame */
    bufferSize += 2;  /* u16 primitive->connectionInfo.assocScanInfoElementsLength */
    bufferSize += primitive->connectionInfo.assocScanInfoElementsLength; /* u8 primitive->connectionInfo.assocScanInfoElements */
    bufferSize += 2;  /* u16 primitive->connectionInfo.assocReqCapabilities */
    bufferSize += 2;  /* u16 primitive->connectionInfo.assocReqListenIntervalTu */
    bufferSize += 6;  /* u8 primitive->connectionInfo.assocReqApAddress.a[6] */
    bufferSize += 2;  /* u16 primitive->connectionInfo.assocReqInfoElementsLength */
    bufferSize += primitive->connectionInfo.assocReqInfoElementsLength; /* u8 primitive->connectionInfo.assocReqInfoElements */
    bufferSize += 2;  /* CsrWifiSmeIEEE80211Result primitive->connectionInfo.assocRspResult */
    bufferSize += 2;  /* u16 primitive->connectionInfo.assocRspCapabilityInfo */
    bufferSize += 2;  /* u16 primitive->connectionInfo.assocRspAssociationId */
    bufferSize += 2;  /* u16 primitive->connectionInfo.assocRspInfoElementsLength */
    bufferSize += primitive->connectionInfo.assocRspInfoElementsLength; /* u8 primitive->connectionInfo.assocRspInfoElements */
    bufferSize += 2;  /* CsrWifiSmeIEEE80211Reason primitive->disassocReason */
    bufferSize += 2;  /* CsrWifiSmeIEEE80211Reason primitive->deauthReason */
    return bufferSize;
}

/* Serialise a CsrWifiSmeMediaStatusInd into ptr; *len receives bytes written.
 * Each variable-length field is written as a u16 length followed by the bytes
 * (payload omitted when the length is zero). */
u8* CsrWifiSmeMediaStatusIndSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeMediaStatusInd *primitive = (CsrWifiSmeMediaStatusInd *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    CsrUint8Ser(ptr, len, (u8) primitive->mediaStatus);
    CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.ssid.ssid, ((u16) (32)));
    CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.ssid.length);
    CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.bssid.a, ((u16) (6)));
    CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.networkType80211);
    CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.channelNumber);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.channelFrequency);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.authMode);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.pairwiseCipher);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.groupCipher);
    CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.ifIndex);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.atimWindowTu);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.beaconPeriodTu);
    CsrUint8Ser(ptr, len, (u8) primitive->connectionInfo.reassociation);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.beaconFrameLength);
    if (primitive->connectionInfo.beaconFrameLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.beaconFrame, ((u16) (primitive->connectionInfo.beaconFrameLength)));
    }
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.associationReqFrameLength);
    if (primitive->connectionInfo.associationReqFrameLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.associationReqFrame, ((u16) (primitive->connectionInfo.associationReqFrameLength)));
    }
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.associationRspFrameLength);
    if (primitive->connectionInfo.associationRspFrameLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.associationRspFrame, ((u16) (primitive->connectionInfo.associationRspFrameLength)));
    }
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocScanInfoElementsLength);
    if (primitive->connectionInfo.assocScanInfoElementsLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocScanInfoElements, ((u16) (primitive->connectionInfo.assocScanInfoElementsLength)));
    }
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocReqCapabilities);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocReqListenIntervalTu);
    CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocReqApAddress.a, ((u16) (6)));
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocReqInfoElementsLength);
    if (primitive->connectionInfo.assocReqInfoElementsLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocReqInfoElements, ((u16) (primitive->connectionInfo.assocReqInfoElementsLength)));
    }
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspResult);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspCapabilityInfo);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspAssociationId);
    CsrUint16Ser(ptr, len, (u16) primitive->connectionInfo.assocRspInfoElementsLength);
    if (primitive->connectionInfo.assocRspInfoElementsLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->connectionInfo.assocRspInfoElements, ((u16) (primitive->connectionInfo.assocRspInfoElementsLength)));
    }
    CsrUint16Ser(ptr, len, (u16) primitive->disassocReason);
    CsrUint16Ser(ptr, len, (u16) primitive->deauthReason);
    return(ptr);
}

/* Deserialise a CsrWifiSmeMediaStatusInd from buffer. Variable-length fields
 * are kmalloc'd (or set to NULL when empty); the matching SerFree below owns
 * releasing them. NOTE(review): allocations are unchecked. */
void* CsrWifiSmeMediaStatusIndDes(u8 *buffer, size_t length)
{
    CsrWifiSmeMediaStatusInd *primitive = kmalloc(sizeof(CsrWifiSmeMediaStatusInd), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->mediaStatus, buffer, &offset);
    CsrMemCpyDes(primitive->connectionInfo.ssid.ssid, buffer, &offset, ((u16) (32)));
    CsrUint8Des((u8 *) &primitive->connectionInfo.ssid.length, buffer, &offset);
    CsrMemCpyDes(primitive->connectionInfo.bssid.a, buffer, &offset, ((u16) (6)));
    CsrUint8Des((u8 *) &primitive->connectionInfo.networkType80211, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->connectionInfo.channelNumber, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.channelFrequency, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.authMode, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.pairwiseCipher, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.groupCipher, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->connectionInfo.ifIndex, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.atimWindowTu, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.beaconPeriodTu, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->connectionInfo.reassociation, buffer, &offset);
    CsrUint16Des((u16 *)
&primitive->connectionInfo.beaconFrameLength, buffer, &offset);
    if (primitive->connectionInfo.beaconFrameLength)
    {
        primitive->connectionInfo.beaconFrame = kmalloc(primitive->connectionInfo.beaconFrameLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->connectionInfo.beaconFrame, buffer, &offset, ((u16) (primitive->connectionInfo.beaconFrameLength)));
    }
    else
    {
        primitive->connectionInfo.beaconFrame = NULL;
    }
    CsrUint16Des((u16 *) &primitive->connectionInfo.associationReqFrameLength, buffer, &offset);
    if (primitive->connectionInfo.associationReqFrameLength)
    {
        primitive->connectionInfo.associationReqFrame = kmalloc(primitive->connectionInfo.associationReqFrameLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->connectionInfo.associationReqFrame, buffer, &offset, ((u16) (primitive->connectionInfo.associationReqFrameLength)));
    }
    else
    {
        primitive->connectionInfo.associationReqFrame = NULL;
    }
    CsrUint16Des((u16 *) &primitive->connectionInfo.associationRspFrameLength, buffer, &offset);
    if (primitive->connectionInfo.associationRspFrameLength)
    {
        primitive->connectionInfo.associationRspFrame = kmalloc(primitive->connectionInfo.associationRspFrameLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->connectionInfo.associationRspFrame, buffer, &offset, ((u16) (primitive->connectionInfo.associationRspFrameLength)));
    }
    else
    {
        primitive->connectionInfo.associationRspFrame = NULL;
    }
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocScanInfoElementsLength, buffer, &offset);
    if (primitive->connectionInfo.assocScanInfoElementsLength)
    {
        primitive->connectionInfo.assocScanInfoElements = kmalloc(primitive->connectionInfo.assocScanInfoElementsLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->connectionInfo.assocScanInfoElements, buffer, &offset, ((u16) (primitive->connectionInfo.assocScanInfoElementsLength)));
    }
    else
    {
        primitive->connectionInfo.assocScanInfoElements = NULL;
    }
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocReqCapabilities, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocReqListenIntervalTu, buffer, &offset);
    CsrMemCpyDes(primitive->connectionInfo.assocReqApAddress.a, buffer, &offset, ((u16) (6)));
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocReqInfoElementsLength, buffer, &offset);
    if (primitive->connectionInfo.assocReqInfoElementsLength)
    {
        primitive->connectionInfo.assocReqInfoElements = kmalloc(primitive->connectionInfo.assocReqInfoElementsLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->connectionInfo.assocReqInfoElements, buffer, &offset, ((u16) (primitive->connectionInfo.assocReqInfoElementsLength)));
    }
    else
    {
        primitive->connectionInfo.assocReqInfoElements = NULL;
    }
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspResult, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspCapabilityInfo, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspAssociationId, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->connectionInfo.assocRspInfoElementsLength, buffer, &offset);
    if (primitive->connectionInfo.assocRspInfoElementsLength)
    {
        primitive->connectionInfo.assocRspInfoElements = kmalloc(primitive->connectionInfo.assocRspInfoElementsLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->connectionInfo.assocRspInfoElements, buffer, &offset, ((u16) (primitive->connectionInfo.assocRspInfoElementsLength)));
    }
    else
    {
        primitive->connectionInfo.assocRspInfoElements = NULL;
    }
    CsrUint16Des((u16 *) &primitive->disassocReason, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->deauthReason, buffer, &offset);

    return primitive;
}

/* Free a deserialised CsrWifiSmeMediaStatusInd and all its kmalloc'd
 * variable-length members (kfree(NULL) is a safe no-op for empty fields). */
void CsrWifiSmeMediaStatusIndSerFree(void *voidPrimitivePointer)
{
    CsrWifiSmeMediaStatusInd *primitive = (CsrWifiSmeMediaStatusInd *) voidPrimitivePointer;
    kfree(primitive->connectionInfo.beaconFrame);
    kfree(primitive->connectionInfo.associationReqFrame);
    kfree(primitive->connectionInfo.associationRspFrame);
    kfree(primitive->connectionInfo.assocScanInfoElements);
    kfree(primitive->connectionInfo.assocReqInfoElements);
    kfree(primitive->connectionInfo.assocRspInfoElements);
    kfree(primitive);
}

/* Size in bytes of a serialised CsrWifiSmeMibConfigGetCfm. */
size_t CsrWifiSmeMibConfigGetCfmSizeof(void *msg)
{
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data. Could be more efficient (Try 13) */
    bufferSize += 2; /* CsrResult primitive->status */
    bufferSize += 1; /* u8 primitive->mibConfig.unifiFixMaxTxDataRate */
    bufferSize += 1; /* u8 primitive->mibConfig.unifiFixTxDataRate */
    bufferSize += 2; /* u16 primitive->mibConfig.dot11RtsThreshold */
    bufferSize += 2; /* u16 primitive->mibConfig.dot11FragmentationThreshold */
    bufferSize += 2; /* u16 primitive->mibConfig.dot11CurrentTxPowerLevel */
    return bufferSize;
}

/* Serialise a CsrWifiSmeMibConfigGetCfm into ptr; *len receives bytes written. */
u8* CsrWifiSmeMibConfigGetCfmSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeMibConfigGetCfm *primitive = (CsrWifiSmeMibConfigGetCfm *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->status);
    CsrUint8Ser(ptr, len, (u8) primitive->mibConfig.unifiFixMaxTxDataRate);
    CsrUint8Ser(ptr, len, (u8) primitive->mibConfig.unifiFixTxDataRate);
    CsrUint16Ser(ptr, len, (u16) primitive->mibConfig.dot11RtsThreshold);
    CsrUint16Ser(ptr, len, (u16) primitive->mibConfig.dot11FragmentationThreshold);
    CsrUint16Ser(ptr, len, (u16) primitive->mibConfig.dot11CurrentTxPowerLevel);
    return(ptr);
}

/* Deserialise a CsrWifiSmeMibConfigGetCfm from buffer. */
void* CsrWifiSmeMibConfigGetCfmDes(u8 *buffer, size_t length)
{
    CsrWifiSmeMibConfigGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeMibConfigGetCfm), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->mibConfig.unifiFixMaxTxDataRate, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->mibConfig.unifiFixTxDataRate, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->mibConfig.dot11RtsThreshold, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->mibConfig.dot11FragmentationThreshold, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->mibConfig.dot11CurrentTxPowerLevel, buffer,
&offset);

    return primitive;
}

/* Size in bytes of a serialised CsrWifiSmeMibGetCfm (variable: depends on
 * the MIB attribute payload length). */
size_t CsrWifiSmeMibGetCfmSizeof(void *msg)
{
    CsrWifiSmeMibGetCfm *primitive = (CsrWifiSmeMibGetCfm *) msg;
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data. Could be more efficient (Try 8) */
    bufferSize += 2; /* CsrResult primitive->status */
    bufferSize += 2; /* u16 primitive->mibAttributeLength */
    bufferSize += primitive->mibAttributeLength; /* u8 primitive->mibAttribute */
    return bufferSize;
}

/* Serialise a CsrWifiSmeMibGetCfm into ptr; *len receives bytes written. */
u8* CsrWifiSmeMibGetCfmSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeMibGetCfm *primitive = (CsrWifiSmeMibGetCfm *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->status);
    CsrUint16Ser(ptr, len, (u16) primitive->mibAttributeLength);
    if (primitive->mibAttributeLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->mibAttribute, ((u16) (primitive->mibAttributeLength)));
    }
    return(ptr);
}

/* Deserialise a CsrWifiSmeMibGetCfm from buffer; mibAttribute is kmalloc'd
 * (NULL when empty) and released by the SerFree below.
 * NOTE(review): kmalloc results are unchecked. */
void* CsrWifiSmeMibGetCfmDes(u8 *buffer, size_t length)
{
    CsrWifiSmeMibGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeMibGetCfm), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->mibAttributeLength, buffer, &offset);
    if (primitive->mibAttributeLength)
    {
        primitive->mibAttribute = kmalloc(primitive->mibAttributeLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->mibAttribute, buffer, &offset, ((u16) (primitive->mibAttributeLength)));
    }
    else
    {
        primitive->mibAttribute = NULL;
    }

    return primitive;
}

/* Free a deserialised CsrWifiSmeMibGetCfm and its attribute payload. */
void CsrWifiSmeMibGetCfmSerFree(void *voidPrimitivePointer)
{
    CsrWifiSmeMibGetCfm *primitive = (CsrWifiSmeMibGetCfm *) voidPrimitivePointer;
    kfree(primitive->mibAttribute);
    kfree(primitive);
}

/* Size in bytes of a serialised CsrWifiSmeMibGetNextCfm (variable-length). */
size_t CsrWifiSmeMibGetNextCfmSizeof(void *msg)
{
    CsrWifiSmeMibGetNextCfm *primitive = (CsrWifiSmeMibGetNextCfm *) msg;
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data. Could be more efficient (Try 8) */
    bufferSize += 2; /* CsrResult primitive->status */
    bufferSize += 2; /* u16 primitive->mibAttributeLength */
    bufferSize += primitive->mibAttributeLength; /* u8 primitive->mibAttribute */
    return bufferSize;
}

/* Serialise a CsrWifiSmeMibGetNextCfm into ptr; *len receives bytes written. */
u8* CsrWifiSmeMibGetNextCfmSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeMibGetNextCfm *primitive = (CsrWifiSmeMibGetNextCfm *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->status);
    CsrUint16Ser(ptr, len, (u16) primitive->mibAttributeLength);
    if (primitive->mibAttributeLength)
    {
        CsrMemCpySer(ptr, len, (const void *) primitive->mibAttribute, ((u16) (primitive->mibAttributeLength)));
    }
    return(ptr);
}

/* Deserialise a CsrWifiSmeMibGetNextCfm from buffer (payload kmalloc'd or NULL). */
void* CsrWifiSmeMibGetNextCfmDes(u8 *buffer, size_t length)
{
    CsrWifiSmeMibGetNextCfm *primitive = kmalloc(sizeof(CsrWifiSmeMibGetNextCfm), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->mibAttributeLength, buffer, &offset);
    if (primitive->mibAttributeLength)
    {
        primitive->mibAttribute = kmalloc(primitive->mibAttributeLength, GFP_KERNEL);
        CsrMemCpyDes(primitive->mibAttribute, buffer, &offset, ((u16) (primitive->mibAttributeLength)));
    }
    else
    {
        primitive->mibAttribute = NULL;
    }

    return primitive;
}

/* Free a deserialised CsrWifiSmeMibGetNextCfm and its attribute payload. */
void CsrWifiSmeMibGetNextCfmSerFree(void *voidPrimitivePointer)
{
    CsrWifiSmeMibGetNextCfm *primitive = (CsrWifiSmeMibGetNextCfm *) voidPrimitivePointer;
    kfree(primitive->mibAttribute);
    kfree(primitive);
}

/* Size in bytes of a serialised CsrWifiSmeMicFailureInd. */
size_t CsrWifiSmeMicFailureIndSizeof(void *msg)
{
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data.
Could be more efficient (Try 15) */
    bufferSize += 2; /* u16 primitive->interfaceTag */
    bufferSize += 1; /* u8 primitive->secondFailure */
    bufferSize += 2; /* u16 primitive->count */
    bufferSize += 6; /* u8 primitive->address.a[6] */
    bufferSize += 1; /* CsrWifiSmeKeyType primitive->keyType */
    return bufferSize;
}

/* Serialise a CsrWifiSmeMicFailureInd into ptr; *len receives bytes written. */
u8* CsrWifiSmeMicFailureIndSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeMicFailureInd *primitive = (CsrWifiSmeMicFailureInd *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    CsrUint8Ser(ptr, len, (u8) primitive->secondFailure);
    CsrUint16Ser(ptr, len, (u16) primitive->count);
    CsrMemCpySer(ptr, len, (const void *) primitive->address.a, ((u16) (6)));
    CsrUint8Ser(ptr, len, (u8) primitive->keyType);
    return(ptr);
}

/* Deserialise a CsrWifiSmeMicFailureInd from buffer.
 * NOTE(review): kmalloc result unchecked — OOM would dereference NULL. */
void* CsrWifiSmeMicFailureIndDes(u8 *buffer, size_t length)
{
    CsrWifiSmeMicFailureInd *primitive = kmalloc(sizeof(CsrWifiSmeMicFailureInd), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->secondFailure, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->count, buffer, &offset);
    CsrMemCpyDes(primitive->address.a, buffer, &offset, ((u16) (6)));
    CsrUint8Des((u8 *) &primitive->keyType, buffer, &offset);

    return primitive;
}

/* Size in bytes of a serialised CsrWifiSmeMulticastAddressCfm (variable:
 * 6 bytes per returned MAC address). */
size_t CsrWifiSmeMulticastAddressCfmSizeof(void *msg)
{
    CsrWifiSmeMulticastAddressCfm *primitive = (CsrWifiSmeMulticastAddressCfm *) msg;
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data.
Could be more efficient (Try 15) */
    bufferSize += 2; /* u16 primitive->interfaceTag */
    bufferSize += 2; /* CsrResult primitive->status */
    bufferSize += 1; /* CsrWifiSmeListAction primitive->action */
    bufferSize += 1; /* u8 primitive->getAddressesCount */
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->getAddressesCount; i1++)
        {
            bufferSize += 6; /* u8 primitive->getAddresses[i1].a[6] */
        }
    }
    return bufferSize;
}

/* Serialise a CsrWifiSmeMulticastAddressCfm into ptr; *len receives bytes written. */
u8* CsrWifiSmeMulticastAddressCfmSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmeMulticastAddressCfm *primitive = (CsrWifiSmeMulticastAddressCfm *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    CsrUint16Ser(ptr, len, (u16) primitive->status);
    CsrUint8Ser(ptr, len, (u8) primitive->action);
    CsrUint8Ser(ptr, len, (u8) primitive->getAddressesCount);
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->getAddressesCount; i1++)
        {
            CsrMemCpySer(ptr, len, (const void *) primitive->getAddresses[i1].a, ((u16) (6)));
        }
    }
    return(ptr);
}

/* Deserialise a CsrWifiSmeMulticastAddressCfm from buffer; the address array
 * is kmalloc'd (NULL when the count is zero) and freed by the SerFree below. */
void* CsrWifiSmeMulticastAddressCfmDes(u8 *buffer, size_t length)
{
    CsrWifiSmeMulticastAddressCfm *primitive = kmalloc(sizeof(CsrWifiSmeMulticastAddressCfm), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->action, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->getAddressesCount, buffer, &offset);
    primitive->getAddresses = NULL;
    if (primitive->getAddressesCount)
    {
        primitive->getAddresses = kmalloc(sizeof(CsrWifiMacAddress) * primitive->getAddressesCount, GFP_KERNEL);
    }
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->getAddressesCount; i1++)
        {
            CsrMemCpyDes(primitive->getAddresses[i1].a, buffer, &offset, ((u16) (6)));
        }
    }

    return primitive;
}

/* Free a deserialised CsrWifiSmeMulticastAddressCfm and its address array. */
void CsrWifiSmeMulticastAddressCfmSerFree(void *voidPrimitivePointer)
{
    CsrWifiSmeMulticastAddressCfm *primitive = (CsrWifiSmeMulticastAddressCfm *) voidPrimitivePointer;
    kfree(primitive->getAddresses);
    kfree(primitive);
}

/* Size in bytes of a serialised CsrWifiSmePacketFilterSetCfm. */
size_t CsrWifiSmePacketFilterSetCfmSizeof(void *msg)
{
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data. Could be more efficient (Try 7) */
    bufferSize += 2; /* u16 primitive->interfaceTag */
    bufferSize += 2; /* CsrResult primitive->status */
    return bufferSize;
}

/* Serialise a CsrWifiSmePacketFilterSetCfm into ptr; *len receives bytes written. */
u8* CsrWifiSmePacketFilterSetCfmSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmePacketFilterSetCfm *primitive = (CsrWifiSmePacketFilterSetCfm *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    CsrUint16Ser(ptr, len, (u16) primitive->status);
    return(ptr);
}

/* Deserialise a CsrWifiSmePacketFilterSetCfm from buffer. */
void* CsrWifiSmePacketFilterSetCfmDes(u8 *buffer, size_t length)
{
    CsrWifiSmePacketFilterSetCfm *primitive = kmalloc(sizeof(CsrWifiSmePacketFilterSetCfm), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->status, buffer, &offset);

    return primitive;
}

/* Size in bytes of a serialised CsrWifiSmePermanentMacAddressGetCfm. */
size_t CsrWifiSmePermanentMacAddressGetCfmSizeof(void *msg)
{
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data.
Could be more efficient (Try 11) */
    bufferSize += 2; /* CsrResult primitive->status */
    bufferSize += 6; /* u8 primitive->permanentMacAddress.a[6] */
    return bufferSize;
}

/* Serialise a CsrWifiSmePermanentMacAddressGetCfm into ptr; *len receives bytes written. */
u8* CsrWifiSmePermanentMacAddressGetCfmSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmePermanentMacAddressGetCfm *primitive = (CsrWifiSmePermanentMacAddressGetCfm *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->status);
    CsrMemCpySer(ptr, len, (const void *) primitive->permanentMacAddress.a, ((u16) (6)));
    return(ptr);
}

/* Deserialise a CsrWifiSmePermanentMacAddressGetCfm from buffer.
 * NOTE(review): kmalloc result unchecked — OOM would dereference NULL. */
void* CsrWifiSmePermanentMacAddressGetCfmDes(u8 *buffer, size_t length)
{
    CsrWifiSmePermanentMacAddressGetCfm *primitive = kmalloc(sizeof(CsrWifiSmePermanentMacAddressGetCfm), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
    CsrMemCpyDes(primitive->permanentMacAddress.a, buffer, &offset, ((u16) (6)));

    return primitive;
}

/* Size in bytes of a serialised CsrWifiSmePmkidCandidateListInd (variable:
 * 7 bytes per candidate). */
size_t CsrWifiSmePmkidCandidateListIndSizeof(void *msg)
{
    CsrWifiSmePmkidCandidateListInd *primitive = (CsrWifiSmePmkidCandidateListInd *) msg;
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data.
Could be more efficient (Try 13) */
    bufferSize += 2; /* u16 primitive->interfaceTag */
    bufferSize += 1; /* u8 primitive->pmkidCandidatesCount */
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->pmkidCandidatesCount; i1++)
        {
            bufferSize += 6; /* u8 primitive->pmkidCandidates[i1].bssid.a[6] */
            bufferSize += 1; /* u8 primitive->pmkidCandidates[i1].preAuthAllowed */
        }
    }
    return bufferSize;
}

/* Serialise a CsrWifiSmePmkidCandidateListInd into ptr; *len receives bytes written. */
u8* CsrWifiSmePmkidCandidateListIndSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmePmkidCandidateListInd *primitive = (CsrWifiSmePmkidCandidateListInd *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    CsrUint8Ser(ptr, len, (u8) primitive->pmkidCandidatesCount);
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->pmkidCandidatesCount; i1++)
        {
            CsrMemCpySer(ptr, len, (const void *) primitive->pmkidCandidates[i1].bssid.a, ((u16) (6)));
            CsrUint8Ser(ptr, len, (u8) primitive->pmkidCandidates[i1].preAuthAllowed);
        }
    }
    return(ptr);
}

/* Deserialise a CsrWifiSmePmkidCandidateListInd from buffer; the candidate
 * array is kmalloc'd (NULL when empty) and freed by the SerFree below. */
void* CsrWifiSmePmkidCandidateListIndDes(u8 *buffer, size_t length)
{
    CsrWifiSmePmkidCandidateListInd *primitive = kmalloc(sizeof(CsrWifiSmePmkidCandidateListInd), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->pmkidCandidatesCount, buffer, &offset);
    primitive->pmkidCandidates = NULL;
    if (primitive->pmkidCandidatesCount)
    {
        primitive->pmkidCandidates = kmalloc(sizeof(CsrWifiSmePmkidCandidate) * primitive->pmkidCandidatesCount, GFP_KERNEL);
    }
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->pmkidCandidatesCount; i1++)
        {
            CsrMemCpyDes(primitive->pmkidCandidates[i1].bssid.a, buffer, &offset, ((u16) (6)));
            CsrUint8Des((u8 *) &primitive->pmkidCandidates[i1].preAuthAllowed, buffer, &offset);
        }
    }

    return primitive;
}

/* Free a deserialised CsrWifiSmePmkidCandidateListInd and its candidate array. */
void CsrWifiSmePmkidCandidateListIndSerFree(void *voidPrimitivePointer)
{
    CsrWifiSmePmkidCandidateListInd *primitive = (CsrWifiSmePmkidCandidateListInd *) voidPrimitivePointer;
    kfree(primitive->pmkidCandidates);
    kfree(primitive);
}

/* Size in bytes of a serialised CsrWifiSmePmkidCfm (variable: 22 bytes per
 * returned PMKID entry). */
size_t CsrWifiSmePmkidCfmSizeof(void *msg)
{
    CsrWifiSmePmkidCfm *primitive = (CsrWifiSmePmkidCfm *) msg;
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data. Could be more efficient (Try 31) */
    bufferSize += 2; /* u16 primitive->interfaceTag */
    bufferSize += 2; /* CsrResult primitive->status */
    bufferSize += 1; /* CsrWifiSmeListAction primitive->action */
    bufferSize += 1; /* u8 primitive->getPmkidsCount */
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->getPmkidsCount; i1++)
        {
            bufferSize += 6;  /* u8 primitive->getPmkids[i1].bssid.a[6] */
            bufferSize += 16; /* u8 primitive->getPmkids[i1].pmkid[16] */
        }
    }
    return bufferSize;
}

/* Serialise a CsrWifiSmePmkidCfm into ptr; *len receives bytes written. */
u8* CsrWifiSmePmkidCfmSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmePmkidCfm *primitive = (CsrWifiSmePmkidCfm *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag);
    CsrUint16Ser(ptr, len, (u16) primitive->status);
    CsrUint8Ser(ptr, len, (u8) primitive->action);
    CsrUint8Ser(ptr, len, (u8) primitive->getPmkidsCount);
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->getPmkidsCount; i1++)
        {
            CsrMemCpySer(ptr, len, (const void *) primitive->getPmkids[i1].bssid.a, ((u16) (6)));
            CsrMemCpySer(ptr, len, (const void *) primitive->getPmkids[i1].pmkid, ((u16) (16)));
        }
    }
    return(ptr);
}

/* Deserialise a CsrWifiSmePmkidCfm from buffer; the PMKID array is kmalloc'd
 * (NULL when empty) and freed by the SerFree below. */
void* CsrWifiSmePmkidCfmDes(u8 *buffer, size_t length)
{
    CsrWifiSmePmkidCfm *primitive = kmalloc(sizeof(CsrWifiSmePmkidCfm), GFP_KERNEL);
    size_t offset;
    offset = 0;

    CsrUint16Des(&primitive->common.type, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset);
    CsrUint16Des((u16 *) &primitive->status, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->action, buffer, &offset);
    CsrUint8Des((u8 *) &primitive->getPmkidsCount, buffer, &offset);
    primitive->getPmkids = NULL;
    if (primitive->getPmkidsCount)
    {
        primitive->getPmkids = kmalloc(sizeof(CsrWifiSmePmkid) * primitive->getPmkidsCount, GFP_KERNEL);
    }
    {
        u16 i1;
        for (i1 = 0; i1 < primitive->getPmkidsCount; i1++)
        {
            CsrMemCpyDes(primitive->getPmkids[i1].bssid.a, buffer, &offset, ((u16) (6)));
            CsrMemCpyDes(primitive->getPmkids[i1].pmkid, buffer, &offset, ((u16) (16)));
        }
    }

    return primitive;
}

/* Free a deserialised CsrWifiSmePmkidCfm and its PMKID array. */
void CsrWifiSmePmkidCfmSerFree(void *voidPrimitivePointer)
{
    CsrWifiSmePmkidCfm *primitive = (CsrWifiSmePmkidCfm *) voidPrimitivePointer;
    kfree(primitive->getPmkids);
    kfree(primitive);
}

/* Size in bytes of a serialised CsrWifiSmePowerConfigGetCfm. */
size_t CsrWifiSmePowerConfigGetCfmSizeof(void *msg)
{
    size_t bufferSize = 2;

    /* Calculate the Size of the Serialised Data. Could be more efficient (Try 13) */
    bufferSize += 2; /* CsrResult primitive->status */
    bufferSize += 1; /* CsrWifiSmePowerSaveLevel primitive->powerConfig.powerSaveLevel */
    bufferSize += 2; /* u16 primitive->powerConfig.listenIntervalTu */
    bufferSize += 1; /* u8 primitive->powerConfig.rxDtims */
    bufferSize += 1; /* CsrWifiSmeD3AutoScanMode primitive->powerConfig.d3AutoScanMode */
    bufferSize += 1; /* u8 primitive->powerConfig.clientTrafficWindow */
    bufferSize += 1; /* u8 primitive->powerConfig.opportunisticPowerSave */
    bufferSize += 1; /* u8 primitive->powerConfig.noticeOfAbsence */
    return bufferSize;
}

/* Serialise a CsrWifiSmePowerConfigGetCfm into ptr; *len receives bytes written. */
u8* CsrWifiSmePowerConfigGetCfmSer(u8 *ptr, size_t *len, void *msg)
{
    CsrWifiSmePowerConfigGetCfm *primitive = (CsrWifiSmePowerConfigGetCfm *)msg;
    *len = 0;
    CsrUint16Ser(ptr, len, primitive->common.type);
    CsrUint16Ser(ptr, len, (u16) primitive->status);
    CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.powerSaveLevel);
    CsrUint16Ser(ptr, len, (u16) primitive->powerConfig.listenIntervalTu);
    CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.rxDtims);
    CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.d3AutoScanMode);
    CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.clientTrafficWindow);
    CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.opportunisticPowerSave);
    CsrUint8Ser(ptr, len, (u8) primitive->powerConfig.noticeOfAbsence);
    return(ptr);
}

/* Deserialise a CsrWifiSmePowerConfigGetCfm from buffer. */
void* CsrWifiSmePowerConfigGetCfmDes(u8 *buffer, size_t length)
{
    CsrWifiSmePowerConfigGetCfm *primitive =
kmalloc(sizeof(CsrWifiSmePowerConfigGetCfm), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->status, buffer, &offset); CsrUint8Des((u8 *) &primitive->powerConfig.powerSaveLevel, buffer, &offset); CsrUint16Des((u16 *) &primitive->powerConfig.listenIntervalTu, buffer, &offset); CsrUint8Des((u8 *) &primitive->powerConfig.rxDtims, buffer, &offset); CsrUint8Des((u8 *) &primitive->powerConfig.d3AutoScanMode, buffer, &offset); CsrUint8Des((u8 *) &primitive->powerConfig.clientTrafficWindow, buffer, &offset); CsrUint8Des((u8 *) &primitive->powerConfig.opportunisticPowerSave, buffer, &offset); CsrUint8Des((u8 *) &primitive->powerConfig.noticeOfAbsence, buffer, &offset); return primitive; } size_t CsrWifiSmeRegulatoryDomainInfoGetCfmSizeof(void *msg) { size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. Could be more efficient (Try 10) */ bufferSize += 2; /* CsrResult primitive->status */ bufferSize += 1; /* u8 primitive->regDomInfo.dot11MultiDomainCapabilityImplemented */ bufferSize += 1; /* u8 primitive->regDomInfo.dot11MultiDomainCapabilityEnabled */ bufferSize += 1; /* CsrWifiSmeRegulatoryDomain primitive->regDomInfo.currentRegulatoryDomain */ bufferSize += 2; /* u8 primitive->regDomInfo.currentCountryCode[2] */ return bufferSize; } u8* CsrWifiSmeRegulatoryDomainInfoGetCfmSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeRegulatoryDomainInfoGetCfm *primitive = (CsrWifiSmeRegulatoryDomainInfoGetCfm *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->status); CsrUint8Ser(ptr, len, (u8) primitive->regDomInfo.dot11MultiDomainCapabilityImplemented); CsrUint8Ser(ptr, len, (u8) primitive->regDomInfo.dot11MultiDomainCapabilityEnabled); CsrUint8Ser(ptr, len, (u8) primitive->regDomInfo.currentRegulatoryDomain); CsrMemCpySer(ptr, len, (const void *) primitive->regDomInfo.currentCountryCode, ((u16) (2))); return(ptr); } void* 
CsrWifiSmeRegulatoryDomainInfoGetCfmDes(u8 *buffer, size_t length) { CsrWifiSmeRegulatoryDomainInfoGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeRegulatoryDomainInfoGetCfm), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->status, buffer, &offset); CsrUint8Des((u8 *) &primitive->regDomInfo.dot11MultiDomainCapabilityImplemented, buffer, &offset); CsrUint8Des((u8 *) &primitive->regDomInfo.dot11MultiDomainCapabilityEnabled, buffer, &offset); CsrUint8Des((u8 *) &primitive->regDomInfo.currentRegulatoryDomain, buffer, &offset); CsrMemCpyDes(primitive->regDomInfo.currentCountryCode, buffer, &offset, ((u16) (2))); return primitive; } size_t CsrWifiSmeRoamCompleteIndSizeof(void *msg) { size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. Could be more efficient (Try 7) */ bufferSize += 2; /* u16 primitive->interfaceTag */ bufferSize += 2; /* CsrResult primitive->status */ return bufferSize; } u8* CsrWifiSmeRoamCompleteIndSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeRoamCompleteInd *primitive = (CsrWifiSmeRoamCompleteInd *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag); CsrUint16Ser(ptr, len, (u16) primitive->status); return(ptr); } void* CsrWifiSmeRoamCompleteIndDes(u8 *buffer, size_t length) { CsrWifiSmeRoamCompleteInd *primitive = kmalloc(sizeof(CsrWifiSmeRoamCompleteInd), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset); CsrUint16Des((u16 *) &primitive->status, buffer, &offset); return primitive; } size_t CsrWifiSmeRoamStartIndSizeof(void *msg) { size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 8) */ bufferSize += 2; /* u16 primitive->interfaceTag */ bufferSize += 1; /* CsrWifiSmeRoamReason primitive->roamReason */ bufferSize += 2; /* CsrWifiSmeIEEE80211Reason primitive->reason80211 */ return bufferSize; } u8* CsrWifiSmeRoamStartIndSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeRoamStartInd *primitive = (CsrWifiSmeRoamStartInd *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag); CsrUint8Ser(ptr, len, (u8) primitive->roamReason); CsrUint16Ser(ptr, len, (u16) primitive->reason80211); return(ptr); } void* CsrWifiSmeRoamStartIndDes(u8 *buffer, size_t length) { CsrWifiSmeRoamStartInd *primitive = kmalloc(sizeof(CsrWifiSmeRoamStartInd), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset); CsrUint8Des((u8 *) &primitive->roamReason, buffer, &offset); CsrUint16Des((u16 *) &primitive->reason80211, buffer, &offset); return primitive; } size_t CsrWifiSmeRoamingConfigGetCfmSizeof(void *msg) { size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 72) */ bufferSize += 2; /* u16 primitive->interfaceTag */ bufferSize += 2; /* CsrResult primitive->status */ { u16 i2; for (i2 = 0; i2 < 3; i2++) { bufferSize += 2; /* s16 primitive->roamingConfig.roamingBands[i2].rssiHighThreshold */ bufferSize += 2; /* s16 primitive->roamingConfig.roamingBands[i2].rssiLowThreshold */ bufferSize += 2; /* s16 primitive->roamingConfig.roamingBands[i2].snrHighThreshold */ bufferSize += 2; /* s16 primitive->roamingConfig.roamingBands[i2].snrLowThreshold */ } } bufferSize += 1; /* u8 primitive->roamingConfig.disableSmoothRoaming */ bufferSize += 1; /* u8 primitive->roamingConfig.disableRoamScans */ bufferSize += 1; /* u8 primitive->roamingConfig.reconnectLimit */ bufferSize += 2; /* u16 primitive->roamingConfig.reconnectLimitIntervalMs */ { u16 i2; for (i2 = 0; i2 < 3; i2++) { bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].intervalSeconds */ bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].validitySeconds */ bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].minActiveChannelTimeTu */ bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].maxActiveChannelTimeTu */ bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].minPassiveChannelTimeTu */ bufferSize += 2; /* u16 primitive->roamingConfig.roamScanCfg[i2].maxPassiveChannelTimeTu */ } } return bufferSize; } u8* CsrWifiSmeRoamingConfigGetCfmSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeRoamingConfigGetCfm *primitive = (CsrWifiSmeRoamingConfigGetCfm *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag); CsrUint16Ser(ptr, len, (u16) primitive->status); { u16 i2; for (i2 = 0; i2 < 3; i2++) { CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamingBands[i2].rssiHighThreshold); CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamingBands[i2].rssiLowThreshold); CsrUint16Ser(ptr, len, (u16) 
primitive->roamingConfig.roamingBands[i2].snrHighThreshold); CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamingBands[i2].snrLowThreshold); } } CsrUint8Ser(ptr, len, (u8) primitive->roamingConfig.disableSmoothRoaming); CsrUint8Ser(ptr, len, (u8) primitive->roamingConfig.disableRoamScans); CsrUint8Ser(ptr, len, (u8) primitive->roamingConfig.reconnectLimit); CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.reconnectLimitIntervalMs); { u16 i2; for (i2 = 0; i2 < 3; i2++) { CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].intervalSeconds); CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].validitySeconds); CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].minActiveChannelTimeTu); CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].maxActiveChannelTimeTu); CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].minPassiveChannelTimeTu); CsrUint16Ser(ptr, len, (u16) primitive->roamingConfig.roamScanCfg[i2].maxPassiveChannelTimeTu); } } return(ptr); } void* CsrWifiSmeRoamingConfigGetCfmDes(u8 *buffer, size_t length) { CsrWifiSmeRoamingConfigGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeRoamingConfigGetCfm), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset); CsrUint16Des((u16 *) &primitive->status, buffer, &offset); { u16 i2; for (i2 = 0; i2 < 3; i2++) { CsrUint16Des((u16 *) &primitive->roamingConfig.roamingBands[i2].rssiHighThreshold, buffer, &offset); CsrUint16Des((u16 *) &primitive->roamingConfig.roamingBands[i2].rssiLowThreshold, buffer, &offset); CsrUint16Des((u16 *) &primitive->roamingConfig.roamingBands[i2].snrHighThreshold, buffer, &offset); CsrUint16Des((u16 *) &primitive->roamingConfig.roamingBands[i2].snrLowThreshold, buffer, &offset); } } CsrUint8Des((u8 *) &primitive->roamingConfig.disableSmoothRoaming, buffer, &offset); CsrUint8Des((u8 *) 
&primitive->roamingConfig.disableRoamScans, buffer, &offset); CsrUint8Des((u8 *) &primitive->roamingConfig.reconnectLimit, buffer, &offset); CsrUint16Des((u16 *) &primitive->roamingConfig.reconnectLimitIntervalMs, buffer, &offset); { u16 i2; for (i2 = 0; i2 < 3; i2++) { CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].intervalSeconds, buffer, &offset); CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].validitySeconds, buffer, &offset); CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].minActiveChannelTimeTu, buffer, &offset); CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].maxActiveChannelTimeTu, buffer, &offset); CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].minPassiveChannelTimeTu, buffer, &offset); CsrUint16Des((u16 *) &primitive->roamingConfig.roamScanCfg[i2].maxPassiveChannelTimeTu, buffer, &offset); } } return primitive; } size_t CsrWifiSmeRoamingConfigSetCfmSizeof(void *msg) { size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 7) */ bufferSize += 2; /* u16 primitive->interfaceTag */ bufferSize += 2; /* CsrResult primitive->status */ return bufferSize; } u8* CsrWifiSmeRoamingConfigSetCfmSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeRoamingConfigSetCfm *primitive = (CsrWifiSmeRoamingConfigSetCfm *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag); CsrUint16Ser(ptr, len, (u16) primitive->status); return(ptr); } void* CsrWifiSmeRoamingConfigSetCfmDes(u8 *buffer, size_t length) { CsrWifiSmeRoamingConfigSetCfm *primitive = kmalloc(sizeof(CsrWifiSmeRoamingConfigSetCfm), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset); CsrUint16Des((u16 *) &primitive->status, buffer, &offset); return primitive; } size_t CsrWifiSmeScanConfigGetCfmSizeof(void *msg) { CsrWifiSmeScanConfigGetCfm *primitive = (CsrWifiSmeScanConfigGetCfm *) msg; size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 65) */ bufferSize += 2; /* CsrResult primitive->status */ { u16 i2; for (i2 = 0; i2 < 4; i2++) { bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].intervalSeconds */ bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].validitySeconds */ bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].minActiveChannelTimeTu */ bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].maxActiveChannelTimeTu */ bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].minPassiveChannelTimeTu */ bufferSize += 2; /* u16 primitive->scanConfig.scanCfg[i2].maxPassiveChannelTimeTu */ } } bufferSize += 1; /* u8 primitive->scanConfig.disableAutonomousScans */ bufferSize += 2; /* u16 primitive->scanConfig.maxResults */ bufferSize += 1; /* s8 primitive->scanConfig.highRssiThreshold */ bufferSize += 1; /* s8 primitive->scanConfig.lowRssiThreshold */ bufferSize += 1; /* s8 primitive->scanConfig.deltaRssiThreshold */ bufferSize += 1; /* s8 primitive->scanConfig.highSnrThreshold */ bufferSize += 1; /* s8 primitive->scanConfig.lowSnrThreshold */ bufferSize += 1; /* s8 primitive->scanConfig.deltaSnrThreshold */ bufferSize += 2; /* u16 primitive->scanConfig.passiveChannelListCount */ bufferSize += primitive->scanConfig.passiveChannelListCount; /* u8 primitive->scanConfig.passiveChannelList */ return bufferSize; } u8* CsrWifiSmeScanConfigGetCfmSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeScanConfigGetCfm *primitive = (CsrWifiSmeScanConfigGetCfm *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->status); { u16 i2; for (i2 = 0; i2 < 4; i2++) { CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.scanCfg[i2].intervalSeconds); CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.scanCfg[i2].validitySeconds); CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.scanCfg[i2].minActiveChannelTimeTu); CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.scanCfg[i2].maxActiveChannelTimeTu); CsrUint16Ser(ptr, 
len, (u16) primitive->scanConfig.scanCfg[i2].minPassiveChannelTimeTu); CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.scanCfg[i2].maxPassiveChannelTimeTu); } } CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.disableAutonomousScans); CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.maxResults); CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.highRssiThreshold); CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.lowRssiThreshold); CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.deltaRssiThreshold); CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.highSnrThreshold); CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.lowSnrThreshold); CsrUint8Ser(ptr, len, (u8) primitive->scanConfig.deltaSnrThreshold); CsrUint16Ser(ptr, len, (u16) primitive->scanConfig.passiveChannelListCount); if (primitive->scanConfig.passiveChannelListCount) { CsrMemCpySer(ptr, len, (const void *) primitive->scanConfig.passiveChannelList, ((u16) (primitive->scanConfig.passiveChannelListCount))); } return(ptr); } void* CsrWifiSmeScanConfigGetCfmDes(u8 *buffer, size_t length) { CsrWifiSmeScanConfigGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeScanConfigGetCfm), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->status, buffer, &offset); { u16 i2; for (i2 = 0; i2 < 4; i2++) { CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].intervalSeconds, buffer, &offset); CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].validitySeconds, buffer, &offset); CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].minActiveChannelTimeTu, buffer, &offset); CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].maxActiveChannelTimeTu, buffer, &offset); CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].minPassiveChannelTimeTu, buffer, &offset); CsrUint16Des((u16 *) &primitive->scanConfig.scanCfg[i2].maxPassiveChannelTimeTu, buffer, &offset); } } CsrUint8Des((u8 *) &primitive->scanConfig.disableAutonomousScans, buffer, 
&offset); CsrUint16Des((u16 *) &primitive->scanConfig.maxResults, buffer, &offset); CsrUint8Des((u8 *) &primitive->scanConfig.highRssiThreshold, buffer, &offset); CsrUint8Des((u8 *) &primitive->scanConfig.lowRssiThreshold, buffer, &offset); CsrUint8Des((u8 *) &primitive->scanConfig.deltaRssiThreshold, buffer, &offset); CsrUint8Des((u8 *) &primitive->scanConfig.highSnrThreshold, buffer, &offset); CsrUint8Des((u8 *) &primitive->scanConfig.lowSnrThreshold, buffer, &offset); CsrUint8Des((u8 *) &primitive->scanConfig.deltaSnrThreshold, buffer, &offset); CsrUint16Des((u16 *) &primitive->scanConfig.passiveChannelListCount, buffer, &offset); if (primitive->scanConfig.passiveChannelListCount) { primitive->scanConfig.passiveChannelList = kmalloc(primitive->scanConfig.passiveChannelListCount, GFP_KERNEL); CsrMemCpyDes(primitive->scanConfig.passiveChannelList, buffer, &offset, ((u16) (primitive->scanConfig.passiveChannelListCount))); } else { primitive->scanConfig.passiveChannelList = NULL; } return primitive; } void CsrWifiSmeScanConfigGetCfmSerFree(void *voidPrimitivePointer) { CsrWifiSmeScanConfigGetCfm *primitive = (CsrWifiSmeScanConfigGetCfm *) voidPrimitivePointer; kfree(primitive->scanConfig.passiveChannelList); kfree(primitive); } size_t CsrWifiSmeScanResultIndSizeof(void *msg) { CsrWifiSmeScanResultInd *primitive = (CsrWifiSmeScanResultInd *) msg; size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 149) */ bufferSize += 32; /* u8 primitive->result.ssid.ssid[32] */ bufferSize += 1; /* u8 primitive->result.ssid.length */ bufferSize += 6; /* u8 primitive->result.bssid.a[6] */ bufferSize += 2; /* s16 primitive->result.rssi */ bufferSize += 2; /* s16 primitive->result.snr */ bufferSize += 1; /* CsrWifiSmeRadioIF primitive->result.ifIndex */ bufferSize += 2; /* u16 primitive->result.beaconPeriodTu */ bufferSize += 8; /* u8 primitive->result.timeStamp.data[8] */ bufferSize += 8; /* u8 primitive->result.localTime.data[8] */ bufferSize += 2; /* u16 primitive->result.channelFrequency */ bufferSize += 2; /* u16 primitive->result.capabilityInformation */ bufferSize += 1; /* u8 primitive->result.channelNumber */ bufferSize += 1; /* CsrWifiSmeBasicUsability primitive->result.usability */ bufferSize += 1; /* CsrWifiSmeBssType primitive->result.bssType */ bufferSize += 2; /* u16 primitive->result.informationElementsLength */ bufferSize += primitive->result.informationElementsLength; /* u8 primitive->result.informationElements */ bufferSize += 1; /* CsrWifiSmeP2pRole primitive->result.p2pDeviceRole */ switch (primitive->result.p2pDeviceRole) { case CSR_WIFI_SME_P2P_ROLE_CLI: bufferSize += 1; /* u8 primitive->result.deviceInfo.reservedCli.empty */ break; case CSR_WIFI_SME_P2P_ROLE_GO: bufferSize += 1; /* CsrWifiSmeP2pGroupCapabilityMask primitive->result.deviceInfo.groupInfo.groupCapability */ bufferSize += 6; /* u8 primitive->result.deviceInfo.groupInfo.p2pDeviceAddress.a[6] */ bufferSize += 1; /* u8 primitive->result.deviceInfo.groupInfo.p2pClientInfoCount */ { u16 i4; for (i4 = 0; i4 < primitive->result.deviceInfo.groupInfo.p2pClientInfoCount; i4++) { bufferSize += 6; /* u8 primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].p2PClientInterfaceAddress.a[6] */ bufferSize += 6; /* u8 primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceAddress.a[6] */ bufferSize += 2; /* CsrWifiSmeWpsConfigTypeMask 
primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.configMethods */ bufferSize += 1; /* CsrWifiSmeP2pCapabilityMask primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.p2PDeviceCap */ bufferSize += 8; /* u8 primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.primDeviceType.deviceDetails[8] */ bufferSize += 1; /* u8 primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount */ { u16 i6; for (i6 = 0; i6 < primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount; i6++) { bufferSize += 8; /* u8 primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType[i6].deviceDetails[8] */ } } bufferSize += 32; /* u8 primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceName[32] */ bufferSize += 1; /* u8 primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceNameLength */ } } break; case CSR_WIFI_SME_P2P_ROLE_NONE: bufferSize += 1; /* u8 primitive->result.deviceInfo.reservedNone.empty */ break; case CSR_WIFI_SME_P2P_ROLE_STANDALONE: bufferSize += 6; /* u8 primitive->result.deviceInfo.standalonedevInfo.deviceAddress.a[6] */ bufferSize += 2; /* CsrWifiSmeWpsConfigTypeMask primitive->result.deviceInfo.standalonedevInfo.configMethods */ bufferSize += 1; /* CsrWifiSmeP2pCapabilityMask primitive->result.deviceInfo.standalonedevInfo.p2PDeviceCap */ bufferSize += 8; /* u8 primitive->result.deviceInfo.standalonedevInfo.primDeviceType.deviceDetails[8] */ bufferSize += 1; /* u8 primitive->result.deviceInfo.standalonedevInfo.secondaryDeviceTypeCount */ { u16 i4; for (i4 = 0; i4 < primitive->result.deviceInfo.standalonedevInfo.secondaryDeviceTypeCount; i4++) { bufferSize += 8; /* u8 primitive->result.deviceInfo.standalonedevInfo.secDeviceType[i4].deviceDetails[8] */ } } bufferSize += 32; /* u8 primitive->result.deviceInfo.standalonedevInfo.deviceName[32] */ bufferSize += 
1; /* u8 primitive->result.deviceInfo.standalonedevInfo.deviceNameLength */ break; default: break; } return bufferSize; } u8* CsrWifiSmeScanResultIndSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeScanResultInd *primitive = (CsrWifiSmeScanResultInd *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrMemCpySer(ptr, len, (const void *) primitive->result.ssid.ssid, ((u16) (32))); CsrUint8Ser(ptr, len, (u8) primitive->result.ssid.length); CsrMemCpySer(ptr, len, (const void *) primitive->result.bssid.a, ((u16) (6))); CsrUint16Ser(ptr, len, (u16) primitive->result.rssi); CsrUint16Ser(ptr, len, (u16) primitive->result.snr); CsrUint8Ser(ptr, len, (u8) primitive->result.ifIndex); CsrUint16Ser(ptr, len, (u16) primitive->result.beaconPeriodTu); CsrMemCpySer(ptr, len, (const void *) primitive->result.timeStamp.data, ((u16) (8))); CsrMemCpySer(ptr, len, (const void *) primitive->result.localTime.data, ((u16) (8))); CsrUint16Ser(ptr, len, (u16) primitive->result.channelFrequency); CsrUint16Ser(ptr, len, (u16) primitive->result.capabilityInformation); CsrUint8Ser(ptr, len, (u8) primitive->result.channelNumber); CsrUint8Ser(ptr, len, (u8) primitive->result.usability); CsrUint8Ser(ptr, len, (u8) primitive->result.bssType); CsrUint16Ser(ptr, len, (u16) primitive->result.informationElementsLength); if (primitive->result.informationElementsLength) { CsrMemCpySer(ptr, len, (const void *) primitive->result.informationElements, ((u16) (primitive->result.informationElementsLength))); } CsrUint8Ser(ptr, len, (u8) primitive->result.p2pDeviceRole); switch (primitive->result.p2pDeviceRole) { case CSR_WIFI_SME_P2P_ROLE_CLI: CsrUint8Ser(ptr, len, (u8) primitive->result.deviceInfo.reservedCli.empty); break; case CSR_WIFI_SME_P2P_ROLE_GO: CsrUint8Ser(ptr, len, (u8) primitive->result.deviceInfo.groupInfo.groupCapability); CsrMemCpySer(ptr, len, (const void *) primitive->result.deviceInfo.groupInfo.p2pDeviceAddress.a, ((u16) (6))); CsrUint8Ser(ptr, len, (u8) 
primitive->result.deviceInfo.groupInfo.p2pClientInfoCount); { u16 i4; for (i4 = 0; i4 < primitive->result.deviceInfo.groupInfo.p2pClientInfoCount; i4++) { CsrMemCpySer(ptr, len, (const void *) primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].p2PClientInterfaceAddress.a, ((u16) (6))); CsrMemCpySer(ptr, len, (const void *) primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceAddress.a, ((u16) (6))); CsrUint16Ser(ptr, len, (u16) primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.configMethods); CsrUint8Ser(ptr, len, (u8) primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.p2PDeviceCap); CsrMemCpySer(ptr, len, (const void *) primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.primDeviceType.deviceDetails, ((u16) (8))); CsrUint8Ser(ptr, len, (u8) primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount); { u16 i6; for (i6 = 0; i6 < primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount; i6++) { CsrMemCpySer(ptr, len, (const void *) primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType[i6].deviceDetails, ((u16) (8))); } } CsrMemCpySer(ptr, len, (const void *) primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceName, ((u16) (32))); CsrUint8Ser(ptr, len, (u8) primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceNameLength); } } break; case CSR_WIFI_SME_P2P_ROLE_NONE: CsrUint8Ser(ptr, len, (u8) primitive->result.deviceInfo.reservedNone.empty); break; case CSR_WIFI_SME_P2P_ROLE_STANDALONE: CsrMemCpySer(ptr, len, (const void *) primitive->result.deviceInfo.standalonedevInfo.deviceAddress.a, ((u16) (6))); CsrUint16Ser(ptr, len, (u16) primitive->result.deviceInfo.standalonedevInfo.configMethods); CsrUint8Ser(ptr, len, (u8) primitive->result.deviceInfo.standalonedevInfo.p2PDeviceCap); CsrMemCpySer(ptr, len, 
(const void *) primitive->result.deviceInfo.standalonedevInfo.primDeviceType.deviceDetails, ((u16) (8))); CsrUint8Ser(ptr, len, (u8) primitive->result.deviceInfo.standalonedevInfo.secondaryDeviceTypeCount); { u16 i4; for (i4 = 0; i4 < primitive->result.deviceInfo.standalonedevInfo.secondaryDeviceTypeCount; i4++) { CsrMemCpySer(ptr, len, (const void *) primitive->result.deviceInfo.standalonedevInfo.secDeviceType[i4].deviceDetails, ((u16) (8))); } } CsrMemCpySer(ptr, len, (const void *) primitive->result.deviceInfo.standalonedevInfo.deviceName, ((u16) (32))); CsrUint8Ser(ptr, len, (u8) primitive->result.deviceInfo.standalonedevInfo.deviceNameLength); break; default: break; } return(ptr); } void* CsrWifiSmeScanResultIndDes(u8 *buffer, size_t length) { CsrWifiSmeScanResultInd *primitive = kmalloc(sizeof(CsrWifiSmeScanResultInd), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrMemCpyDes(primitive->result.ssid.ssid, buffer, &offset, ((u16) (32))); CsrUint8Des((u8 *) &primitive->result.ssid.length, buffer, &offset); CsrMemCpyDes(primitive->result.bssid.a, buffer, &offset, ((u16) (6))); CsrUint16Des((u16 *) &primitive->result.rssi, buffer, &offset); CsrUint16Des((u16 *) &primitive->result.snr, buffer, &offset); CsrUint8Des((u8 *) &primitive->result.ifIndex, buffer, &offset); CsrUint16Des((u16 *) &primitive->result.beaconPeriodTu, buffer, &offset); CsrMemCpyDes(primitive->result.timeStamp.data, buffer, &offset, ((u16) (8))); CsrMemCpyDes(primitive->result.localTime.data, buffer, &offset, ((u16) (8))); CsrUint16Des((u16 *) &primitive->result.channelFrequency, buffer, &offset); CsrUint16Des((u16 *) &primitive->result.capabilityInformation, buffer, &offset); CsrUint8Des((u8 *) &primitive->result.channelNumber, buffer, &offset); CsrUint8Des((u8 *) &primitive->result.usability, buffer, &offset); CsrUint8Des((u8 *) &primitive->result.bssType, buffer, &offset); CsrUint16Des((u16 *) &primitive->result.informationElementsLength, 
buffer, &offset); if (primitive->result.informationElementsLength) { primitive->result.informationElements = kmalloc(primitive->result.informationElementsLength, GFP_KERNEL); CsrMemCpyDes(primitive->result.informationElements, buffer, &offset, ((u16) (primitive->result.informationElementsLength))); } else { primitive->result.informationElements = NULL; } CsrUint8Des((u8 *) &primitive->result.p2pDeviceRole, buffer, &offset); switch (primitive->result.p2pDeviceRole) { case CSR_WIFI_SME_P2P_ROLE_CLI: CsrUint8Des((u8 *) &primitive->result.deviceInfo.reservedCli.empty, buffer, &offset); break; case CSR_WIFI_SME_P2P_ROLE_GO: CsrUint8Des((u8 *) &primitive->result.deviceInfo.groupInfo.groupCapability, buffer, &offset); CsrMemCpyDes(primitive->result.deviceInfo.groupInfo.p2pDeviceAddress.a, buffer, &offset, ((u16) (6))); CsrUint8Des((u8 *) &primitive->result.deviceInfo.groupInfo.p2pClientInfoCount, buffer, &offset); primitive->result.deviceInfo.groupInfo.p2PClientInfo = NULL; if (primitive->result.deviceInfo.groupInfo.p2pClientInfoCount) { primitive->result.deviceInfo.groupInfo.p2PClientInfo = kmalloc(sizeof(CsrWifiSmeP2pClientInfoType) * primitive->result.deviceInfo.groupInfo.p2pClientInfoCount, GFP_KERNEL); } { u16 i4; for (i4 = 0; i4 < primitive->result.deviceInfo.groupInfo.p2pClientInfoCount; i4++) { CsrMemCpyDes(primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].p2PClientInterfaceAddress.a, buffer, &offset, ((u16) (6))); CsrMemCpyDes(primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceAddress.a, buffer, &offset, ((u16) (6))); CsrUint16Des((u16 *) &primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.configMethods, buffer, &offset); CsrUint8Des((u8 *) &primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.p2PDeviceCap, buffer, &offset); CsrMemCpyDes(primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.primDeviceType.deviceDetails, buffer, &offset, ((u16) (8))); 
CsrUint8Des((u8 *) &primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount, buffer, &offset); primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType = NULL; if (primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount) { primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType = kmalloc(sizeof(CsrWifiSmeWpsDeviceType) * primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount, GFP_KERNEL); } { u16 i6; for (i6 = 0; i6 < primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount; i6++) { CsrMemCpyDes(primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType[i6].deviceDetails, buffer, &offset, ((u16) (8))); } } CsrMemCpyDes(primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceName, buffer, &offset, ((u16) (32))); CsrUint8Des((u8 *) &primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceNameLength, buffer, &offset); } } break; case CSR_WIFI_SME_P2P_ROLE_NONE: CsrUint8Des((u8 *) &primitive->result.deviceInfo.reservedNone.empty, buffer, &offset); break; case CSR_WIFI_SME_P2P_ROLE_STANDALONE: CsrMemCpyDes(primitive->result.deviceInfo.standalonedevInfo.deviceAddress.a, buffer, &offset, ((u16) (6))); CsrUint16Des((u16 *) &primitive->result.deviceInfo.standalonedevInfo.configMethods, buffer, &offset); CsrUint8Des((u8 *) &primitive->result.deviceInfo.standalonedevInfo.p2PDeviceCap, buffer, &offset); CsrMemCpyDes(primitive->result.deviceInfo.standalonedevInfo.primDeviceType.deviceDetails, buffer, &offset, ((u16) (8))); CsrUint8Des((u8 *) &primitive->result.deviceInfo.standalonedevInfo.secondaryDeviceTypeCount, buffer, &offset); primitive->result.deviceInfo.standalonedevInfo.secDeviceType = NULL; if 
(primitive->result.deviceInfo.standalonedevInfo.secondaryDeviceTypeCount) { primitive->result.deviceInfo.standalonedevInfo.secDeviceType = kmalloc(sizeof(CsrWifiSmeWpsDeviceType) * primitive->result.deviceInfo.standalonedevInfo.secondaryDeviceTypeCount, GFP_KERNEL); } { u16 i4; for (i4 = 0; i4 < primitive->result.deviceInfo.standalonedevInfo.secondaryDeviceTypeCount; i4++) { CsrMemCpyDes(primitive->result.deviceInfo.standalonedevInfo.secDeviceType[i4].deviceDetails, buffer, &offset, ((u16) (8))); } } CsrMemCpyDes(primitive->result.deviceInfo.standalonedevInfo.deviceName, buffer, &offset, ((u16) (32))); CsrUint8Des((u8 *) &primitive->result.deviceInfo.standalonedevInfo.deviceNameLength, buffer, &offset); break; default: break; } return primitive; } void CsrWifiSmeScanResultIndSerFree(void *voidPrimitivePointer) { CsrWifiSmeScanResultInd *primitive = (CsrWifiSmeScanResultInd *) voidPrimitivePointer; kfree(primitive->result.informationElements); switch (primitive->result.p2pDeviceRole) { case CSR_WIFI_SME_P2P_ROLE_GO: { u16 i4; for (i4 = 0; i4 < primitive->result.deviceInfo.groupInfo.p2pClientInfoCount; i4++) { kfree(primitive->result.deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType); } } kfree(primitive->result.deviceInfo.groupInfo.p2PClientInfo); break; case CSR_WIFI_SME_P2P_ROLE_STANDALONE: kfree(primitive->result.deviceInfo.standalonedevInfo.secDeviceType); break; default: break; } kfree(primitive); } size_t CsrWifiSmeScanResultsGetCfmSizeof(void *msg) { CsrWifiSmeScanResultsGetCfm *primitive = (CsrWifiSmeScanResultsGetCfm *) msg; size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 153) */ bufferSize += 2; /* CsrResult primitive->status */ bufferSize += 2; /* u16 primitive->scanResultsCount */ { u16 i1; for (i1 = 0; i1 < primitive->scanResultsCount; i1++) { bufferSize += 32; /* u8 primitive->scanResults[i1].ssid.ssid[32] */ bufferSize += 1; /* u8 primitive->scanResults[i1].ssid.length */ bufferSize += 6; /* u8 primitive->scanResults[i1].bssid.a[6] */ bufferSize += 2; /* s16 primitive->scanResults[i1].rssi */ bufferSize += 2; /* s16 primitive->scanResults[i1].snr */ bufferSize += 1; /* CsrWifiSmeRadioIF primitive->scanResults[i1].ifIndex */ bufferSize += 2; /* u16 primitive->scanResults[i1].beaconPeriodTu */ bufferSize += 8; /* u8 primitive->scanResults[i1].timeStamp.data[8] */ bufferSize += 8; /* u8 primitive->scanResults[i1].localTime.data[8] */ bufferSize += 2; /* u16 primitive->scanResults[i1].channelFrequency */ bufferSize += 2; /* u16 primitive->scanResults[i1].capabilityInformation */ bufferSize += 1; /* u8 primitive->scanResults[i1].channelNumber */ bufferSize += 1; /* CsrWifiSmeBasicUsability primitive->scanResults[i1].usability */ bufferSize += 1; /* CsrWifiSmeBssType primitive->scanResults[i1].bssType */ bufferSize += 2; /* u16 primitive->scanResults[i1].informationElementsLength */ bufferSize += primitive->scanResults[i1].informationElementsLength; /* u8 primitive->scanResults[i1].informationElements */ bufferSize += 1; /* CsrWifiSmeP2pRole primitive->scanResults[i1].p2pDeviceRole */ switch (primitive->scanResults[i1].p2pDeviceRole) { case CSR_WIFI_SME_P2P_ROLE_CLI: bufferSize += 1; /* u8 primitive->scanResults[i1].deviceInfo.reservedCli.empty */ break; case CSR_WIFI_SME_P2P_ROLE_GO: bufferSize += 1; /* CsrWifiSmeP2pGroupCapabilityMask primitive->scanResults[i1].deviceInfo.groupInfo.groupCapability */ bufferSize += 6; /* u8 primitive->scanResults[i1].deviceInfo.groupInfo.p2pDeviceAddress.a[6] */ bufferSize += 1; /* u8 primitive->scanResults[i1].deviceInfo.groupInfo.p2pClientInfoCount */ { u16 i4; for 
(i4 = 0; i4 < primitive->scanResults[i1].deviceInfo.groupInfo.p2pClientInfoCount; i4++) { bufferSize += 6; /* u8 primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].p2PClientInterfaceAddress.a[6] */ bufferSize += 6; /* u8 primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceAddress.a[6] */ bufferSize += 2; /* CsrWifiSmeWpsConfigTypeMask primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.configMethods */ bufferSize += 1; /* CsrWifiSmeP2pCapabilityMask primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.p2PDeviceCap */ bufferSize += 8; /* u8 primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.primDeviceType.deviceDetails[8] */ bufferSize += 1; /* u8 primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount */ { u16 i6; for (i6 = 0; i6 < primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount; i6++) { bufferSize += 8; /* u8 primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType[i6].deviceDetails[8] */ } } bufferSize += 32; /* u8 primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceName[32] */ bufferSize += 1; /* u8 primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceNameLength */ } } break; case CSR_WIFI_SME_P2P_ROLE_NONE: bufferSize += 1; /* u8 primitive->scanResults[i1].deviceInfo.reservedNone.empty */ break; case CSR_WIFI_SME_P2P_ROLE_STANDALONE: bufferSize += 6; /* u8 primitive->scanResults[i1].deviceInfo.standalonedevInfo.deviceAddress.a[6] */ bufferSize += 2; /* CsrWifiSmeWpsConfigTypeMask primitive->scanResults[i1].deviceInfo.standalonedevInfo.configMethods */ bufferSize += 1; /* CsrWifiSmeP2pCapabilityMask primitive->scanResults[i1].deviceInfo.standalonedevInfo.p2PDeviceCap */ bufferSize += 8; /* u8 
primitive->scanResults[i1].deviceInfo.standalonedevInfo.primDeviceType.deviceDetails[8] */ bufferSize += 1; /* u8 primitive->scanResults[i1].deviceInfo.standalonedevInfo.secondaryDeviceTypeCount */ { u16 i4; for (i4 = 0; i4 < primitive->scanResults[i1].deviceInfo.standalonedevInfo.secondaryDeviceTypeCount; i4++) { bufferSize += 8; /* u8 primitive->scanResults[i1].deviceInfo.standalonedevInfo.secDeviceType[i4].deviceDetails[8] */ } } bufferSize += 32; /* u8 primitive->scanResults[i1].deviceInfo.standalonedevInfo.deviceName[32] */ bufferSize += 1; /* u8 primitive->scanResults[i1].deviceInfo.standalonedevInfo.deviceNameLength */ break; default: break; } } } return bufferSize; } u8* CsrWifiSmeScanResultsGetCfmSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeScanResultsGetCfm *primitive = (CsrWifiSmeScanResultsGetCfm *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->status); CsrUint16Ser(ptr, len, (u16) primitive->scanResultsCount); { u16 i1; for (i1 = 0; i1 < primitive->scanResultsCount; i1++) { CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].ssid.ssid, ((u16) (32))); CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].ssid.length); CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].bssid.a, ((u16) (6))); CsrUint16Ser(ptr, len, (u16) primitive->scanResults[i1].rssi); CsrUint16Ser(ptr, len, (u16) primitive->scanResults[i1].snr); CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].ifIndex); CsrUint16Ser(ptr, len, (u16) primitive->scanResults[i1].beaconPeriodTu); CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].timeStamp.data, ((u16) (8))); CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].localTime.data, ((u16) (8))); CsrUint16Ser(ptr, len, (u16) primitive->scanResults[i1].channelFrequency); CsrUint16Ser(ptr, len, (u16) primitive->scanResults[i1].capabilityInformation); CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].channelNumber); 
CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].usability); CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].bssType); CsrUint16Ser(ptr, len, (u16) primitive->scanResults[i1].informationElementsLength); if (primitive->scanResults[i1].informationElementsLength) { CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].informationElements, ((u16) (primitive->scanResults[i1].informationElementsLength))); } CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].p2pDeviceRole); switch (primitive->scanResults[i1].p2pDeviceRole) { case CSR_WIFI_SME_P2P_ROLE_CLI: CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].deviceInfo.reservedCli.empty); break; case CSR_WIFI_SME_P2P_ROLE_GO: CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].deviceInfo.groupInfo.groupCapability); CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].deviceInfo.groupInfo.p2pDeviceAddress.a, ((u16) (6))); CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].deviceInfo.groupInfo.p2pClientInfoCount); { u16 i4; for (i4 = 0; i4 < primitive->scanResults[i1].deviceInfo.groupInfo.p2pClientInfoCount; i4++) { CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].p2PClientInterfaceAddress.a, ((u16) (6))); CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceAddress.a, ((u16) (6))); CsrUint16Ser(ptr, len, (u16) primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.configMethods); CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.p2PDeviceCap); CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.primDeviceType.deviceDetails, ((u16) (8))); CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount); { u16 i6; for (i6 = 0; i6 < 
primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount; i6++) { CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType[i6].deviceDetails, ((u16) (8))); } } CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceName, ((u16) (32))); CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceNameLength); } } break; case CSR_WIFI_SME_P2P_ROLE_NONE: CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].deviceInfo.reservedNone.empty); break; case CSR_WIFI_SME_P2P_ROLE_STANDALONE: CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].deviceInfo.standalonedevInfo.deviceAddress.a, ((u16) (6))); CsrUint16Ser(ptr, len, (u16) primitive->scanResults[i1].deviceInfo.standalonedevInfo.configMethods); CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].deviceInfo.standalonedevInfo.p2PDeviceCap); CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].deviceInfo.standalonedevInfo.primDeviceType.deviceDetails, ((u16) (8))); CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].deviceInfo.standalonedevInfo.secondaryDeviceTypeCount); { u16 i4; for (i4 = 0; i4 < primitive->scanResults[i1].deviceInfo.standalonedevInfo.secondaryDeviceTypeCount; i4++) { CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].deviceInfo.standalonedevInfo.secDeviceType[i4].deviceDetails, ((u16) (8))); } } CsrMemCpySer(ptr, len, (const void *) primitive->scanResults[i1].deviceInfo.standalonedevInfo.deviceName, ((u16) (32))); CsrUint8Ser(ptr, len, (u8) primitive->scanResults[i1].deviceInfo.standalonedevInfo.deviceNameLength); break; default: break; } } } return(ptr); } void* CsrWifiSmeScanResultsGetCfmDes(u8 *buffer, size_t length) { CsrWifiSmeScanResultsGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeScanResultsGetCfm), 
GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->status, buffer, &offset); CsrUint16Des((u16 *) &primitive->scanResultsCount, buffer, &offset); primitive->scanResults = NULL; if (primitive->scanResultsCount) { primitive->scanResults = kmalloc(sizeof(CsrWifiSmeScanResult) * primitive->scanResultsCount, GFP_KERNEL); } { u16 i1; for (i1 = 0; i1 < primitive->scanResultsCount; i1++) { CsrMemCpyDes(primitive->scanResults[i1].ssid.ssid, buffer, &offset, ((u16) (32))); CsrUint8Des((u8 *) &primitive->scanResults[i1].ssid.length, buffer, &offset); CsrMemCpyDes(primitive->scanResults[i1].bssid.a, buffer, &offset, ((u16) (6))); CsrUint16Des((u16 *) &primitive->scanResults[i1].rssi, buffer, &offset); CsrUint16Des((u16 *) &primitive->scanResults[i1].snr, buffer, &offset); CsrUint8Des((u8 *) &primitive->scanResults[i1].ifIndex, buffer, &offset); CsrUint16Des((u16 *) &primitive->scanResults[i1].beaconPeriodTu, buffer, &offset); CsrMemCpyDes(primitive->scanResults[i1].timeStamp.data, buffer, &offset, ((u16) (8))); CsrMemCpyDes(primitive->scanResults[i1].localTime.data, buffer, &offset, ((u16) (8))); CsrUint16Des((u16 *) &primitive->scanResults[i1].channelFrequency, buffer, &offset); CsrUint16Des((u16 *) &primitive->scanResults[i1].capabilityInformation, buffer, &offset); CsrUint8Des((u8 *) &primitive->scanResults[i1].channelNumber, buffer, &offset); CsrUint8Des((u8 *) &primitive->scanResults[i1].usability, buffer, &offset); CsrUint8Des((u8 *) &primitive->scanResults[i1].bssType, buffer, &offset); CsrUint16Des((u16 *) &primitive->scanResults[i1].informationElementsLength, buffer, &offset); if (primitive->scanResults[i1].informationElementsLength) { primitive->scanResults[i1].informationElements = kmalloc(primitive->scanResults[i1].informationElementsLength, GFP_KERNEL); CsrMemCpyDes(primitive->scanResults[i1].informationElements, buffer, &offset, ((u16) 
(primitive->scanResults[i1].informationElementsLength))); } else { primitive->scanResults[i1].informationElements = NULL; } CsrUint8Des((u8 *) &primitive->scanResults[i1].p2pDeviceRole, buffer, &offset); switch (primitive->scanResults[i1].p2pDeviceRole) { case CSR_WIFI_SME_P2P_ROLE_CLI: CsrUint8Des((u8 *) &primitive->scanResults[i1].deviceInfo.reservedCli.empty, buffer, &offset); break; case CSR_WIFI_SME_P2P_ROLE_GO: CsrUint8Des((u8 *) &primitive->scanResults[i1].deviceInfo.groupInfo.groupCapability, buffer, &offset); CsrMemCpyDes(primitive->scanResults[i1].deviceInfo.groupInfo.p2pDeviceAddress.a, buffer, &offset, ((u16) (6))); CsrUint8Des((u8 *) &primitive->scanResults[i1].deviceInfo.groupInfo.p2pClientInfoCount, buffer, &offset); primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo = NULL; if (primitive->scanResults[i1].deviceInfo.groupInfo.p2pClientInfoCount) { primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo = kmalloc(sizeof(CsrWifiSmeP2pClientInfoType) * primitive->scanResults[i1].deviceInfo.groupInfo.p2pClientInfoCount, GFP_KERNEL); } { u16 i4; for (i4 = 0; i4 < primitive->scanResults[i1].deviceInfo.groupInfo.p2pClientInfoCount; i4++) { CsrMemCpyDes(primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].p2PClientInterfaceAddress.a, buffer, &offset, ((u16) (6))); CsrMemCpyDes(primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceAddress.a, buffer, &offset, ((u16) (6))); CsrUint16Des((u16 *) &primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.configMethods, buffer, &offset); CsrUint8Des((u8 *) &primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.p2PDeviceCap, buffer, &offset); CsrMemCpyDes(primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.primDeviceType.deviceDetails, buffer, &offset, ((u16) (8))); CsrUint8Des((u8 *) 
&primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount, buffer, &offset); primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType = NULL; if (primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount) { primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType = kmalloc(sizeof(CsrWifiSmeWpsDeviceType) * primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount, GFP_KERNEL); } { u16 i6; for (i6 = 0; i6 < primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secondaryDeviceTypeCount; i6++) { CsrMemCpyDes(primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType[i6].deviceDetails, buffer, &offset, ((u16) (8))); } } CsrMemCpyDes(primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceName, buffer, &offset, ((u16) (32))); CsrUint8Des((u8 *) &primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.deviceNameLength, buffer, &offset); } } break; case CSR_WIFI_SME_P2P_ROLE_NONE: CsrUint8Des((u8 *) &primitive->scanResults[i1].deviceInfo.reservedNone.empty, buffer, &offset); break; case CSR_WIFI_SME_P2P_ROLE_STANDALONE: CsrMemCpyDes(primitive->scanResults[i1].deviceInfo.standalonedevInfo.deviceAddress.a, buffer, &offset, ((u16) (6))); CsrUint16Des((u16 *) &primitive->scanResults[i1].deviceInfo.standalonedevInfo.configMethods, buffer, &offset); CsrUint8Des((u8 *) &primitive->scanResults[i1].deviceInfo.standalonedevInfo.p2PDeviceCap, buffer, &offset); CsrMemCpyDes(primitive->scanResults[i1].deviceInfo.standalonedevInfo.primDeviceType.deviceDetails, buffer, &offset, ((u16) (8))); CsrUint8Des((u8 *) &primitive->scanResults[i1].deviceInfo.standalonedevInfo.secondaryDeviceTypeCount, buffer, &offset); 
primitive->scanResults[i1].deviceInfo.standalonedevInfo.secDeviceType = NULL; if (primitive->scanResults[i1].deviceInfo.standalonedevInfo.secondaryDeviceTypeCount) { primitive->scanResults[i1].deviceInfo.standalonedevInfo.secDeviceType = kmalloc(sizeof(CsrWifiSmeWpsDeviceType) * primitive->scanResults[i1].deviceInfo.standalonedevInfo.secondaryDeviceTypeCount, GFP_KERNEL); } { u16 i4; for (i4 = 0; i4 < primitive->scanResults[i1].deviceInfo.standalonedevInfo.secondaryDeviceTypeCount; i4++) { CsrMemCpyDes(primitive->scanResults[i1].deviceInfo.standalonedevInfo.secDeviceType[i4].deviceDetails, buffer, &offset, ((u16) (8))); } } CsrMemCpyDes(primitive->scanResults[i1].deviceInfo.standalonedevInfo.deviceName, buffer, &offset, ((u16) (32))); CsrUint8Des((u8 *) &primitive->scanResults[i1].deviceInfo.standalonedevInfo.deviceNameLength, buffer, &offset); break; default: break; } } } return primitive; } void CsrWifiSmeScanResultsGetCfmSerFree(void *voidPrimitivePointer) { CsrWifiSmeScanResultsGetCfm *primitive = (CsrWifiSmeScanResultsGetCfm *) voidPrimitivePointer; { u16 i1; for (i1 = 0; i1 < primitive->scanResultsCount; i1++) { kfree(primitive->scanResults[i1].informationElements); switch (primitive->scanResults[i1].p2pDeviceRole) { case CSR_WIFI_SME_P2P_ROLE_GO: { u16 i4; for (i4 = 0; i4 < primitive->scanResults[i1].deviceInfo.groupInfo.p2pClientInfoCount; i4++) { kfree(primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo[i4].clientDeviceInfo.secDeviceType); } } kfree(primitive->scanResults[i1].deviceInfo.groupInfo.p2PClientInfo); break; case CSR_WIFI_SME_P2P_ROLE_STANDALONE: kfree(primitive->scanResults[i1].deviceInfo.standalonedevInfo.secDeviceType); break; default: break; } } } kfree(primitive->scanResults); kfree(primitive); } size_t CsrWifiSmeSmeStaConfigGetCfmSizeof(void *msg) { size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 13) */ bufferSize += 2; /* u16 primitive->interfaceTag */ bufferSize += 2; /* CsrResult primitive->status */ bufferSize += 1; /* u8 primitive->smeConfig.connectionQualityRssiChangeTrigger */ bufferSize += 1; /* u8 primitive->smeConfig.connectionQualitySnrChangeTrigger */ bufferSize += 1; /* CsrWifiSmeWmmModeMask primitive->smeConfig.wmmModeMask */ bufferSize += 1; /* CsrWifiSmeRadioIF primitive->smeConfig.ifIndex */ bufferSize += 1; /* u8 primitive->smeConfig.allowUnicastUseGroupCipher */ bufferSize += 1; /* u8 primitive->smeConfig.enableOpportunisticKeyCaching */ return bufferSize; } u8* CsrWifiSmeSmeStaConfigGetCfmSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeSmeStaConfigGetCfm *primitive = (CsrWifiSmeSmeStaConfigGetCfm *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag); CsrUint16Ser(ptr, len, (u16) primitive->status); CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.connectionQualityRssiChangeTrigger); CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.connectionQualitySnrChangeTrigger); CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.wmmModeMask); CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.ifIndex); CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.allowUnicastUseGroupCipher); CsrUint8Ser(ptr, len, (u8) primitive->smeConfig.enableOpportunisticKeyCaching); return(ptr); } void* CsrWifiSmeSmeStaConfigGetCfmDes(u8 *buffer, size_t length) { CsrWifiSmeSmeStaConfigGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeSmeStaConfigGetCfm), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset); CsrUint16Des((u16 *) &primitive->status, buffer, &offset); CsrUint8Des((u8 *) &primitive->smeConfig.connectionQualityRssiChangeTrigger, buffer, &offset); CsrUint8Des((u8 *) &primitive->smeConfig.connectionQualitySnrChangeTrigger, buffer, &offset); CsrUint8Des((u8 *) 
&primitive->smeConfig.wmmModeMask, buffer, &offset); CsrUint8Des((u8 *) &primitive->smeConfig.ifIndex, buffer, &offset); CsrUint8Des((u8 *) &primitive->smeConfig.allowUnicastUseGroupCipher, buffer, &offset); CsrUint8Des((u8 *) &primitive->smeConfig.enableOpportunisticKeyCaching, buffer, &offset); return primitive; } size_t CsrWifiSmeSmeStaConfigSetCfmSizeof(void *msg) { size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. Could be more efficient (Try 7) */ bufferSize += 2; /* u16 primitive->interfaceTag */ bufferSize += 2; /* CsrResult primitive->status */ return bufferSize; } u8* CsrWifiSmeSmeStaConfigSetCfmSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeSmeStaConfigSetCfm *primitive = (CsrWifiSmeSmeStaConfigSetCfm *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag); CsrUint16Ser(ptr, len, (u16) primitive->status); return(ptr); } void* CsrWifiSmeSmeStaConfigSetCfmDes(u8 *buffer, size_t length) { CsrWifiSmeSmeStaConfigSetCfm *primitive = kmalloc(sizeof(CsrWifiSmeSmeStaConfigSetCfm), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset); CsrUint16Des((u16 *) &primitive->status, buffer, &offset); return primitive; } size_t CsrWifiSmeStationMacAddressGetCfmSizeof(void *msg) { size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 17) */ bufferSize += 2; /* CsrResult primitive->status */ { u16 i1; for (i1 = 0; i1 < 2; i1++) { bufferSize += 6; /* u8 primitive->stationMacAddress[i1].a[6] */ } } return bufferSize; } u8* CsrWifiSmeStationMacAddressGetCfmSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeStationMacAddressGetCfm *primitive = (CsrWifiSmeStationMacAddressGetCfm *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->status); { u16 i1; for (i1 = 0; i1 < 2; i1++) { CsrMemCpySer(ptr, len, (const void *) primitive->stationMacAddress[i1].a, ((u16) (6))); } } return(ptr); } void* CsrWifiSmeStationMacAddressGetCfmDes(u8 *buffer, size_t length) { CsrWifiSmeStationMacAddressGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeStationMacAddressGetCfm), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->status, buffer, &offset); { u16 i1; for (i1 = 0; i1 < 2; i1++) { CsrMemCpyDes(primitive->stationMacAddress[i1].a, buffer, &offset, ((u16) (6))); } } return primitive; } size_t CsrWifiSmeTspecIndSizeof(void *msg) { CsrWifiSmeTspecInd *primitive = (CsrWifiSmeTspecInd *) msg; size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 13) */ bufferSize += 2; /* u16 primitive->interfaceTag */ bufferSize += 4; /* u32 primitive->transactionId */ bufferSize += 1; /* CsrWifiSmeTspecResultCode primitive->tspecResultCode */ bufferSize += 2; /* u16 primitive->tspecLength */ bufferSize += primitive->tspecLength; /* u8 primitive->tspec */ return bufferSize; } u8* CsrWifiSmeTspecIndSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeTspecInd *primitive = (CsrWifiSmeTspecInd *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag); CsrUint32Ser(ptr, len, (u32) primitive->transactionId); CsrUint8Ser(ptr, len, (u8) primitive->tspecResultCode); CsrUint16Ser(ptr, len, (u16) primitive->tspecLength); if (primitive->tspecLength) { CsrMemCpySer(ptr, len, (const void *) primitive->tspec, ((u16) (primitive->tspecLength))); } return(ptr); } void* CsrWifiSmeTspecIndDes(u8 *buffer, size_t length) { CsrWifiSmeTspecInd *primitive = kmalloc(sizeof(CsrWifiSmeTspecInd), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset); CsrUint32Des((u32 *) &primitive->transactionId, buffer, &offset); CsrUint8Des((u8 *) &primitive->tspecResultCode, buffer, &offset); CsrUint16Des((u16 *) &primitive->tspecLength, buffer, &offset); if (primitive->tspecLength) { primitive->tspec = kmalloc(primitive->tspecLength, GFP_KERNEL); CsrMemCpyDes(primitive->tspec, buffer, &offset, ((u16) (primitive->tspecLength))); } else { primitive->tspec = NULL; } return primitive; } void CsrWifiSmeTspecIndSerFree(void *voidPrimitivePointer) { CsrWifiSmeTspecInd *primitive = (CsrWifiSmeTspecInd *) voidPrimitivePointer; kfree(primitive->tspec); kfree(primitive); } size_t CsrWifiSmeTspecCfmSizeof(void *msg) { CsrWifiSmeTspecCfm *primitive = (CsrWifiSmeTspecCfm *) msg; size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 15) */ bufferSize += 2; /* u16 primitive->interfaceTag */ bufferSize += 2; /* CsrResult primitive->status */ bufferSize += 4; /* u32 primitive->transactionId */ bufferSize += 1; /* CsrWifiSmeTspecResultCode primitive->tspecResultCode */ bufferSize += 2; /* u16 primitive->tspecLength */ bufferSize += primitive->tspecLength; /* u8 primitive->tspec */ return bufferSize; } u8* CsrWifiSmeTspecCfmSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeTspecCfm *primitive = (CsrWifiSmeTspecCfm *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->interfaceTag); CsrUint16Ser(ptr, len, (u16) primitive->status); CsrUint32Ser(ptr, len, (u32) primitive->transactionId); CsrUint8Ser(ptr, len, (u8) primitive->tspecResultCode); CsrUint16Ser(ptr, len, (u16) primitive->tspecLength); if (primitive->tspecLength) { CsrMemCpySer(ptr, len, (const void *) primitive->tspec, ((u16) (primitive->tspecLength))); } return(ptr); } void* CsrWifiSmeTspecCfmDes(u8 *buffer, size_t length) { CsrWifiSmeTspecCfm *primitive = kmalloc(sizeof(CsrWifiSmeTspecCfm), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->interfaceTag, buffer, &offset); CsrUint16Des((u16 *) &primitive->status, buffer, &offset); CsrUint32Des((u32 *) &primitive->transactionId, buffer, &offset); CsrUint8Des((u8 *) &primitive->tspecResultCode, buffer, &offset); CsrUint16Des((u16 *) &primitive->tspecLength, buffer, &offset); if (primitive->tspecLength) { primitive->tspec = kmalloc(primitive->tspecLength, GFP_KERNEL); CsrMemCpyDes(primitive->tspec, buffer, &offset, ((u16) (primitive->tspecLength))); } else { primitive->tspec = NULL; } return primitive; } void CsrWifiSmeTspecCfmSerFree(void *voidPrimitivePointer) { CsrWifiSmeTspecCfm *primitive = (CsrWifiSmeTspecCfm *) voidPrimitivePointer; kfree(primitive->tspec); kfree(primitive); } size_t CsrWifiSmeVersionsGetCfmSizeof(void *msg) { 
CsrWifiSmeVersionsGetCfm *primitive = (CsrWifiSmeVersionsGetCfm *) msg; size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. Could be more efficient (Try 33) */ bufferSize += 2; /* CsrResult primitive->status */ bufferSize += 4; /* u32 primitive->versions.chipId */ bufferSize += 4; /* u32 primitive->versions.chipVersion */ bufferSize += 4; /* u32 primitive->versions.firmwareBuild */ bufferSize += 4; /* u32 primitive->versions.firmwarePatch */ bufferSize += 4; /* u32 primitive->versions.firmwareHip */ bufferSize += (primitive->versions.routerBuild ? strlen(primitive->versions.routerBuild) : 0) + 1; /* char* primitive->versions.routerBuild (0 byte len + 1 for NULL Term) */ bufferSize += 4; /* u32 primitive->versions.routerHip */ bufferSize += (primitive->versions.smeBuild ? strlen(primitive->versions.smeBuild) : 0) + 1; /* char* primitive->versions.smeBuild (0 byte len + 1 for NULL Term) */ bufferSize += 4; /* u32 primitive->versions.smeHip */ return bufferSize; } u8* CsrWifiSmeVersionsGetCfmSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeVersionsGetCfm *primitive = (CsrWifiSmeVersionsGetCfm *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->status); CsrUint32Ser(ptr, len, (u32) primitive->versions.chipId); CsrUint32Ser(ptr, len, (u32) primitive->versions.chipVersion); CsrUint32Ser(ptr, len, (u32) primitive->versions.firmwareBuild); CsrUint32Ser(ptr, len, (u32) primitive->versions.firmwarePatch); CsrUint32Ser(ptr, len, (u32) primitive->versions.firmwareHip); CsrCharStringSer(ptr, len, primitive->versions.routerBuild); CsrUint32Ser(ptr, len, (u32) primitive->versions.routerHip); CsrCharStringSer(ptr, len, primitive->versions.smeBuild); CsrUint32Ser(ptr, len, (u32) primitive->versions.smeHip); return(ptr); } void* CsrWifiSmeVersionsGetCfmDes(u8 *buffer, size_t length) { CsrWifiSmeVersionsGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeVersionsGetCfm), GFP_KERNEL); size_t offset; offset = 0; 
CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->status, buffer, &offset); CsrUint32Des((u32 *) &primitive->versions.chipId, buffer, &offset); CsrUint32Des((u32 *) &primitive->versions.chipVersion, buffer, &offset); CsrUint32Des((u32 *) &primitive->versions.firmwareBuild, buffer, &offset); CsrUint32Des((u32 *) &primitive->versions.firmwarePatch, buffer, &offset); CsrUint32Des((u32 *) &primitive->versions.firmwareHip, buffer, &offset); CsrCharStringDes(&primitive->versions.routerBuild, buffer, &offset); CsrUint32Des((u32 *) &primitive->versions.routerHip, buffer, &offset); CsrCharStringDes(&primitive->versions.smeBuild, buffer, &offset); CsrUint32Des((u32 *) &primitive->versions.smeHip, buffer, &offset); return primitive; } void CsrWifiSmeVersionsGetCfmSerFree(void *voidPrimitivePointer) { CsrWifiSmeVersionsGetCfm *primitive = (CsrWifiSmeVersionsGetCfm *) voidPrimitivePointer; kfree(primitive->versions.routerBuild); kfree(primitive->versions.smeBuild); kfree(primitive); } size_t CsrWifiSmeCloakedSsidsGetCfmSizeof(void *msg) { CsrWifiSmeCloakedSsidsGetCfm *primitive = (CsrWifiSmeCloakedSsidsGetCfm *) msg; size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 39) */ bufferSize += 2; /* CsrResult primitive->status */ bufferSize += 1; /* u8 primitive->cloakedSsids.cloakedSsidsCount */ { u16 i2; for (i2 = 0; i2 < primitive->cloakedSsids.cloakedSsidsCount; i2++) { bufferSize += 32; /* u8 primitive->cloakedSsids.cloakedSsids[i2].ssid[32] */ bufferSize += 1; /* u8 primitive->cloakedSsids.cloakedSsids[i2].length */ } } return bufferSize; } u8* CsrWifiSmeCloakedSsidsGetCfmSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeCloakedSsidsGetCfm *primitive = (CsrWifiSmeCloakedSsidsGetCfm *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->status); CsrUint8Ser(ptr, len, (u8) primitive->cloakedSsids.cloakedSsidsCount); { u16 i2; for (i2 = 0; i2 < primitive->cloakedSsids.cloakedSsidsCount; i2++) { CsrMemCpySer(ptr, len, (const void *) primitive->cloakedSsids.cloakedSsids[i2].ssid, ((u16) (32))); CsrUint8Ser(ptr, len, (u8) primitive->cloakedSsids.cloakedSsids[i2].length); } } return(ptr); } void* CsrWifiSmeCloakedSsidsGetCfmDes(u8 *buffer, size_t length) { CsrWifiSmeCloakedSsidsGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeCloakedSsidsGetCfm), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->status, buffer, &offset); CsrUint8Des((u8 *) &primitive->cloakedSsids.cloakedSsidsCount, buffer, &offset); primitive->cloakedSsids.cloakedSsids = NULL; if (primitive->cloakedSsids.cloakedSsidsCount) { primitive->cloakedSsids.cloakedSsids = kmalloc(sizeof(CsrWifiSsid) * primitive->cloakedSsids.cloakedSsidsCount, GFP_KERNEL); } { u16 i2; for (i2 = 0; i2 < primitive->cloakedSsids.cloakedSsidsCount; i2++) { CsrMemCpyDes(primitive->cloakedSsids.cloakedSsids[i2].ssid, buffer, &offset, ((u16) (32))); CsrUint8Des((u8 *) &primitive->cloakedSsids.cloakedSsids[i2].length, buffer, &offset); } } return primitive; } void CsrWifiSmeCloakedSsidsGetCfmSerFree(void *voidPrimitivePointer) { 
CsrWifiSmeCloakedSsidsGetCfm *primitive = (CsrWifiSmeCloakedSsidsGetCfm *) voidPrimitivePointer; kfree(primitive->cloakedSsids.cloakedSsids); kfree(primitive); } size_t CsrWifiSmeWifiOnIndSizeof(void *msg) { size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. Could be more efficient (Try 9) */ bufferSize += 6; /* u8 primitive->address.a[6] */ return bufferSize; } u8* CsrWifiSmeWifiOnIndSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeWifiOnInd *primitive = (CsrWifiSmeWifiOnInd *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrMemCpySer(ptr, len, (const void *) primitive->address.a, ((u16) (6))); return(ptr); } void* CsrWifiSmeWifiOnIndDes(u8 *buffer, size_t length) { CsrWifiSmeWifiOnInd *primitive = kmalloc(sizeof(CsrWifiSmeWifiOnInd), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrMemCpyDes(primitive->address.a, buffer, &offset, ((u16) (6))); return primitive; } size_t CsrWifiSmeSmeCommonConfigGetCfmSizeof(void *msg) { size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 10) */ bufferSize += 2; /* CsrResult primitive->status */ bufferSize += 1; /* CsrWifiSme80211dTrustLevel primitive->deviceConfig.trustLevel */ bufferSize += 2; /* u8 primitive->deviceConfig.countryCode[2] */ bufferSize += 1; /* CsrWifiSmeFirmwareDriverInterface primitive->deviceConfig.firmwareDriverInterface */ bufferSize += 1; /* u8 primitive->deviceConfig.enableStrictDraftN */ return bufferSize; } u8* CsrWifiSmeSmeCommonConfigGetCfmSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeSmeCommonConfigGetCfm *primitive = (CsrWifiSmeSmeCommonConfigGetCfm *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->status); CsrUint8Ser(ptr, len, (u8) primitive->deviceConfig.trustLevel); CsrMemCpySer(ptr, len, (const void *) primitive->deviceConfig.countryCode, ((u16) (2))); CsrUint8Ser(ptr, len, (u8) primitive->deviceConfig.firmwareDriverInterface); CsrUint8Ser(ptr, len, (u8) primitive->deviceConfig.enableStrictDraftN); return(ptr); } void* CsrWifiSmeSmeCommonConfigGetCfmDes(u8 *buffer, size_t length) { CsrWifiSmeSmeCommonConfigGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeSmeCommonConfigGetCfm), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->status, buffer, &offset); CsrUint8Des((u8 *) &primitive->deviceConfig.trustLevel, buffer, &offset); CsrMemCpyDes(primitive->deviceConfig.countryCode, buffer, &offset, ((u16) (2))); CsrUint8Des((u8 *) &primitive->deviceConfig.firmwareDriverInterface, buffer, &offset); CsrUint8Des((u8 *) &primitive->deviceConfig.enableStrictDraftN, buffer, &offset); return primitive; } size_t CsrWifiSmeInterfaceCapabilityGetCfmSizeof(void *msg) { size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 9) */ bufferSize += 2; /* CsrResult primitive->status */ bufferSize += 2; /* u16 primitive->numInterfaces */ bufferSize += 2; /* u8 primitive->capBitmap[2] */ return bufferSize; } u8* CsrWifiSmeInterfaceCapabilityGetCfmSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeInterfaceCapabilityGetCfm *primitive = (CsrWifiSmeInterfaceCapabilityGetCfm *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint16Ser(ptr, len, (u16) primitive->status); CsrUint16Ser(ptr, len, (u16) primitive->numInterfaces); CsrMemCpySer(ptr, len, (const void *) primitive->capBitmap, ((u16) (2))); return(ptr); } void* CsrWifiSmeInterfaceCapabilityGetCfmDes(u8 *buffer, size_t length) { CsrWifiSmeInterfaceCapabilityGetCfm *primitive = kmalloc(sizeof(CsrWifiSmeInterfaceCapabilityGetCfm), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint16Des((u16 *) &primitive->status, buffer, &offset); CsrUint16Des((u16 *) &primitive->numInterfaces, buffer, &offset); CsrMemCpyDes(primitive->capBitmap, buffer, &offset, ((u16) (2))); return primitive; } size_t CsrWifiSmeErrorIndSizeof(void *msg) { CsrWifiSmeErrorInd *primitive = (CsrWifiSmeErrorInd *) msg; size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. Could be more efficient (Try 3) */ bufferSize += (primitive->errorMessage ? 
strlen(primitive->errorMessage) : 0) + 1; /* char* primitive->errorMessage (0 byte len + 1 for NULL Term) */ return bufferSize; } u8* CsrWifiSmeErrorIndSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeErrorInd *primitive = (CsrWifiSmeErrorInd *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrCharStringSer(ptr, len, primitive->errorMessage); return(ptr); } void* CsrWifiSmeErrorIndDes(u8 *buffer, size_t length) { CsrWifiSmeErrorInd *primitive = kmalloc(sizeof(CsrWifiSmeErrorInd), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrCharStringDes(&primitive->errorMessage, buffer, &offset); return primitive; } void CsrWifiSmeErrorIndSerFree(void *voidPrimitivePointer) { CsrWifiSmeErrorInd *primitive = (CsrWifiSmeErrorInd *) voidPrimitivePointer; kfree(primitive->errorMessage); kfree(primitive); } size_t CsrWifiSmeInfoIndSizeof(void *msg) { CsrWifiSmeInfoInd *primitive = (CsrWifiSmeInfoInd *) msg; size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. Could be more efficient (Try 3) */ bufferSize += (primitive->infoMessage ? 
strlen(primitive->infoMessage) : 0) + 1; /* char* primitive->infoMessage (0 byte len + 1 for NULL Term) */ return bufferSize; } u8* CsrWifiSmeInfoIndSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeInfoInd *primitive = (CsrWifiSmeInfoInd *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrCharStringSer(ptr, len, primitive->infoMessage); return(ptr); } void* CsrWifiSmeInfoIndDes(u8 *buffer, size_t length) { CsrWifiSmeInfoInd *primitive = kmalloc(sizeof(CsrWifiSmeInfoInd), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrCharStringDes(&primitive->infoMessage, buffer, &offset); return primitive; } void CsrWifiSmeInfoIndSerFree(void *voidPrimitivePointer) { CsrWifiSmeInfoInd *primitive = (CsrWifiSmeInfoInd *) voidPrimitivePointer; kfree(primitive->infoMessage); kfree(primitive); } size_t CsrWifiSmeCoreDumpIndSizeof(void *msg) { CsrWifiSmeCoreDumpInd *primitive = (CsrWifiSmeCoreDumpInd *) msg; size_t bufferSize = 2; /* Calculate the Size of the Serialised Data. 
Could be more efficient (Try 8) */ bufferSize += 4; /* u32 primitive->dataLength */ bufferSize += primitive->dataLength; /* u8 primitive->data */ return bufferSize; } u8* CsrWifiSmeCoreDumpIndSer(u8 *ptr, size_t *len, void *msg) { CsrWifiSmeCoreDumpInd *primitive = (CsrWifiSmeCoreDumpInd *)msg; *len = 0; CsrUint16Ser(ptr, len, primitive->common.type); CsrUint32Ser(ptr, len, (u32) primitive->dataLength); if (primitive->dataLength) { CsrMemCpySer(ptr, len, (const void *) primitive->data, ((u16) (primitive->dataLength))); } return(ptr); } void* CsrWifiSmeCoreDumpIndDes(u8 *buffer, size_t length) { CsrWifiSmeCoreDumpInd *primitive = kmalloc(sizeof(CsrWifiSmeCoreDumpInd), GFP_KERNEL); size_t offset; offset = 0; CsrUint16Des(&primitive->common.type, buffer, &offset); CsrUint32Des((u32 *) &primitive->dataLength, buffer, &offset); if (primitive->dataLength) { primitive->data = kmalloc(primitive->dataLength, GFP_KERNEL); CsrMemCpyDes(primitive->data, buffer, &offset, ((u16) (primitive->dataLength))); } else { primitive->data = NULL; } return primitive; } void CsrWifiSmeCoreDumpIndSerFree(void *voidPrimitivePointer) { CsrWifiSmeCoreDumpInd *primitive = (CsrWifiSmeCoreDumpInd *) voidPrimitivePointer; kfree(primitive->data); kfree(primitive); }
gpl-2.0
MoKee/android_kernel_motorola_apq8084
drivers/staging/csr/sme_mgt.c
2387
28697
/* * --------------------------------------------------------------------------- * FILE: sme_mgt.c * * PURPOSE: * This file contains the driver specific implementation of * the SME MGT SAP. * It is part of the porting exercise. * * Copyright (C) 2008-2009 by Cambridge Silicon Radio Ltd. * * Refer to LICENSE.txt included with this source code for details on * the license terms. * * --------------------------------------------------------------------------- */ #include "csr_wifi_hip_unifiversion.h" #include "unifi_priv.h" #include "csr_wifi_hip_conversions.h" /* * This file implements the SME MGT API. It contains the following functions: * CsrWifiSmeWifiFlightmodeCfmSend() * CsrWifiSmeWifiOnCfmSend() * CsrWifiSmeWifiOffCfmSend() * CsrWifiSmeWifiOffIndSend() * CsrWifiSmeScanFullCfmSend() * CsrWifiSmeScanResultsGetCfmSend() * CsrWifiSmeScanResultIndSend() * CsrWifiSmeScanResultsFlushCfmSend() * CsrWifiSmeConnectCfmSend() * CsrWifiSmeMediaStatusIndSend() * CsrWifiSmeDisconnectCfmSend() * CsrWifiSmeKeyCfmSend() * CsrWifiSmeMulticastAddressCfmSend() * CsrWifiSmeSetValueCfmSend() * CsrWifiSmeGetValueCfmSend() * CsrWifiSmeMicFailureIndSend() * CsrWifiSmePmkidCfmSend() * CsrWifiSmePmkidCandidateListIndSend() * CsrWifiSmeMibSetCfmSend() * CsrWifiSmeMibGetCfmSend() * CsrWifiSmeMibGetNextCfmSend() * CsrWifiSmeConnectionQualityIndSend() * CsrWifiSmePacketFilterSetCfmSend() * CsrWifiSmeTspecCfmSend() * CsrWifiSmeTspecIndSend() * CsrWifiSmeBlacklistCfmSend() * CsrWifiSmeEventMaskSetCfmSend() * CsrWifiSmeRoamStartIndSend() * CsrWifiSmeRoamCompleteIndSend() * CsrWifiSmeAssociationStartIndSend() * CsrWifiSmeAssociationCompleteIndSend() * CsrWifiSmeIbssStationIndSend() */ void CsrWifiSmeMicFailureIndHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeMicFailureInd* ind = (CsrWifiSmeMicFailureInd*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeMicFailureIndSend: invalid priv\n"); return; } 
unifi_trace(priv, UDBG1, "CsrWifiSmeMicFailureIndSend: count=%d, KeyType=%d\n", ind->count, ind->keyType); wext_send_michaelmicfailure_event(priv, ind->count, ind->address, ind->keyType, ind->interfaceTag); #endif } void CsrWifiSmePmkidCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmePmkidCfm* cfm = (CsrWifiSmePmkidCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmePmkidCfmSend: Invalid ospriv.\n"); return; } /* * WEXT never does a GET operation the PMKIDs, so we don't need * handle data returned in pmkids. */ sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmePmkidCandidateListIndHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmePmkidCandidateListInd* ind = (CsrWifiSmePmkidCandidateListInd*)msg; int i; if (priv->smepriv == NULL) { unifi_error(priv, "CsrWifiSmePmkidCandidateListIndSend: invalid smepriv\n"); return; } for (i = 0; i < ind->pmkidCandidatesCount; i++) { wext_send_pmkid_candidate_event(priv, ind->pmkidCandidates[i].bssid, ind->pmkidCandidates[i].preAuthAllowed, ind->interfaceTag); } #endif } void CsrWifiSmeScanResultsFlushCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeScanResultsGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeScanResultsGetCfm* cfm = (CsrWifiSmeScanResultsGetCfm*)msg; int bytesRequired = cfm->scanResultsCount * sizeof(CsrWifiSmeScanResult); int i; u8* current_buff; CsrWifiSmeScanResult* scanCopy; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeScanResultsGetCfmSend: Invalid ospriv.\n"); return; } /* Calc the size of the buffer reuired */ for (i = 0; i < cfm->scanResultsCount; ++i) { const CsrWifiSmeScanResult *scan_result = &cfm->scanResults[i]; bytesRequired += scan_result->informationElementsLength; } /* Take a Copy of the scan Results :-) */ scanCopy = 
kmalloc(bytesRequired, GFP_KERNEL); memcpy(scanCopy, cfm->scanResults, sizeof(CsrWifiSmeScanResult) * cfm->scanResultsCount); /* Take a Copy of the Info Elements AND update the scan result pointers */ current_buff = (u8*)&scanCopy[cfm->scanResultsCount]; for (i = 0; i < cfm->scanResultsCount; ++i) { CsrWifiSmeScanResult *scan_result = &scanCopy[i]; memcpy(current_buff, scan_result->informationElements, scan_result->informationElementsLength); scan_result->informationElements = current_buff; current_buff += scan_result->informationElementsLength; } priv->sme_reply.reply_scan_results_count = cfm->scanResultsCount; priv->sme_reply.reply_scan_results = scanCopy; sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmeScanFullCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeScanFullCfm* cfm = (CsrWifiSmeScanFullCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeScanFullCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmeScanResultIndHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeConnectCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeConnectCfm* cfm = (CsrWifiSmeConnectCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeConnectCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmeDisconnectCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeDisconnectCfm* cfm = (CsrWifiSmeDisconnectCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeDisconnectCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmeKeyCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeKeyCfm* cfm = 
(CsrWifiSmeKeyCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeKeyCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmeMulticastAddressCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeMulticastAddressCfm* cfm = (CsrWifiSmeMulticastAddressCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeMulticastAddressCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmeWifiFlightmodeCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeWifiFlightmodeCfm* cfm = (CsrWifiSmeWifiFlightmodeCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeWifiFlightmodeCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmeWifiOnCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeWifiOnCfm* cfm = (CsrWifiSmeWifiOnCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeWifiOnCfmSend: Invalid ospriv.\n"); return; } unifi_trace(priv, UDBG4, "CsrWifiSmeWifiOnCfmSend: wake up status %d\n", cfm->status); #ifdef CSR_SUPPORT_WEXT_AP sme_complete_request(priv, cfm->status); #endif #endif } void CsrWifiSmeWifiOffCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeWifiOffCfm* cfm = (CsrWifiSmeWifiOffCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeWifiOffCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmeWifiOffIndHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeWifiOffInd* ind = (CsrWifiSmeWifiOffInd*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiRouterCtrlStoppedReqSend: Invalid 
ospriv.\n"); return; } if (priv->smepriv == NULL) { unifi_error(priv, "CsrWifiRouterCtrlStoppedReqSend: invalid smepriv\n"); return; } /* * If the status indicates an error, the SME is in a stopped state. * We need to start it again in order to reinitialise UniFi. */ switch (ind->reason) { case CSR_WIFI_SME_CONTROL_INDICATION_ERROR: unifi_trace(priv, UDBG1, "CsrWifiRouterCtrlStoppedReqSend: Restarting SME (ind:%d)\n", ind->reason); /* On error, restart the SME */ sme_mgt_wifi_on(priv); break; case CSR_WIFI_SME_CONTROL_INDICATION_EXIT: #ifdef CSR_SUPPORT_WEXT_AP sme_complete_request(priv, 0); #endif break; default: break; } #endif } void CsrWifiSmeVersionsGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeVersionsGetCfm* cfm = (CsrWifiSmeVersionsGetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeVersionsGetCfmSend: Invalid ospriv.\n"); return; } priv->sme_reply.versions = cfm->versions; sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmePowerConfigGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmePowerConfigGetCfm* cfm = (CsrWifiSmePowerConfigGetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmePowerConfigGetCfmSend: Invalid ospriv.\n"); return; } priv->sme_reply.powerConfig = cfm->powerConfig; sme_complete_request(priv, cfm->status); } void CsrWifiSmeHostConfigGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeHostConfigGetCfm* cfm = (CsrWifiSmeHostConfigGetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeHostConfigGetCfmSend: Invalid ospriv.\n"); return; } priv->sme_reply.hostConfig = cfm->hostConfig; sme_complete_request(priv, cfm->status); } void CsrWifiSmeCoexInfoGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeCoexInfoGetCfm* cfm = (CsrWifiSmeCoexInfoGetCfm*)msg; if 
(priv == NULL) { unifi_error(NULL, "CsrWifiSmeCoexInfoGetCfmSend: Invalid ospriv.\n"); return; } priv->sme_reply.coexInfo = cfm->coexInfo; sme_complete_request(priv, cfm->status); } void CsrWifiSmeCoexConfigGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeCoexConfigGetCfm* cfm = (CsrWifiSmeCoexConfigGetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeCoexConfigGetCfmSend: Invalid ospriv.\n"); return; } priv->sme_reply.coexConfig = cfm->coexConfig; sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmeMibConfigGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeMibConfigGetCfm* cfm = (CsrWifiSmeMibConfigGetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeMibConfigGetCfmSend: Invalid ospriv.\n"); return; } priv->sme_reply.mibConfig = cfm->mibConfig; sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmeConnectionInfoGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeConnectionInfoGetCfm* cfm = (CsrWifiSmeConnectionInfoGetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeConnectionInfoGetCfmSend: Invalid ospriv.\n"); return; } priv->sme_reply.connectionInfo = cfm->connectionInfo; sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmeConnectionConfigGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeConnectionConfigGetCfm* cfm = (CsrWifiSmeConnectionConfigGetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeConnectionConfigGetCfmSend: Invalid ospriv.\n"); return; } priv->sme_reply.connectionConfig = cfm->connectionConfig; sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmeConnectionStatsGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT 
unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeConnectionStatsGetCfm* cfm = (CsrWifiSmeConnectionStatsGetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeConnectionStatsGetCfmSend: Invalid ospriv.\n"); return; } priv->sme_reply.connectionStats = cfm->connectionStats; sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmeMibSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeMibSetCfm* cfm = (CsrWifiSmeMibSetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeMibSetCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmeMibGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeMibGetCfm* cfm = (CsrWifiSmeMibGetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeMibGetCfmSend: Invalid ospriv.\n"); return; } if (cfm->mibAttribute == NULL) { unifi_error(priv, "CsrWifiSmeMibGetCfmSend: Empty reply.\n"); sme_complete_request(priv, cfm->status); return; } if ((priv->mib_cfm_buffer != NULL) && (priv->mib_cfm_buffer_length >= cfm->mibAttributeLength)) { memcpy(priv->mib_cfm_buffer, cfm->mibAttribute, cfm->mibAttributeLength); priv->mib_cfm_buffer_length = cfm->mibAttributeLength; } else { unifi_error(priv, "CsrWifiSmeMibGetCfmSend: No room to store MIB data (have=%d need=%d).\n", priv->mib_cfm_buffer_length, cfm->mibAttributeLength); } sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmeMibGetNextCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeMibGetNextCfm* cfm = (CsrWifiSmeMibGetNextCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeMibGetNextCfmSend: Invalid ospriv.\n"); return; } /* Need to copy MIB data */ sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmeConnectionQualityIndHandler(void* drvpriv, 
CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeConnectionQualityInd* ind = (CsrWifiSmeConnectionQualityInd*)msg; int signal, noise, snr; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeConnectionQualityIndSend: Invalid ospriv.\n"); return; } /* * level and noise below are mapped into an unsigned 8 bit number, * ranging from [-192; 63]. The way this is achieved is simply to * add 0x100 onto the number if it is negative, * once clipped to the correct range. */ signal = ind->linkQuality.unifiRssi; /* Clip range of snr */ snr = (ind->linkQuality.unifiSnr > 0) ? ind->linkQuality.unifiSnr : 0; /* In dB relative, from 0 - 255 */ snr = (snr < 255) ? snr : 255; noise = signal - snr; /* Clip range of signal */ signal = (signal < 63) ? signal : 63; signal = (signal > -192) ? signal : -192; /* Clip range of noise */ noise = (noise < 63) ? noise : 63; noise = (noise > -192) ? noise : -192; /* Make u8 */ signal = ( signal < 0 ) ? signal + 0x100 : signal; noise = ( noise < 0 ) ? 
noise + 0x100 : noise; priv->wext_wireless_stats.qual.level = (u8)signal; /* -192 : 63 */ priv->wext_wireless_stats.qual.noise = (u8)noise; /* -192 : 63 */ priv->wext_wireless_stats.qual.qual = snr; /* 0 : 255 */ priv->wext_wireless_stats.qual.updated = 0; #if WIRELESS_EXT > 16 priv->wext_wireless_stats.qual.updated |= IW_QUAL_LEVEL_UPDATED | IW_QUAL_NOISE_UPDATED | IW_QUAL_QUAL_UPDATED; #if WIRELESS_EXT > 18 priv->wext_wireless_stats.qual.updated |= IW_QUAL_DBM; #endif #endif #endif } void CsrWifiSmePacketFilterSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { unifi_priv_t *priv = (unifi_priv_t*)drvpriv; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmePacketFilterSetCfmSend: Invalid ospriv.\n"); return; } /* The packet filter set request does not block for a reply */ } void CsrWifiSmeTspecCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeTspecCfm* cfm = (CsrWifiSmeTspecCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeTspecCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); } void CsrWifiSmeTspecIndHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeBlacklistCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeEventMaskSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeRoamStartIndHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeRoamCompleteIndHandler(void* drvpriv, CsrWifiFsmEvent* msg) { /* This is called when the association completes, before any 802.1x authentication */ } void CsrWifiSmeAssociationStartIndHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeAssociationCompleteIndHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeIbssStationIndHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeWifiOnIndHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeRestrictedAccessEnableCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void 
CsrWifiSmeRestrictedAccessDisableCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeAdhocConfigGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeAdhocConfigSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeAdhocConfigSetCfm* cfm = (CsrWifiSmeAdhocConfigSetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeSmeConfigSetCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmeCalibrationDataGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeCalibrationDataSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeCalibrationDataSetCfm* cfm = (CsrWifiSmeCalibrationDataSetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeSmeConfigSetCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmeCcxConfigGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeCcxConfigSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeCcxConfigSetCfm* cfm = (CsrWifiSmeCcxConfigSetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeSmeConfigSetCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmeCloakedSsidsGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeCloakedSsidsSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeCloakedSsidsSetCfm* cfm = (CsrWifiSmeCloakedSsidsSetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeSmeConfigSetCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmeCoexConfigSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT 
unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeCoexConfigSetCfm* cfm = (CsrWifiSmeCoexConfigSetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeSmeConfigSetCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmeHostConfigSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeHostConfigSetCfm* cfm = (CsrWifiSmeHostConfigSetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeSmeConfigSetCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); } void CsrWifiSmeLinkQualityGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeMibConfigSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeMibConfigSetCfm* cfm = (CsrWifiSmeMibConfigSetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeSmeConfigSetCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmePermanentMacAddressGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmePowerConfigSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmePowerConfigSetCfm* cfm = (CsrWifiSmePowerConfigSetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeSmeConfigSetCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); } void CsrWifiSmeRegulatoryDomainInfoGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeRoamingConfigGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeMediaStatusIndHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeMediaStatusInd* ind = (CsrWifiSmeMediaStatusInd*)msg; if (priv->smepriv == NULL) { unifi_error(priv, "CsrWifiSmeMediaStatusIndSend: invalid smepriv\n"); return; } if (ind->mediaStatus == 
CSR_WIFI_SME_MEDIA_STATUS_CONNECTED) { /* * Send wireless-extension event up to userland to announce * connection. */ wext_send_assoc_event(priv, (unsigned char *)ind->connectionInfo.bssid.a, (unsigned char *)ind->connectionInfo.assocReqInfoElements, ind->connectionInfo.assocReqInfoElementsLength, (unsigned char *)ind->connectionInfo.assocRspInfoElements, ind->connectionInfo.assocRspInfoElementsLength, (unsigned char *)ind->connectionInfo.assocScanInfoElements, ind->connectionInfo.assocScanInfoElementsLength); unifi_trace(priv, UDBG2, "CsrWifiSmeMediaStatusIndSend: IBSS=%pM\n", ind->connectionInfo.bssid.a); sme_mgt_packet_filter_set(priv); } else { /* * Send wireless-extension event up to userland to announce * connection lost to a BSS. */ wext_send_disassoc_event(priv); } #endif } void CsrWifiSmeRoamingConfigSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeRoamingConfigSetCfm* cfm = (CsrWifiSmeRoamingConfigSetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeRoamingConfigSetCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); } void CsrWifiSmeScanConfigGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeScanConfigSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { #ifdef CSR_SUPPORT_WEXT unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeScanConfigSetCfm* cfm = (CsrWifiSmeScanConfigSetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeSmeConfigSetCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); #endif } void CsrWifiSmeStationMacAddressGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeSmeCommonConfigGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeSmeCommonConfigGetCfm* cfm = (CsrWifiSmeSmeCommonConfigGetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeSmeCommonConfigGetCfmSend: Invalid ospriv.\n"); return; } 
priv->sme_reply.deviceConfig = cfm->deviceConfig; sme_complete_request(priv, cfm->status); } void CsrWifiSmeSmeStaConfigGetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeSmeStaConfigGetCfm* cfm = (CsrWifiSmeSmeStaConfigGetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeSmeStaConfigGetCfmSend: Invalid ospriv.\n"); return; } priv->sme_reply.staConfig = cfm->smeConfig; sme_complete_request(priv, cfm->status); } void CsrWifiSmeSmeCommonConfigSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeSmeCommonConfigSetCfm* cfm = (CsrWifiSmeSmeCommonConfigSetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeSmeCommonConfigGetCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); } void CsrWifiSmeSmeStaConfigSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiSmeSmeStaConfigSetCfm* cfm = (CsrWifiSmeSmeStaConfigSetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiSmeSmeStaConfigGetCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); } void CsrWifiSmeGetInterfaceCapabilityCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeErrorIndHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeInfoIndHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeCoreDumpIndHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeAmpStatusChangeIndHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeActivateCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } void CsrWifiSmeDeactivateCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { } #ifdef CSR_SUPPORT_WEXT #ifdef CSR_SUPPORT_WEXT_AP void CsrWifiNmeApStartCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiNmeApStartCfm* cfm = (CsrWifiNmeApStartCfm*)msg; if (priv == NULL) { unifi_error(NULL, 
"CsrWifiNmeApStartCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); } void CsrWifiNmeApStopCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiNmeApStopCfm* cfm = (CsrWifiNmeApStopCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiNmeApStopCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); } void CsrWifiNmeApConfigSetCfmHandler(void* drvpriv, CsrWifiFsmEvent* msg) { unifi_priv_t *priv = (unifi_priv_t*)drvpriv; CsrWifiNmeApConfigSetCfm* cfm = (CsrWifiNmeApConfigSetCfm*)msg; if (priv == NULL) { unifi_error(NULL, "CsrWifiNmeApConfigSetCfmSend: Invalid ospriv.\n"); return; } sme_complete_request(priv, cfm->status); } #endif #endif
gpl-2.0